Micronaut FunctionInitializer override application properties - java

#Singleton
public class TestFunction extends FunctionInitializer {
Logger log = LoggerFactory.getLogger(TestFunction.class);
public TestFunction() {
}
public String execute() {
return "Hello";
}
}
I want to override datasource properties in the application.yml file programmatically, but without using a bean-created event listener. Is there a way to do that, such as creating a custom application context with properties?
I have used the below approach for Micronaut API gateway proxy.
// NOTE(review): fragment from the question — the "......." / "}...." lines are
// elisions by the original poster; fields `log`, `dbCredentialService`,
// `builder` and `handler` are declared in the elided portions.
public class StreamLambdaHandler implements RequestStreamHandler {
.......
// Constructor runs once per Lambda cold start: fetch fresh DB credentials and
// feed them into the context builder so they override application.yml values.
public StreamLambdaHandler() {
try {
log.info("Initializing Lambda Container");
this.dbCredentialService = new DBCredentialService();
// Get updated database credential map
Map<String, Object> props = this.dbCredentialService.getDbCredential();
// Create application context builder with updated properties
// i.e Override datasources properties in application.yml
builder = ApplicationContext.build().properties(props);
handler = new MicronautLambdaContainerHandler(builder);
}....
........
}
Can we do something similar with FunctionInitializer?

If you only plan to override the datasource credential properties, it can be done this way.
#Factory
public class HikariDataSourceFactory {
#Bean
#Primary
public DataSource dataSource(DBCredentialService credentialService) throws URISyntaxException {
Map<String, Object> credentials = this.dbCredentialService.getDbCredential();
String username = "user";
String password = credentials.get("username");
HikariConfig config = new HikariConfig();
config.setJdbcUrl("jdbc:postgresql://localhost:5432/postgres");
config.setUsername(username);
config.setPassword(password);
config.setDriverClassName("org.postgresql.Driver");
return new HikariUrlDataSource(config);
}
}

Related

(FIXED) Multi-tenant application: Can't set the desired dataSource (Separated Schema, Shared Database)

I have an application where I want to use different DataSources. All the requests coming from the front-end will use the primary DataSource (this works so far), but I also have to perform operations every certain number of minutes on another database with different schemas.
By looking in here, I found this approach:
Application.yml
datasource:
primary:
url: jdbc:mysql://SERVER_IP:3306/DATABASE?useSSL=false&useUnicode=true&useLegacyDatetimeCode=false&serverTimezone=UTC
username: MyUser
password: MyPassword
driver-class-name: com.mysql.cj.jdbc.Driver
secondary:
url: jdbc:mysql://localhost:3306/
urlEnd: ?useSSL=false&useUnicode=true&useLegacyDatetimeCode=false&serverTimezone=UTC
username: root
password: root
driver-class-name: com.mysql.cj.jdbc.Driver
Here i separate "url" and "urlEnd" because in the middle I will paste the name of the schema to use in each case as shown later.
ContextHolder
public abstract class ContextHolder {

    private static final Logger logger = LoggerFactory.getLogger(ContextHolder.class);

    // Per-thread tenant/schema key: each request thread sees only the value it set.
    private static final ThreadLocal<String> contextHolder = new ThreadLocal<>();

    /** Returns the client key bound to the calling thread, or {@code null} if none was set. */
    public static String getClient() {
        return contextHolder.get();
    }

    /** Binds {@code context} as the client key for the calling thread. */
    public static void setClient(String context) {
        contextHolder.set(context);
    }
}
CustomRoutingDataSource
#Component
public class CustomRoutingDataSource extends AbstractRoutingDataSource {
private org.slf4j.Logger logger = LoggerFactory.getLogger(CustomRoutingDataSource.class);
#Autowired
DataSourceMap dataSources;
#Autowired
private Environment env;
public void setCurrentLookupKey() {
determineCurrentLookupKey();
}
#Override
protected Object determineCurrentLookupKey() {
String key = ContextHolder.getClient();
if(key == null || key.equals("primary")) {
DriverManagerDataSource ds = new DriverManagerDataSource();
ds.setDriverClassName(env.getProperty("spring.datasource.primary.driver-class-name"));
ds.setPassword(env.getProperty("spring.datasource.primary.password"));
ds.setUsername(env.getProperty("spring.datasource.primary.username"));
ds.setUrl(env.getProperty("spring.datasource.primary.url"));
dataSources.addDataSource("primary", ds);
setDataSources(dataSources);
afterPropertiesSet();
return "primary";
}
else {
DriverManagerDataSource ds = new DriverManagerDataSource();
ds.setDriverClassName(env.getProperty("spring.datasource.secondary.driver-class-name"));
ds.setPassword(env.getProperty("spring.datasource.secondary.password"));
ds.setUsername(env.getProperty("spring.datasource.secondary.username"));
ds.setUrl(env.getProperty("spring.datasource.secondary.url") + key + env.getProperty("spring.datasource.secondary.urlEnd"));
dataSources.addDataSource(key, ds);
setDataSources(dataSources);
afterPropertiesSet();
}
return key;
}
#Autowired
public void setDataSources(DataSourceMap dataSources) {
setTargetDataSources(dataSources.getDataSourceMap());
}
}
DatabaseSwitchInterceptor (Not used so far AFAIK)
#Component
public class DatabaseSwitchInterceptor implements HandlerInterceptor {
#Autowired
private CustomRoutingDataSource customRoutingDataSource;
private static final Logger logger = LoggerFactory
.getLogger(DatabaseSwitchInterceptor.class);
#Override
public boolean preHandle(HttpServletRequest request,
HttpServletResponse response, Object handler) throws Exception {
String hostname = request.getServerName();
ContextHolder.setClient(hostname);
return true;
}
}
DataSourceMap
#Component
public class DataSourceMap {
private static final Logger logger = LoggerFactory
.getLogger(DataSourceMap.class);
private Map<Object, Object> dataSourceMap = new ConcurrentHashMap<>();
public void addDataSource(String session, DataSource dataSource) {
this.dataSourceMap.put(session, dataSource);
}
public Map<Object, Object> getDataSourceMap() {
return dataSourceMap;
}
}
And last but not least, the controller where I am doing my test
#RestController
#RequestMapping("/open/company")
public class CompanyOpenController extends GenericCoreController<Company, Integer> {
#Autowired
private CompanyService companyService;
#Autowired
private CompltpvRepository compltpvRepository;
#Autowired
private CustomRoutingDataSource customRoutingDataSource;
#GetMapping("/prueba/{companyId}")
public List<CompltpvDTO> getAll(#PathVariable Integer companyId) throws ServiceException{
List<CompltpvDTO> response = new ArrayList<>();
ContextHolder.setClient(companyService.getById(companyId).getSchema());
for(Compltpv e : compltpvRepository.findAll()) {
response.add(new CompltpvDTO(e));
}
return response;
}
}
What I want all this to do is that, when I call "/open/company/prueba/3", it searches (in the main database) for the company with ID = 3. It then retrieves that company's "schema" attribute value (let's say it's "12345678") and switches to the secondary datasource with the following url:
url = env.getProperty("spring.datasource.secondary.url") + key + env.getProperty("spring.datasource.secondary.urlEnd")
which is something like:
jdbc:mysql://localhost:3306/12345678?useSSL=false&useUnicode=true&useLegacyDatetimeCode=false&serverTimezone=UTC
When I try this and look into the DataSource pool, both exist with keys "primary" and "12345678", but it's always using the "primary" one.
How can I tell Spring to use the DataSource I need it to use?
EDIT: Found the solution
I finally got a deeper understanding of what was happening and also found the problem.
In case someone is interested on this approach, the problem I was having was this line in my application.yml:
spring:
jpa:
open-in-view: true
which does the following:
Default: true
Register OpenEntityManagerInViewInterceptor. Binds a JPA EntityManager to the thread for the entire processing of the request.
And that was the reason that, despite creating the datasource for every company (tenant), it wasn't using it. So if you are reading this and are in my situation, find that line and set it to false. If you don't find that property, notice that by default it'll be set to true.

Spring Boot-Log4J2: Is there any way to use the values in application.yml for JDBC appender?

I'm trying to create a JDBC appender to log into a DB table. I've succeeded so far by creating a ConnectionFactory and specifying it in log4j2-spring.xml. However, the DB credentials are all hardcoded and I would like to use the credentials I have in my application.yml, but since logging seems to be loaded before the Spring ApplicationContext, using the @Value annotation doesn't work.
I've also tried building the appender programmatically with this guide as a reference but it's requiring me to define the extended LifeCycle methods in ConnectionSource and I'm not sure what to do with that.
I've tried defining the ConnectionSource class as this but it returns a NullPointerException on this line
// NOTE(review): statement fragment from the question — `columnConfigs` and
// `connectionSource` are defined elsewhere. The reported NullPointerException
// occurs on this statement; presumably one of the builder inputs is null —
// verify columnConfigs/connectionSource before calling build().
Appender jdbcAppender = JdbcAppender.newBuilder()
.setBufferSize(0)
.setColumnConfigs(columnConfigs)
.setColumnMappings()
.setConnectionSource(connectionSource)
.setTableName("LOG")
.setName("databaseAppender")
.setIgnoreExceptions(true)
.setFilter(null)
.build();
This is the ConnectionSource I made:
public class LoggerDatabaseSource implements ConnectionSource
{
private DataSource dataSource;
public LoggerDatabaseSource(String url, String userName, String password, String validationQuery)
{
Properties properties = new Properties();
properties.setProperty("user", userName);
properties.setProperty("password", password);
GenericObjectPool<PoolableConnection> pool = new GenericObjectPool<>();
DriverManagerConnectionFactory cf = new DriverManagerConnectionFactory(url, properties);
new PoolableConnectionFactory(cf, pool, null, validationQuery, 3, false, false, Connection.TRANSACTION_READ_COMMITTED);
this.dataSource = new PoolingDataSource(pool);
}
#Override
public Connection getConnection() throws SQLException
{
return dataSource.getConnection();
}
#Override
public State getState() {
return null;
}
#Override
public void initialize() {
}
#Override
public void start() {
}
#Override
public void stop() {
}
#Override
public boolean isStarted() {
return false;
}
#Override
public boolean isStopped() {
return false;
}
}
Any help would be appreciated. Thanks in advance!
Is there a reason you aren't using one of Log4j's database appenders? If you want access to properties defined in Spring's configuration, if you are using Spring Boot you can use the Log4j Spring Cloud Config Client which provides the Spring Lookup. This will let you reference properties from your application.yml file and use them in your log4j configuration.

Setting Hazelcast Cache for Multi-tenancy

I am currently using the JHipster generator for really boilerplate code which involves Hazelcast as a second-level cache. I was able to get multi-tenancy (schema per tenant) working with a header-based tenant context. The problem I have now is that the @Cacheable annotations all share a context. If the cache is hot, I end up with cross-schema data. For example, tenant 1 pulls all records from their table, which is cached. Tenant 2 goes to pull the same items from their table, the cache is read, and it never goes to the actual tenant DB. An easy fix would be to disable caching altogether, but I would like not to do that. I cannot for the life of me figure out how to make Hazelcast aware of the tenant context — documentation is lacking. Some others have solved this with custom name resolvers, but that doesn't appear to be as dynamic as I was hoping (i.e., you have to know all of the tenants ahead of time). Thoughts?
Current cache config:
#Configuration
#EnableCaching
public class CacheConfiguration implements DisposableBean {
private final Logger log = LoggerFactory.getLogger(CacheConfiguration.class);
private final Environment env;
private final ServerProperties serverProperties;
private final DiscoveryClient discoveryClient;
private Registration registration;
public CacheConfiguration(Environment env, ServerProperties serverProperties, DiscoveryClient discoveryClient) {
this.env = env;
this.serverProperties = serverProperties;
this.discoveryClient = discoveryClient;
}
#Autowired(required = false)
public void setRegistration(Registration registration) {
this.registration = registration;
}
#Override
public void destroy() throws Exception {
log.info("Closing Cache Manager");
Hazelcast.shutdownAll();
}
#Bean
public CacheManager cacheManager(HazelcastInstance hazelcastInstance) {
log.debug("Starting HazelcastCacheManager");
return new com.hazelcast.spring.cache.HazelcastCacheManager(hazelcastInstance);
}
#Bean
public HazelcastInstance hazelcastInstance(JHipsterProperties jHipsterProperties) {
log.debug("Configuring Hazelcast");
HazelcastInstance hazelCastInstance = Hazelcast.getHazelcastInstanceByName("SampleApp");
if (hazelCastInstance != null) {
log.debug("Hazelcast already initialized");
return hazelCastInstance;
}
Config config = new Config();
config.setInstanceName("SampleApp");
config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
if (this.registration == null) {
log.warn("No discovery service is set up, Hazelcast cannot create a cluster.");
} else {
// The serviceId is by default the application's name,
// see the "spring.application.name" standard Spring property
String serviceId = registration.getServiceId();
log.debug("Configuring Hazelcast clustering for instanceId: {}", serviceId);
// In development, everything goes through 127.0.0.1, with a different port
if (env.acceptsProfiles(Profiles.of(JHipsterConstants.SPRING_PROFILE_DEVELOPMENT))) {
log.debug("Application is running with the \"dev\" profile, Hazelcast " +
"cluster will only work with localhost instances");
System.setProperty("hazelcast.local.localAddress", "127.0.0.1");
config.getNetworkConfig().setPort(serverProperties.getPort() + 5701);
config.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
for (ServiceInstance instance : discoveryClient.getInstances(serviceId)) {
String clusterMember = "127.0.0.1:" + (instance.getPort() + 5701);
log.debug("Adding Hazelcast (dev) cluster member {}", clusterMember);
config.getNetworkConfig().getJoin().getTcpIpConfig().addMember(clusterMember);
}
} else { // Production configuration, one host per instance all using port 5701
config.getNetworkConfig().setPort(5701);
config.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
for (ServiceInstance instance : discoveryClient.getInstances(serviceId)) {
String clusterMember = instance.getHost() + ":5701";
log.debug("Adding Hazelcast (prod) cluster member {}", clusterMember);
config.getNetworkConfig().getJoin().getTcpIpConfig().addMember(clusterMember);
}
}
}
config.getMapConfigs().put("default", initializeDefaultMapConfig(jHipsterProperties));
// Full reference is available at: http://docs.hazelcast.org/docs/management-center/3.9/manual/html/Deploying_and_Starting.html
config.setManagementCenterConfig(initializeDefaultManagementCenterConfig(jHipsterProperties));
config.getMapConfigs().put("com.test.sampleapp.domain.*", initializeDomainMapConfig(jHipsterProperties));
return Hazelcast.newHazelcastInstance(config);
}
private ManagementCenterConfig initializeDefaultManagementCenterConfig(JHipsterProperties jHipsterProperties) {
ManagementCenterConfig managementCenterConfig = new ManagementCenterConfig();
managementCenterConfig.setEnabled(jHipsterProperties.getCache().getHazelcast().getManagementCenter().isEnabled());
managementCenterConfig.setUrl(jHipsterProperties.getCache().getHazelcast().getManagementCenter().getUrl());
managementCenterConfig.setUpdateInterval(jHipsterProperties.getCache().getHazelcast().getManagementCenter().getUpdateInterval());
return managementCenterConfig;
}
private MapConfig initializeDefaultMapConfig(JHipsterProperties jHipsterProperties) {
MapConfig mapConfig = new MapConfig();
/*
Number of backups. If 1 is set as the backup-count for example,
then all entries of the map will be copied to another JVM for
fail-safety. Valid numbers are 0 (no backup), 1, 2, 3.
*/
mapConfig.setBackupCount(jHipsterProperties.getCache().getHazelcast().getBackupCount());
/*
Valid values are:
NONE (no eviction),
LRU (Least Recently Used),
LFU (Least Frequently Used).
NONE is the default.
*/
mapConfig.setEvictionPolicy(EvictionPolicy.LRU);
/*
Maximum size of the map. When max size is reached,
map is evicted based on the policy defined.
Any integer between 0 and Integer.MAX_VALUE. 0 means
Integer.MAX_VALUE. Default is 0.
*/
mapConfig.setMaxSizeConfig(new MaxSizeConfig(0, MaxSizeConfig.MaxSizePolicy.USED_HEAP_SIZE));
return mapConfig;
}
private MapConfig initializeDomainMapConfig(JHipsterProperties jHipsterProperties) {
MapConfig mapConfig = new MapConfig();
mapConfig.setTimeToLiveSeconds(jHipsterProperties.getCache().getHazelcast().getTimeToLiveSeconds());
return mapConfig;
}
}
Sample Repository using cacheNames...
#Repository
public interface UserRepository extends JpaRepository<User, Long> {
String USERS_BY_LOGIN_CACHE = "usersByLogin";
String USERS_BY_EMAIL_CACHE = "usersByEmail";
String USERS_BY_ID_CACHE = "usersById";
Optional<User> findOneByActivationKey(String activationKey);
List<User> findAllByActivatedIsFalseAndActivationKeyIsNotNullAndCreatedDateBefore(Instant dateTime);
Optional<User> findOneByResetKey(String resetKey);
Optional<User> findOneByEmailIgnoreCase(String email);
Optional<User> findOneByLogin(String login);
#EntityGraph(attributePaths = "roles")
#Cacheable(cacheNames = USERS_BY_ID_CACHE)
Optional<User> findOneWithRolesById(Long id);
#EntityGraph(attributePaths = "roles")
#Cacheable(cacheNames = USERS_BY_LOGIN_CACHE)
Optional<User> findOneWithRolesByLogin(String login);
#EntityGraph(attributePaths = { "roles", "roles.permissions" })
#Cacheable(cacheNames = USERS_BY_LOGIN_CACHE)
Optional<User> findOneWithRolesAndPermissionsByLogin(String login);
#EntityGraph(attributePaths = "roles")
#Cacheable(cacheNames = USERS_BY_EMAIL_CACHE)
Optional<User> findOneWithRolesByEmail(String email);
Page<User> findAllByLoginNot(Pageable pageable, String login);
}
I am using tenant per database (MySQL), but as long as you are setting a thread context above here is what I'm doing - I'm using Spring Boot. I've created a custom Cache Key generator which combines the tenant name + class + and method. You can really choose any combination. Whenever I pass that tenant back it pulls the correct entries. In the Hazelcast command center for my AppointmentType map type I see the number of entries increment per tenant.
Some other references that may be helpful:
https://www.javadevjournal.com/spring/spring-cache-custom-keygenerator/
https://docs.spring.io/spring-framework/docs/4.3.x/spring-framework-reference/html/cache.html (search for keyGenerator="myKeyGenerator")
In your class where you want to cache (mine is a service class):
#Service
public class AppointmentTypeService {
private static final Logger LOGGER = LoggerFactory.getLogger(AppointmentTypeService.class);
private final AppointmentTypeRepository appointmentTypeRepository;
#Autowired
AppointmentTypeService(AppointmentTypeRepository appointmentTypeRepository) {
this.appointmentTypeRepository = appointmentTypeRepository;
}
//ADD keyGenerator value. Name is the name of the bean of the class
#Cacheable(value="appointmentType", keyGenerator = "multiTenantCacheKeyGenerator")
public List<AppointmentType> list() {
return this.appointmentTypeRepository.findAll();
}
#CacheEvict(value="appointmentType", allEntries=true)
public Long create(AppointmentType request) {
this.appointmentTypeRepository.saveAndFlush(request);
return request.getAppointmentTypeId();
}
#CacheEvict(value="appointmentType", allEntries=true)
public void delete(Long id) {
this.appointmentTypeRepository.deleteById(id);
}
public Optional<AppointmentType> findById(Long id) {
return this.appointmentTypeRepository.findById(id);
}
}
Create key generator class
//setting the bean name here
#Component("multiTenantCacheKeyGenerator")
public class MultiTenantCacheKeyGenerator implements KeyGenerator {
#Override
public Object generate(Object o, Method method, Object... os) {
StringBuilder sb = new StringBuilder();
sb.append(TenantContext.getCurrentTenantInstanceName()) //my tenant context class which is using local thread. I set the value in the Spring filter.
.append("_")
.append(o.getClass().getSimpleName())
.append("-")
.append(method.getName());
}
return sb.toString();
}
}
One approach to defining different cache keys for the tenants is to override the method getCache in org.springframework.cache.CacheManager, as suggested here: Extended spring cache...
As of Jhipster 7.0.1, the CacheManager for Hazelcast is defined in the class CacheConfiguration as stated bellow:
#Configuration
#EnableCaching
public class CacheConfiguration {
//...
#Bean
public CacheManager cacheManager(HazelcastInstance hazelcastInstance) {
return new com.hazelcast.spring.cache.HazelcastCacheManager(hazelcastInstance);
}
//...
}
To have the cache keys prefixed with the tenant id, the following code may be used as a starting point:
#Configuration
#EnableCaching
public class CacheConfiguration {
#Bean
public CacheManager cacheManager(HazelcastInstance hazelcastInstance) {
return new com.hazelcast.spring.cache.HazelcastCacheManager(hazelcastInstance){
#Override
public Cache getCache(String name) {
String tenantId = TenantStorage.getTenantId();
if (StringUtils.isNotBlank(tenantId)){
return super.getCache(String.format("%s:%s", tenantId, name));
}
return super.getCache(name);
}
};
}
}
Note: in the above code, TenantStorage.getTenantId() is a static function one should implement and that returns the current tenant id.
Consider the class posted by the OP:
#Cacheable(cacheNames = "usersByLogin")
Optional<User> findOneWithRolesByLogin(String login);
The following cache values will be used by HazelCast:
tenant1 => tenant1:usersByLogin
tenant2 => tenant2:usersByLogin
null => usersByLogin

Spring Properties Decryption

We have mix of some legacy spring apps which are not yet migrated to spring-boot or spring cloud and also spring boot apps. I am working on creating a Spring component that will automatically decrypt spring properties when the environment is loaded if the property value is encrypted and has a prefix. The properties can be in .properties files(for legacy apps) or in .yaml files(newer spring boot apps).
The component should be able to decrypt any spring property regardless of the source, and should work with any spring version and not tied to spring boot.The component should also transparently decrypt properties. It should read passphrase from a property file, so the passphrase file needs to be loaded in the beginning.
We have our own ecrypt/decrypt and don't want to use jaspyt.
Things tried so far:
I liked this approach of creating an ApplicationListener, but this is tied up with spring boot(ApplicationEnvironmentPreparedEvent). With Spring events like ContextRefreshed or ContextStart , i don't see how can i get ConfigurableApplicationContext/ConfigurableEnvironment. Anyone created a Listener for encrypt/decrypt withouth spring boot/cloud?
I also created a custom ApplicationContextInitializer, and added it to web.xml's context-param, but this doesn't seems to be working. When i debug into it, i don't think it is loading/reading properties from my app.properties file.
#Component
public class DecryptingPropertyContextInitializer implements ApplicationContextInitializer<ConfigurableApplicationContext> {
#Override
public void initialize( ConfigurableApplicationContext applicationContext ) {
ConfigurableEnvironment environment = applicationContext.getEnvironment();
for ( PropertySource<?> propertySource : environment.getPropertySources() ) {
Map<String, Object> propertyOverrides = new LinkedHashMap<>();
decodePasswords( propertySource, propertyOverrides );
if ( !propertyOverrides.isEmpty() ) {
PropertySource<?> decodedProperties = new MapPropertySource( "decoded " + propertySource.getName(),
propertyOverrides );
environment.getPropertySources().addBefore( propertySource.getName(), decodedProperties );
}
}
}
private void decodePasswords(PropertySource<?> source, Map<String, Object> propertyOverrides) {
if ( source instanceof EnumerablePropertySource ) {
EnumerablePropertySource<?> enumerablePropertySource = (EnumerablePropertySource<?>) source;
for ( String key : enumerablePropertySource.getPropertyNames() ) {
Object rawValue = source.getProperty( key );
if ( rawValue instanceof String ) {
//decrypt logic here
propertyOverrides.put( key, decryptedValue );
}
}
}
}
}
Does anyone had to do something similar or has any better ideas ? Is there a way i can listen to application events and then process?
Appreciate your help
You can write your own PropertiesFactoryBean and override createProperties to decrypt encrypted values:
public class DecryptingPropertiesFactoryBean extends PropertiesFactoryBean {
#Override
protected Properties createProperties() throws IOException {
final Properties encryptedProperties = super.createProperties();
final Properties decryptedProperties = decrypt(encryptedProperties);
return decryptedProperties;
}
}
and a PropertySourcesPlaceholderConfigurer bean using these properties:
#Configuration
public class PropertiesConfiguration {
#Bean
public static DecryptingPropertiesFactoryBean propertyFactory() {
final DecryptingPropertiesFactoryBean factory = new DecryptingPropertiesFactoryBean();
final Resource[] propertyLocations = new Resource[] {
new FileSystemResource(new File("path/to/file.properties"))
};
factory.setLocations(propertyLocations);
return factory;
}
#Bean
public static Properties properties() throws Exception {
return propertyFactory().getObject();
}
#Bean
public static PropertySourcesPlaceholderConfigurer propertySourcesPlaceholderConfigurer() {
final PropertySourcesPlaceholderConfigurer bean = new PropertySourcesPlaceholderConfigurer();
bean.setIgnoreResourceNotFound(true);
bean.setIgnoreUnresolvablePlaceholders(false);
bean.setProperties(properties());
return bean;
}
}

Enabling mongo exception translation in spring-data-rest

Following this tutorial, working with complete code, how do I enable exception translation for mongo?
When my mongo db is down, i'm getting 500 error from com.mongodb.MongoServerSelectionException. Shouldn't this be translated into DataAccessResourceFailureException with MongoExceptionTranslator? Am i supposed to register this bean somehow? I've tried with:
#Bean
public MongoExceptionTranslator mongoExceptionTranslator() {
return new MongoExceptionTranslator();
}
but still no change
EDIT:
I've created a demo with the suggestions from Stackee007 but still can't get this to work
MongoExceptionTranslator is already registered if your configuration registers MongoFactoryBean or SimpleMongoDbFactory. You could configure mongo something like below which registers SimpleMongoDbFactory.
#Configuration
#EnableMongoRepositories
public class ApplicationConfig extends AbstractMongoConfiguration {
#Override
protected String getDatabaseName() {
return "yyy";
}
#Override
protected UserCredentials getUserCredentials() {
return new UserCredentials("abc", "***");
}
#Override
#Bean
public Mongo mongo() throws Exception {
List<ServerAddress> seeds = new ArrayList<ServerAddress>();
seeds.add(new ServerAddress("xxxx"));
seeds.add(new ServerAddress("xxx"));
seeds.add(new ServerAddress("xx"));
MongoClient mongo = new MongoClient(seeds);
mongo.setReadPreference(ReadPreference.secondaryPreferred());
mongo.setWriteConcern(WriteConcern.ACKNOWLEDGED);
return mongo;
}
#Bean
public GridFsTemplate gridFsTemplate() throws Exception {
return new GridFsTemplate(mongoDbFactory(), mappingMongoConverter());
}
}

Categories