ActiveMQ dequeue not consumed messages - java

I'm pretty new to ActiveMQ and message brokers, and I'm facing a problem I'd like some tips and information on.
I have multiple queues, each with a single listener, and everything works fine until the connection is lost. When the connection is re-established, the subscription between each queue and its listener is restored as well, but the messages that weren't consumed before the outage are never delivered after re-connection.
How can I get the messages that weren't consumed yet redelivered?
I am using Spring and I looked for a configuration to make this work, but couldn't find one; afterwards I looked for a workaround, but I'm still unable to find a solution.
Any information, solution, or link related to my problem would be very helpful!
Here is my JMSConfig class:
@Configuration
class JMSConfig {

    private final String BROKER_URL = ConfigurationManager.getConfig().getString("broker.url");
    private final String BROKER_USERNAME = ConfigurationManager.getConfig().getString("broker.username");
    private final String BROKER_PASSWORD = ConfigurationManager.getConfig().getString("broker.password");
    private final long SENDER_RECEIVE_TIMEOUT = ConfigurationManager.getConfig().getLong("sender.receive.timeout");
    private final long SENDING_TIME_TO_LIVE = ConfigurationManager.getConfig().getLong("sending.time.to.live");
    private final String RECEIVER_LISTENER_CONCURRENCY = ConfigurationManager.getConfig().getString("receiver.listener.concurrency");
    private final boolean USE_POOLED_CONNECTION_FACTORY = ConfigurationManager.getConfig().getBoolean("use.pooled.connection.factory");
    private final int POOL_MAX_CONNECTIONS = ConfigurationManager.getConfig().getInt("pool.max.connections");
    private final boolean USE_TOPICS = ConfigurationManager.getConfig().getBoolean("use.topics");
    private static final boolean OVERRIDE_REDELIVERY = false;

    static {
        ConfigurationManager.registerConfiguration("messaging", ConfigurationPriority.LOWEST);
    }
    @Bean
    public ConnectionFactory connectionFactory() {
        LoggerUtilities.logger().info("Registering connectionFactory");
        // Set up the JMS connection
        ActiveMQConnectionFactory connectionFactory = new ActiveMQConnectionFactory();
        connectionFactory.setBrokerURL(BROKER_URL);
        connectionFactory.setUserName(BROKER_USERNAME);
        connectionFactory.setPassword(BROKER_PASSWORD);
        // Does not work with spring.activemq.packages.trust-all=true, so we need to set it up manually.
        // Avoids "This class is not trusted to be serialized as ObjectMessage payload".
        connectionFactory.setTrustAllPackages(true);
        if (OVERRIDE_REDELIVERY) {
            overrideRedelivery(connectionFactory);
        }
        if (USE_POOLED_CONNECTION_FACTORY) {
            // Using PooledConnectionFactory ensures the ActiveMQ server does not hit
            // "Exceeded the maximum number of allowed client connections."
            // This limit is configured in /apache-activemq-5.15.6/conf/activemq.xml > maximumConnections
            PooledConnectionFactory pooledConnectionFactory = new PooledConnectionFactory(connectionFactory);
            pooledConnectionFactory.setMaxConnections(POOL_MAX_CONNECTIONS);
            return pooledConnectionFactory;
        }
        return connectionFactory;
    }
    @Bean
    public JmsTemplate jmsTemplate() {
        LoggerUtilities.logger().info("Registering jmsTemplate");
        JmsTemplate template = new JmsTemplate();
        template.setConnectionFactory(connectionFactory());
        // QoS needs to be enabled for deliveryMode, priority, and timeToLive to be honored
        template.setExplicitQosEnabled(true);
        template.setDeliveryPersistent(false);
        template.setSessionAcknowledgeMode(Session.AUTO_ACKNOWLEDGE);
        template.setTimeToLive(SENDING_TIME_TO_LIVE);
        template.setReceiveTimeout(SENDER_RECEIVE_TIMEOUT);
        if (USE_TOPICS) {
            template.setPubSubDomain(true);
        }
        return template;
    }
    @Bean
    public DefaultJmsListenerContainerFactory jmsListenerContainerFactory() {
        LoggerUtilities.logger().info("Registering jmsListenerContainerFactory");
        DefaultJmsListenerContainerFactory factory = new DefaultJmsListenerContainerFactory();
        if (USE_TOPICS) {
            factory.setPubSubDomain(true);
        }
        factory.setConnectionFactory(connectionFactory());
        factory.setConcurrency(RECEIVER_LISTENER_CONCURRENCY);
        factory.setErrorHandler(throwable -> {
            String exceptionMessage = "[RECEIVER] Error on the listener = " + throwable.getMessage();
            LoggerUtilities.logger().warn(exceptionMessage);
        });
        return factory;
    }
    @Bean
    public MessageConverter jacksonJmsMessageConverter() {
        LoggerUtilities.logger().info("Registering jacksonJmsMessageConverter");
        // Serialize message content to JSON
        MappingJackson2MessageConverter converter = new MappingJackson2MessageConverter();
        converter.setTargetType(MessageType.TEXT);
        converter.setTypeIdPropertyName("_type");
        return converter;
    }

    private void overrideRedelivery(ActiveMQConnectionFactory connectionFactory) {
        LoggerUtilities.logger().info("Registering overrideRedelivery");
        // Override the redelivery policy
        RedeliveryPolicy policy = connectionFactory.getRedeliveryPolicy();
        policy.setInitialRedeliveryDelay(0);
        policy.setBackOffMultiplier(0);
        policy.setUseExponentialBackOff(false);
        policy.setMaximumRedeliveries(RedeliveryPolicy.NO_MAXIMUM_REDELIVERIES);
    }
}
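As an aside on the setTrustAllPackages(true) call above: if trusting every package is too broad, ActiveMQ can whitelist specific packages instead (a sketch; the package name here is an illustrative placeholder):
// Restrict ObjectMessage deserialization to known packages instead of trusting all.
connectionFactory.setTrustedPackages(Arrays.asList("com.mycompany.messages", "java.util"));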
Here are the properties:
spring.jms.listener.acknowledge-mode=auto
spring.jms.listener.auto-startup=true
spring.jms.listener.concurrency=3
spring.jms.listener.max-concurrency=9
spring.jms.listener.pub-sub-domain=false
spring.jms.listener.receive-timeout=${BROKER_TIMEOUT}
spring.jms.template.delivery-mode=non_persistent
spring.jms.template.priority=5
spring.jms.template.qos-enabled=true
spring.jms.template.receive-timeout=${BROKER_TIMEOUT}
spring.jms.template.time-to-live=${BROKER_TIMEOUT}
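Note that with spring.jms.template.delivery-mode=non_persistent and a time-to-live set, the broker is allowed to discard unconsumed messages across a restart or once the TTL expires, which matches the behavior described above. A sketch of a persistent variant of the jmsTemplate bean, reusing the connectionFactory() bean from the config class:
@Bean
public JmsTemplate jmsTemplate() {
    JmsTemplate template = new JmsTemplate();
    template.setConnectionFactory(connectionFactory());
    template.setExplicitQosEnabled(true);
    // PERSISTENT delivery: the broker stores messages, so unconsumed
    // messages survive broker restarts and client reconnects.
    template.setDeliveryPersistent(true);
    // A TTL of 0 means "never expire"; any positive value allows the broker
    // to discard messages that outlive it, even if they were never consumed.
    template.setTimeToLive(0);
    return template;
}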

Related

How To Stop Polling InboundChannelAdapter

I'm polling files from 2 different directories on 1 server using RotatingServerAdvice, and that's working fine. The problem is that I can't stop the polling once I start it with inboundtest.start(). The main idea is to retrieve all the files in those directories and then send inboundtest.stop(). This is the code:
@Bean
public SessionFactory<LsEntry> sftpSessionFactory() {
    DefaultSftpSessionFactory factory = new DefaultSftpSessionFactory(false);
    factory.setHost(host);
    factory.setPort(port);
    factory.setUser(user);
    factory.setPassword(password);
    factory.setAllowUnknownKeys(true);
    //factory.setTestSession(true);
    return factory;
}

@Bean
public SftpInboundFileSynchronizer sftpInboundFileSynchronizer() {
    SftpInboundFileSynchronizer fileSynchronizer = new SftpInboundFileSynchronizer(sftpSessionFactory());
    fileSynchronizer.setDeleteRemoteFiles(true);
    fileSynchronizer.setRemoteDirectory(sftpRemoteDirectory);
    fileSynchronizer.setFilter(new SftpRegexPatternFileListFilter(".*?\\.(txt|TXT?)"));
    return fileSynchronizer;
}

@Bean(name = "sftpMessageSource")
@EndpointId("inboundtest")
@InboundChannelAdapter(channel = "sftpChannel", poller = @Poller("fileReadingMessageSourcePollerMetadata"), autoStartup = "false")
public MessageSource<File> sftpMessageSource() {
    SftpInboundFileSynchronizingMessageSource source =
            new SftpInboundFileSynchronizingMessageSource(sftpInboundFileSynchronizer());
    source.setLocalDirectory(new File(sftpLocalDirectoryDownloadUpload));
    source.setAutoCreateLocalDirectory(true);
    source.setLocalFilter(new AcceptOnceFileListFilter<File>());
    return source;
}
@Bean
public DelegatingSessionFactory<LsEntry> sessionFactory() {
    Map<Object, SessionFactory<LsEntry>> factories = new LinkedHashMap<>();
    factories.put("one", sftpSessionFactory());
    // use the first SF as the default
    return new DelegatingSessionFactory<LsEntry>(factories, factories.values().iterator().next());
}

@Bean
public RotatingServerAdvice advice() {
    List<RotationPolicy.KeyDirectory> keyDirectories = new ArrayList<>();
    keyDirectories.add(new RotationPolicy.KeyDirectory("one", sftpRemoteDirectory));
    keyDirectories.add(new RotationPolicy.KeyDirectory("one", sftpRemoteDirectoryNonUpload));
    return new RotatingServerAdvice(sessionFactory(), keyDirectories, false);
}

@Bean
MessageChannel controlChannel() {
    return new DirectChannel();
}

@Bean
@ServiceActivator(inputChannel = "controlChannel")
ExpressionControlBusFactoryBean controlBus() {
    return new ExpressionControlBusFactoryBean();
}

@Bean
public PollerMetadata fileReadingMessageSourcePollerMetadata() {
    PollerMetadata meta = new PollerMetadata();
    meta.setTrigger(new PeriodicTrigger(1000));
    meta.setAdviceChain(List.of(advice()));
    meta.setMaxMessagesPerPoll(1);
    meta.setErrorHandler(throwable -> new IOException());
    return meta;
}
It is always waiting for a new file in one of the 2 directories, but that's not the idea; the idea is to stop polling once all the files have been retrieved.
From another class I call inboundtest.start() through the control channel. Here is the code:
@Autowired
private MessageChannel controlChannel;

public void startProcessingFiles() throws InterruptedException {
    controlChannel.send(new GenericMessage<>("@inboundtest.start()"));
}
I was trying to stop it with this class, but it doesn't work:
@Component
public class StopPollingAdvice implements ReceiveMessageAdvice {

    @Autowired
    private MessageChannel controlChannel;

    @Override
    public Message<?> afterReceive(Message<?> message, Object source) {
        if (message == null) {
            System.out.println("There are no more files, stopping connection");
            Message<String> operation = MessageBuilder.withPayload("@inboundtest.stop()").build();
            controlChannel.send(operation);
        }
        return message;
    }
}
OK. Now I see your point. The RotatingServerAdvice moves to the other server (or directory) only when the first is exhausted (by default; see the fair option). So, when you stop the adapter in the advice, it can no longer move to the other dir for fetching. You need to think about some other stopping solution, something that is not tied to the advice and this afterReceive(), somewhere downstream in your flow...
Or you can provide a custom RotationPolicy (an extension of StandardRotationPolicy) and, in its overridden afterReceive(), check whether all the dirs have been processed and then send the stop command.
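A rough sketch of that second suggestion (assuming a Spring Integration version whose RotatingServerAdvice accepts a RotationPolicy directly; the class name and the empty-poll counting heuristic are illustrative, not framework API):
public class StopWhenAllDirsExhaustedPolicy extends StandardRotationPolicy {

    private final MessageChannel controlChannel;
    private int consecutiveEmptyPolls;

    public StopWhenAllDirsExhaustedPolicy(DelegatingSessionFactory<?> factory,
            List<RotationPolicy.KeyDirectory> keyDirectories, MessageChannel controlChannel) {
        super(factory, keyDirectories, false);
        this.controlChannel = controlChannel;
    }

    @Override
    public void afterReceive(boolean messageReceived, MessageSource<?> source) {
        super.afterReceive(messageReceived, source);
        if (messageReceived) {
            this.consecutiveEmptyPolls = 0;
        }
        // Once every directory in the rotation has come up empty in a row,
        // assume they are all drained and stop the adapter via the control bus.
        else if (++this.consecutiveEmptyPolls >= 2) { // 2 = number of key directories
            this.controlChannel.send(new GenericMessage<>("@inboundtest.stop()"));
        }
    }
}
The policy would then be passed to new RotatingServerAdvice(policy) in the advice() bean instead of the three-argument constructor used above.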

I can't organize delayed sending of messages with Spring AMQP

The configuration class (part):
public static RabbitQueueConfig clubProNotAvailableConfig =
        new RabbitQueueConfig("club-pro-not-available", "club-pro-not-available", "club-pro-not-available-status", "3-3");

@Bean
public SimpleMessageListenerContainer listenerContainer5(ClubProNotAvailableListener listener, ConnectionFactory connectionFactory) {
    return initListenerContainer(listener, clubProNotAvailableConfig, connectionFactory);
}

private SimpleMessageListenerContainer initListenerContainer(
        ChannelAwareMessageListener listener,
        RabbitQueueConfig config,
        ConnectionFactory connectionFactory
) {
    SimpleMessageListenerContainer listenerContainer = new SimpleMessageListenerContainer();
    listenerContainer.setConnectionFactory(connectionFactory);
    listenerContainer.setQueueNames(config.getQueue());
    listenerContainer.setMessageListener(listener);
    listenerContainer.setAcknowledgeMode(AcknowledgeMode.MANUAL);
    listenerContainer.setConcurrency(config.getThreadPoolSize());
    listenerContainer.setPrefetchCount(1);
    return listenerContainer;
}
Method of sending a message:
try {
    success = clientRepository.updateAnketa(privatePersonProfile.getProfileId(), clubProAnketa, null);
} catch (ClubProNotAvailableException e) {
    ClubProNotAvailableRabbit clubProNotAvailableRabbit = new ClubProNotAvailableRabbit();
    clubProNotAvailableRabbit.setRequestContextRabbit(RequestContextRabbit.createContext(requestContextService.getContext()));
    clubProNotAvailableRabbit.setCountRetry(0L);
    clubProNotAvailableRabbit.setProfileId(privatePersonProfile.getProfileId());
    clubProNotAvailableRabbit.setNameMethod(ChangeMethod.CHANGE_ANKETA);
    clubProNotAvailableRabbit.setChangeAnketaData(anketa);
    rabbitTemplate.convertAndSend(config.getExchange(), config.getRoutingKey(), clubProNotAvailableRabbit, new MessagePostProcessor() {
        @Override
        public Message postProcessMessage(Message message) throws AmqpException {
            message.getMessageProperties().setHeader("x-delay", 10000);
            return message;
        }
    });
    throw new ClubProNotAvailableException();
}
Configuration in the broker: the queue configuration and the exchange configuration (screenshots not reproduced here).
I've read the documentation, tried a couple of options, but I can't apply it to my code.
What am I doing wrong? I will be very grateful for your help.
It looks like you don't have the delayed exchange plugin installed; you have also declared the exchange as a simple fanout. This is what the exchange declaration should look like (screenshot omitted).
Also, to set the delay when sending, you should use:
template.convertAndSend(exchangeName, queue.getName(), "foo", message -> {
    message.getMessageProperties().setDelay(1000);
    return message;
});
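For completeness, a minimal Spring AMQP declaration of such an exchange might look like this (a sketch; the names reuse the question's club-pro-not-available naming, and the rabbitmq_delayed_message_exchange plugin must be enabled on the broker):
@Bean
public DirectExchange delayedExchange() {
    DirectExchange exchange = new DirectExchange("club-pro-not-available");
    // Declares the exchange with type "x-delayed-message" and an
    // "x-delayed-type" argument of "direct"; requires the delayed message plugin.
    exchange.setDelayed(true);
    return exchange;
}

@Bean
public Binding delayedBinding() {
    return BindingBuilder.bind(new Queue("club-pro-not-available"))
            .to(delayedExchange())
            .with("club-pro-not-available");
}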

How to increase the performance of a Spring Batch job that reads data from an Oracle AQ?

I have Spring Batch code that reads (dequeues) data from an Oracle AQ.
But it takes 4 minutes to read only 100 rows!
What could be the problem with this reader? How can I improve the performance?
This is the code from my custom reader that dequeues the data:
final String queueOwner = "OWNER";
final String queueName = "QUE_NAME";
try (Connection aqconn = dataSource.getConnection()) {
    aqconn.setAutoCommit(false);
    Class.forName("oracle.AQ.AQOracleDriver");
    AQSession aqSession = AQDriverManager.createAQSession(aqconn);
    AQQueue queue;
    AQMessage message = null;
    AQDequeueOption deqOption = new AQDequeueOption();
    AQObjectPayload payload;
    Data data = new Data();
    queue = aqSession.getQueue(queueOwner, queueName);
    message = ((AQOracleQueue) queue).dequeue(deqOption, DataAq.getORADataFactory());
    payload = message.getObjectPayload();
    DataAq dataAq = (DataAq) payload.getPayloadData();
    // ... some validation ...
    aqconn.commit();
    aqSession.close();
    return saldo;
}
I already tried using JmsItemReader, but it kept giving me the error "Payload factory must be specified for destinations with ADT payloads" even when I set a message listener, so I gave up on that configuration.
To read from Oracle AQ with Spring Batch I'm using JmsTemplate.
Here is a sample of the configuration I'm using:
@Bean
public ConnectionFactory connectionFactory() throws JMSException, SQLException {
    return AQjmsFactory.getQueueConnectionFactory(dataSource());
}

@Bean
public JmsTemplate jmsTemplate(ConnectionFactory connectionFactory) {
    JmsTemplate jmsTemplate = new JmsTemplate();
    jmsTemplate.setConnectionFactory(connectionFactory);
    SimpleMessageConverter converter = new SimpleMessageConverter();
    jmsTemplate.setMessageConverter(converter);
    jmsTemplate.setSessionTransacted(true);
    jmsTemplate.setDefaultDestinationName("YOUR_QUEUE_NAME");
    return jmsTemplate;
}

@Bean
public DataSource dataSource() throws SQLException {
    OracleConnectionPoolDataSource dataSource = new OracleConnectionPoolDataSource();
    dataSource.setURL("jdbc:oracle:thin:@host:port:database");
    dataSource.setUser("usr");
    dataSource.setPassword("pwd");
    return dataSource;
}
After that you can use the JmsItemReader from Spring Batch.
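For example, a minimal reader bean on top of the JmsTemplate above might look like this (a sketch; the item type depends on what your converter actually returns):
@Bean
public JmsItemReader<String> jmsItemReader(JmsTemplate jmsTemplate) {
    JmsItemReader<String> reader = new JmsItemReader<>();
    // The reader delegates to JmsTemplate.receiveAndConvert(), so the
    // receive timeout and transaction settings above apply here too.
    reader.setJmsTemplate(jmsTemplate);
    reader.setItemType(String.class);
    return reader;
}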

Spring Batch Integration Remote Chunking error - Message contained wrong job instance id [25] should have been [24]

I'm running into this bug (more info here) which appears to mean that for multi-threaded batches using remote chunking you can't use a common response channel. I'm not exactly sure how to proceed to get this working. Surely there's a way to get this working, because without it I can't see much benefit to remote chunking.
Here's my DSL config that creates a JobLaunchRequest:
@Bean
IntegrationFlow newPollingJobsAdapter(JobLaunchingGateway jobLaunchingGateway) {
    // Start by polling the DB for new PollingJobs according to the polling rate
    return IntegrationFlows.from(jdbcPollingChannelAdapter(),
            c -> c.poller(Pollers.fixedRate(10000)
                    // Do the polling on one of 10 threads.
                    .taskExecutor(Executors.newFixedThreadPool(10))
                    // pull out up to 100 new ids for each poll.
                    .maxMessagesPerPoll(100)))
            .log(LoggingHandler.Level.WARN)
            // The polling adapter above returns a list of ids. Split them out into
            // individual ids
            .split()
            // Now push each one onto a separate thread for batch processing.
            .channel(MessageChannels.executor(Executors.newFixedThreadPool(10)))
            .log(LoggingHandler.Level.WARN)
            // Transform each one into a JobLaunchRequest
            .<Long, JobLaunchRequest>transform(id -> {
                logger.warn("Creating job for ID {}", id);
                JobParametersBuilder builder = new JobParametersBuilder()
                        .addLong("polling-job-id", id, true);
                return new JobLaunchRequest(job, builder.toJobParameters());
            })
            .handle(jobLaunchingGateway)
            // TODO: Notify somebody? No idea yet
            .<JobExecution>handle(exec -> System.out.println("GOT EXECUTION: " + exec))
            .get();
}
Nothing in here is particularly special, no odd configs that I'm aware of.
The job itself is pretty straight-forward, too:
/**
 * This is the definition of the entire batch process that runs polling.
 * @return
 */
@Bean
Job pollingJobJob() {
    return jobBuilderFactory.get("pollingJobJob")
            .incrementer(new RunIdIncrementer())
            // Ship it down to the slaves for actual processing
            .start(remoteChunkingStep())
            // Now mark it as complete
            .next(markCompleteStep())
            .build();
}

/**
 * Sends the job to a remote slave via an ActiveMQ-backed JMS queue.
 */
@Bean
TaskletStep remoteChunkingStep() {
    return stepBuilderFactory.get("polling-job-step-remote-chunking")
            .<Long, String>chunk(20)
            .reader(runningPollingJobItemReader)
            .processor(toJsonProcessor())
            .writer(chunkWriter)
            .build();
}

/**
 * This step just marks the PollerJob as Complete.
 */
@Bean
Step markCompleteStep() {
    return stepBuilderFactory.get("polling-job-step-mark-complete")
            // We want each PollerJob instance to be a separate job in batch, and the
            // reader is using the id passed in via job params to grab the one we want,
            // so we don't need a large chunk size. One at a time is fine.
            .<Long, Long>chunk(1)
            .reader(runningPollingJobItemReader)
            .processor(new PassThroughItemProcessor<Long>())
            .writer(this.completeStatusWriter)
            .build();
}
Here's the chunk writer config:
/**
 * This is part of the bridge between spring-batch and spring-integration. Nothing special or weird is going
 * on, so see the RemoteChunkHandlerFactoryBean for a description.
 */
@Bean
RemoteChunkHandlerFactoryBean<PollerJob> remoteChunkHandlerFactoryBean() {
    RemoteChunkHandlerFactoryBean<PollerJob> factory = new RemoteChunkHandlerFactoryBean<>();
    factory.setChunkWriter(chunkWriter);
    factory.setStep(remoteChunkingStep());
    return factory;
}

/**
 * This is the writer that will actually send the chunk to the slaves. Note that it also configures the
 * internal channel on which replies are expected.
 */
@Bean
@StepScope
ChunkMessageChannelItemWriter<String> chunkWriter() {
    ChunkMessageChannelItemWriter<String> writer = new ChunkMessageChannelItemWriter<>();
    writer.setMessagingOperations(batchMessagingTemplate());
    writer.setReplyChannel(batchResponseChannel());
    writer.setThrottleLimit(1000);
    return writer;
}
The problem seems to be that the last section sets up the ChunkMessageChannelItemWriter such that the replyChannel is the same one used by all of the writers, despite each writer being step-scoped. It would seem that I need to add a replyChannel header to one of the messages, but I'm not sure where in the chain to do that or how to process it (if I need to at all?).
Also, this is being sent to the slaves via JMS/ActiveMQ, and I'd like to avoid having a huge number of nearly-identical queues on ActiveMQ just to support this.
What are my options?
Given that you are using a shared JMS infrastructure, you will need a router to get the responses back to the correct chunk writer.
If you use prototype scope on the batchResponseChannel() @Bean, you'll get a unique channel for each writer.
I don't have time to figure out how to set up a chunked batch job, so the following simulates your environment (a non-singleton bean that needs a unique reply channel for each instance). Hopefully it's self-explanatory...
@SpringBootApplication
public class So44806067Application {

    public static void main(String[] args) {
        ConfigurableApplicationContext context = SpringApplication.run(So44806067Application.class, args);
        SomeNonSingletonNeedingDistinctRequestAndReplyChannels chunker1 = context
                .getBean(SomeNonSingletonNeedingDistinctRequestAndReplyChannels.class);
        SomeNonSingletonNeedingDistinctRequestAndReplyChannels chunker2 = context
                .getBean(SomeNonSingletonNeedingDistinctRequestAndReplyChannels.class);
        if (chunker1.equals(chunker2)) {
            throw new IllegalStateException("Expected different instances");
        }
        chunker1.sendSome();
        chunker2.sendSome();
        ChunkResponse results = chunker1.getResults();
        if (results == null) {
            throw new IllegalStateException("No results1");
        }
        if (results.getJobId() != 1L) {
            throw new IllegalStateException("Incorrect routing1");
        }
        results = chunker2.getResults();
        if (results == null) {
            throw new IllegalStateException("No results2");
        }
        if (results.getJobId() != 2L) {
            throw new IllegalStateException("Incorrect routing2");
        }
        context.close();
    }

    @Bean
    public Map<Long, PollableChannel> registry() {
        // TODO: should clean up entry for jobId when job completes.
        return new ConcurrentHashMap<>();
    }

    @Bean
    @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
    public SomeNonSingletonNeedingDistinctRequestAndReplyChannels chunker() {
        MessagingTemplate template = template();
        final PollableChannel replyChannel = replyChannel();
        SomeNonSingletonNeedingDistinctRequestAndReplyChannels bean =
                new SomeNonSingletonNeedingDistinctRequestAndReplyChannels(template, replyChannel);
        AbstractSubscribableChannel requestChannel = (AbstractSubscribableChannel) template.getDefaultDestination();
        requestChannel.addInterceptor(new ChannelInterceptorAdapter() {

            @Override
            public Message<?> preSend(Message<?> message, MessageChannel channel) {
                registry().putIfAbsent(((ChunkRequest<?>) message.getPayload()).getJobId(), replyChannel);
                return message;
            }

        });
        BridgeHandler bridge = bridge();
        requestChannel.subscribe(bridge);
        return bean;
    }

    @Bean
    @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
    public MessagingTemplate template() {
        MessagingTemplate messagingTemplate = new MessagingTemplate();
        messagingTemplate.setDefaultChannel(requestChannel());
        return messagingTemplate;
    }

    @Bean
    @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
    public DirectChannel requestChannel() {
        return new DirectChannel();
    }

    @Bean
    @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
    public PollableChannel replyChannel() {
        return new QueueChannel();
    }

    @Bean
    @Scope(ConfigurableBeanFactory.SCOPE_PROTOTYPE)
    public BridgeHandler bridge() {
        BridgeHandler bridgeHandler = new BridgeHandler();
        bridgeHandler.setOutputChannel(outboundChannel());
        return bridgeHandler;
    }

    @Bean
    public DirectChannel outboundChannel() {
        return new DirectChannel();
    }

    @Bean
    public DirectChannel masterReplyChannel() {
        return new DirectChannel();
    }

    @ServiceActivator(inputChannel = "outboundChannel")
    public void simulateJmsChannelAdapterPair(ChunkRequest<?> request) {
        masterReplyChannel()
                .send(new GenericMessage<>(new ChunkResponse(request.getSequence(), request.getJobId(), null)));
    }

    @Router(inputChannel = "masterReplyChannel")
    public MessageChannel route(ChunkResponse reply) {
        // TODO: error checking - missing reply channel for jobId
        return registry().get(reply.getJobId());
    }

    public static class SomeNonSingletonNeedingDistinctRequestAndReplyChannels {

        private static final AtomicLong jobIds = new AtomicLong();

        private final long jobId = jobIds.incrementAndGet();

        private final MessagingTemplate template;

        private final PollableChannel replyChannel;

        public SomeNonSingletonNeedingDistinctRequestAndReplyChannels(MessagingTemplate template,
                PollableChannel replyChannel) {
            this.template = template;
            this.replyChannel = replyChannel;
        }

        public void sendSome() {
            ChunkRequest<String> cr = new ChunkRequest<>(0, Collections.singleton("foo"), this.jobId, null);
            this.template.send(new GenericMessage<>(cr));
        }

        public ChunkResponse getResults() {
            @SuppressWarnings("unchecked")
            Message<ChunkResponse> received = (Message<ChunkResponse>) this.replyChannel.receive(10_000);
            if (received != null) {
                if (received.getPayload().getJobId().equals(this.jobId)) {
                    System.out.println("Got the right one");
                }
                else {
                    System.out.println(
                            "Got the wrong one " + received.getPayload().getJobId() + " instead of " + this.jobId);
                }
                return received.getPayload();
            }
            return null;
        }

    }

}

How to use JMS to know when the last message of a list has been processed

I have 1500 records that I'm breaking up into smaller groups (~250) for asynchronous processing with JMS.
1500 is not a fixed value, though; for each client it can be more or less. In some cases there can be 8000 products or more, and I will have N clients doing this operation one, two, three, or four times per day.
I have been breaking the records into smaller groups to avoid having a single transaction with 1500 records.
I need to start some task only when all parts have been processed (all 1500).
How can I do this? I'm using Spring 4, JMS 2, and HornetQ, and for now I'm using annotation-based config.
Maybe JMS isn't the right tool for this problem; I need help with that too. I have an XML file (from a web service) with 1500 products (code, price, stock, stock_local, title) and I have to persist all of them.
After, and only after, all of them are processed, I need to start the task that updates the Stock and Price values of each product (in a remote system), based on the newly stored values (along with some other conditions).
The code:
// in some RestController I have
Lists.partition(newProducts, 250).forEach(listPart ->
        myQueue.add(createMessage(Lists.newArrayList(listPart))));

// called several times; each message contains a list of 250 products to persist
public void add(ProductsMessage message) {
    this.jmsTemplate.send(QUEUE_NAME, session -> session.createObjectMessage(message));
}

@JmsListener(destination = QUEUE_NAME)
public void importProducts(ProductsMessage message) {
    // ... here I get message.getList() and persist all 250 products
}
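One common pattern for detecting completion with plain JMS is to stamp each chunk message with a shared import id and the total number of parts, then count them off in the listener. A sketch (the importId, the getTotalParts() accessor, and the in-memory counter are illustrative assumptions, not part of the original code):
// Sender: stamp every chunk with a shared import id and the total part count.
List<List<Product>> parts = Lists.partition(newProducts, 250);
parts.forEach(part ->
        myQueue.add(createMessage(importId, parts.size(), Lists.newArrayList(part))));

// Listener side: count processed parts per import.
private final ConcurrentMap<String, AtomicInteger> processedParts = new ConcurrentHashMap<>();

@JmsListener(destination = QUEUE_NAME)
public void importProducts(ProductsMessage message) {
    // ... persist the ~250 products of this chunk ...
    int done = processedParts
            .computeIfAbsent(message.getImportId(), id -> new AtomicInteger())
            .incrementAndGet();
    if (done == message.getTotalParts()) {
        // Every chunk of this import is persisted; now the follow-up task can start.
        startStockAndPriceUpdateTask(message.getImportId());
    }
}
With more than one consumer node, the counter would have to live in a shared store (for example, the database the products are persisted to) rather than in memory, since each node only sees its own share of the messages.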
Current JMS config:
@Configuration
@EnableJms
public class JmsConfig {

    public static final int DELIVERY_DELAY = 1000;
    public static final int SESSION_CACHE_SIZE = 10;

    @Bean
    @Autowired
    public DefaultJmsListenerContainerFactory jmsListenerContainerFactory(PlatformTransactionManager transactionManager) {
        DefaultJmsListenerContainerFactory factory =
                new DefaultJmsListenerContainerFactory();
        factory.setConnectionFactory(connectionFactory());
        factory.setDestinationResolver(destinationResolver());
        factory.setConcurrency("1-2");
        factory.setTransactionManager(transactionManager);
        return factory;
    }

    @Bean
    public DestinationResolver destinationResolver() {
        return new DynamicDestinationResolver();
    }

    @Bean
    public ConnectionFactory connectionFactory() {
        TransportConfiguration transport = new TransportConfiguration(InVMConnectorFactory.class.getName());
        ConnectionFactory originalConnectionFactory = HornetQJMSClient.createConnectionFactoryWithoutHA(JMSFactoryType.CF, transport);
        CachingConnectionFactory connectionFactory = new CachingConnectionFactory();
        connectionFactory.setTargetConnectionFactory(originalConnectionFactory);
        connectionFactory.setSessionCacheSize(SESSION_CACHE_SIZE);
        return connectionFactory;
    }

    @Bean
    public JmsTemplate template(ConnectionFactory connectionFactory) {
        JmsTemplate template = new JmsTemplate();
        template.setConnectionFactory(connectionFactory);
        template.setDeliveryDelay(DELIVERY_DELAY);
        template.setSessionTransacted(true);
        return template;
    }

    /**
     * Starts an embedded JMS broker
     */
    @Bean(initMethod = "start", destroyMethod = "stop")
    public EmbeddedJMS startJmsBroker() {
        return new EmbeddedJMS();
    }
}
