When I use this code to acknowledge Redis (v6.2.5) stream items in a Java 11 Spring Boot project:
@Override
public void onMessage(MapRecord<String, String, String> message) {
try {
log.debug("receive message from redis:" + JSON.toJSONString(message.getValue()));
this.stringRedisTemplate.opsForStream().acknowledge(groupName, message);
} catch (Exception e) {
log.error("handle redis stream message error", e);
}
}
The stream element still exists in Redis after running this code to acknowledge items. Am I doing something wrong? What should I do to acknowledge the Redis stream elements? This is the full code that consumes elements from the Redis stream:
package com.dolphin.soa.post.common.mq;
import com.alibaba.fastjson.JSON;
import com.dolphin.soa.post.service.IArticleService;
import com.dolphin.soa.post.service.ISubRelationService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.redis.connection.stream.MapRecord;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.data.redis.stream.StreamListener;
import org.springframework.stereotype.Component;
import java.util.Map;
/**
* @author dolphin
*/
@Component
@Slf4j
public class StreamMessageListener implements StreamListener<String, MapRecord<String, String, String>> {
@Value("${dolphin.redis.stream.group}")
private String groupName;
@Autowired
private StreamMessageHandler streamMessageHandler;
private final StringRedisTemplate stringRedisTemplate;
private final RedisTemplate<String, Object> articleRedisTemplate;
private final RedisTemplate<String, Long> redisLongTemplate;
private final ISubRelationService subRelationService;
private final IArticleService articleService;
public StreamMessageListener(StringRedisTemplate stringRedisTemplate,
#Qualifier("redisObjectTemplate") RedisTemplate<String, Object> articleRedisTemplate,
ISubRelationService subRelationService,
#Qualifier("redisLongTemplate") RedisTemplate<String, Long> redisLongTemplate,
IArticleService articleService) {
this.stringRedisTemplate = stringRedisTemplate;
this.articleRedisTemplate = articleRedisTemplate;
this.subRelationService = subRelationService;
this.redisLongTemplate = redisLongTemplate;
this.articleService = articleService;
}
@Override
public void onMessage(MapRecord<String, String, String> message) {
try {
Map<String, String> body = message.getValue();
log.debug("receive message from redis:" + JSON.toJSONString(body));
streamMessageHandler.handleArticle(body);
this.stringRedisTemplate.opsForStream().acknowledge(groupName, message);
} catch (Exception e) {
log.error("handle redis stream message error", e);
}
}
}
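For reference: as far as I know, XACK only removes an entry from the consumer group's pending entries list; acknowledged entries stay in the stream itself until they are explicitly deleted (XDEL) or trimmed (XTRIM). A minimal sketch of acknowledging and then removing an entry follows; the delete call is an assumption on my part, not part of the original listener, and it needs an import of org.springframework.data.redis.core.StreamOperations:

// Sketch only: acknowledge for the group, then optionally delete the entry itself.
StreamOperations<String, String, String> ops = this.stringRedisTemplate.opsForStream();
ops.acknowledge(groupName, message);              // XACK: clears the entry from the group's pending list
ops.delete(message.getStream(), message.getId()); // XDEL: removes the entry from the stream itself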
This is the producer config.
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import java.util.HashMap;
import java.util.Map;
@Configuration
public class KafkaProducerConfig {
@Value("${spring.kafka.bootstrap-servers}")
private String bootStrapServers;
public Map<String, Object> producerConfig() {
HashMap<String, Object> props = new HashMap<>();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootStrapServers);
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "20971520");
return props;
}
@Bean
public ProducerFactory<String, String> producerFactory() {
return new DefaultKafkaProducerFactory<String, String>(producerConfig());
}
@Bean
public KafkaTemplate<String, String> kafkaTemplate(ProducerFactory<String, String> producerFactory) {
return new KafkaTemplate<String, String>(producerFactory);
}
}
This is the consumer config.
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import java.util.HashMap;
import java.util.Map;
@Configuration
public class KafkaConsumerConfig {
@Value("${spring.kafka.bootstrap-servers}")
private String bootStrapServers;
public Map<String, Object> consumerConfig() {
HashMap<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootStrapServers);
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
return props;
}
@Bean
public ConsumerFactory<String, String> consumerFactory() {
return new DefaultKafkaConsumerFactory<String, String>(consumerConfig());
}
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> factory() {
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
return factory;
}
}
This is the Kafka sender.
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Service;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
@Service
public class KafkaSender {
@Autowired
private KafkaTemplate<String, String> kafkaTemplate;
String kafkaTopic = "testTopic";
public void send() {
byte[] array = null;
try {
array = Files.readAllBytes(Paths.get("Test.webm"));
String kafkaTopic = "testTopic";
String encoded = java.util.Base64.getEncoder().encodeToString(array);
kafkaTemplate.send(kafkaTopic, encoded);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
}
This is the listener.
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;
import java.io.FileOutputStream;
import java.io.IOException;
import java.lang.reflect.Array;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
@Component
public class Listener {
@KafkaListener(topics = "testTopic", groupId = "foo")
public void listenGroupFoo(String message){
byte[] decoded = java.util.Base64.getDecoder().decode(message);
try {
FileOutputStream out;
out = new FileOutputStream("video1.mp4");
out.write(decoded);
out.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
Currently I am sending the whole byte array, but there is a size limit on Kafka records, so I cannot send larger files (e.g. 1 GB).
Please let me know how to implement this so that I can send a video byte by byte from the producer, collect the bytes at the consumer, and convert them all back into an array.
send a video byte by byte from the producer
Literally? Don't use StringSerializer. You'd loop over the array and use ByteArraySerializer:
byte[] array = Files.readAllBytes(Paths.get("Test.webm"));
String kafkaTopic = "testTopic";
for (byte b : array) {
kafkaTemplate.send(kafkaTopic, new byte[] { b });
}
But
You can only ever produce one file into the same topic at a time - multiple producers will have mixed file bytes
You must modify Kafka producer properties to use transactions, no retries, and only one in flight request max. Otherwise, bytes get dropped, duplicated, or reordered.
Your topic can only have one partition. Otherwise, bytes get reordered
Now, you could chunk the file into larger byte slices (a rough sketch follows this answer), but then re-ordering matters even more.
As far as the consumer goes, there's no straightforward way to know which byte is the end of the file/stream, so you'd need some marker plus an if statement in the listener/poll loop.
Ultimately, Kafka is not designed for file transfers or A/V streaming, and the largest reasonable record size is only a few MB.
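If you do chunk into larger slices, a rough sketch might look like the following; this assumes a KafkaTemplate<String, byte[]> configured with ByteArraySerializer for values, a single-partition topic, and the ordering constraints listed above (the chunk size is arbitrary):

// Sketch: send the file as fixed-size chunks, in order, to one topic/partition.
byte[] array = Files.readAllBytes(Paths.get("Test.webm"));
int chunkSize = 512 * 1024; // 512 KB per record, purely illustrative
for (int offset = 0; offset < array.length; offset += chunkSize) {
    int end = Math.min(array.length, offset + chunkSize);
    kafkaTemplate.send("testTopic", java.util.Arrays.copyOfRange(array, offset, end));
}

The consumer would still need its own convention (for example a final marker record, or a header carrying the total chunk count) to know when the file is complete.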
I'm trying to figure out how to include custom headers in the Spring Message<?> used in Spring Cloud Stream with the Kafka binder. My goal is to include some custom header data that would be added in one producer (function) class, passed to Kafka, and then consumed by another class in a different service (along with the custom header data).
I feel like I am missing something, because I can get it to work using the TestChannelBinder, e.g.:
import lombok.extern.slf4j.Slf4j;
import org.springframework.messaging.Message;
import org.springframework.stereotype.Component;
import java.util.function.Function;
@Component
@Slf4j
public class BaseStream implements Function<Message<String>, String> {
@Override
public String apply(Message<String> transactionMessage) {
log.debug("Converted Message: {} ", transactionMessage);
return transactionMessage.getPayload();
}
}
Test class with Test Binder:
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.cloud.stream.binder.test.InputDestination;
import org.springframework.cloud.stream.binder.test.OutputDestination;
import org.springframework.cloud.stream.binder.test.TestChannelBinderConfiguration;
import org.springframework.context.annotation.Import;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.test.context.TestPropertySource;
@SpringBootTest
@TestPropertySource("classpath:testStream.properties")
@Import(TestChannelBinderConfiguration.class)
public class TestForStream {
@Autowired
InputDestination inputDestination;
@Autowired
OutputDestination outputDestination;
@Test
void contextLoads() {
inputDestination.send(MessageBuilder
.withPayload("Test Payload")
.setHeader("customHeader", "headerSpecificData")
.build());
}
}
testStream.properties
spring.cloud.function.definition=baseStream
spring.cloud.stream.bindings.baseStream-in-0.destination=test-in
spring.cloud.stream.bindings.baseStream-out-0.destination=test-out
spring.cloud.stream.bindings.baseStream-in-0.group=test-group-base
Log when running:
Converted Message: GenericMessage [payload=Test Payload, headers={id=5c6d1082-c084-0b25-4afc-b5d97bf537f9, customHeader=headerSpecificData, contentType=application/json, timestamp=1639398696800, target-protocol=kafka}]
Which is what I am looking to do. But when I try to test it with the Kafka binder, it seems to include the Message<String> object in the payload as a JSON string, which I thought would be parsed into the requested input of the function BaseStream.
Just wondering if someone could see where I'm going wrong with my testing, as I have tried various things to get this to work, and seeing as it works with the test binder I assumed it would work with the Kafka binder.
Test Class for Kafka Binder Test:
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.integration.support.MessageBuilder;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.support.serializer.JsonSerializer;
import org.springframework.kafka.test.EmbeddedKafkaBroker;
import org.springframework.kafka.test.context.EmbeddedKafka;
import org.springframework.kafka.test.utils.KafkaTestUtils;
import org.springframework.test.context.TestPropertySource;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
@EmbeddedKafka(partitions = 1, brokerProperties = { "listeners=PLAINTEXT://localhost:9092", "port=9092"})
@SpringBootTest
@TestPropertySource("classpath:testStream.properties")
public class TestForStream {
public static CountDownLatch latch = new CountDownLatch(1);
@Autowired
public EmbeddedKafkaBroker broker;
@Test
void contextLoads() {
sleep(5); // Included this as it takes some time to init
sendMessage("test-in", MessageBuilder
.withPayload("Test Payload")
.setHeader("customHeader", "headerSpecificData")
.build());
}
public <T> ProducerFactory<String, T> createProducerFactory() {
Map<String, Object> configs = new HashMap<>(KafkaTestUtils.producerProps(broker));
configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
//Is JsonSerializer correct for a message?
return new DefaultKafkaProducerFactory<>(configs);
}
public <T> void sendMessage(String topic, T listObj) {
try {
KafkaTemplate<String, T> kafkaTemplate = new KafkaTemplate<>(createProducerFactory());
kafkaTemplate.send(new ProducerRecord<>(topic, listObj));
}catch (Exception e){
e.printStackTrace();
}
}
public void sleep(long time){
try {
latch.await(time, TimeUnit.SECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
Log of kafka binder test for message:
Converted Message: GenericMessage [payload={"payload":"Test Payload","headers":{"customHeader":"headerSpecificData","id":"d540a3ca-28db-b137-fc86-c25cc4b7eb8b","timestamp":1639399810476}}, headers={deliveryAttempt=1, kafka_timestampType=CREATE_TIME, kafka_receivedTopic=test-in, target-protocol=kafka, kafka_offset=0, scst_nativeHeadersPresent=true, kafka_consumer=org.apache.kafka.clients.consumer.KafkaConsumer@79580279, id=1cf2d382-df29-2672-4180-07da99e58244, kafka_receivedPartitionId=0, kafka_receivedTimestamp=1639399810526, contentType=application/json, __TypeId__=[B@24c79350, kafka_groupId=test-group-base, timestamp=1639399810651}]
So here the message has been included in the payload and the kafka headers included in the headers as expected.
I have tried spring.cloud.stream.kafka.binder.headers and headerMode to see if they would change anything but to no avail.
Edit:
Using springCloudVersion = 2020.0.3
I was using:
public <T> void sendMessage(String topic, T listObj) {
try {
KafkaTemplate<String, T> kafkaTemplate = new KafkaTemplate<>(createProducerFactory());
kafkaTemplate.send(new ProducerRecord<>(topic, listObj));
}catch (Exception e){
e.printStackTrace();
}
}
to send the message, which was putting the whole Message object in as the record value.
What I should've been using:
public void sendMessage(String topic, Message<?> listObj) {
try {
KafkaTemplate<String, Message<?>> kafkaTemplate = new KafkaTemplate<>(createProducerFactory());
kafkaTemplate.setDefaultTopic(topic);
kafkaTemplate.send(listObj);
}catch (Exception e){
e.printStackTrace();
}
}
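As far as I can tell, the difference is that kafkaTemplate.send(Message<?>) runs the Spring Message through the template's message converter, so the payload becomes the record value and the Message headers are mapped onto Kafka record headers, whereas send(new ProducerRecord<>(topic, message)) hands the whole Message object to the JsonSerializer, which is why it showed up JSON-serialized inside the payload. An alternative sketch that sets the custom header directly on a plain ProducerRecord (assuming a KafkaTemplate<String, String> with StringSerializer for values; not part of the original test):

// Sketch: put the custom header on the Kafka record itself.
// Needs java.nio.charset.StandardCharsets in addition to the existing imports.
ProducerRecord<String, String> record = new ProducerRecord<>("test-in", "Test Payload");
record.headers().add("customHeader", "headerSpecificData".getBytes(StandardCharsets.UTF_8));
kafkaTemplate.send(record);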
I am trying to download files from multiple SFTP servers and then handle those files. But I cannot get the information about the remote SFTP server (such as the IP address and remote directory) for the file the MessageHandler is handling. Instead, the payload only contains information about the downloaded files on the local machine. Here is the source code I use, based on this guide:
How to dynamically define file filter pattern for Spring Integration SFTP Inbound Adapter?
SFTIntegration.java
import com.jcraft.jsch.ChannelSftp.LsEntry;
import java.io.File;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.integration.channel.DirectChannel;
import org.springframework.integration.channel.NullChannel;
import org.springframework.integration.dsl.IntegrationFlow;
import org.springframework.integration.dsl.IntegrationFlows;
import org.springframework.integration.dsl.Pollers;
import org.springframework.integration.dsl.SourcePollingChannelAdapterSpec;
import org.springframework.integration.expression.FunctionExpression;
import org.springframework.integration.file.remote.aop.RotatingServerAdvice;
import org.springframework.integration.file.remote.session.DelegatingSessionFactory;
import org.springframework.integration.file.remote.session.SessionFactory;
import org.springframework.integration.scheduling.PollerMetadata;
import org.springframework.integration.sftp.dsl.Sftp;
import org.springframework.integration.sftp.dsl.SftpInboundChannelAdapterSpec;
import org.springframework.integration.sftp.session.DefaultSftpSessionFactory;
import org.springframework.messaging.MessageChannel;
import org.springframework.stereotype.Component;
/**
* flow.
*/
@Configuration
@Component
public class SFTIntegration {
public static final String TIMEZONE_UTC = "UTC";
public static final String TIMESTAMP_FORMAT_OF_FILES = "yyyyMMddHHmmssSSS";
public static final String TEMPORARY_FILE_SUFFIX = ".part";
public static final int POLLER_FIXED_PERIOD_DELAY = 60000;
public static final int MAX_MESSAGES_PER_POLL = 100;
private static final Logger LOG = LoggerFactory.getLogger(SFTIntegration.class);
private static final String CHANNEL_INTERMEDIATE_STAGE = "intermediateChannel";
@Autowired
private ImportHandler importHandler;
/** database access repository */
private final SFTPServerConfigRepo SFTPServerConfigRepo;
@Value("${sftp.local.directory.download:${java.io.tmpdir}/localDownload}")
private String localTemporaryPath;
public SFTIntegration(final SFTPServerConfigRepo SFTPServerConfigRepo) {
this.SFTPServerConfigRepo = SFTPServerConfigRepo;
}
/**
* The default poller: fixed delay of POLLER_FIXED_PERIOD_DELAY ms, up to MAX_MESSAGES_PER_POLL messages per poll, with RotatingServerAdvice and transaction.
*
* @return default poller.
*/
@Bean(name = PollerMetadata.DEFAULT_POLLER)
public PollerMetadata poller() {
return Pollers
.fixedDelay(POLLER_FIXED_PERIOD_DELAY)
.advice(advice())
.maxMessagesPerPoll(MAX_MESSAGES_PER_POLL)
.transactional()
.get();
}
/**
* The direct channel for the flow.
*
* @return MessageChannel
*/
@Bean
public MessageChannel stockIntermediateChannel() {
return new DirectChannel();
}
/**
* Get the files from a remote directory. Add a timestamp to the filename
* and write them to a local temporary folder.
*
* @return IntegrationFlow
*/
@Bean
public IntegrationFlow collectionInboundFlowFromSFTPServer() {
// Source definition
final SftpInboundChannelAdapterSpec sourceSpec = Sftp.inboundAdapter(delegatingSFtpSessionFactory())
.preserveTimestamp(true)
.patternFilter("*.*")
.deleteRemoteFiles(true)
.maxFetchSize(MAX_MESSAGES_PER_POLL)
.remoteDirectory("/")
.localDirectory(new File(localTemporaryPath))
.temporaryFileSuffix(TEMPORARY_FILE_SUFFIX)
.localFilenameExpression(new FunctionExpression<String>(s -> {
final int fileTypeSepPos = s.lastIndexOf('.');
return
DateTimeFormatter
.ofPattern(TIMESTAMP_FORMAT_OF_FILES)
.withZone(ZoneId.of(TIMEZONE_UTC))
.format(Instant.now())
+ "_"
+ s.substring(0, fileTypeSepPos)
+ s.substring(fileTypeSepPos);
}));
// Poller definition
final Consumer<SourcePollingChannelAdapterSpec> collectionInboundPoller = endpointConfigurer -> endpointConfigurer
.id("collectionInboundPoller")
.autoStartup(true)
.poller(poller());
return IntegrationFlows
.from(sourceSpec, collectionInboundPoller)
.transform(File.class, p -> {
// log step
LOG.info("flow=collectionInboundFlowFromSFTPServer, message=incoming file: " + p);
return p;
})
.channel(CHANNEL_INTERMEDIATE_STAGE)
.get();
}
@Bean
public IntegrationFlow collectionIntermediateStageChannel() {
return IntegrationFlows
.from(CHANNEL_INTERMEDIATE_STAGE)
.handle(importHandler)
.channel(new NullChannel())
.get();
}
public DefaultSftpSessionFactory createNewSftpSessionFactory(final SFTPServerConfig pc) {
final DefaultSftpSessionFactory factory = new DefaultSftpSessionFactory(
false);
factory.setHost(pc.getServerIp());
factory.setPort(pc.getPort());
factory.setUser(pc.getUsername());
factory.setPassword(pc.getPassword());
factory.setAllowUnknownKeys(true);
return factory;
}
@Bean
public DelegatingSessionFactory<LsEntry> delegatingSFtpSessionFactory() {
final List<SFTPServerConfig> partnerConnections = SFTPServerConfigRepo.findAll();
if (partnerConnections.isEmpty()) {
return null;
}
final Map<Object, SessionFactory<LsEntry>> factories = new LinkedHashMap<>(10);
for (SFTPServerConfig pc : partnerConnections) {
// create a factory for every key containing server type, url and port
if (factories.get(pc.getKey()) == null) {
factories.put(pc.getKey(), createNewSftpSessionFactory(pc));
}
}
// use the first SF as the default
return new DelegatingSessionFactory<>(factories, factories.values().iterator().next());
}
@Bean
public RotatingServerAdvice advice() {
final List<SFTPServerConfig> sftpConnections = SFTPServerConfigRepo.findAll();
final List<RotatingServerAdvice.KeyDirectory> keyDirectories = new ArrayList<>();
for (SFTPServerConfig pc : sftpConnections) {
keyDirectories
.add(new RotatingServerAdvice.KeyDirectory(pc.getKey(), pc.getServerPath()));
}
return new RotatingServerAdvice(delegatingSFtpSessionFactory(), keyDirectories, true);
}
}
ImportHandler.java
import org.springframework.messaging.Message;
import org.springframework.stereotype.Service;
@Service
public class ImportHandler {
public void handle(Message<?> message) {
System.out.println("Hello " + message);
System.out.println(message.getPayload());
System.out.println(message.getHeaders());
// How can I get the remote server information (IP address, remote directory) here, i.e. where the file came from?
}
}
If you have any ideas, please let me know. Thank you so much!
It's not currently supported; please open a new feature request.
I found a tutorial on spring-kafka where they created a producer and consumer. However, the program was run through a test case. As the test case ends, the consumer stops.
How do I ensure the consumer keeps running in the background so that I can send some test messages from my terminal command line?
SpringKafkaExampleApplication.java
package com.howtoprogram.kafka;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication
public class SpringKafkaExampleApplication {
public static void main(String[] args) {
SpringApplication.run(SpringKafkaExampleApplication.class,
args);
}
}
KafkaProducerConfig.java
package com.howtoprogram.kafka;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
@Configuration
@EnableKafka
public class KafkaProducerConfig {
@Bean
public ProducerFactory<String, String> producerFactory() {
return new DefaultKafkaProducerFactory<>(producerConfigs());
}
@Bean
public Map<String, Object> producerConfigs() {
Map<String, Object> props = new HashMap<>();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
"localhost:9092");
props.put(ProducerConfig.RETRIES_CONFIG, 0);
props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
StringSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
StringSerializer.class);
return props;
}
@Bean
public KafkaTemplate<String, String> kafkaTemplate() {
return new KafkaTemplate<String, String>(producerFactory());
}
}
KafkaConsumerConfig.java
package com.howtoprogram.kafka;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
@Configuration
@EnableKafka
public class KafkaConsumerConfig {
@Bean
KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
factory.setConsumerFactory(consumerFactory());
factory.setConcurrency(3);
factory.getContainerProperties().setPollTimeout(3000);
return factory;
}
@Bean
public ConsumerFactory<String, String> consumerFactory() {
return new DefaultKafkaConsumerFactory<>(consumerConfigs());
}
@Bean
public Map<String, Object> consumerConfigs() {
Map<String, Object> propsMap = new HashMap<>();
propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, "group1");
propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
return propsMap;
}
@Bean
public Listener listener() {
return new Listener();
}
}
Listener.java
package com.howtoprogram.kafka;
import java.util.concurrent.CountDownLatch;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
public class Listener {
public final CountDownLatch countDownLatch1 = new CountDownLatch(1);
@KafkaListener(id = "foo", topics = "topic1", group = "group1")
public void listen(ConsumerRecord<?, ?> record) {
System.out.println(record);
countDownLatch1.countDown();
}
}
SpringKafkaExampleApplicationTests.java
package com.howtoprogram.kafka;
import static org.assertj.core.api.Assertions.assertThat;
import java.util.concurrent.TimeUnit;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.test.rule.KafkaEmbedded;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
@RunWith(SpringRunner.class)
@SpringBootTest
public class SpringKafkaExampleApplicationTests {
@Autowired
private KafkaTemplate<String, String> kafkaTemplate;
@Autowired
private Listener listener;
@Test
public void contextLoads() throws InterruptedException {
ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send("topic1", "ABC");
future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
@Override
public void onSuccess(SendResult<String, String> result) {
System.out.println("success");
}
@Override
public void onFailure(Throwable ex) {
System.out.println("failed");
}
});
System.out.println(Thread.currentThread().getId());
assertThat(this.listener.countDownLatch1.await(60, TimeUnit.SECONDS)).isTrue();
}
}
Please help!
We run our Kafka application in a while(true) loop with an @Scheduled method on a Spring bean: https://docs.spring.io/spring/docs/current/spring-framework-reference/html/scheduling.html
This way you can also delay the consumption of messages when the rest of your application is initialising.
@Scheduled(initialDelay = 5000L, fixedDelay = 10000L)
public void process() {
while (keepRunning) {
try {
ConsumerRecords<String, String> records = consumer.poll(500);
// do processing here
} catch (Exception e) {
// log the error and keep polling
}
}
}
The fixedDelay is a bit strange. This value needs to be available, but is effectively ignored.
It might be tempting to start the consumer in a @PostConstruct method, but that way Spring keeps thinking the bean is still in its init phase (so don't do this, as Artem Bilan mentions below).
Add this code to your main after SpringApplication.run():
System.out.println("Hit 'Enter' to terminate");
System.in.read();
ctx.close();
System.exit(0);
And your program won't exit until you hit Enter in the console.
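For completeness, a sketch of the whole main method this would sit in (assuming ctx is the ConfigurableApplicationContext returned by SpringApplication.run; it additionally needs imports for java.io.IOException and org.springframework.context.ConfigurableApplicationContext):

public static void main(String[] args) throws IOException {
    ConfigurableApplicationContext ctx =
            SpringApplication.run(SpringKafkaExampleApplication.class, args);
    System.out.println("Hit 'Enter' to terminate");
    System.in.read(); // block the main thread; the @KafkaListener containers keep polling
    ctx.close();
    System.exit(0);
}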
I have a Java app with Redis, and it throws the exception below.
Here are the classes.
Main class:
public class App {
public static void main( String[] args ) {
ManipulatingData manData = new ManipulatingData();
manData.addData();
}
}
ApplicationConfig:
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package com.mycompany.springredisdatabook;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.jedis.JedisConnectionFactory;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.core.StringRedisTemplate;
import org.springframework.data.redis.serializer.StringRedisSerializer;
@Configuration
public class ApplicationConfig {
@Bean
public JedisConnectionFactory connectionFactory() {
JedisConnectionFactory connectionFactory = new JedisConnectionFactory();
connectionFactory.setHostName("localhost");
connectionFactory.setPort(6379);
return connectionFactory;
}
@Bean
public StringRedisTemplate redisTemplate() {
StringRedisTemplate redisTemplate = new StringRedisTemplate();
redisTemplate.setConnectionFactory(connectionFactory());
return redisTemplate;
}
@Bean
public RedisTemplate<String, Long> longTemplate() {
StringRedisSerializer STRING_SERIALIZER = new StringRedisSerializer();
RedisTemplate<String, Long> redisTemplate = new RedisTemplate<String, Long>();
redisTemplate.setConnectionFactory(connectionFactory());
redisTemplate.setKeySerializer(STRING_SERIALIZER);
redisTemplate.setValueSerializer(LongSerializer.INSTANCE);
return redisTemplate;
}
}
ManipulatingData:
package com.mycompany.springredisdatabook;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.core.StringRedisTemplate;
public class ManipulatingData {
public ManipulatingData() {}
@Autowired
StringRedisTemplate redisTemplate;
public void addData() {
double start = System.currentTimeMillis();
for (int i=1; i<=1000; i++) {
redisTemplate.opsForSet().add("k" + i, "v" + i);
}
double end = System.currentTimeMillis();
System.out.println("Add data time: " + (end-start));
}
public String getData (String key) {
return redisTemplate.opsForValue().get(key);
}
public void deleteData(String key) {
redisTemplate.opsForValue().getOperations().delete(key);
}
}
The Exception:
Exception in thread "main" java.lang.NullPointerException
at com.mycompany.springredisdatabook.ManipulatingData.addData(ManipulatingData.java:25)
at com.mycompany.springredisdatabook.App.main(App.java:11)
Java Result: 1
So what is it? I have no idea. I'm using Spring, by the way.
Your redisTemplate instance is null. @Autowired only works on beans managed by the Spring container, but App creates ManipulatingData with new, so nothing is ever injected. Check how ManipulatingData is wired up: register it as a Spring bean and bootstrap your configuration instead of instantiating it yourself.
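A minimal sketch of bootstrapping the existing Java config and letting the container do the injection; registering ManipulatingData directly with the context is my assumption here (it could equally be annotated with @Component and picked up by component scanning):

import org.springframework.context.annotation.AnnotationConfigApplicationContext;

public class App {
    public static void main(String[] args) {
        // Register the @Configuration class and ManipulatingData as beans.
        AnnotationConfigApplicationContext ctx =
                new AnnotationConfigApplicationContext(ApplicationConfig.class, ManipulatingData.class);
        ManipulatingData manData = ctx.getBean(ManipulatingData.class);
        manData.addData(); // redisTemplate has now been injected by the container
        ctx.close();
    }
}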