Get topic from kafka message - java

How can I identify the topic name from a message in Kafka?
String[] topics = { "test", "test1", "test2" };
for (String t : topics) {
topicMap.put(t, new Integer(3));
}
SparkConf conf = new SparkConf().setAppName("KafkaReceiver")
.set("spark.streaming.receiver.writeAheadLog.enable", "false")
.setMaster("local[4]")
.set("spark.cassandra.connection.host", "localhost");
;
final JavaSparkContext sc = new JavaSparkContext(conf);
JavaStreamingContext jssc = new JavaStreamingContext(sc, new Duration(
1000));
/* Receive Kafka streaming inputs */
JavaPairReceiverInputDStream<String, String> messages = KafkaUtils
.createStream(jssc, "localhost:2181", "test-group",
topicMap);
JavaDStream<MessageAndMetadata> data =
messages.map(new Function<Tuple2<String, String>, MessageAndMetadata>()
{
public MessageAndMetadata call(Tuple2<String, String> message)
{
System.out.println("message ="+message._2);
return null;
}
}
);
I can fetch messages from the Kafka producer. But since the consumer is now consuming from three topics, I need to identify the topic name for each message.

As of Spark 1.5.0, the official documentation encourages using the no-receiver/direct approach, which graduated from experimental in that release.
This new Direct API allows you to easily obtain a message together with its metadata, among other benefits.
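For reference, a minimal sketch of the direct approach in Java (assuming the spark-streaming-kafka 0.8 integration bundled with Spark 1.5; the createDirectStream overload that takes a messageHandler hands you the full MessageAndMetadata, including the topic). The broker address and starting offsets below are placeholders, and jssc and topics come from the question's code:
import java.util.HashMap;
import java.util.Map;
import kafka.common.TopicAndPartition;
import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.kafka.KafkaUtils;

Map<String, String> kafkaParams = new HashMap<>();
kafkaParams.put("metadata.broker.list", "localhost:9092"); // placeholder broker list

// Starting offsets per topic/partition; here every topic starts at partition 0, offset 0.
Map<TopicAndPartition, Long> fromOffsets = new HashMap<>();
for (String t : topics) {
    fromOffsets.put(new TopicAndPartition(t, 0), 0L);
}

// The message handler receives MessageAndMetadata, so the topic is available per record.
JavaInputDStream<String> directStream = KafkaUtils.createDirectStream(
    jssc,
    String.class, String.class,
    StringDecoder.class, StringDecoder.class,
    String.class,
    kafkaParams,
    fromOffsets,
    (Function<MessageAndMetadata<String, String>, String>)
        mmd -> mmd.topic() + " -> " + mmd.message());
directStream.print();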

Unfortunately, the receiver-based approach is not as straightforward: KafkaReceiver and ReliableKafkaReceiver in Spark's source code only store MessageAndMetadata.key and the message.
There are two open tickets related to this issue in Spark's JIRA:
https://issues.apache.org/jira/browse/SPARK-3146
https://issues.apache.org/jira/browse/SPARK-4960
which have been opened for a while.
A dirty copy/paste/modify of Spark's source code to solve your issue:
package org.apache.spark.streaming.kafka
import java.lang.{Integer => JInt}
import java.util.{Map => JMap, Properties}
import kafka.consumer.{KafkaStream, Consumer, ConsumerConfig, ConsumerConnector}
import kafka.serializer.{Decoder, StringDecoder}
import kafka.utils.VerifiableProperties
import org.apache.spark.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{JavaReceiverInputDStream, JavaStreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.streaming.util.WriteAheadLogUtils
import org.apache.spark.util.ThreadUtils
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.reflect._
object MoreKafkaUtils {
def createStream(
jssc: JavaStreamingContext,
zkQuorum: String,
groupId: String,
topics: JMap[String, JInt],
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): JavaReceiverInputDStream[(String, String, String)] = {
val kafkaParams = Map[String, String](
"zookeeper.connect" -> zkQuorum, "group.id" -> groupId,
"zookeeper.connection.timeout.ms" -> "10000")
val walEnabled = WriteAheadLogUtils.enableReceiverLog(jssc.ssc.conf)
new KafkaInputDStreamWithTopic[String, String, StringDecoder, StringDecoder](jssc.ssc, kafkaParams, topics.asScala.mapValues(_.intValue()), walEnabled, storageLevel)
}
}
private[streaming]
class KafkaInputDStreamWithTopic[
K: ClassTag,
V: ClassTag,
U <: Decoder[_] : ClassTag,
T <: Decoder[_] : ClassTag](
@transient ssc_ : StreamingContext,
kafkaParams: Map[String, String],
topics: Map[String, Int],
useReliableReceiver: Boolean,
storageLevel: StorageLevel
) extends ReceiverInputDStream[(K, V, String)](ssc_) with Logging {
def getReceiver(): Receiver[(K, V, String)] = {
if (!useReliableReceiver) {
new KafkaReceiverWithTopic[K, V, U, T](kafkaParams, topics, storageLevel)
} else {
new ReliableKafkaReceiverWithTopic[K, V, U, T](kafkaParams, topics, storageLevel)
}
}
}
private[streaming]
class KafkaReceiverWithTopic[
K: ClassTag,
V: ClassTag,
U <: Decoder[_] : ClassTag,
T <: Decoder[_] : ClassTag](
kafkaParams: Map[String, String],
topics: Map[String, Int],
storageLevel: StorageLevel
) extends Receiver[(K, V, String)](storageLevel) with Logging {
// Connection to Kafka
var consumerConnector: ConsumerConnector = null
def onStop() {
if (consumerConnector != null) {
consumerConnector.shutdown()
consumerConnector = null
}
}
def onStart() {
logInfo("Starting Kafka Consumer Stream with group: " + kafkaParams("group.id"))
// Kafka connection properties
val props = new Properties()
kafkaParams.foreach(param => props.put(param._1, param._2))
val zkConnect = kafkaParams("zookeeper.connect")
// Create the connection to the cluster
logInfo("Connecting to Zookeeper: " + zkConnect)
val consumerConfig = new ConsumerConfig(props)
consumerConnector = Consumer.create(consumerConfig)
logInfo("Connected to " + zkConnect)
val keyDecoder = classTag[U].runtimeClass.getConstructor(classOf[VerifiableProperties])
.newInstance(consumerConfig.props)
.asInstanceOf[Decoder[K]]
val valueDecoder = classTag[T].runtimeClass.getConstructor(classOf[VerifiableProperties])
.newInstance(consumerConfig.props)
.asInstanceOf[Decoder[V]]
// Create threads for each topic/message Stream we are listening
val topicMessageStreams = consumerConnector.createMessageStreams(
topics, keyDecoder, valueDecoder)
val executorPool =
ThreadUtils.newDaemonFixedThreadPool(topics.values.sum, "KafkaMessageHandler")
try {
// Start the messages handler for each partition
topicMessageStreams.values.foreach { streams =>
streams.foreach { stream => executorPool.submit(new MessageHandler(stream)) }
}
} finally {
executorPool.shutdown() // Just causes threads to terminate after work is done
}
}
// Handles Kafka messages
private class MessageHandler(stream: KafkaStream[K, V])
extends Runnable {
def run() {
logInfo("Starting MessageHandler.")
try {
val streamIterator = stream.iterator()
while (streamIterator.hasNext()) {
val msgAndMetadata = streamIterator.next()
store((msgAndMetadata.key, msgAndMetadata.message, msgAndMetadata.topic))
}
} catch {
case e: Throwable => reportError("Error handling message; exiting", e)
}
}
}
}
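With that class compiled into your project, the Java driver from the question only needs a small change: the stream elements become (key, message, topic) tuples, so the topic is the third element. A sketch (the storage level has to be passed explicitly when calling the Scala method from Java):
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.kafka.MoreKafkaUtils;
import scala.Tuple3;

JavaReceiverInputDStream<Tuple3<String, String, String>> messages =
    MoreKafkaUtils.createStream(jssc, "localhost:2181", "test-group", topicMap,
        StorageLevel.MEMORY_AND_DISK_SER_2());

// Each element now carries its topic alongside the key and message.
JavaDStream<String> withTopic = messages.map(tuple ->
    "topic = " + tuple._3() + ", message = " + tuple._2());
withTopic.print();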

Related

How to chain 2 Uni<?> in unit test using Panache.withTransaction() without getting a java.util.concurrent.TimeoutException

I'm struggling with Panache.withTransaction() in unit tests; whatever I do, I get a java.util.concurrent.TimeoutException.
Note: it works without a transaction, but then I have to delete the inserts manually.
I want to chain insertKline and getOhlcList inside a transaction so I can benefit from the rollback:
@QuarkusTest
@Slf4j
class KlineServiceTest {
@Inject
KlineRepository klineRepository;
@Inject
CurrencyPairRepository currencyPairRepository;
@Inject
KlineService service;
@Test
@DisplayName("ohlc matches inserted kline")
void ohlcMatchesInsertedKline() {
// GIVEN
val volume = BigDecimal.valueOf(1d);
val closeTime = LocalDateTime.now().withSecond(0).withNano(0);
val currencyPair = new CurrencyPair("BTC", "USDT");
val currencyPairEntity = currencyPairRepository
.findOrCreate(currencyPair)
.await().indefinitely();
val kline = KlineEntity.builder()
.id(new KlineId(currencyPairEntity, closeTime))
.volume(volume)
.build();
val insertKline = Uni.createFrom().item(kline)
.call(klineRepository::persistAndFlush);
val getOhlcList = service.listOhlcByCurrencyPairAndTimeWindow(currencyPair, ofMinutes(5));
// WHEN
val ohlcList = Panache.withTransaction(
() -> Panache.currentTransaction()
.invoke(Transaction::markForRollback)
.replaceWith(insertKline)
.chain(() -> getOhlcList))
.await().indefinitely();
// THEN
assertThat(ohlcList).hasSize(1);
val ohlc = ohlcList.get(0);
assertThat(ohlc).extracting(Ohlc::getCloseTime, Ohlc::getVolume)
.containsExactly(closeTime, volume);
}
}
I get this exception:
java.lang.RuntimeException: java.util.concurrent.TimeoutException
at io.quarkus.hibernate.reactive.panache.common.runtime.AbstractJpaOperations.executeInVertxEventLoop(AbstractJpaOperations.java:52)
at io.smallrye.mutiny.operators.uni.UniRunSubscribeOn.subscribe(UniRunSubscribeOn.java:25)
at io.smallrye.mutiny.operators.AbstractUni.subscribe(AbstractUni.java:36)
And looking at AbstractJpaOperations, I can see:
public abstract class AbstractJpaOperations<PanacheQueryType> {
// FIXME: make it configurable?
static final long TIMEOUT_MS = 5000;
...
}
Also, I get the same issue when I try to use runOnContext():
@Test
@DisplayName("ohlc matches inserted kline")
void ohlcMatchesInsertedKline() throws ExecutionException, InterruptedException {
// GIVEN
val volume = BigDecimal.valueOf(1d);
val closeTime = LocalDateTime.now().withSecond(0).withNano(0);
val currencyPair = new CurrencyPair("BTC", "USDT");
val currencyPairEntity = currencyPairRepository
.findOrCreate(currencyPair)
.await().indefinitely();
val kline = KlineEntity.builder()
.id(new KlineId(currencyPairEntity, closeTime))
.volume(volume)
.build();
val insertKline = Uni.createFrom().item(kline)
.call(klineRepository::persist);
val getOhlcList = service.listOhlcByCurrencyPairAndTimeWindow(currencyPair, ofMinutes(5));
val insertAndGet = insertKline.chain(() -> getOhlcList);
// WHEN
val ohlcList = runAndRollback(insertAndGet)
.runSubscriptionOn(action -> vertx.getOrCreateContext()
.runOnContext(action))
.await().indefinitely();
// THEN
assertThat(ohlcList).hasSize(1);
val ohlc = ohlcList.get(0);
assertThat(ohlc).extracting(Ohlc::getCloseTime, Ohlc::getVolume)
.containsExactly(closeTime, volume);
}
private static Uni<List<Ohlc>> runAndRollback(Uni<List<Ohlc>> getOhlcList) {
return Panache.withTransaction(
() -> Panache.currentTransaction()
.invoke(Transaction::markForRollback)
.replaceWith(getOhlcList));
}
Annotation @TestReactiveTransaction
Quarkus provides the annotation @TestReactiveTransaction: it wraps the test method in a transaction and rolls back the transaction at the end.
I'm going to use quarkus-test-vertx for testing the reactive code:
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-test-vertx</artifactId>
<scope>test</scope>
</dependency>
Here's an example of a test class that can be used with the Hibernate Reactive quickstart with Panache (after adding the quarkus-test-vertx dependency):
The entity:
@Entity
public class Fruit extends PanacheEntity {
@Column(length = 40, unique = true)
public String name;
...
}
The test class:
package org.acme.hibernate.orm.panache;
import java.util.List;
import org.junit.jupiter.api.Test;
import io.quarkus.test.TestReactiveTransaction;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.vertx.UniAsserter;
import io.smallrye.mutiny.Uni;
import org.assertj.core.api.Assertions;
@QuarkusTest
public class ExampleReactiveTest {
@Test
@TestReactiveTransaction
public void test(UniAsserter asserter) {
printThread( "Start" );
Uni<List<Fruit>> listAllUni = Fruit.<Fruit>listAll();
Fruit mandarino = new Fruit( "Mandarino" );
asserter.assertThat(
() -> Fruit
.persist( mandarino )
.replaceWith( listAllUni ),
result -> {
Assertions.assertThat( result ).hasSize( 4 );
Assertions.assertThat( result ).contains( mandarino );
printThread( "End" );
}
);
}
private void printThread(String step) {
System.out.println( step + " - " + Thread.currentThread().getId() + ":" + Thread.currentThread().getName() );
}
}
@TestReactiveTransaction runs the method in a transaction that is rolled back at the end of the test.
UniAsserter makes it possible to test reactive code without having to block anything.
Annotation @RunOnVertxContext
It's also possible to run a test on the Vert.x event loop using the annotation @RunOnVertxContext from the quarkus-test-vertx library.
This way you don't need to wrap the whole test in a transaction:
import io.quarkus.test.vertx.RunOnVertxContext;
@QuarkusTest
public class ExampleReactiveTest {
@Test
@RunOnVertxContext
public void test(UniAsserter asserter) {
printThread( "Start" );
Uni<List<Fruit>> listAllUni = Fruit.<Fruit>listAll();
Fruit mandarino = new Fruit( "Mandarino" );
asserter.assertThat(
() -> Panache.withTransaction( () -> Panache
// This test doesn't have @TestReactiveTransaction,
// so we need to roll back the transaction manually
.currentTransaction().invoke( Mutiny.Transaction::markForRollback )
.call( () -> Fruit.persist( mandarino ) )
.replaceWith( listAllUni )
),
result -> {
Assertions.assertThat( result ).hasSize( 4 );
Assertions.assertThat( result ).contains( mandarino );
printThread( "End" );
}
);
}
}
I finally managed to get it working; the trick was to defer the Uni creation, like in:
@QuarkusTest
public class ExamplePanacheTest {
@Test
public void test() {
final var mandarino = new Fruit("Mandarino");
final var insertAndGet = Uni.createFrom()
.deferred(() -> Fruit.persist(mandarino)
.replaceWith(Fruit.<Fruit>listAll()));
final var fruits = runAndRollback(insertAndGet)
.await().indefinitely();
assertThat(fruits).hasSize(4)
.contains(mandarino);
}
private static Uni<List<Fruit>> runAndRollback(Uni<List<Fruit>> insertAndGet) {
return Panache.withTransaction(
() -> Panache.currentTransaction()
.invoke(Transaction::markForRollback)
.replaceWith(insertAndGet));
}
}

How to use an Actor based source with an Akka Graph?

I'm attempting to send data via an actor to a runnable graph that contains a fan out.
I define the source as:
final Source<Integer, ActorRef> integerSource =
Source.actorRef(
elem -> {
if (elem == Done.done()) return Optional.of(CompletionStrategy.immediately());
else return Optional.empty();
},
elem -> Optional.empty(),
10,
OverflowStrategy.dropHead());
But I'm unsure how to get a handle on an ActorRef to send data via an actor to the source, so that the runnable graph will process messages asynchronously as they are received:
RunnableGraph<CompletionStage<Done>> graph = RunnableGraph.fromGraph(
GraphDSL.create(sink, (builder, out) -> {
SourceShape<Integer> sourceShape = builder.add(integerSource);
FlowShape<Integer, Integer> flow1Shape = builder.add(flow1);
FlowShape<Integer, Integer> flow2Shape = builder.add(flow1);
UniformFanOutShape<Integer, Integer> broadcast =
builder.add(Broadcast.create(2));
UniformFanInShape<Integer, Integer> merge =
builder.add(Merge.create(2));
builder.from(sourceShape)
.viaFanOut(broadcast)
.via(flow1Shape);
builder.from(broadcast).via(flow2Shape);
builder.from(flow1Shape)
.viaFanIn(merge)
.to(out);
builder.from(flow2Shape).viaFanIn(merge);
return ClosedShape.getInstance();
} )
);
Entire source:
import akka.Done;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.typed.ActorSystem;
import akka.actor.typed.javadsl.Behaviors;
import akka.stream.*;
import akka.stream.javadsl.*;
import lombok.extern.slf4j.Slf4j;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
@Slf4j
public class GraphActorSource {
private final static ActorSystem actorSystem = ActorSystem.create(Behaviors.empty(), "flowActorSystem");
public void runFlow() {
final Source<Integer, ActorRef> integerSource =
Source.actorRef(
elem -> {
if (elem == Done.done()) return Optional.of(CompletionStrategy.immediately());
else return Optional.empty();
},
elem -> Optional.empty(),
10,
OverflowStrategy.dropHead());
Flow<Integer, Integer, NotUsed> flow1 = Flow.of(Integer.class)
.map (x -> {
System.out.println("Flow 1 is processing " + x);
return (x * 2);
});
Sink<Integer, CompletionStage<Done>> sink = Sink.foreach(x -> {
System.out.println(x);
});
RunnableGraph<CompletionStage<Done>> graph = RunnableGraph.fromGraph(
GraphDSL.create(sink, (builder, out) -> {
SourceShape<Integer> sourceShape = builder.add(integerSource);
FlowShape<Integer, Integer> flow1Shape = builder.add(flow1);
FlowShape<Integer, Integer> flow2Shape = builder.add(flow1);
UniformFanOutShape<Integer, Integer> broadcast =
builder.add(Broadcast.create(2));
UniformFanInShape<Integer, Integer> merge =
builder.add(Merge.create(2));
builder.from(sourceShape)
.viaFanOut(broadcast)
.via(flow1Shape);
builder.from(broadcast).via(flow2Shape);
builder.from(flow1Shape)
.viaFanIn(merge)
.to(out);
builder.from(flow2Shape).viaFanIn(merge);
return ClosedShape.getInstance();
} )
);
graph.run(actorSystem);
}
public static void main(String args[]){
new GraphActorSource().runFlow();
}
}
How do I send data to the runnable graph via an actor? Something like:
integerSource.tell(1)
integerSource.tell(2)
integerSource.tell(3)
ActorRef.tell works. Construct the graph blueprint so the source ActorRef will be returned when the blueprint is materialized and run.
For just one materialized object, use that materialized type for the materialized type parameter of the Graph.
Here the materialized type parameter for integerSource is ActorRef.
The materialized type parameter for Graph is also ActorRef.
Only integerSource is passed to GraphDSL.create.
Source<Integer, ActorRef> integerSource = ...
Graph<ClosedShape, ActorRef> graph =
GraphDSL.create(integerSource, (builder, src) -> {
...
});
RunnableGraph<ActorRef> runnableGraph = RunnableGraph.fromGraph(graph);
ActorRef actorRef = runnableGraph.run(actorSystem);
actorRef.tell(1, ActorRef.noSender());
To access more than one materialized object, a tuple must be constructed to capture them. If two objects from the materialized graph are desired, say src and snk, then Pair<A,B> can capture both types.
Here both integerSource and sink are passed to GraphDSL.create.
The materialized ActorRef and CompletionStage are paired for the result of run with Pair::new.
The type Pair<ActorRef,CompletionStage<Done>> is the materialized type parameter of the Graph.
Source<Integer, ActorRef> integerSource = ...
Sink<Integer, CompletionStage<Done>> sink = ...
Graph<ClosedShape, Pair<ActorRef, CompletionStage<Done>>> graph =
GraphDSL.create(integerSource, sink, Pair::new, (builder, src, snk) -> {
....
});
RunnableGraph<Pair<ActorRef, CompletionStage<Done>>> runnableGraph =
RunnableGraph.fromGraph(graph);
Pair<ActorRef, CompletionStage<Done>> pair =
runnableGraph.run(actorSystem);
ActorRef actorRef = pair.first();
CompletionStage<Done> completionStage = pair.second();
actorRef.tell(1, ActorRef.noSender());
Full example:
(build.gradle)
apply plugin: "java"
apply plugin: "application"
mainClassName = "GraphActorSource"
repositories {
mavenCentral()
}
dependencies {
implementation "com.typesafe.akka:akka-actor-typed_2.13:2.6.19"
implementation "com.typesafe.akka:akka-stream-typed_2.13:2.6.19"
implementation 'org.slf4j:slf4j-jdk14:1.7.36'
}
compileJava {
options.compilerArgs << "-Xlint:unchecked"
}
(src/main/java/GraphActorSource.java)
import akka.Done;
import akka.NotUsed;
import akka.actor.ActorRef;
import akka.actor.Status.Success;
import akka.actor.typed.ActorSystem;
import akka.actor.typed.javadsl.Behaviors;
import akka.japi.Pair;
import akka.stream.*;
import akka.stream.javadsl.*;
import akka.util.Timeout;
import java.util.Optional;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.TimeUnit;
public class GraphActorSource {
private final static ActorSystem actorSystem =
ActorSystem.create(Behaviors.empty(), "flowActorSystem");
public void runFlow() {
// 1. Create graph (blueprint)
// 1a. Define source, flows, and sink
final Source<Integer, ActorRef> integerSource =
Source.actorRef
(
elem -> {
if (elem == Done.done()) return Optional.of(CompletionStrategy.immediately());
else return Optional.empty();
},
elem -> Optional.empty(),
10,
OverflowStrategy.dropHead()
);
Flow<Integer, Integer, NotUsed> flow1 = Flow.of(Integer.class)
.map (x -> {
System.out.println("Flow 1 is processing " + x);
return (100 + x);
});
Flow<Integer, Integer, NotUsed> flow2 = Flow.of(Integer.class)
.map (x -> {
System.out.println("Flow 2 is processing " + x);
return (200 + x);
});
Sink<Integer, CompletionStage<Done>> sink = Sink.foreach(x -> {
System.out.println("Sink received "+x);
});
// 1b. Connect nodes and flows into a graph.
// Inputs and output nodes (source, sink) will be produced at run start.
Graph<ClosedShape, Pair<ActorRef, CompletionStage<Done>>> graph =
GraphDSL.create(integerSource, sink, Pair::new, (builder, src, snk) -> {
UniformFanOutShape<Integer, Integer> broadcast =
builder.add(Broadcast.create(2));
FlowShape<Integer, Integer> flow1Shape = builder.add(flow1);
FlowShape<Integer, Integer> flow2Shape = builder.add(flow2);
UniformFanInShape<Integer, Integer> merge =
builder.add(Merge.create(2));
builder.from(src)
.viaFanOut(broadcast);
builder.from(broadcast.out(0))
.via(flow1Shape)
.toInlet(merge.in(0));
builder.from(broadcast.out(1))
.via(flow2Shape)
.toInlet(merge.in(1));
builder.from(merge)
.to(snk);
return ClosedShape.getInstance();
} );
RunnableGraph<Pair<ActorRef, CompletionStage<Done>>> runnableGraph =
RunnableGraph.fromGraph(graph);
// 2. Start run,
// which produces materialized source ActorRef and sink CompletionStage.
Pair<ActorRef, CompletionStage<Done>> pair =
runnableGraph.run(actorSystem);
ActorRef actorRef = pair.first();
CompletionStage<Done> completionStage = pair.second();
// On completion, terminates actor system (optional).
completionStage.thenRun(() -> {
System.out.println("Done, terminating.");
actorSystem.terminate();
});
// 3. Send messages to source actor
actorRef.tell(1, ActorRef.noSender());
actorRef.tell(2, ActorRef.noSender());
// The stream completes successfully with the following message
actorRef.tell(Done.done(), ActorRef.noSender());
}
public static void main(String args[]){
new GraphActorSource().runFlow();
}
}
Reference: Akka Documentation (version 2.6.19)
Streams / Operators / Source.actorRef
Streams / Streams Cookbook / Working with operators

Pass sleuth BaggageField when span and TraceContext is null

I have a very simple scenario. I have a listener that listens to a queue:
import brave.Span
import brave.baggage.BaggageField
import brave.propagation.CurrentTraceContext
import brave.propagation.TraceContext
import brave.propagation.TraceContextOrSamplingFlags
import com.fasterxml.jackson.databind.ObjectMapper
import com.bla-bla.aggregatorzoo.api.shared.logging.updateTraceId
import com.bla-bla.content.api.provider.ContentType
import com.bla-bla.content.api.service.ESService
import com.bla-bla.shared.constants.CommonConstants
import org.apache.commons.lang3.StringUtils
import org.apache.logging.log4j.kotlin.Logging
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.cloud.aws.messaging.listener.SqsMessageDeletionPolicy
import org.springframework.cloud.aws.messaging.listener.annotation.SqsListener
import org.springframework.cloud.sleuth.BaggageInScope
import org.springframework.cloud.sleuth.Tracer
import org.springframework.messaging.handler.annotation.Headers
import org.springframework.stereotype.Component
@Component
class ESListener(
@Autowired private val esService: ESService,
@Autowired private var traceId: BaggageField,
@Autowired private var ctx: CurrentTraceContext,
@Autowired private var tracer : Tracer
) : Logging {
@Autowired
lateinit var objectMapper: ObjectMapper
@SqsListener(value = ["\${reindex.queue.show}"], deletionPolicy = SqsMessageDeletionPolicy.ALWAYS)
fun reIndexShows(message: String, @Headers headers: Map<String, String>){
processMessage(message, headers, ContentType.SHOW)
}
@SqsListener(value = ["\${reindex.queue.episodes}"], deletionPolicy = SqsMessageDeletionPolicy.ALWAYS)
fun reIndexEpisodes(message: String, @Headers headers: Map<String, String>){
processMessage(message, headers, ContentType.EPISODE)
}
private fun processMessage(message: String, headers: Map<String, String>, type: ContentType) {
try {
val currentSpan = tracer.currentSpan()
val secretBaggageField: BaggageInScope = tracer.getBaggage(CommonConstants.WS_HEADER_TRACED_ID)
val secretBaggage = if (secretBaggageField != null) secretBaggageField.get() else null
logger.info("Super secret baggage item for key [${CommonConstants.WS_HEADER_TRACED_ID}] is [$secretBaggage]" )
if (StringUtils.isNotEmpty(secretBaggage)) {
currentSpan?.event("secret_baggage_received")
currentSpan?.tag("baggage", secretBaggage)
}
val baggageKey = CommonConstants.WS_HEADER_TRACED_ID
val baggageValue = headers[CommonConstants.WS_HEADER_TRACED_ID]
val baggageField: BaggageInScope = tracer.createBaggage(baggageKey)
val context =currentSpan?.context()
baggageField.set(context, baggageValue)
currentSpan?.event("baggage_set")
currentSpan?.tag(baggageKey, baggageValue)
logger.info("Hello from service1. Calling service2")
logger.info("trace-id: ${traceId.value}")
logger.info("got message body: $message")
val traceContext: TraceContext = TraceContext.newBuilder()
.traceId(123456789)
.spanId(123456789)
.build()
val span= tracer.nextSpan(TraceContextOrSamplingFlags.create(traceContext))
.name("dummyContext").start()
// headers[CommonConstants.WS_HEADER_TRACED_ID]?.let {
// traceId.updateTraceId(span?.context())
// traceId.updateValue(it)
//
//// ThreadUtil.updateMDC(
//// hashMapOf(CommonConstants.WS_HEADER_TRACED_ID to it)
//// )
// }
logger.info("trace-id: ${traceId.value}")
logger.info("got message body: $message")
val model=objectMapper.readValue(message, SQSDetail::class.java)
model.payload?.let {
logger.info("Received new SQS message for $type and id: ${it.id}")
esService.doReindex(it.id, type)
}
} catch (e: Exception) {
throw RuntimeException("Cannot process message from SQS", e)
}
}
}
My BaggageField configuration:
@Bean
fun traceIdField(): BaggageField? {
return BaggageField.create(CommonConstants.WS_HEADER_TRACED_ID)
}
@Bean
fun mdcScopeDecorator(): CurrentTraceContext.ScopeDecorator? {
return MDCScopeDecorator.newBuilder()
.clear()
.add(
CorrelationScopeConfig.SingleCorrelationField.newBuilder(traceIdField())
.flushOnUpdate()
.build()
)
.build()
}
val currentSpan = tracer.currentSpan()
returns null in ESListener. No span -> no TraceContext -> no BaggageField to be propagated. I need to populate the baggage so I can pass it elsewhere as a bean.
I tried to create a new Span, start it, set a dummy context on the brave Tracer, and update the value of the BaggageField:
headers[CommonConstants.WS_HEADER_TRACED_ID]?.let {
traceId.updateTraceId(span?.context())
traceId.updateValue(it)
}
but it does not work. Is there any way to do it?
Actually, I am able to do it using the MDC map:
MDC.setContextMap(
mapOf(
CommonConstants.WS_HEADER_TRACED_ID to headers[CommonConstants.WS_HEADER_TRACED_ID]
))
But I would prefer to use the Sleuth API to pass my trace-id around as a @Bean.
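For illustration, here is the span-plus-baggage wiring in question, reduced to plain Java against the Spring Cloud Sleuth 3.x Tracer API. This is only a sketch under that assumption: HEADER_TRACE_ID stands in for CommonConstants.WS_HEADER_TRACED_ID, and the SQS listener plumbing is omitted.
import java.util.Map;
import org.springframework.cloud.sleuth.BaggageInScope;
import org.springframework.cloud.sleuth.Span;
import org.springframework.cloud.sleuth.Tracer;

public class BaggageOnListenerThread {
    // Placeholder for CommonConstants.WS_HEADER_TRACED_ID
    private static final String HEADER_TRACE_ID = "x-trace-id";
    private final Tracer tracer;

    public BaggageOnListenerThread(Tracer tracer) {
        this.tracer = tracer;
    }

    void process(String message, Map<String, String> headers) {
        // No span exists on the SQS listener thread, so create one and put it in scope first.
        Span span = tracer.nextSpan().name("sqs-reindex").start();
        try (Tracer.SpanInScope ws = tracer.withSpan(span)) {
            String incoming = headers.get(HEADER_TRACE_ID);
            if (incoming != null) {
                // Baggage set while the span is in scope is bound to the current trace context,
                // so an MDC correlation field configured for this key should pick it up.
                try (BaggageInScope baggage = tracer.createBaggage(HEADER_TRACE_ID)) {
                    baggage.set(incoming);
                    // ... process the message while span and baggage are in scope
                }
            }
        } finally {
            span.end();
        }
    }
}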

Webflux: Logging Method Request and Response Using AOP

I am trying to log method input/output using the AOP approach in WebFlux. I was able to log the request using the following code, but I had trouble printing the response: both the request and the response log statements print as the request. How can I print the response? Also, is AOP a good solution for logging in a reactive application, or is a filter/controller advice better?
@Aspect
@Component
class LogAspect(private val log: KLogger) {
@Around("@annotation(Loggable)")
@Throws(Throwable::class)
fun logAround(joinPoint: ProceedingJoinPoint): Any? {
val start = System.currentTimeMillis()
return when (val result: Any = joinPoint.proceed()) {
is Mono<*> -> {
val traceId = AtomicReference("")
result
.doOnSuccess { o ->
if (traceId.get().isNotEmpty()) {
MDC.put("correlationId", traceId.get())
}
var response: Any = ""
if (Objects.nonNull(o)) {
response = o.toString()
}
log.info(
"Enter: {}.{}() with argument[s] = {}",
joinPoint.signature.declaringTypeName, joinPoint.signature.name,
joinPoint.args
)
log.info(
"Exit: {}.{}() had arguments = {}, with Response = {}, Execution time = {} ms",
joinPoint.signature.declaringTypeName, joinPoint.signature.name,
joinPoint.args[0],
response, System.currentTimeMillis() - start
)
}
.subscriberContext { context ->
val contextTmp: Context = context as Context
if (contextTmp.hasKey("correlationId")) {
traceId.set(contextTmp.get("correlationId"))
MDC.put("correlationId", contextTmp.get("correlationId"))
}
context
}
}
else ->
{
log.warn(
"Body type is not Mono for {}.{}()",
joinPoint.signature.declaringTypeName,
joinPoint.signature.name
)
result
}
}
}
}
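A minimal sketch of one way to split the two statements, assuming the same Mono-returning join point (plain Java and Reactor here; Loggable is the custom annotation from the question): log the arguments eagerly before proceeding, and log only the emitted value once the Mono completes.
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import reactor.core.publisher.Mono;

@Aspect
@Component
public class LoggingAspectSketch {
    private static final Logger log = LoggerFactory.getLogger(LoggingAspectSketch.class);

    @Around("@annotation(Loggable)")
    public Object logAround(ProceedingJoinPoint joinPoint) throws Throwable {
        long start = System.currentTimeMillis();
        // The "Enter" line is logged immediately, before the reactive pipeline is assembled.
        log.info("Enter: {}.{}() with argument[s] = {}",
                joinPoint.getSignature().getDeclaringTypeName(),
                joinPoint.getSignature().getName(),
                joinPoint.getArgs());
        Object result = joinPoint.proceed();
        if (result instanceof Mono) {
            // The "Exit" line is deferred until the Mono emits, so it logs the actual response.
            return ((Mono<?>) result).doOnSuccess(response ->
                    log.info("Exit: {}.{}() with response = {}, execution time = {} ms",
                            joinPoint.getSignature().getDeclaringTypeName(),
                            joinPoint.getSignature().getName(),
                            response,
                            System.currentTimeMillis() - start));
        }
        return result;
    }
}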

Clear kafka topics for unit testing

I need to perform unit testing on a Kafka application while avoiding third-party libraries.
My problem right now is that I would like to clear all the topics between tests but I don't know how.
This is my temporary solution: commit every message produced after each test and put all test consumers in the same consumer group.
override protected def afterEach():Unit={
val cleanerConsumer= newConsumer(Seq.empty)
val topics=cleanerConsumer.listTopics()
println("pulisco")
cleanerConsumer.subscribe(topics.keySet())
cleanerConsumer.poll(100)
cleanerConsumer.commitSync()
cleanerConsumer.close()
}
This doesn't work though and I don't know why.
For example, when I create a new consumer inside a test, messages contains the messages produced in the previous test.
val consumerProbe = newConsumer(SMSGatewayTopic)
val messages = consumerProbe.poll(1000)
How can I solve this?
You can also embed Kafka/ZooKeeper instances in your test sources, to have more control over such isolated services.
trait Kafka { self: ZooKeeper =>
Kafka.start()
}
object Kafka {
import org.apache.hadoop.fs.FileUtil
import kafka.server.KafkaServer
@volatile private var started = false
lazy val logDir = java.nio.file.Files.createTempDirectory("kafka-log").toFile
lazy val kafkaServer: KafkaServer = {
val config = com.typesafe.config.ConfigFactory.
load(this.getClass.getClassLoader)
val (host, port) = {
val (h, p) = config.getString("kafka.servers").span(_ != ':')
h -> p.drop(1).toInt
}
val serverConf = new kafka.server.KafkaConfig({
val props = new java.util.Properties()
props.put("port", port.toString)
props.put("broker.id", port.toString)
props.put("log.dir", logDir.getAbsolutePath)
props.put(
"zookeeper.connect",
s"localhost:${config getInt "test.zookeeper.port"}"
)
props
})
new KafkaServer(serverConf)
}
def start(): Unit = if (!started) {
try {
kafkaServer.startup()
started = true
} catch {
case err: Throwable =>
println(s"fails to start Kafka: ${err.getMessage}")
throw err
}
}
def stop(): Unit = try {
if (started) kafkaServer.shutdown()
} finally {
FileUtil.fullyDelete(logDir)
}
}
trait ZooKeeper {
ZooKeeper.start()
}
object ZooKeeper {
import java.nio.file.Files
import java.net.InetSocketAddress
import org.apache.hadoop.fs.FileUtil
import org.apache.zookeeper.server.ZooKeeperServer
import org.apache.zookeeper.server.ServerCnxnFactory
@volatile private var started = false
lazy val logDir = Files.createTempDirectory("zk-log").toFile
lazy val snapshotDir = Files.createTempDirectory("zk-snapshots").toFile
lazy val (zkServer, zkFactory) = {
val srv = new ZooKeeperServer(
snapshotDir, logDir, 500
)
val config = com.typesafe.config.ConfigFactory.
load(this.getClass.getClassLoader)
val port = config.getInt("test.zookeeper.port")
srv -> ServerCnxnFactory.createFactory(
new InetSocketAddress("localhost", port), 1024
)
}
def start(): Unit = if (!zkServer.isRunning) {
try {
zkFactory.startup(zkServer)
started = true
while (!zkServer.isRunning) {
Thread.sleep(500)
}
} catch {
case err: Throwable =>
println(s"fails to start ZooKeeper: ${err.getMessage}")
throw err
}
}
def stop(): Unit = try {
if (started) zkFactory.shutdown()
} finally {
try { FileUtil.fullyDelete(logDir) } catch { case _: Throwable => () }
FileUtil.fullyDelete(snapshotDir)
}
}
The test classes can extend Kafka with ZooKeeper to ensure these services are available.
If the test JVM is not forked, Tests.Cleanup in SBT's testOptions in Test setting can be used to stop the embedded services after testing.
I would suggest you simply recreate all topics before your tests. For example, this is the way Kafka's own tests create/delete topics:
Kafka repository on GitHub
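A minimal sketch of that idea using only the plain kafka-clients AdminClient (no third-party test libraries; the topic names and bootstrap address are placeholders, and topic deletion must be enabled on the broker with delete.topic.enable=true):
import java.util.Arrays;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public class TopicReset {
    // Placeholder topic list; use the topics your tests produce to.
    private static final List<String> TOPICS = Arrays.asList("SMSGatewayTopic");

    public static void recreateTopics(String bootstrapServers) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        try (AdminClient admin = AdminClient.create(props)) {
            // Drop whatever the previous test left behind. Deletion is asynchronous on the
            // broker side, so the subsequent create may need a short wait or retry.
            admin.deleteTopics(TOPICS).all().get();
            // Recreate the topics empty: 1 partition, replication factor 1.
            List<NewTopic> fresh = TOPICS.stream()
                    .map(name -> new NewTopic(name, 1, (short) 1))
                    .collect(Collectors.toList());
            admin.createTopics(fresh).all().get();
        }
    }
}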
