I have a very simple scenario: a listener that consumes from a queue:
import brave.Span
import brave.baggage.BaggageField
import brave.propagation.CurrentTraceContext
import brave.propagation.TraceContext
import brave.propagation.TraceContextOrSamplingFlags
import com.fasterxml.jackson.databind.ObjectMapper
import com.bla-bla.aggregatorzoo.api.shared.logging.updateTraceId
import com.bla-bla.content.api.provider.ContentType
import com.bla-bla.content.api.service.ESService
import com.bla-bla.shared.constants.CommonConstants
import org.apache.commons.lang3.StringUtils
import org.apache.logging.log4j.kotlin.Logging
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.cloud.aws.messaging.listener.SqsMessageDeletionPolicy
import org.springframework.cloud.aws.messaging.listener.annotation.SqsListener
import org.springframework.cloud.sleuth.BaggageInScope
import org.springframework.cloud.sleuth.Tracer
import org.springframework.messaging.handler.annotation.Headers
import org.springframework.stereotype.Component
@Component
class ESListener(
@Autowired private val esService: ESService,
@Autowired private var traceId: BaggageField,
@Autowired private var ctx: CurrentTraceContext,
@Autowired private var tracer: Tracer
) : Logging {
@Autowired
lateinit var objectMapper: ObjectMapper
@SqsListener(value = ["\${reindex.queue.show}"], deletionPolicy = SqsMessageDeletionPolicy.ALWAYS)
fun reIndexShows(message: String, @Headers headers: Map<String, String>) {
processMessage(message, headers, ContentType.SHOW)
}
@SqsListener(value = ["\${reindex.queue.episodes}"], deletionPolicy = SqsMessageDeletionPolicy.ALWAYS)
fun reIndexEpisodes(message: String, @Headers headers: Map<String, String>) {
processMessage(message, headers, ContentType.EPISODE)
}
private fun processMessage(message: String, headers: Map<String, String>, type: ContentType) {
try {
val currentSpan = tracer.currentSpan()
val secretBaggageField: BaggageInScope = tracer.getBaggage(CommonConstants.WS_HEADER_TRACED_ID)
val secretBaggage = if (secretBaggageField != null) secretBaggageField.get() else null
logger.info("Super secret baggage item for key [${CommonConstants.WS_HEADER_TRACED_ID}] is [$secretBaggage]" )
if (StringUtils.isNotEmpty(secretBaggage)) {
currentSpan?.event("secret_baggage_received")
currentSpan?.tag("baggage", secretBaggage)
}
val baggageKey = CommonConstants.WS_HEADER_TRACED_ID
val baggageValue = headers[CommonConstants.WS_HEADER_TRACED_ID]
val baggageField: BaggageInScope = tracer.createBaggage(baggageKey)
val context = currentSpan?.context()
baggageField.set(context, baggageValue)
currentSpan?.event("baggage_set")
currentSpan?.tag(baggageKey, baggageValue)
logger.info("Hello from service1. Calling service2")
logger.info("trace-id: ${traceId.value}")
logger.info("got message body: $message")
val traceContext: TraceContext = TraceContext.newBuilder()
.traceId(123456789)
.spanId(123456789)
.build()
val span = tracer.nextSpan(TraceContextOrSamplingFlags.create(traceContext))
.name("dummyContext").start()
// headers[CommonConstants.WS_HEADER_TRACED_ID]?.let {
// traceId.updateTraceId(span?.context())
// traceId.updateValue(it)
//
//// ThreadUtil.updateMDC(
//// hashMapOf(CommonConstants.WS_HEADER_TRACED_ID to it)
//// )
// }
logger.info("trace-id: ${traceId.value}")
logger.info("got message body: $message")
val model = objectMapper.readValue(message, SQSDetail::class.java)
model.payload?.let {
logger.info("Received new SQS message for $type and id: ${it.id}")
esService.doReindex(it.id, type)
}
} catch (e: Exception) {
throw RuntimeException("Cannot process message from SQS", e)
}
}
}
My BaggageField configuration:
@Bean
fun traceIdField(): BaggageField? {
return BaggageField.create(CommonConstants.WS_HEADER_TRACED_ID)
}
@Bean
fun mdcScopeDecorator(): CurrentTraceContext.ScopeDecorator? {
return MDCScopeDecorator.newBuilder()
.clear()
.add(
CorrelationScopeConfig.SingleCorrelationField.newBuilder(traceIdField())
.flushOnUpdate()
.build()
)
.build()
}
val currentSpan = tracer.currentSpan()
returns null in ESListener. No span -> no trace context -> no BaggageField to be propagated. I need to populate the baggage so I can pass it elsewhere as a bean.
I tried to create a new Span, start it, set a dummy context on brave.Tracer, and update the value of the BaggageField:
headers[CommonConstants.WS_HEADER_TRACED_ID]?.let {
traceId.updateTraceId(span?.context())
traceId.updateValue(it)
}
but it does not work. Is there any way to do it?
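What I expected to work, roughly, is opening a Brave scope around a hand-built context (an untested sketch, in Java for brevity; ctx, traceId and headers are the fields/parameters from the listener above, and the ids are arbitrary placeholders):
TraceContext context = TraceContext.newBuilder()
        .traceId(123456789L) // arbitrary placeholder ids
        .spanId(123456789L)
        .build();
// Open a scope so the MDC scope decorator can observe the baggage update.
try (CurrentTraceContext.Scope scope = ctx.newScope(context)) {
    String header = headers.get(CommonConstants.WS_HEADER_TRACED_ID);
    if (header != null) {
        traceId.updateValue(context, header); // flushOnUpdate() should propagate this to MDC
    }
    // ... process the message while the scope is open ...
}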
Actually I am able to do it using MDC map:
MDC.setContextMap(
mapOf(
CommonConstants.WS_HEADER_TRACED_ID to headers[CommonConstants.WS_HEADER_TRACED_ID]
))
But I would prefer to use the Sleuth API to pass my trace-id around as a @Bean.
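Something like the following is what I'm after with the Sleuth API (an untested sketch; tracer is the org.springframework.cloud.sleuth.Tracer injected above, and Span here is Sleuth's, not Brave's):
// Start a span so a trace context exists, then attach the header value as baggage.
Span span = tracer.nextSpan().name("sqs-reindex").start();
try (Tracer.SpanInScope ws = tracer.withSpan(span)) {
    BaggageInScope baggage = tracer.createBaggage(
            CommonConstants.WS_HEADER_TRACED_ID,
            headers.get(CommonConstants.WS_HEADER_TRACED_ID));
    // ... process the message; the baggage (and the MDC entry) is visible here ...
} finally {
    span.end();
}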
I'm struggling with Panache.withTransaction() in unit tests; whatever I do, I get a java.util.concurrent.TimeoutException.
Note: It works without transaction but I have to delete the inserts manually.
I want to chain insertKline and getOhlcList inside a transaction so I can benefit from the rollback:
@QuarkusTest
@Slf4j
class KlineServiceTest {
@Inject
KlineRepository klineRepository;
@Inject
CurrencyPairRepository currencyPairRepository;
@Inject
KlineService service;
@Test
@DisplayName("ohlc matches inserted kline")
void ohlcMatchesInsertedKline() {
// GIVEN
val volume = BigDecimal.valueOf(1d);
val closeTime = LocalDateTime.now().withSecond(0).withNano(0);
val currencyPair = new CurrencyPair("BTC", "USDT");
val currencyPairEntity = currencyPairRepository
.findOrCreate(currencyPair)
.await().indefinitely();
val kline = KlineEntity.builder()
.id(new KlineId(currencyPairEntity, closeTime))
.volume(volume)
.build();
val insertKline = Uni.createFrom().item(kline)
.call(klineRepository::persistAndFlush);
val getOhlcList = service.listOhlcByCurrencyPairAndTimeWindow(currencyPair, ofMinutes(5));
// WHEN
val ohlcList = Panache.withTransaction(
() -> Panache.currentTransaction()
.invoke(Transaction::markForRollback)
.replaceWith(insertKline)
.chain(() -> getOhlcList))
.await().indefinitely();
// THEN
assertThat(ohlcList).hasSize(1);
val ohlc = ohlcList.get(0);
assertThat(ohlc).extracting(Ohlc::getCloseTime, Ohlc::getVolume)
.containsExactly(closeTime, volume);
}
}
I get this exception:
java.lang.RuntimeException: java.util.concurrent.TimeoutException
at io.quarkus.hibernate.reactive.panache.common.runtime.AbstractJpaOperations.executeInVertxEventLoop(AbstractJpaOperations.java:52)
at io.smallrye.mutiny.operators.uni.UniRunSubscribeOn.subscribe(UniRunSubscribeOn.java:25)
at io.smallrye.mutiny.operators.AbstractUni.subscribe(AbstractUni.java:36)
And looking at AbstractJpaOperations, I can see:
public abstract class AbstractJpaOperations<PanacheQueryType> {
// FIXME: make it configurable?
static final long TIMEOUT_MS = 5000;
...
}
Also, I hit the same issue when I tried to use runOnContext():
@Test
@DisplayName("ohlc matches inserted kline")
void ohlcMatchesInsertedKline() throws ExecutionException, InterruptedException {
// GIVEN
val volume = BigDecimal.valueOf(1d);
val closeTime = LocalDateTime.now().withSecond(0).withNano(0);
val currencyPair = new CurrencyPair("BTC", "USDT");
val currencyPairEntity = currencyPairRepository
.findOrCreate(currencyPair)
.await().indefinitely();
val kline = KlineEntity.builder()
.id(new KlineId(currencyPairEntity, closeTime))
.volume(volume)
.build();
val insertKline = Uni.createFrom().item(kline)
.call(klineRepository::persist);
val getOhlcList = service.listOhlcByCurrencyPairAndTimeWindow(currencyPair, ofMinutes(5));
val insertAndGet = insertKline.chain(() -> getOhlcList);
// WHEN
val ohlcList = runAndRollback(insertAndGet)
.runSubscriptionOn(action -> vertx.getOrCreateContext()
.runOnContext(action))
.await().indefinitely();
// THEN
assertThat(ohlcList).hasSize(1);
val ohlc = ohlcList.get(0);
assertThat(ohlc).extracting(Ohlc::getCloseTime, Ohlc::getVolume)
.containsExactly(closeTime, volume);
}
private static Uni<List<Ohlc>> runAndRollback(Uni<List<Ohlc>> getOhlcList) {
return Panache.withTransaction(
() -> Panache.currentTransaction()
.invoke(Transaction::markForRollback)
.replaceWith(getOhlcList));
}
Annotation @TestReactiveTransaction
Quarkus provides the annotation @TestReactiveTransaction: it wraps the test method in a transaction and rolls the transaction back at the end.
I'm going to use quarkus-test-vertx for testing the reactive code:
<dependency>
<groupId>io.quarkus</groupId>
<artifactId>quarkus-test-vertx</artifactId>
<scope>test</scope>
</dependency>
Here's an example of a test class that can be used with the Hibernate Reactive quickstart with Panache (after adding the quarkus-test-vertx dependency):
The entity:
@Entity
public class Fruit extends PanacheEntity {
@Column(length = 40, unique = true)
public String name;
...
}
The test class:
package org.acme.hibernate.orm.panache;
import java.util.List;
import org.junit.jupiter.api.Test;
import io.quarkus.test.TestReactiveTransaction;
import io.quarkus.test.junit.QuarkusTest;
import io.quarkus.test.vertx.UniAsserter;
import io.smallrye.mutiny.Uni;
import org.assertj.core.api.Assertions;
@QuarkusTest
public class ExampleReactiveTest {
@Test
@TestReactiveTransaction
public void test(UniAsserter asserter) {
printThread( "Start" );
Uni<List<Fruit>> listAllUni = Fruit.<Fruit>listAll();
Fruit mandarino = new Fruit( "Mandarino" );
asserter.assertThat(
() -> Fruit
.persist( mandarino )
.replaceWith( listAllUni ),
result -> {
Assertions.assertThat( result ).hasSize( 4 );
Assertions.assertThat( result ).contains( mandarino );
printThread( "End" );
}
);
}
private void printThread(String step) {
System.out.println( step + " - " + Thread.currentThread().getId() + ":" + Thread.currentThread().getName() );
}
}
@TestReactiveTransaction runs the method in a transaction that is rolled back at the end of the test.
UniAsserter makes it possible to test reactive code without having to block anything.
Annotation @RunOnVertxContext
It's also possible to run a test on the Vert.x event loop using the annotation @RunOnVertxContext from the quarkus-test-vertx library.
This way you don't need to wrap the whole test in a transaction:
import io.quarkus.test.vertx.RunOnVertxContext;
#QuarkusTest
public class ExampleReactiveTest {
@Test
@RunOnVertxContext
public void test(UniAsserter asserter) {
printThread( "Start" );
Uni<List<Fruit>> listAllUni = Fruit.<Fruit>listAll();
Fruit mandarino = new Fruit( "Mandarino" );
asserter.assertThat(
() -> Panache.withTransaction( () -> Panache
// This test doesn't have @TestReactiveTransaction,
// so we need to roll back the transaction manually
.currentTransaction().invoke( Mutiny.Transaction::markForRollback )
.call( () -> Fruit.persist( mandarino ) )
.replaceWith( listAllUni )
),
result -> {
Assertions.assertThat( result ).hasSize( 4 );
Assertions.assertThat( result ).contains( mandarino );
printThread( "End" );
}
);
}
}
I finally managed to get it working; the trick was to defer the Uni creation, like in:
@QuarkusTest
public class ExamplePanacheTest {
@Test
public void test() {
final var mandarino = new Fruit("Mandarino");
final var insertAndGet = Uni.createFrom()
.deferred(() -> Fruit.persist(mandarino)
.replaceWith(Fruit.<Fruit>listAll()));
final var fruits = runAndRollback(insertAndGet)
.await().indefinitely();
assertThat(fruits).hasSize(4)
.contains(mandarino);
}
private static Uni<List<Fruit>> runAndRollback(Uni<List<Fruit>> insertAndGet) {
return Panache.withTransaction(
() -> Panache.currentTransaction()
.invoke(Transaction::markForRollback)
.replaceWith(insertAndGet));
}
}
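My understanding of why the deferral helps (take this with a grain of salt): the Panache operations need the reactive session that withTransaction() binds to the Vert.x context at subscription time, but a pipeline built eagerly has already invoked them without it. Deferring postpones those calls until the subscription happens inside the transaction:
Fruit mandarino = new Fruit("Mandarino");

// Eager: Fruit.persist(...) is invoked now, outside any transaction/session,
// so the operation has to hop to the event loop and eventually times out.
Uni<List<Fruit>> eager = Fruit.persist(mandarino).replaceWith(Fruit.<Fruit>listAll());

// Deferred: the supplier runs only when the Uni is subscribed, i.e. inside
// Panache.withTransaction(), where the session is available.
Uni<List<Fruit>> lazy = Uni.createFrom().deferred(
        () -> Fruit.persist(mandarino).replaceWith(Fruit.<Fruit>listAll()));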
Spring Boot version: 2.5.1,
Spring Cloud version: 2020.0.3
I need your help: I can't modify the request body in Spring Cloud Gateway. Here is my setup.
I have a MobileGatewayFilterFactory class that extends AbstractGatewayFilterFactory, whose apply method returns a custom filter: MobileGatewayFilter.
@Component
class MobileGatewayFilterFactory :
AbstractGatewayFilterFactory<MobileGatewayFilterFactory.Config>(Config::class.java),
Ordered {
override fun apply(config: Config): GatewayFilter {
logger.info { "Loading MobileGatewayFilter with config ${config.className}, ${config.execution}, ${config.custom}" }
return MobileGatewayFilter(config)
}
override fun getOrder(): Int {
return NettyWriteResponseFilter.WRITE_RESPONSE_FILTER_ORDER - 1
}
data class Config(
val className: String,
val execution: String,
val custom: String?
)
}
Inside the MobileGatewayFilter class I implement the business rule that determines which filter runs: PRE or POST. This happens in the filter method, which uses reflection to instantiate the configured decorator: a ServerHttpRequestDecorator when a request is being decorated, and a ServerHttpResponseDecorator otherwise.
class MobileGatewayFilter(private val config: MobileGatewayFilterFactory.Config) : GatewayFilter, Ordered {
override fun filter(exchange: ServerWebExchange, chain: GatewayFilterChain): Mono<Void> {
return when (config.execution) {
"PRE" -> chain.filter(exchange.mutate().request(decoratorRequest(exchange)).build())
"POST" -> chain.filter(exchange.mutate().response(decoratorResponse(exchange)).build())
else -> chain.filter(exchange)
}
}
override fun getOrder(): Int {
return NettyWriteResponseFilter.WRITE_RESPONSE_FILTER_ORDER - 1
}
private fun decoratorResponse(exchange: ServerWebExchange): ServerHttpResponse {
val aClass = Class.forName(config.className)
val obj = aClass.getConstructor(ServerHttpResponse::class.java, MobileGatewayFilterFactory.Config::class.java)
return obj.newInstance(exchange.response, config) as ServerHttpResponseDecorator
}
private fun decoratorRequest(exchange: ServerWebExchange): ServerHttpRequest {
val aClass = Class.forName(config.className)
val obj = aClass.getConstructor(ServerHttpRequest::class.java, MobileGatewayFilterFactory.Config::class.java)
return obj.newInstance(exchange.request, config) as ServerHttpRequestDecorator
}
}
Furthermore, I have a CustomerDataBodyDecorator that extends the ServerHttpRequestDecorator and overrides the getBody method. The getBody method is where the request body must be modified.
class CustomerDataBodyDecorator(
private val exchange: ServerHttpRequest,
private val config: MobileGatewayFilterFactory.Config
) : ServerHttpRequestDecorator(exchange) {
override fun getBody(): Flux<DataBuffer> {
logger.info { "getBody chamado ..." }
val body: Flux<DataBuffer> = exchange.body
var requestData = ""
body.subscribe {
val content = ByteArray(it.readableByteCount())
it.read(content)
DataBufferUtils.release(it)
requestData = String(content, Charset.forName("UTF-8"))
logger.info { "Request: $requestData" }
}
val factory = DefaultDataBufferFactory()
val buffer = factory.wrap(requestData.toByteArray())
return Flux.just(buffer)
}
}
However, the code above doesn't work: the return statement executes first, while requestData is still empty, and only later does the subscribe callback run. I know that in WebFlux subscribing is how a consumer signals the publisher that it wants the data.
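What I believe the reactive version should look like is something along these lines (an untested sketch in Java; modifyBody is a placeholder for the actual mutation, and if the length changes the Content-Length header has to be adjusted too):
// Transform the body as part of the reactive chain instead of subscribing to it.
@Override
public Flux<DataBuffer> getBody() {
    return DataBufferUtils.join(super.getBody()) // collect all buffers into one
            .map(buffer -> {
                byte[] content = new byte[buffer.readableByteCount()];
                buffer.read(content);
                DataBufferUtils.release(buffer);
                String modified = modifyBody(new String(content, StandardCharsets.UTF_8)); // placeholder
                return (DataBuffer) new DefaultDataBufferFactory().wrap(modified.getBytes(StandardCharsets.UTF_8));
            })
            .flux();
}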
application.yml
id: opengw-mobile-simulation
uri: ${custom.resources.opengw}
predicates:
- Path=/opengw/v1/mobile/simulation
filters:
- name: Mobile
args:
className: br.com.decorator.CustomerDataBodyDecorator
execution: PRE
custom: ${custom.resources.customer}
- RewritePath=/opengw/v1/(?<segment>/?.*), /$\{segment}
I read several topics here but I couldn't find a solution that worked.
How can I read and then modify the request body of the Flux object in this scenario?
I use Retrofit 2 in my project. I need to handle parsing errors and log the request URL when one occurs.
I want to do everything in one place, so I made a wrapper around parsing at the Retrofit level.
Factory:
import okhttp3.RequestBody
import okhttp3.ResponseBody
import retrofit2.Converter
import retrofit2.Retrofit
import retrofit2.converter.gson.GsonConverterFactory
import java.lang.reflect.Type
import javax.inject.Inject
class LogGsonConverterFactory @Inject constructor(private val factory: GsonConverterFactory) : Converter.Factory() {
override fun responseBodyConverter(
type: Type,
annotations: Array<out Annotation>,
retrofit: Retrofit
): Converter<ResponseBody, *>? {
val delegate: Converter<ResponseBody, *>? = factory.responseBodyConverter(type, annotations, retrofit)
return LogResponseBodyConverter(delegate ?: return null)
}
override fun requestBodyConverter(
type: Type, parameterAnnotations: Array<out Annotation>,
methodAnnotations: Array<out Annotation>,
retrofit: Retrofit
): Converter<*, RequestBody>? = factory.requestBodyConverter(type, parameterAnnotations, methodAnnotations, retrofit)
}
Converter:
import com.google.gson.JsonSyntaxException
import okhttp3.ResponseBody
import retrofit2.Converter
class LogResponseBodyConverter<T>(private val converter: Converter<ResponseBody, T>) : Converter<ResponseBody, T> {
override fun convert(value: ResponseBody): T? {
try {
return converter.convert(value)
} catch (parseException: JsonSyntaxException) {
// Here I want to get the URL and log the exception. But how do I get the URL?
throw parseException
}
}
}
Through reflection I can do it:
((Http1ExchangeCodec.ChunkedSource) ((Exchange.ResponseBodySource) ((RealBufferedSource) ((ForwardingSource) ((RealBufferedSource) ((ResponseBody.BomAwareReader) value.reader).source).source).delegate).source).delegate).url
Or with an interceptor that I can provide to the parser:
import android.util.LongSparseArray
import androidx.core.util.set
import okhttp3.Interceptor
import okhttp3.Response
import javax.inject.Inject
interface LastRequestSource {
fun getLastUrl(): String?
fun getLastCode(): Int?
fun clearCache()
}
private const val INIT_CACHE_CAPACITY = 5
@ApplicationScope
class LastRequestInterceptorImpl @Inject constructor() : Interceptor, LastRequestSource {
private val urlCache = LongSparseArray<String?>(INIT_CACHE_CAPACITY)
private val codeCache = LongSparseArray<Int?>(INIT_CACHE_CAPACITY)
override fun getLastUrl(): String? = urlCache[Thread.currentThread().id]
override fun getLastCode(): Int? = codeCache[Thread.currentThread().id]
override fun clearCache() {
val threadId = Thread.currentThread().id
urlCache.remove(threadId)
codeCache.remove(threadId)
}
override fun intercept(chain: Interceptor.Chain): Response =
chain.proceed(chain.request()).also {
synchronized(this) {
val threadId = Thread.currentThread().id
urlCache[threadId] = it.request.url.toString()
codeCache[threadId] = it.code
}
}
}
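For context, this is roughly how the pieces are wired together (standard Retrofit/OkHttp builder calls; the base URL is a placeholder):
// The interceptor records the last URL/code per thread; the converter factory
// wraps Gson so parse failures can be handled in one place.
LastRequestInterceptorImpl lastRequest = new LastRequestInterceptorImpl();
OkHttpClient client = new OkHttpClient.Builder()
        .addInterceptor(lastRequest)
        .build();
Retrofit retrofit = new Retrofit.Builder()
        .baseUrl("https://example.com/") // placeholder
        .client(client)
        .addConverterFactory(new LogGsonConverterFactory(GsonConverterFactory.create()))
        .build();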
Is there a more correct way to achieve the desired result?
ZoomX — Android Logger Interceptor is a great interceptor that can help you solve your problem.
Alternatively, you can walk the response-body chain with plain reflection:
Object delegateObj = readField(value, "delegate");
Object sourceObj1 = readField(delegateObj, "source");
Object sourceObj2 = readField(sourceObj1, "source");
Object sourceObj3 = readField(sourceObj2, "source");
Object sourceObj4 = readField(sourceObj3, "source");
Object sourceObj5 = readField(sourceObj4, "source");
HttpUrl url = (HttpUrl) readField(sourceObj5, "url");
Object readField(Object obj, String fieldName) throws NoSuchFieldException, IllegalAccessException {
Field f = obj.getClass().getDeclaredField(fieldName);
f.setAccessible(true);
return f.get(obj);
}
I need help writing a unit test for the class NotificationHandler. I made a NotificationHandlerTest (using JUnit 4), but I don't know how to determine what I should expect as a result versus what the actual result is. One or more simple tests (for some of its methods) would help me a lot!
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.integration.annotation.Poller;
import org.springframework.integration.annotation.ServiceActivator;
import org.springframework.messaging.Message;
import org.springframework.stereotype.Component;
import java.util.List;
import java.util.stream.Collectors;
@Component
class NotificationHandler {
private static Logger LOG = LoggerFactory.getLogger(NotificationHandler.class);
@Autowired
private NotificationRoutingRepository routingRepository;
@Autowired
private SendNotificationGateway gateway;
@Autowired
private AccessService accessService;
@Autowired
private EndpointService endpointService;
@ServiceActivator(inputChannel = Channels.ASSET_MODIFIED_CHANNEL, poller = @Poller("assetModifiedPoller"), outputChannel = Channels.NULL_CHANNEL)
public Message<?> handle(Message<EventMessage> message) {
final EventMessage event = message.getPayload();
LOG.debug("Generate notification messages: {}, {}", event.getOriginType(), event.getType());
routingRepository.findByOriginTypeAndEventType(event.getOriginType(), event.getType()).stream()
.filter(routing -> routing.getOriginId() == null || routing.getOriginId() == event.getOriginId())
.map(routing -> getNotificationMessages(event, routing))
.flatMap(List::stream)
.forEach(notificationMessage -> {
LOG.debug("Sending message {}", notificationMessage);
gateway.send(notificationMessage);
});
return message;
}
private List<NotificationMessage> getNotificationMessages(EventMessage event, NotificationRouting routing) {
switch (routing.getDestinationType()) {
case "USERS":
LOG.trace("Getting endpoints for users");
return getEndpointsByUsers(routing, event.getOrigin(), event.getOriginType()).stream()
.map(endpoint -> new NotificationMessage(event.getOriginType(), event.getOrigin(), endpoint))
.collect(Collectors.toList());
default:
LOG.trace("Getting default endpoints");
return getEndpoints(routing, event.getOrigin(), event.getOriginType()).stream()
.map(endpoint -> new NotificationMessage(event.getOriginType(), event.getOrigin(), endpoint))
.collect(Collectors.toList());
}
}
private List<Endpoint> getEndpoints(NotificationRouting routing, Object origin, String originType) {
final Asset asset = getAssetForObject(origin, originType);
final List<Long> userIds = accessService.list(asset).stream()
.map(ResourceAccess::getUser)
.map(AbstractEntity::getId)
.collect(Collectors.toList());
userIds.add(asset.getCreatorId());
LOG.trace("getEndpoints usersIds {}", userIds);
final List<Endpoint> endpoints = endpointService.getEndpoints(userIds, routing.getEndpointType());
LOG.trace("Endpoints {}", endpoints.stream().map(Endpoint::getId).collect(Collectors.toList()));
return endpoints;
}
private List<Endpoint> getEndpointsByUsers(NotificationRouting routing, Object origin, String originType) {
final Asset asset = getAssetForObject(origin, originType);
final List<Long> userIds = accessService.list(asset).stream()
.map(ResourceAccess::getUser)
.map(AbstractEntity::getId)
.filter(routing.getDestinations()::contains)
.collect(Collectors.toList());
routing.setDestinations(userIds);
routingRepository.save(routing);
LOG.trace("getEndpointsByUsers usersIds {}", userIds);
final List<Endpoint> endpoints = endpointService.getEndpoints(userIds, routing.getEndpointType());
LOG.trace("Endpoints {}", endpoints.stream().map(Endpoint::getId).collect(Collectors.toList()));
return endpoints;
}
private Asset getAssetForObject(Object origin, String originType) {
switch (originType) {
case EventMessage.POINT:
return (Point) origin;
case EventMessage.FEED:
return ((Feed) origin).getPoint();
case EventMessage.ACTUATOR:
return ((Actuator)origin).getPoint();
case EventMessage.DEVICE:
return (Device) origin;
case EventMessage.ALARM:
return ((Alarm) origin).getPoint();
default:
throw new IllegalArgumentException("Unsupported type: " + originType);
}
}
}
I'd say start with a simple test if you're not sure what to test: one that verifies you don't get an exception when you pass null as an argument.
E.g.
@Test
public void shouldNotThrowAnyExceptionIfArgumentIsNull() {
// given
NotificationHandler handler = new NotificationHandler();
// when
handler.handle(null);
// then no exception is thrown.
}
After that, you can analyze line by line what the method handle is doing and write tests that verify its behavior.
You can, for example, verify that gateway.send(...) was or wasn't executed, depending on what you passed in.
For dependency mocking and behavior verification, I'd recommend Mockito or a similar tool.
You can follow this tutorial to learn how to do it.
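For instance, here is a minimal sketch with Mockito and JUnit 4 (the stubbing and the message construction are assumptions based on the class shown in the question):
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.*;
import java.util.Collections;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
import org.springframework.messaging.support.MessageBuilder;
@RunWith(MockitoJUnitRunner.class)
public class NotificationHandlerTest {
    @Mock private NotificationRoutingRepository routingRepository;
    @Mock private SendNotificationGateway gateway;
    @Mock private AccessService accessService;
    @Mock private EndpointService endpointService;
    // Injects the mocks into NotificationHandler's @Autowired fields.
    @InjectMocks private NotificationHandler handler;
    @Test
    public void shouldNotSendWhenNoRoutingMatches() {
        EventMessage event = mock(EventMessage.class);
        when(routingRepository.findByOriginTypeAndEventType(any(), any()))
                .thenReturn(Collections.emptyList());
        handler.handle(MessageBuilder.withPayload(event).build());
        // No routing entries -> no notification should be sent.
        verify(gateway, never()).send(any());
    }
}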
How can I identify the topic name from a message in Kafka?
Map<String, Integer> topicMap = new HashMap<String, Integer>();
String[] topics = { "test", "test1", "test2" };
for (String t : topics) {
topicMap.put(t, new Integer(3));
}
SparkConf conf = new SparkConf().setAppName("KafkaReceiver")
.set("spark.streaming.receiver.writeAheadLog.enable", "false")
.setMaster("local[4]")
.set("spark.cassandra.connection.host", "localhost");
;
final JavaSparkContext sc = new JavaSparkContext(conf);
JavaStreamingContext jssc = new JavaStreamingContext(sc, new Duration(
1000));
/* Receive Kafka streaming inputs */
JavaPairReceiverInputDStream<String, String> messages = KafkaUtils
.createStream(jssc, "localhost:2181", "test-group",
topicMap);
JavaDStream<MessageAndMetadata> data =
messages.map(new Function<Tuple2<String, String>, MessageAndMetadata>()
{
public MessageAndMetadata call(Tuple2<String, String> message)
{
System.out.println("message ="+message._2);
return null;
}
}
);
I can fetch messages from the Kafka producer. But since the consumer is now consuming from three topics, I need to identify the topic name for each message.
As of Spark 1.5.0, the official documentation encourages the no-receiver/direct approach, which graduated from experimental status in 1.5.0.
This new direct API lets you easily obtain each message together with its metadata, among other good things.
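For example (a sketch against the Spark 1.5-era API; note that this overload requires you to supply the starting offsets yourself):
// The messageHandler receives MessageAndMetadata, which carries the topic name
// of every record. Imports: kafka.common.TopicAndPartition,
// kafka.message.MessageAndMetadata, kafka.serializer.StringDecoder,
// org.apache.spark.streaming.kafka.KafkaUtils.
Map<String, String> kafkaParams = new HashMap<String, String>();
kafkaParams.put("metadata.broker.list", "localhost:9092");
Map<TopicAndPartition, Long> fromOffsets = new HashMap<TopicAndPartition, Long>(); // fill with starting offsets
JavaInputDStream<String> tagged = KafkaUtils.createDirectStream(
        jssc, String.class, String.class, StringDecoder.class, StringDecoder.class,
        String.class, kafkaParams, fromOffsets,
        new Function<MessageAndMetadata<String, String>, String>() {
            public String call(MessageAndMetadata<String, String> mmd) {
                return mmd.topic() + ": " + mmd.message();
            }
        });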
With the receiver-based approach, unfortunately, this is not straightforward: KafkaReceiver and ReliableKafkaReceiver in Spark's source code only store MessageAndMetadata.key and the message itself.
There are two open tickets related to this issue in Spark's JIRA:
https://issues.apache.org/jira/browse/SPARK-3146
https://issues.apache.org/jira/browse/SPARK-4960
which have been open for a while.
A dirty copy/paste/modify of Spark's source code to solve your issue:
package org.apache.spark.streaming.kafka
import java.lang.{Integer => JInt}
import java.util.{Map => JMap, Properties}
import kafka.consumer.{KafkaStream, Consumer, ConsumerConfig, ConsumerConnector}
import kafka.serializer.{Decoder, StringDecoder}
import kafka.utils.VerifiableProperties
import org.apache.spark.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{JavaReceiverInputDStream, JavaStreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.streaming.util.WriteAheadLogUtils
import org.apache.spark.util.ThreadUtils
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.reflect._
object MoreKafkaUtils {
def createStream(
jssc: JavaStreamingContext,
zkQuorum: String,
groupId: String,
topics: JMap[String, JInt],
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): JavaReceiverInputDStream[(String, String, String)] = {
val kafkaParams = Map[String, String](
"zookeeper.connect" -> zkQuorum, "group.id" -> groupId,
"zookeeper.connection.timeout.ms" -> "10000")
val walEnabled = WriteAheadLogUtils.enableReceiverLog(jssc.ssc.conf)
new KafkaInputDStreamWithTopic[String, String, StringDecoder, StringDecoder](jssc.ssc, kafkaParams, topics.asScala.mapValues(_.intValue()), walEnabled, storageLevel)
}
}
private[streaming]
class KafkaInputDStreamWithTopic[
K: ClassTag,
V: ClassTag,
U <: Decoder[_] : ClassTag,
T <: Decoder[_] : ClassTag](
@transient ssc_ : StreamingContext,
kafkaParams: Map[String, String],
topics: Map[String, Int],
useReliableReceiver: Boolean,
storageLevel: StorageLevel
) extends ReceiverInputDStream[(K, V, String)](ssc_) with Logging {
def getReceiver(): Receiver[(K, V, String)] = {
if (!useReliableReceiver) {
new KafkaReceiverWithTopic[K, V, U, T](kafkaParams, topics, storageLevel)
} else {
new ReliableKafkaReceiverWithTopic[K, V, U, T](kafkaParams, topics, storageLevel)
}
}
}
private[streaming]
class KafkaReceiverWithTopic[
K: ClassTag,
V: ClassTag,
U <: Decoder[_] : ClassTag,
T <: Decoder[_] : ClassTag](
kafkaParams: Map[String, String],
topics: Map[String, Int],
storageLevel: StorageLevel
) extends Receiver[(K, V, String)](storageLevel) with Logging {
// Connection to Kafka
var consumerConnector: ConsumerConnector = null
def onStop() {
if (consumerConnector != null) {
consumerConnector.shutdown()
consumerConnector = null
}
}
def onStart() {
logInfo("Starting Kafka Consumer Stream with group: " + kafkaParams("group.id"))
// Kafka connection properties
val props = new Properties()
kafkaParams.foreach(param => props.put(param._1, param._2))
val zkConnect = kafkaParams("zookeeper.connect")
// Create the connection to the cluster
logInfo("Connecting to Zookeeper: " + zkConnect)
val consumerConfig = new ConsumerConfig(props)
consumerConnector = Consumer.create(consumerConfig)
logInfo("Connected to " + zkConnect)
val keyDecoder = classTag[U].runtimeClass.getConstructor(classOf[VerifiableProperties])
.newInstance(consumerConfig.props)
.asInstanceOf[Decoder[K]]
val valueDecoder = classTag[T].runtimeClass.getConstructor(classOf[VerifiableProperties])
.newInstance(consumerConfig.props)
.asInstanceOf[Decoder[V]]
// Create threads for each topic/message Stream we are listening
val topicMessageStreams = consumerConnector.createMessageStreams(
topics, keyDecoder, valueDecoder)
val executorPool =
ThreadUtils.newDaemonFixedThreadPool(topics.values.sum, "KafkaMessageHandler")
try {
// Start the messages handler for each partition
topicMessageStreams.values.foreach { streams =>
streams.foreach { stream => executorPool.submit(new MessageHandler(stream)) }
}
} finally {
executorPool.shutdown() // Just causes threads to terminate after work is done
}
}
// Handles Kafka messages
private class MessageHandler(stream: KafkaStream[K, V])
extends Runnable {
def run() {
logInfo("Starting MessageHandler.")
try {
val streamIterator = stream.iterator()
while (streamIterator.hasNext()) {
val msgAndMetadata = streamIterator.next()
store((msgAndMetadata.key, msgAndMetadata.message, msgAndMetadata.topic))
}
} catch {
case e: Throwable => reportError("Error handling message; exiting", e)
}
}
}
}
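Usage from Java then looks roughly like this (a sketch; per the store(...) call above, each tuple is (key, message, topic)):
// Each record now carries its topic name as the third tuple element.
JavaReceiverInputDStream<Tuple3<String, String, String>> messages =
        MoreKafkaUtils.createStream(jssc, "localhost:2181", "test-group",
                topicMap, StorageLevel.MEMORY_AND_DISK_SER_2());
messages.print();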