Storm KafkaSpout does not get key (only value) - java

Using org.apache.kafka.clients.producer.* I am trying to send Kafka messages with key:Long and value:String to a Storm KafkaSpout.
When I check the created record before sending, both key and value are set, but at the receiving KafkaSpout only the values arrive; the key is empty or a tab.
Has anybody seen this issue?
My producer configuration looks like this:
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
props.put(ProducerConfig.CLIENT_ID_CONFIG, "KafkaDataProducer");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
The record is created and sent by
final ProducerRecord<Long, String> record = new ProducerRecord<>(TOPIC, key, value);
RecordMetadata metadata = producer.send(record).get();
At the CLI I receive messages with kafka-console-consumer.sh --topic taxi --from-beginning --property print.key=true --property key.separator=" : " --bootstrap-server kafka1:9092
EDIT1:
The Storm Kafka spout configuration looks like this:
Properties props = new Properties();
props.put(ConsumerConfig.GROUP_ID_CONFIG, "1");
KafkaSpoutConfig spoutConfig = KafkaSpoutConfig.
builder("PLAINTEXT://kafka1:9092,PLAINTEXT://kafka2:9092,PLAINTEXT://kafka3:9092,", TOPIC)
.setProp(props)
.setFirstPollOffsetStrategy(FirstPollOffsetStrategy.EARLIEST)
.setProcessingGuarantee(KafkaSpoutConfig.ProcessingGuarantee.AT_MOST_ONCE)
.setOffsetCommitPeriodMs(100)
.build();
builder.setSpout("kafka_spout", new KafkaSpout(spoutConfig), 1);
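For reference, here is a sketch (not necessarily the fix) of a spout configuration that sets the key deserializer explicitly, assuming storm-kafka-client 2.x, where the builder defaults to String deserializers unless overridden; a Long key written by LongSerializer would need a matching LongDeserializer on the consumer side.
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.LongDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.storm.kafka.spout.FirstPollOffsetStrategy;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
public class SpoutConfigSketch {
    public static KafkaSpoutConfig<Long, String> build(String bootstrapServers, String topic) {
        Properties props = new Properties();
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "1");
        // Match the producer's LongSerializer / StringSerializer pair.
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        return new KafkaSpoutConfig.Builder<Long, String>(bootstrapServers, topic)
                .setProp(props)
                .setFirstPollOffsetStrategy(FirstPollOffsetStrategy.EARLIEST)
                .setProcessingGuarantee(KafkaSpoutConfig.ProcessingGuarantee.AT_MOST_ONCE)
                .build();
    }
}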
EDIT2:
The producer's data flow is as follows:
Select from database and add to an ArrayList:
private static ArrayList<Pair<Long, String>> selectData(String start, String end) {
    Statement statement = null;
    ResultSet resultSet;
    ArrayList<Pair<Long, String>> results = new ArrayList<>();
    try {
        if (conn != null) {
            statement = conn.createStatement();
        }
        resultSet = statement.executeQuery("SELECT data1, data2, data3, data4 FROM " +
                "tdrive " +
                "where date_time between '" +
                start +
                "' and '" +
                end +
                "' order by date_time asc;");
        while (resultSet.next()) {
            int id = resultSet.getInt("data1");
            String result = "";
            result += id;
            result += ";";
            result += resultSet.getTimestamp("data2");
            result += ";";
            result += resultSet.getDouble("data3");
            result += ";";
            result += resultSet.getDouble("data4");
            results.add(new Pair<>((long) id, result));
        }
    } catch (SQLException e) {
        e.printStackTrace();
    }
    return results;
}
After the data are stored in the ArrayList, they are all sent via Kafka:
for (Pair data : selectData(covertTime(selectStartTime), covertTime(selectEndTime))) {
String result = (String) data.getValue1();
produceMessage((Long) data.getValue0(), result);
}
producer.flush();
produceMessage looks like this:
private static void produceMessage(long key, String value) {
long time = System.currentTimeMillis();
try {
final ProducerRecord<Long, String> record = new ProducerRecord<>(TOPIC, key, value);
RecordMetadata metadata = producer.send(record).get();
long elapsedTime = System.currentTimeMillis() - time;
System.out.printf("sent record(key=%s value=%s) " +
"meta(partition=%d, offset=%d) time=%d\n", // key:id, value:"id;timestamp;long;lat"
record.key(), record.value(), metadata.partition(),
metadata.offset(), elapsedTime);
} catch (Exception e) {
System.err.println(e);
}
}
I hope that after EDIT2 there is not too much code.
Thank you in advance.

Related

NiFi custom processor returning multiple flow files in recursion

I have to do some custom preprocessing tasks on a huge data file (~200 GB).
Currently it works as follows:
select * from table
preprocess line by line
return a new single flow file
So I decided to convert the above approach to the following:
get the row count from the user (let's assume the user gives 1000)
execute the select * query as a ResultSet
read the results line by line (rs.next())
when the line count reaches 1000, return the flow file and continue with the remaining lines
So my approach is as below.
onTrigger
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    logger = getLogger();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    try {
        final Long rowLimit = context.getProperty(ProcessorUtils.MAX_RECORD).evaluateAttributeExpressions(flowFile).asLong();
        Connection conn = DriverManager.getConnection(
                // db connection properties
        );
        Statement stm = conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        ResultSet rs = stm.executeQuery("sql query");
        Map<String, String> flowFileAttributes = flowFile.getAttributes();
        process(rs, session, flowFileAttributes, rowLimit);
        FlowFile stateFlowFile = session.create();
        stateFlowFile = session.putAttribute(stateFlowFile, "processing_status", "end");
        stateFlowFile = session.putAttribute(stateFlowFile, "record_count", "0");
        session.transfer(stateFlowFile, GPReaderProcessorUtils.STATUS); // working line
    } catch (Exception e) {
        logger.warn(" conn " + e);
        session.transfer(flowFile, GPReaderProcessorUtils.FAILURE);
    }
}
Recursion Approach for termination based on line count
private void process(ResultSet rs, ProcessSession session, Map<String, String> flowFileAttributes, Long rowLimit) throws SQLException {
    try {
        logger.info("-> start processing with row limit = " + rowLimit);
        AtomicInteger mainI = new AtomicInteger(0);
        FlowFile flowFile =
                session.write(session.putAllAttributes(session.create(), flowFileAttributes), (OutputStream out) -> {
                    int i = 0;
                    Map<String, String> preProcessResults = null;
                    try {
                        String res = "";
                        while (i < rowLimit && rs.next()) {
                            // preprocessing happens here and fills preProcessResults
                            i++;
                            mainI.set(i);
                            out.write(preProcessResults.toString().getBytes(StandardCharsets.UTF_8));
                        }
                    } catch (SQLException e) {
                        e.printStackTrace();
                    }
                    logger.info("gp-log -> " + i);
                    out.close();
                });
        FlowFile stateFlowFile = session.create();
        stateFlowFile = session.putAttribute(stateFlowFile, "processing_status", "processing");
        stateFlowFile = session.putAttribute(stateFlowFile, "record_count", mainI.toString());
        session.transfer(stateFlowFile, GPReaderProcessorUtils.STATUS); // state relationship
        session.transfer(flowFile, GPReaderProcessorUtils.SUCCESS); // preprocessed flow file returned
        if (!rs.isAfterLast() && mainI.get() != 0 && !rs.isLast()) { // recursion call
            logger.info("gp-log -> recursion call");
            process(rs, session, flowFileAttributes, rowLimit);
        }
    } catch (Exception e) {
        logger.info(e.getMessage());
        logger.error(e.getMessage());
        session.transfer(session.putAllAttributes(session.create(), flowFileAttributes), GPReaderProcessorUtils.FAILURE);
    }
}
Expected behaviour -> while processing, completed chunks of rows are returned as flow files as soon as they are ready.
Current behaviour -> all flow files (generated in the recursion) are returned at once, only after everything has finished.
Please advise on this.
Your processor should extend AbstractSessionFactoryProcessor and create/commit a session for the incoming file and for each outgoing file.
Files go to the output queue as soon as their session has been committed.
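For illustration, here is a minimal sketch of that shape (the processor name, relationship and chunk loop are hypothetical placeholders, assuming the NiFi 1.x API): each outgoing flow file gets its own session that is committed immediately, so it reaches the output queue while later chunks are still being processed.
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.Set;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.AbstractSessionFactoryProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.ProcessSessionFactory;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;
public class ChunkedQueryProcessor extends AbstractSessionFactoryProcessor {
    static final Relationship REL_SUCCESS = new Relationship.Builder().name("success").build();
    @Override
    public Set<Relationship> getRelationships() {
        return Collections.singleton(REL_SUCCESS);
    }
    @Override
    public void onTrigger(ProcessContext context, ProcessSessionFactory sessionFactory) throws ProcessException {
        // One session owns the incoming flow file.
        ProcessSession inputSession = sessionFactory.createSession();
        FlowFile input = inputSession.get();
        if (input == null) {
            return;
        }
        try {
            // Hypothetical chunk loop; in the real processor this would iterate the ResultSet 1000 rows at a time.
            for (int chunk = 0; chunk < 3; chunk++) {
                // A fresh session per outgoing flow file...
                ProcessSession outSession = sessionFactory.createSession();
                FlowFile out = outSession.create();
                final String payload = "rows of chunk " + chunk; // stand-in for the preprocessed rows
                out = outSession.write(out, stream -> stream.write(payload.getBytes(StandardCharsets.UTF_8)));
                outSession.transfer(out, REL_SUCCESS);
                // ...committed right away, so downstream sees it immediately.
                outSession.commit();
            }
            inputSession.remove(input);
            inputSession.commit();
        } catch (Exception e) {
            inputSession.rollback();
            throw new ProcessException(e);
        }
    }
}
The key point is that each outgoing flow file lives in its own committed session, so its visibility does not depend on the session holding the incoming flow file.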

Get the unprocessed message count in Spring Kafka

We are migrating to Kafka, and I need to create a monitoring POC service that periodically checks the unprocessed message count in the Kafka queue and, based on the count, takes some action. But this service must not read or process the messages; designated consumers will do that. On every cron run this service just needs the count of unprocessed messages present in the queue.
So far I have put this together from multiple examples:
public void stats() throws ExecutionException, InterruptedException {
Map<String, Object> props = new HashMap<>();
// list of host:port pairs used for establishing the initial connections to the Kafka cluster
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
try (final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
consumer.subscribe(Arrays.asList(topicName));
while (true) {
Thread.sleep(1000);
ConsumerRecords<String, String> records = consumer.poll(1000);
if (!records.isEmpty()) {
System.out.println("records is not empty = " + records.count() + " " + records);
}
for (ConsumerRecord<String, String> record : records) {
System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
Set<TopicPartition> partitions = consumer.assignment();
//consumer.seekToBeginning(partitions);
Map<TopicPartition, Long> offsets = consumer.endOffsets(partitions);
for (TopicPartition partition : offsets.keySet()) {
OffsetAndMetadata commitOffset = consumer.committed(new TopicPartition(partition.topic(), partition.partition()));
Long lag = commitOffset == null ? offsets.get(partition) : offsets.get(partition) - commitOffset.offset();
System.out.println("lag = " + lag);
System.out.printf("partition %s is at %d\n", partition.topic(), offsets.get(partition));
}
}
}
}
}
The code works fine sometimes and sometimes gives wrong output. Please let me know what is wrong.
Don't subscribe to the topic; just create a consumer with the same group to get the endOffsets.
See this answer for an example.
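For example, a sketch of that idea (topic, group id and bootstrap servers are placeholders): the monitoring consumer never subscribes or polls, it only compares endOffsets with the group's committed offsets.
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
public class LagCheck {
    public static long totalLag(String bootstrapServers, String groupId, String topic) {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);          // same group as the real consumers
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);  // the monitor never commits
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Discover the partitions without subscribing or polling.
            List<TopicPartition> partitions = consumer.partitionsFor(topic).stream()
                    .map(p -> new TopicPartition(p.topic(), p.partition()))
                    .collect(Collectors.toList());
            Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
            long lag = 0;
            for (TopicPartition tp : partitions) {
                OffsetAndMetadata committed = consumer.committed(tp);
                long consumed = committed == null ? 0 : committed.offset();
                lag += endOffsets.get(tp) - consumed;
            }
            return lag;
        }
    }
}
Because this consumer never joins the group (no subscribe/poll), it does not trigger a rebalance of the real consumers.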

java.lang.ClassCastException: java.lang.String cannot be cast to org.apache.avro.generic.GenericRecord

I am trying to use KafkaConsumer to consume the records, but I get the exception below:
public void kafkaConsumerRun() {
    Properties prop = getProperties();
    try (KafkaConsumer<String, GenericRecord> kafkaConsumer = new KafkaConsumer<>(prop)) {
        kafkaConsumer.subscribe(Arrays.asList(topicName));
        Schema schema = Common.SCHEMA;
        log.info("SCHEMA = " + schema.toString());
        Map<String, Object> eventMap = new LinkedHashMap<>();
        while (true) {
            ConsumerRecords<String, GenericRecord> records = kafkaConsumer.poll(Duration.ofSeconds(pollInterval));
            JSONObject payloadJson = new JSONObject();
            if (records.count() > 0) {
                log.info(records.count() + " records in partition");
                for (ConsumerRecord<String, GenericRecord> rec : records) {
                    GenericRecord record = SpecificData.get().deepCopy(schema, rec.value());
                    log.info("Printing events before camel case conversion: " + record.toString());
                    for (Iterator<Schema.Field> fieldItr = record.getSchema().getFields().iterator(); fieldItr.hasNext();) {
                        String fieldName = fieldItr.next().name();
                        Object value = record.get(fieldName);
                        getFieldName(fieldName, eventMap, value);
                    }
                    log.info("JSON String: " + new JSONObject(eventMap).toString());
                    callOrchestrator(eventMap, payloadJson);
                }
            }
        }
    } catch (Exception ex) {
        log.error("Exception Caught: ", ex);
    }
}
The exception stack trace should point you at exactly which line is wrong, and it is telling you exactly what is happening: there is an instance of String that you are trying to cast to GenericRecord. Make sure that when you pass an object around as an Object, you cast it back to its proper type when you need it.
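As a small illustration of that advice (the helper name is hypothetical), guard the cast instead of assuming the deserializer handed you a GenericRecord:
import org.apache.avro.generic.GenericRecord;
import org.apache.kafka.clients.consumer.ConsumerRecord;
public class RecordTypeCheck {
    // Check the runtime type before casting; a plain String here usually means the
    // consumer's value deserializer is not an Avro deserializer.
    static GenericRecord asGenericRecord(ConsumerRecord<String, ?> rec) {
        Object value = rec.value();
        if (value instanceof GenericRecord) {
            return (GenericRecord) value;
        }
        throw new IllegalStateException("Expected GenericRecord but got "
                + (value == null ? "null" : value.getClass().getName()));
    }
}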

How to get all the queues and topics from Solace

I want to discover all the destinations in Solace (queues and topics).
I tried using MBeanServerConnection and querying for names (but I didn't find a proper way to use this), and JNDI lookups, Destination dest = (Destination) context.lookup(Dest_name), but I don't have the names of the queues/topics.
I am using the Solace JMS library.
I am searching for something like this (but for Solace, not ActiveMQ):
get all Queue from activeMQ
You will need to make use of SEMP over the management interface for this.
Sample commands:
curl -d '<rpc><show><queue><name>*</name></queue></show></rpc>' -u semp_username:semp_password http://your_management_ip:your_management_port/SEMP
curl -d '<rpc><show><topic-endpoint><name>*</name></topic-endpoint></show></rpc>' -u semp_username:semp_password http://your_management_ip:your_management_port/SEMP
Note that I'm using curl for simplicity, but any application can perform HTTP POSTs to execute these commands.
If you are using Java, you can refer to the SempHttpSetRequest sample found within the Solace API samples.
Documentation on SEMP can be found here.
However, the larger question here is why do you need to discover all destinations?
One of the features of the message broker is to decouple the publishers and consumers.
If you need to know if your persistent message is being published to a topic with no consumers, you can make use of the reject-msg-to-sender-on-no-subscription-match setting in the publishing application's client-profile.
This means that the publisher will obtain a negative acknowledgement in the event that it tries to publish a message on a topic that has no matching subscribers.
You can refer to "Handling Guaranteed Messages with No Matches" at https://docs.solace.com/Configuring-and-Managing/Configuring-Client-Profiles.htm for further details.
Here is some source code that might help. With the appliance configured correctly, SEMP is also available over JMS on topic "#SEMP/(router)/SHOW".
/**
* Return the SolTopicInfo for this topic (or all topics if 'topic' is null).
*
* @param session
* @param endpointName
* @return
*/
public static SolTopicInfo[] getTopicInfo(JCSMPSession session, String endpointName, String vpn,
String sempVersion) {
XMLMessageConsumer cons = null;
XMLMessageProducer prod = null;
Map<String, SolTopicInfo> tiMap = new HashMap<String, SolTopicInfo>();
try {
// Create a producer and a consumer, and connect to appliance.
prod = session.getMessageProducer(new PubCallback());
cons = session.getMessageConsumer(new SubCallback());
cons.start();
if (vpn == null) vpn = (String) session.getProperty(JCSMPProperties.VPN_NAME);
if (sempVersion == null) sempVersion = getSempVersion(session);
// Extract the router name.
final String SEMP_SHOW_TE_TOPICS = "<rpc semp-version=\""
+ sempVersion
+ "\"><show><topic-endpoint><name>"
+ endpointName
+ "</name><vpn-name>"+ vpn + "</vpn-name></topic-endpoint></show></rpc>";
RpcReply teTopics = sendRequest(session, SEMP_SHOW_TE_TOPICS);
for (TopicEndpoint2 te : teTopics.getRpc().getShow().getTopicEndpoint().getTopicEndpoints()
.getTopicEndpointArray()) {
SolTopicInfo ti = new SolTopicInfo();
ti.setBindCount(te.getInfo().getBindCount());
//qi.setDescription(qt.getInfo().getNetworkTopic());
ti.setEndpoint(te.getName());
ti.setMessageVPN(te.getInfo().getMessageVpn());
ti.setTopic(te.getInfo().getDestination());
ti.setDurable(te.getInfo().getDurable());
ti.setInSelPres(te.getInfo().getIngressSelectorPresent());
ti.setHwmMB(formatter.format(te.getInfo().getHighWaterMarkInMb()));
ti.setSpoolUsageMB(formatter.format(te.getInfo().getCurrentSpoolUsageInMb()));
ti.setMessagesSpooled(te.getInfo().getNumMessagesSpooled().longValue());
String status = te.getInfo().getIngressConfigStatus().substring(0, 1).toUpperCase();
status += " " + te.getInfo().getEgressConfigStatus().substring(0, 1).toUpperCase();
status += " " + te.getInfo().getIngressSelectorPresent().substring(0, 1).toUpperCase();
status += " " + te.getInfo().getType().substring(0, 1).toUpperCase();
ti.setStatus(status);
tiMap.put(ti.getEndpoint(), ti);
}
} catch (JCSMPException e) {
throw new RuntimeException(e.getMessage(), e);
} finally {
if (cons != null)
cons.close();
if (prod != null)
prod.close();
}
return tiMap.values().toArray(new SolTopicInfo[0]);
}
/**
* Return the SolQueueInfo for this queue (or all queues if 'queue' is null).
*
* @param session
* @param queue
* @param vpn (if null, use the session's vpn name)
* @param sempVersion (if null, use 'soltr/7_1_1')
* @return
*/
public static SolQueueInfo[] getQueueInfo(JCSMPSession session, String queue, String vpn,
String sempVersion) {
XMLMessageConsumer cons = null;
XMLMessageProducer prod = null;
Map<String, SolQueueInfo> qiMap = new HashMap<String, SolQueueInfo>();
try {
// Create a producer and a consumer, and connect to appliance.
prod = session.getMessageProducer(new PubCallback());
cons = session.getMessageConsumer(new SubCallback());
cons.start();
if (vpn == null) vpn = (String) session.getProperty(JCSMPProperties.VPN_NAME);
if (sempVersion == null) sempVersion = getSempVersion(session);
// Extract the router name.
final String SEMP_SHOW_QUEUE_SUBS = "<rpc semp-version=\""
+ sempVersion
+ "\"><show><queue><name>"
+ queue
+ "</name><vpn-name>"+ vpn + "</vpn-name><subscriptions/><count/><num-elements>200</num-elements></queue></show></rpc>";
RpcReply queueSubs = sendRequest(session, SEMP_SHOW_QUEUE_SUBS);
for (QueueType qt : queueSubs.getRpc().getShow().getQueue().getQueues().getQueueArray()) {
SolQueueInfo qi = new SolQueueInfo();
qi.setBindCount(qt.getInfo().getBindCount());
//qi.setDescription(qt.getInfo().getNetworkTopic());
qi.setName(qt.getName());
qi.setMessageVPN(qt.getInfo().getMessageVpn());
qi.setDurable(qt.getInfo().getDurable());
qi.setEgSelPres(qt.getInfo().getEgressSelectorPresent());
qi.setHwmMB(formatter.format(qt.getInfo().getHighWaterMarkInMb()));
qi.setMessagesSpooled(qt.getInfo().getNumMessagesSpooled().longValue());
qi.setSpoolUsageMB(formatter.format(qt.getInfo().getCurrentSpoolUsageInMb()));
String status = qt.getInfo().getIngressConfigStatus().substring(0, 1).toUpperCase();
status += " " + qt.getInfo().getEgressConfigStatus().substring(0, 1).toUpperCase();
status += " " + qt.getInfo().getAccessType().substring(0, 1).toUpperCase();
status += " " + qt.getInfo().getEgressSelectorPresent().substring(0, 1).toUpperCase();
status += " " + qt.getInfo().getType().substring(0, 1).toUpperCase();
status += qt.getInfo().getDurable() ? " D" : " N";
qi.setStatus(status);
for (Subscription sub : qt.getSubscriptions().getSubscriptionArray()) {
qi.addSubscription(sub.getTopic());
}
qiMap.put(qi.getName(), qi);
}
} catch (JCSMPException e) {
throw new RuntimeException(e.getMessage(), e);
} finally {
if (cons != null)
cons.close();
if (prod != null)
prod.close();
}
return qiMap.values().toArray(new SolQueueInfo[0]);
}
private static String getSempVersion(JCSMPSession session)
{
String retval = "soltr/7_1_1";
try {
String peerVersion = (String)session.getCapability(CapabilityType.PEER_SOFTWARE_VERSION);
if (peerVersion != null)
{
retval = "soltr/";
String[] version = peerVersion.split("\\.");
retval += version[0];
retval += "_" + version[1];
if (!version[2].equals("0")) retval += "_" + version[2];
}
} catch (Throwable e) {
System.err.println(e);
}
return retval;
}
private static RpcReply sendRequest(JCSMPSession session,
final String requestStr) {
try {
// Set up the requestor and request message.
String routerName = (String) session
.getCapability(CapabilityType.PEER_ROUTER_NAME);
final String SEMP_TOPIC_STRING = String.format("#SEMP/%s/SHOW",
routerName);
final Topic SEMP_TOPIC = JCSMPFactory.onlyInstance().createTopic(
SEMP_TOPIC_STRING);
Requestor requestor = session.createRequestor();
BytesXMLMessage requestMsg = JCSMPFactory.onlyInstance().createMessage(
BytesXMLMessage.class);
requestMsg.writeAttachment(requestStr.getBytes());
BytesXMLMessage replyMsg = requestor
.request(requestMsg, 5000, SEMP_TOPIC);
String replyStr = new String();
if (replyMsg.getAttachmentContentLength() > 0) {
byte[] bytes = new byte[replyMsg.getAttachmentContentLength()];
replyMsg.readAttachmentBytes(bytes);
replyStr = new String(bytes, "US-ASCII");
}
RpcReplyDocument doc = RpcReplyDocument.Factory.parse(replyStr);
RpcReply reply = doc.getRpcReply();
if (reply.isSetPermissionError()) {
throw new RuntimeException(
"Permission Error: Make sure SEMP over message bus SHOW commands are enabled for this VPN");
}
if( reply.isSetParseError() ) {
throw new RuntimeException( "SEMP Parse Error: " + reply.getParseError() );
}
if( reply.isSetLimitError() ) {
throw new RuntimeException( "SEMP Limit Error: " + reply.getLimitError() );
}
if( reply.isSetExecuteResult() && reply.getExecuteResult().isSetReason() ) { // axelp: encountered this error on invalid 'queue' name
throw new RuntimeException( "SEMP Execution Error: " + reply.getExecuteResult().getReason() );
}
return reply;
} catch (JCSMPException e) {
throw new RuntimeException(e.getMessage(), e);
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e.getMessage(), e);
} catch (XmlException e) {
throw new RuntimeException(e.getMessage(), e);
}
}
You can get message-VPN-specific queues and topics using the following SEMPv2 commands.
curl -s -X GET -u semp_user:semp_pass management_host:management_port/SEMP/v2/monitor/msgVpns/{vpn-name}/queues?select="queueName"
curl -s -X GET -u semp_user:semp_pass management_host:management_port/SEMP/v2/monitor/msgVpns/{vpn-name}/topicEndpoints?select="topicEndpointName"

How to remove deadlocks in OrientDB

I am getting too many deadlocks on OrientDB while using the Java API to query vertices. After a deadlock happens, the entire database becomes unresponsive and I have to kill the daemon and start it again. As an example, the error that I get from the deadlocks is:
com.orientechnologies.common.concur.OTimeoutException: Can not lock record for 2000 ms. seems record is deadlocked by other record
at com.orientechnologies.orient.core.storage.impl.local.OAbstractPaginatedStorage.acquireReadLock(OAbstractPaginatedStorage.java:1300)
at com.orientechnologies.orient.core.tx.OTransactionAbstract.lockRecord(OTransactionAbstract.java:120)
at com.orientechnologies.orient.core.id.ORecordId.lock(ORecordId.java:282)
at com.orientechnologies.orient.core.storage.impl.local.OAbstractPaginatedStorage.lockRecord(OAbstractPaginatedStorage.java:1776)
at com.orientechnologies.orient.core.storage.impl.local.OAbstractPaginatedStorage.readRecord(OAbstractPaginatedStorage.java:1416)
at com.orientechnologies.orient.core.storage.impl.local.OAbstractPaginatedStorage.readRecord(OAbstractPaginatedStorage.java:694)
at com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx.executeReadRecord(ODatabaseDocumentTx.java:1569)
at com.orientechnologies.orient.core.tx.OTransactionNoTx.loadRecord(OTransactionNoTx.java:80)
at com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx.load(ODatabaseDocumentTx.java:1434)
at com.orientechnologies.orient.server.network.protocol.binary.ONetworkProtocolBinary.readRecord(ONetworkProtocolBinary.java:1456)
at com.orientechnologies.orient.server.network.protocol.binary.ONetworkProtocolBinary.executeRequest(ONetworkProtocolBinary.java:346)
at com.orientechnologies.orient.server.network.protocol.binary.OBinaryNetworkProtocolAbstract.execute(OBinaryNetworkProtocolAbstract.java:216)
at com.orientechnologies.common.thread.OSoftThread.run(OSoftThread.java:65)
Following is the block that I use to query edges and create associations between vertices
public User generateFriend(String mobile, String userRID) {
StringBuilder errorMsg = new StringBuilder();
Iterable<OrientVertex> vertexes;
//Retrieve friends of the user
List<User> friendsList = new ArrayList<User>();
vertexes = db.queryVertices("select expand( unionAll(inE('E_Friend').out,outE('E_Friend').in) ) from " + userRID,errorMsg);
if (!errorMsg.toString().equals("")) {
throw new DbException("Db exception occured, " + errorMsg);
}
for (OrientVertex v : vertexes){
friendsList.add(vertexToUser(v));
}
//Create edges if between the user and other users with mobile number in the list and if the edge is not yet created
User u = findUserByMobileNo(friendsList,mobile);
if ( u == null){
u = findByMobileNo(mobile);
if (u != null) {
//create edge
db.executeQuery("select createEdge('E_Friend','" + userRID + "','" + u.getRid() + "') from " + userRID, new HashMap<String, Object>(), errorMsg);
if (!errorMsg.toString().equals("")) {
throw new DbException("Db exception occured, " + errorMsg);
}
}
}
return u;
}
public Iterable<OrientVertex> queryVertices(String query, StringBuilder errMsg){
logger.error("before getGraph, " + errMsg.toString());
graph = getGraph(errMsg);
if (!errMsg.toString().equals("")){
return null;
}
logger.error("after getGraph, " + errMsg.toString());
Iterable<OrientVertex> vertices = null;
try {
OSQLSynchQuery<OrientVertex> qr = new OSQLSynchQuery<OrientVertex>(query);
vertices = graph.command(qr).execute();
logger.error("after graph command execute, " + errMsg.toString());
}
catch (Exception ex){
errMsg.append(ex.getMessage());
logger.error("graph command exception, " + errMsg.toString());
}
logger.error("before return vertices, " + errMsg.toString());
return vertices;
}
public List<ODocument> executeQuery(String sql, HashMap<String,Object> params,StringBuilder errMsg) {
List<ODocument> result = new ArrayList<ODocument>();
try {
db = getDatabase(errMsg);
if (!errMsg.toString().equals("")){
return null;
}
OSQLSynchQuery<ODocument> query = new OSQLSynchQuery<ODocument>(sql);
if (params.isEmpty()) {
result = db.command(query).execute();
} else {
result = db.command(query).execute(params);
}
} catch (Exception e) {
errMsg.append(e.getMessage());
//TODO: Add threaded error log saving mechanism
}
return result;
}
Deadlocks can come from a missing index, so check all the classes involved in this operation and verify that indexes are present on the columns you query.
Refer to the linked question, in which I had the same deadlock problem.
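For example, a sketch of adding such an index with the OrientDB 2.x Java API (the class and property names V_User/mobile are assumptions; adjust them to whatever findByMobileNo actually queries):
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;
import com.orientechnologies.orient.core.sql.OCommandSQL;
public class CreateIndexes {
    public static void main(String[] args) {
        // Open the database and create a NOTUNIQUE index on the property used for lookups,
        // so queries on it no longer scan (and lock) unrelated records.
        ODatabaseDocumentTx db = new ODatabaseDocumentTx("remote:localhost/mydb").open("admin", "admin");
        try {
            db.command(new OCommandSQL("CREATE INDEX V_User.mobile ON V_User (mobile) NOTUNIQUE")).execute();
        } finally {
            db.close();
        }
    }
}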
