Apache Ignite: ScanQuery giving exception - Java

I am a newbie to Apache Ignite. On my Windows box, I started Apache Ignite by double-clicking the ignite.bat file, and I am trying to run the following code.
Cache populating client code
package ignite;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.CacheConfiguration;
public class SpringIgniteClient {
public static void main(String[] args) throws Exception {
System.out.println("Run example!!");
Ignition.setClientMode(true);
// Start Ignite in client mode.
Ignite ignite = Ignition.start();
CacheConfiguration<Integer, Person> cfg = new CacheConfiguration<Integer, Person>("myStreamCache");
cfg.setIndexedTypes(Integer.class, Person.class);
IgniteCache<Integer, Person> cache = ignite.getOrCreateCache(cfg);
//for(int i = 1; i < 1000; i++){ cache.put(i, Integer.toString(i)+"sushil---"); }
for (int i = 0; i < 100; i++) {
Person person = new Person(i, i, "name_" + i, (i * 100) % 3000);
if(person.getSal() < 1000){
System.out.println(person);
}
cache.put(i, person);
}
}
}
Cache ScanQuery client code
package ignite;
import javax.cache.Cache.Entry;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.QueryCursor;
import org.apache.ignite.cache.query.ScanQuery;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.lang.IgniteBiPredicate;
public class SpringIgniteReceiverClient {
public static void main(String[] args) {
System.out.println("Run Receiver example!!");
Ignition.setClientMode(true);
// Start Ignite in client mode.
Ignite ignite = Ignition.start();
CacheConfiguration<Integer, Person> cfg = new CacheConfiguration<Integer, Person>("myStreamCache");
cfg.setIndexedTypes(Integer.class, Person.class);
IgniteCache<Integer, Person> cache = ignite.getOrCreateCache(cfg);
IgniteBiPredicate<Integer, Person> filter = new MyIgniteBiPredicate();
ScanQuery<Integer, Person> query = new ScanQuery<Integer, Person>(filter);
//query.setLocal(true);
QueryCursor<Entry<Integer, Person>> cursor= cache.query(query);
System.out.println("ALL DATA ->"+cursor.getAll());
}
}
and the IgniteBiPredicate implementation is:
package ignite;
import java.io.Serializable;
import org.apache.ignite.lang.IgniteBiPredicate;
public class MyIgniteBiPredicate implements IgniteBiPredicate<Integer, Person>, Serializable{
/**
*
*/
private static final long serialVersionUID = 1L;
@Override
public boolean apply(Integer key, Person p) {
return p.getSal() < 1000;
}
}
Serializable Java POJO
package ignite;
import java.io.Serializable;
public class Person implements Serializable{
/**
*
*/
private static final long serialVersionUID = 1L;
private int age;
private int empId;
private String name;
private int sal;
public Person(int age, int empId, String name, int sal) {
super();
this.age = age;
this.empId = empId;
this.name = name;
this.sal = sal;
}
public int getAge() {
return age;
}
public void setAge(int age) {
this.age = age;
}
public int getEmpId() {
return empId;
}
public void setEmpId(int empId) {
this.empId = empId;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getSal() {
return sal;
}
public void setSal(int sal) {
this.sal = sal;
}
@Override
public String toString() {
return "Person [age=" + age + ", empId=" + empId + ", name=" + name + ", sal=" + sal + "]";
}
}
While debugging, I found that the following method in IgniteCacheProxy is called and returns null.
/**
* @param loc Enforce local.
* @return Local node cluster group.
*/
private ClusterGroup projection(boolean loc) {
if (loc || ctx.isLocal() || isReplicatedDataNode())
return ctx.kernalContext().grid().cluster().forLocal();
if (ctx.isReplicated())
return ctx.kernalContext().grid().cluster().forDataNodes(ctx.name()).forRandom();
return null;
}
And the ScanQuery program gives the following error:
Run Receiver example!!
[21:46:52] (wrn) Default Spring XML file not found (is IGNITE_HOME set?): config/default-config.xml
Mar 05, 2017 9:46:52 PM java.util.logging.LogManager$RootLogger log
SEVERE: Failed to resolve default logging config file: config/java.util.logging.properties
[21:46:53] __________ ________________
[21:46:53] / _/ ___/ |/ / _/_ __/ __/
[21:46:53] _/ // (7 7 // / / / / _/
[21:46:53] /___/\___/_/|_/___/ /_/ /___/
[21:46:53]
[21:46:53] ver. 1.8.0#20161205-sha1:9ca40dbe
[21:46:53] 2016 Copyright(C) Apache Software Foundation
[21:46:53]
[21:46:53] Ignite documentation: http://ignite.apache.org
[21:46:53]
[21:46:53] Quiet mode.
[21:46:53] ^-- To see **FULL** console log here add -DIGNITE_QUIET=false or "-v" to ignite.{sh|bat}
[21:46:53]
[21:46:53] OS: Windows 7 6.1 amd64
[21:46:53] VM information: Java(TM) SE Runtime Environment 1.8.0_65-b17 Oracle Corporation Java HotSpot(TM) 64-Bit Server VM 25.65-b01
[21:46:53] Initial heap size is 124MB (should be no less than 512MB, use -Xms512m -Xmx512m).
[21:46:53] Configured plugins:
[21:46:53] ^-- None
[21:46:53]
[21:46:54] Security status [authentication=off, tls/ssl=off]
[21:46:58] To start Console Management & Monitoring run ignitevisorcmd.{sh|bat}
[21:46:58]
[21:46:58] Ignite node started OK (id=ae95174d)
[21:46:58] Topology snapshot [ver=3, servers=1, clients=2, CPUs=4, heap=4.4GB]
Exception in thread "main" javax.cache.CacheException: class org.apache.ignite.IgniteCheckedException: Query execution failed: GridCacheQueryBean [qry=GridCacheQueryAdapter [type=SCAN, clsName=null, clause=null, filter=ignite.MyIgniteBiPredicate@294a6b8e, transform=null, part=null, incMeta=false, metrics=GridCacheQueryMetricsAdapter [minTime=9223372036854775807, maxTime=0, sumTime=0, avgTime=0.0, execs=0, completed=0, fails=0], pageSize=1024, timeout=0, keepAll=true, incBackups=false, dedup=false, prj=null, keepBinary=false, subjId=ae95174d-ff1c-44b2-a7dc-24fab738729e, taskHash=0], rdc=null, trans=null]
at org.apache.ignite.internal.processors.cache.GridCacheUtils.convertToCacheException(GridCacheUtils.java:1440)
at org.apache.ignite.internal.processors.cache.query.GridCacheQueryFutureAdapter.next(GridCacheQueryFutureAdapter.java:174)
at org.apache.ignite.internal.processors.cache.query.GridCacheDistributedQueryManager$5.onHasNext(GridCacheDistributedQueryManager.java:634)
at org.apache.ignite.internal.util.GridCloseableIteratorAdapter.hasNextX(GridCloseableIteratorAdapter.java:53)
at org.apache.ignite.internal.processors.cache.IgniteCacheProxy$2$1.onHasNext(IgniteCacheProxy.java:518)
at org.apache.ignite.internal.util.GridCloseableIteratorAdapter.hasNextX(GridCloseableIteratorAdapter.java:53)
at org.apache.ignite.internal.util.lang.GridIteratorAdapter.hasNext(GridIteratorAdapter.java:45)
at org.apache.ignite.internal.processors.cache.QueryCursorImpl.getAll(QueryCursorImpl.java:117)
at ignite.SpringIgniteReceiverClient.main(SpringIgniteReceiverClient.java:31)
Caused by: class org.apache.ignite.IgniteCheckedException: Query execution failed: GridCacheQueryBean [qry=GridCacheQueryAdapter [type=SCAN, clsName=null, clause=null, filter=ignite.MyIgniteBiPredicate@294a6b8e, transform=null, part=null, incMeta=false, metrics=GridCacheQueryMetricsAdapter [minTime=9223372036854775807, maxTime=0, sumTime=0, avgTime=0.0, execs=0, completed=0, fails=0], pageSize=1024, timeout=0, keepAll=true, incBackups=false, dedup=false, prj=null, keepBinary=false, subjId=ae95174d-ff1c-44b2-a7dc-24fab738729e, taskHash=0], rdc=null, trans=null]
at org.apache.ignite.internal.processors.cache.query.GridCacheQueryFutureAdapter.checkError(GridCacheQueryFutureAdapter.java:260)
at org.apache.ignite.internal.processors.cache.query.GridCacheQueryFutureAdapter.internalIterator(GridCacheQueryFutureAdapter.java:318)
at org.apache.ignite.internal.processors.cache.query.GridCacheQueryFutureAdapter.next(GridCacheQueryFutureAdapter.java:164)
... 7 more
Caused by: class org.apache.ignite.IgniteCheckedException: Failed to execute query on node [query=GridCacheQueryBean [qry=GridCacheQueryAdapter [type=SCAN, clsName=null, clause=null, filter=ignite.MyIgniteBiPredicate@294a6b8e, transform=null, part=null, incMeta=false, metrics=GridCacheQueryMetricsAdapter [minTime=9223372036854775807, maxTime=0, sumTime=0, avgTime=0.0, execs=0, completed=0, fails=0], pageSize=1024, timeout=0, keepAll=true, incBackups=false, dedup=false, prj=null, keepBinary=false, subjId=ae95174d-ff1c-44b2-a7dc-24fab738729e, taskHash=0], rdc=null, trans=null], nodeId=366435c6-5fca-43dc-b1f2-5ff2b0d3ee2d]
at org.apache.ignite.internal.processors.cache.query.GridCacheQueryFutureAdapter.onPage(GridCacheQueryFutureAdapter.java:383)
at org.apache.ignite.internal.processors.cache.query.GridCacheDistributedQueryManager.processQueryResponse(GridCacheDistributedQueryManager.java:398)
at org.apache.ignite.internal.processors.cache.query.GridCacheDistributedQueryManager.access$000(GridCacheDistributedQueryManager.java:63)
at org.apache.ignite.internal.processors.cache.query.GridCacheDistributedQueryManager$1.apply(GridCacheDistributedQueryManager.java:93)
at org.apache.ignite.internal.processors.cache.query.GridCacheDistributedQueryManager$1.apply(GridCacheDistributedQueryManager.java:91)
at org.apache.ignite.internal.processors.cache.GridCacheIoManager.processMessage(GridCacheIoManager.java:827)
at org.apache.ignite.internal.processors.cache.GridCacheIoManager.onMessage0(GridCacheIoManager.java:369)
at org.apache.ignite.internal.processors.cache.GridCacheIoManager.access$400(GridCacheIoManager.java:95)
at org.apache.ignite.internal.processors.cache.GridCacheIoManager$OrderedMessageListener.onMessage(GridCacheIoManager.java:1345)
at org.apache.ignite.internal.managers.communication.GridIoManager.invokeListener(GridIoManager.java:1082)
at org.apache.ignite.internal.managers.communication.GridIoManager.access$1600(GridIoManager.java:102)
at org.apache.ignite.internal.managers.communication.GridIoManager$GridCommunicationMessageSet.unwind(GridIoManager.java:2332)
at org.apache.ignite.internal.managers.communication.GridIoManager.unwindMessageSet(GridIoManager.java:1042)
at org.apache.ignite.internal.managers.communication.GridIoManager.access$1900(GridIoManager.java:102)
at org.apache.ignite.internal.managers.communication.GridIoManager$6.run(GridIoManager.java:1011)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: class org.apache.ignite.IgniteCheckedException: ignite.MyIgniteBiPredicate
at org.apache.ignite.internal.util.IgniteUtils.unmarshal(IgniteUtils.java:9785)
at org.apache.ignite.internal.processors.cache.query.GridCacheQueryRequest.finishUnmarshal(GridCacheQueryRequest.java:322)
at org.apache.ignite.internal.processors.cache.GridCacheIoManager.unmarshall(GridCacheIoManager.java:1298)
at org.apache.ignite.internal.processors.cache.GridCacheIoManager.onMessage0(GridCacheIoManager.java:364)
at org.apache.ignite.internal.processors.cache.GridCacheIoManager.handleMessage(GridCacheIoManager.java:293)
at org.apache.ignite.internal.processors.cache.GridCacheIoManager.access$000(GridCacheIoManager.java:95)
at org.apache.ignite.internal.processors.cache.GridCacheIoManager$1.onMessage(GridCacheIoManager.java:238)
at org.apache.ignite.internal.managers.communication.GridIoManager.invokeListener(GridIoManager.java:1082)
at org.apache.ignite.internal.managers.communication.GridIoManager.processRegularMessage0(GridIoManager.java:710)
at org.apache.ignite.internal.managers.communication.GridIoManager.access$1700(GridIoManager.java:102)
at org.apache.ignite.internal.managers.communication.GridIoManager$5.run(GridIoManager.java:673)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
... 1 more
Caused by: class org.apache.ignite.binary.BinaryInvalidTypeException: ignite.MyIgniteBiPredicate
at org.apache.ignite.internal.binary.BinaryContext.descriptorForTypeId(BinaryContext.java:689)
at org.apache.ignite.internal.binary.BinaryContext.descriptorForTypeId(BinaryContext.java:686)
at org.apache.ignite.internal.binary.BinaryReaderExImpl.deserialize0(BinaryReaderExImpl.java:1491)
at org.apache.ignite.internal.binary.BinaryReaderExImpl.deserialize(BinaryReaderExImpl.java:1450)
at org.apache.ignite.internal.binary.GridBinaryMarshaller.deserialize(GridBinaryMarshaller.java:298)
at org.apache.ignite.internal.binary.BinaryMarshaller.unmarshal0(BinaryMarshaller.java:100)
at org.apache.ignite.marshaller.AbstractNodeNameAwareMarshaller.unmarshal(AbstractNodeNameAwareMarshaller.java:82)
at org.apache.ignite.internal.util.IgniteUtils.unmarshal(IgniteUtils.java:9779)
... 13 more
Caused by: java.lang.ClassNotFoundException: ignite.MyIgniteBiPredicate
at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:274)
at org.apache.ignite.internal.util.IgniteUtils.forName(IgniteUtils.java:8393)
at org.apache.ignite.internal.MarshallerContextAdapter.getClass(MarshallerContextAdapter.java:185)
at org.apache.ignite.internal.binary.BinaryContext.descriptorForTypeId(BinaryContext.java:680)
... 20 more

You need to deploy MyIgniteBiPredicate on the server nodes. Create a JAR file with this class and put it into the IGNITE_HOME/libs folder before starting the cluster.
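As a side note, for development it can be more convenient to enable peer class loading, which lets Ignite ship classes from the client to server nodes automatically instead of deploying a JAR (depending on the Ignite version this also covers ScanQuery filters). A minimal sketch, assuming you start the client programmatically; peer class loading must be enabled on the server nodes as well (for ignite.bat, via config/default-config.xml):
import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
public class PeerClassLoadingClient {
    public static void main(String[] args) {
        IgniteConfiguration cfg = new IgniteConfiguration();
        // Ship classes (e.g. the ScanQuery filter) to remote nodes on demand.
        cfg.setPeerClassLoadingEnabled(true);
        Ignition.setClientMode(true);
        Ignite ignite = Ignition.start(cfg);
    }
}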

Related

Testcontainers "JedisConnectionException: Could not get a resource from the pool" on Gitlab pipeline

I'm using Testcontainers for Redis cluster integration tests. Locally everything works as expected, but after creating a remote branch and pushing the code, I'm getting the exception below on the GitLab pipeline. Cassandra and PostgreSQL containers are working fine; only Redis fails.
Update:
Previously, with a FixedExposedPort, the tests worked locally, but now, after removing the FixedExposedPort, I'm getting the same error even locally.
Caused by: redis.clients.jedis.exceptions.JedisClusterMaxAttemptsException: No more cluster attempts left.
at redis.clients.jedis.JedisClusterCommand.runWithRetries(JedisClusterCommand.java:156)
at redis.clients.jedis.JedisClusterCommand.runBinary(JedisClusterCommand.java:69)
at redis.clients.jedis.BinaryJedisCluster.setex(BinaryJedisCluster.java:496)
at redis.clients.jedis.commands.BinaryJedisClusterCommands.setex(BinaryJedisClusterCommands.java:74)
at org.springframework.data.redis.connection.jedis.JedisClusterStringCommands.setEx(JedisClusterStringCommands.java:175)
... 39 common frames omitted
Suppressed: redis.clients.jedis.exceptions.JedisConnectionException: Could not get a resource from the pool
at redis.clients.jedis.util.Pool.getResource(Pool.java:84)
at redis.clients.jedis.JedisPool.getResource(JedisPool.java:366)
at redis.clients.jedis.JedisSlotBasedConnectionHandler.getConnectionFromSlot(JedisSlotBasedConnectionHandler.java:129)
at redis.clients.jedis.JedisClusterCommand.runWithRetries(JedisClusterCommand.java:118)
... 43 common frames omitted
Caused by: redis.clients.jedis.exceptions.JedisConnectionException: Failed to create socket.
at redis.clients.jedis.DefaultJedisSocketFactory.createSocket(DefaultJedisSocketFactory.java:110)
at redis.clients.jedis.Connection.connect(Connection.java:226)
at redis.clients.jedis.BinaryClient.connect(BinaryClient.java:135)
at redis.clients.jedis.BinaryJedis.connect(BinaryJedis.java:309)
at redis.clients.jedis.BinaryJedis.initializeFromClientConfig(BinaryJedis.java:87)
at redis.clients.jedis.BinaryJedis.<init>(BinaryJedis.java:292)
at redis.clients.jedis.Jedis.<init>(Jedis.java:167)
at redis.clients.jedis.JedisFactory.makeObject(JedisFactory.java:177)
at org.apache.commons.pool2.impl.GenericObjectPool.create(GenericObjectPool.java:889)
at org.apache.commons.pool2.impl.GenericObjectPool.borrowObject(GenericObjectPool.java:424)
at org.apache.commons.pool2.impl.GenericObjectPool.borrowObject(GenericObjectPool.java:349)
at redis.clients.jedis.util.Pool.getResource(Pool.java:75)
... 46 common frames omitted
Caused by: java.net.ConnectException: Connection refused (Connection refused)
at java.base/java.net.PlainSocketImpl.socketConnect(Native Method)
at java.base/java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:412)
at java.base/java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:255)
at java.base/java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:237)
at java.base/java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
at java.base/java.net.Socket.connect(Socket.java:609)
at redis.clients.jedis.DefaultJedisSocketFactory.createSocket(DefaultJedisSocketFactory.java:80)
... 57 common frames omitted
Here is the Testcontainers configuration.
public class RedisClusterContainer extends GenericContainer<RedisClusterContainer> {
public RedisClusterContainer() {
super("grokzen/redis-cluster:6.2.8");
withEnv("IP", "0.0.0.0");
addExposedPorts(7000, 7001, 7002, 7003, 7004, 7005);
}
public String getNodeAddress() {
return Stream.of(7000, 7001, 7002, 7003, 7004, 7005)
.map(port -> {
Integer mappedPort = getMappedPort(port);
return getHost() + ":" + mappedPort;
})
.collect(Collectors.joining(","));
}
}
I have now added two tests that create the container in different ways without the FixedExposedPort, but neither of them works.
@Slf4j
@SpringBootTest
public class AbstractRedisClusterIT {
}
@Slf4j
class FirstRedisClusterIT extends AbstractRedisClusterIT {
@Autowired
private RedisTemplate<String, String> redisTemplate;
static {
final RedisClusterContainer redisClusterContainer = new RedisClusterContainer();
redisClusterContainer.start();
String redisNodes = redisClusterContainer.getNodeAddress();
log.info("Redis container started on nodes: {}", redisNodes);
System.setProperty("spring.redis.cluster.nodes", redisNodes);
}
@Test
void firstRedisTestContainerTest() {
redisTemplate.opsForValue().set("secondRedisKey", "secondRedisValue", 15, TimeUnit.MINUTES);
String result = redisTemplate.opsForValue().get("secondRedisKey");
assertThat(result).isEqualTo("secondRedisValue");
}
}
@Slf4j
class SecondRedisClusterIT extends AbstractRedisClusterIT {
@Autowired
private RedisTemplate<String, String> redisTemplate;
static {
final GenericContainer<?> genericContainer = new GenericContainer<>(DockerImageName.parse("grokzen/redis-cluster:6.2.8"))
.withEnv("IP", "0.0.0.0")
.withExposedPorts(7000, 7001, 7002, 7003, 7004, 7005);
genericContainer.start();
String redisNodes = Stream.of(7000, 7001, 7002, 7003, 7004, 7005)
.map(port -> {
Integer mappedPort = genericContainer.getMappedPort(port);
return genericContainer.getHost() + ":" + mappedPort;
})
.collect(Collectors.joining(","));
System.setProperty("spring.redis.cluster.nodes", redisNodes);
log.info("Redis container started on nodes: {}", redisNodes);
}
@Test
void secondRedisTestContainerTest() {
redisTemplate.opsForValue().set("firstRedisKey", "firstRedisValue", 15, TimeUnit.MINUTES);
String result = redisTemplate.opsForValue().get("firstRedisKey");
assertThat(result).isEqualTo("firstRedisValue");
}
}
Here is the connection factory configuration for the cluster.
@Slf4j
@Configuration
public class JedisConfiguration {
#Value("${spring.redis.cluster.nodes}")
private String redisClusterNodes;
#Value("${spring.redis.client-name:redis}")
private String clientName;
@Bean
@Primary
RedisConnectionFactory connectionFactory() {
log.info("Cluster nodes: {}", redisClusterNodes);
List<String> nodes = Arrays.stream(redisClusterNodes.split(",")).collect(toList());
RedisClusterConfiguration clusterConfig = new RedisClusterConfiguration(nodes);
JedisClientConfiguration clientConfig = JedisClientConfiguration.builder().clientName(clientName).usePooling().build();
return new JedisConnectionFactory(clusterConfig, clientConfig);
}
@Bean
RedisTemplate<String, String> redisTemplate(RedisConnectionFactory factory) {
return new StringRedisTemplate(factory);
}
}
In the pipeline logs I could see the containers started.
2022-12-15 14:14:19.804 INFO 87 --- [ Test worker] i.c.testenv.RedisContainerExtension : Starting Redis container
2022-12-15 14:14:19.814 INFO 87 --- [ Test worker] 🐳 [grokzen/redis-cluster:5.0.7] : Pulling docker image: grokzen/redis-cluster:5.0.7. Please be patient; this may take some time but only needs to be done once.
2022-12-15 14:14:20.170 INFO 87 --- [ream-2042455873] 🐳 [grokzen/redis-cluster:5.0.7] : Starting to pull image
................
2022-12-15 14:14:35.997 INFO 87 --- [ Test worker] 🐳 [grokzen/redis-cluster:5.0.7] : Creating container for image: grokzen/redis-cluster:5.0.7
2022-12-15 14:14:35.999 INFO 87 --- [ream-2042455873] 🐳 [grokzen/redis-cluster:5.0.7] : Pull complete. 17 layers, pulled in 15s (downloaded 176 MB at 11 MB/s)
2022-12-15 14:14:36.335 INFO 87 --- [ Test worker] 🐳 [grokzen/redis-cluster:5.0.7] : Container grokzen/redis-cluster:5.0.7 is starting: 47534017152ee0a974cf65d2030fbbab592da976a2d258613e5c27ad4b5b71e9
2022-12-15 14:14:39.206 INFO 87 --- [ Test worker] 🐳 [grokzen/redis-cluster:5.0.7] : Container grokzen/redis-cluster:5.0.7 started in PT19.39715S
Even though the container is started and getHost() returns docker as the host, I'm still getting the above exception. Does anyone know what I am doing wrong?
An example based on this GitHub project can be found below.
The example has been created using start.spring.io.
The trick is in the Config class: the spring.data.redis.cluster.nodes property is set with the host and the random mapped ports. However, the client tries to resolve both the random ports and the original ones, so the customizer helps redirect the original ports to the mapped ones. (Note that ClientResourcesBuilderCustomizer and MappingSocketAddressResolver come from Spring Boot's Lettuce autoconfiguration and the Lettuce client, so this answer assumes Lettuce rather than Jedis as the driver.)
@Testcontainers
@SpringBootTest
class SpringBootRedisApplicationTests {
@Container
private static final RedisClusterContainer redisCluster = new RedisClusterContainer();
@Autowired
private RedisTemplate<String, String> redisTemplate;
@DynamicPropertySource
static void properties(DynamicPropertyRegistry registry) {
registry.add("spring.data.redis.cluster.nodes", redisCluster::getNodeAddress);
}
@Test
void contextLoads() {
this.redisTemplate.opsForValue().set("redisKey", "redisValue");
String result = redisTemplate.opsForValue().get("redisKey");
assertThat(result).isEqualTo("redisValue");
}
static class RedisClusterContainer extends GenericContainer<RedisClusterContainer> {
public RedisClusterContainer() {
super("grokzen/redis-cluster:6.2.8");
withEnv("IP", "0.0.0.0");
addExposedPorts(7000, 7001, 7002, 7003, 7004, 7005);
waitStrategy = Wait.forLogMessage(".*Cluster state changed: ok*\\n", 6);
}
public String getNodeAddress() {
return Stream.of(7000, 7001, 7002, 7003, 7004, 7005)
.map(port -> {
Integer mappedPort = getMappedPort(port);
return getHost() + ":" + mappedPort;
})
.collect(Collectors.joining(","));
}
public Map<Integer, Integer> ports() {
return Map.of(7000, getMappedPort(7000),
7001, getMappedPort(7001),
7002, getMappedPort(7002),
7003, getMappedPort(7003),
7004, getMappedPort(7004),
7005, getMappedPort(7005));
}
}
@TestConfiguration
static class Config {
@Bean
ClientResourcesBuilderCustomizer customizer() {
return builder -> {
Function<HostAndPort, HostAndPort> mappingFn = hostAndPort -> {
if (redisCluster.ports().containsKey(hostAndPort.getPort())) {
Integer mappedPort = redisCluster.ports().get(hostAndPort.getPort());
return HostAndPort.of(hostAndPort.getHostText(), mappedPort);
}
return hostAndPort;
};
builder.socketAddressResolver(MappingSocketAddressResolver.create(mappingFn));
};
}
}
}

Cassandra failure during read query at consistency QUORUM - ReadFailureException

I have a simple Scala/Java program to demo the Cassandra Java API.
I have a simple UDT class Address which is used in class User. For some reason userMapper.get(userId) fails with no clear error message.
The code is part of a Scala project.
Runner code (java):
void exp02() {
log.debug("JAVA -- exp02");
Cluster cluster = null;
try {
CodecRegistry codecRegistry = new CodecRegistry();
cluster = Cluster.builder() // (1)
.withCodecRegistry(codecRegistry)
.addContactPoint("127.0.0.1")
.build();
log.debug("connect...exp02");
Session session = cluster.connect(); // (2)
MappingManager manager = new MappingManager(session);
Mapper<User> userMapper = manager.mapper(User.class);
// For some reason this will break
{
log.debug("create user *********************** isClosed: " + cluster.isClosed());
log.debug("get users");
ResultSet results = session.execute("SELECT * FROM cTest.user;");
Result<User> user = userMapper.map(results);
for (User u : user) {
log.debug("User : " + u);
}
log.debug("Users printed");
UUID userId = UUID.fromString("567378a9-8533-4d1c-80a8-71bf4b77189e");
User u2 = userMapper.get(userId); // <<<--- This line throws exception, (JRunner.java:67)
log.debug("Select user = " + u2);
}
} catch (RuntimeException e) {
log.error("Exception: " + e);
e.printStackTrace();
} finally {
log.debug("close...exp02");
if (cluster != null) cluster.close(); // (5)
}
}
Main (scala):
package com.example.crunner
import org.slf4j.{Logger, LoggerFactory}
object MainRunner {
val log: Logger = LoggerFactory.getLogger(getClass())
def main(args: Array[String]): Unit = {
val jrunner = new JRunner()
jrunner.exp02()
}
}
User class (java):
package com.example.crunner;
import java.util.UUID;
import com.datastax.driver.mapping.annotations.Column;
import com.datastax.driver.mapping.annotations.PartitionKey;
import com.datastax.driver.mapping.annotations.Table;
@Table(keyspace = "cTest", name = "user",
readConsistency = "QUORUM",
writeConsistency = "QUORUM"
// caseSensitiveKeyspace = false,
// caseSensitiveTable = false
)
public class User {
@PartitionKey
@Column(name = "user_id")
private UUID userId;
private String name;
private Address address;
public User(UUID userId, String name, Address address) {
this.userId = userId;
this.name = name;
this.address = address;
}
public User() { address = new Address(); }
public UUID getUserId() {
return userId;
}
public void setUserId(UUID userId) {
this.userId = userId;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
@Override
public String toString() {
return "User{" +
"userId=" + userId +
", name='" + name + '\'' +
", address=" + address +
'}';
}
}
UDT Address class (java)
package com.example.crunner;
import com.datastax.driver.mapping.annotations.Field;
import com.datastax.driver.mapping.annotations.UDT;
@UDT(keyspace = "cTest", name = "addressT") //, caseSensitiveType = true)
public class Address {
private String street;
private int zipCode;
public Address(String street, int zipCode) {
this.street = street;
this.zipCode = zipCode;
}
public Address() {
}
public String getStreet() {
return street;
}
public void setStreet(String street) {
this.street = street;
}
public int getZipCode() {
return zipCode;
}
public void setZipCode(int zipCode) {
this.zipCode = zipCode;
}
@Override
public String toString() {
return "Address{" +
"street='" + street + '\'' +
", zipCode=" + zipCode +
'}';
}
}
CQL (other tables not included here):
CREATE TYPE ctest.addresst (
street text,
zipcode int
);
CREATE TABLE ctest.user (
user_id uuid PRIMARY KEY,
address addresst,
name text
) WITH bloom_filter_fp_chance = 0.01
AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
AND comment = ''
AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'}
AND compression = {'chunk_length_in_kb': '64', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'}
AND crc_check_chance = 1.0
AND dclocal_read_repair_chance = 0.1
AND default_time_to_live = 0
AND gc_grace_seconds = 864000
AND max_index_interval = 2048
AND memtable_flush_period_in_ms = 0
AND min_index_interval = 128
AND read_repair_chance = 0.0
AND speculative_retry = '99PERCENTILE';
build.sbt
name := "CassJExp2"
version := "0.1-SNAPSHOT"
scalaVersion := "2.11.9"
resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"
val cassandraVersion = "3.2.0"
val logbackVersion = "1.2.3"
libraryDependencies ++= Seq(
"ch.qos.logback" % "logback-classic" % logbackVersion withSources() withJavadoc(), //
"ch.qos.logback" % "logback-core" % logbackVersion withSources() withJavadoc(), //
"ch.qos.logback" % "logback-access" % logbackVersion withSources() withJavadoc(), //
"org.slf4j" % "slf4j-api" % "1.7.25" withSources() withJavadoc(), //
"joda-time" % "joda-time" % "2.9.9" withSources() withJavadoc(), //
"com.datastax.cassandra" % "cassandra-driver-core" % cassandraVersion withSources() withJavadoc(), //
"com.datastax.cassandra" % "cassandra-driver-mapping" % cassandraVersion withSources() withJavadoc(), //
"com.datastax.cassandra" % "cassandra-driver-extras" % cassandraVersion withSources() withJavadoc() //
)
scalacOptions += "-deprecation"
When I run this code in the sbt console, I get the following output:
18:08:41.447 [run-main-f] DEBUG com.example.crunner.JRunner - JAVA -- exp02
18:08:41.497 [run-main-f] INFO c.d.driver.core.GuavaCompatibility - Detected Guava >= 19 in the classpath, using modern compatibility layer
18:08:41.634 [run-main-f] INFO c.datastax.driver.core.ClockFactory - Using native clock to generate timestamps.
18:08:41.644 [run-main-f] DEBUG com.example.crunner.JRunner - connect...exp02
18:08:41.674 [run-main-f] INFO com.datastax.driver.core.NettyUtil - Did not find Netty's native epoll transport in the classpath, defaulting to NIO.
18:08:42.049 [run-main-f] INFO c.d.d.c.p.DCAwareRoundRobinPolicy - Using data-center name 'datacenter1' for DCAwareRoundRobinPolicy (if this is incorrect, please provide the correct datacenter name with DCAwareRoundRobinPolicy constructor)
18:08:42.051 [run-main-f] INFO com.datastax.driver.core.Cluster - New Cassandra host /127.0.0.1:9042 added
18:08:42.107 [run-main-f] DEBUG com.example.crunner.JRunner - create user *********************** isClosed: false
18:08:42.108 [run-main-f] DEBUG com.example.crunner.JRunner - get users
18:08:42.139 [run-main-f] DEBUG com.example.crunner.JRunner - User : User{userId=54cbad6e-3f27-4b7e-bce0-8a4a4fbffbdf, name='John Doe', address=Address{street='street', zipCode=512}}
18:08:42.139 [run-main-f] DEBUG com.example.crunner.JRunner - User : User{userId=6122b896-8b28-448d-ac5c-4bc9b5c7c7ab, name='John Doe', address=Address{street='street', zipCode=512}}
... output truncated here, table contains about 150 rows ...
18:08:42.175 [run-main-f] DEBUG com.example.crunner.JRunner - User : User{userId=44f69277-ff97-4ba2-9216-bdf65eccd7c3, name='John Doe', address=Address{street='street', zipCode=512}}
18:08:42.175 [run-main-f] DEBUG com.example.crunner.JRunner - Users printed
18:08:42.203 [run-main-f] ERROR com.example.crunner.JRunner - Exception: com.datastax.driver.core.exceptions.ReadFailureException: Cassandra failure during read query at consistency QUORUM (1 responses were required but only 0 replica responded, 1 failed)
com.datastax.driver.core.exceptions.ReadFailureException: Cassandra failure during read query at consistency QUORUM (1 responses were required but only 0 replica responded, 1 failed)
at com.datastax.driver.core.exceptions.ReadFailureException.copy(ReadFailureException.java:130)
at com.datastax.driver.core.exceptions.ReadFailureException.copy(ReadFailureException.java:30)
at com.datastax.driver.mapping.DriverThrowables.propagateCause(DriverThrowables.java:41)
at com.datastax.driver.mapping.Mapper.get(Mapper.java:435)
at com.example.crunner.JRunner.exp02(JRunner.java:67)
at com.example.crunner.MainRunner$.main(MainRunner.scala:18)
at com.example.crunner.MainRunner.main(MainRunner.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at sbt.Run.invokeMain(Run.scala:67)
at sbt.Run.run0(Run.scala:61)
at sbt.Run.sbt$Run$$execute$1(Run.scala:51)
at sbt.Run$$anonfun$run$1.apply$mcV$sp(Run.scala:55)
at sbt.Run$$anonfun$run$1.apply(Run.scala:55)
at sbt.Run$$anonfun$run$1.apply(Run.scala:55)
at sbt.Logger$$anon$4.apply(Logger.scala:84)
at sbt.TrapExit$App.run(TrapExit.scala:248)
at java.lang.Thread.run(Thread.java:745)
Caused by: com.datastax.driver.core.exceptions.ReadFailureException: Cassandra failure during read query at consistency QUORUM (1 responses were required but only 0 replica responded, 1 failed)
at com.datastax.driver.core.exceptions.ReadFailureException.copy(ReadFailureException.java:142)
at com.datastax.driver.core.Responses$Error.asException(Responses.java:140)
at com.datastax.driver.core.DefaultResultSetFuture.onSet(DefaultResultSetFuture.java:179)
at com.datastax.driver.core.RequestHandler.setFinalResult(RequestHandler.java:179)
at com.datastax.driver.core.RequestHandler.access$2400(RequestHandler.java:49)
at com.datastax.driver.core.RequestHandler$SpeculativeExecution.setFinalResult(RequestHandler.java:799)
at com.datastax.driver.core.RequestHandler$SpeculativeExecution.onSet(RequestHandler.java:633)
at com.datastax.driver.core.Connection$Dispatcher.channelRead0(Connection.java:1075)
at com.datastax.driver.core.Connection$Dispatcher.channelRead0(Connection.java:998)
at io.netty.channel.SimpleChannelInboundHandler.channelRead(SimpleChannelInboundHandler.java:105)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:357)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:343)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:336)
at io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:287)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:357)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:343)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:336)
at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:102)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:357)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:343)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:336)
at io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:293)
at io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:267)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:357)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:343)
at io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:336)
at io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1294)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:357)
at io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:343)
at io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:911)
at io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:131)
at io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:643)
at io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:566)
at io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:480)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:442)
at io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:131)
at io.netty.util.concurrent.DefaultThreadFactory$DefaultRunnableDecorator.run(DefaultThreadFactory.java:144)
... 1 more
Caused by: com.datastax.driver.core.exceptions.ReadFailureException: Cassandra failure during read query at consistency QUORUM (1 responses were required but only 0 replica responded, 1 failed)
at com.datastax.driver.core.Responses$Error$1.decode(Responses.java:88)
at com.datastax.driver.core.Responses$Error$1.decode(Responses.java:38)
at com.datastax.driver.core.Message$ProtocolDecoder.decode(Message.java:289)
at com.datastax.driver.core.Message$ProtocolDecoder.decode(Message.java:269)
at io.netty.handler.codec.MessageToMessageDecoder.channelRead(MessageToMessageDecoder.java:88)
... 20 more
18:08:42.205 [run-main-f] DEBUG com.example.crunner.JRunner - close...exp02
[success] Total time: 4 s, completed Apr 18, 2017 6:08:45 PM
At the same time I get the following error message in /var/log/cassandra/system.log:
WARN [ReadStage-2] 2017-04-18 18:08:42,202 AbstractLocalAwareExecutorService.java:169 - Uncaught exception on thread Thread[ReadStage-2,10,main]: {}
java.lang.AssertionError: null
at org.apache.cassandra.db.rows.BTreeRow.getCell(BTreeRow.java:212) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.canRemoveRow(SinglePartitionReadCommand.java:895) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.reduceFilter(SinglePartitionReadCommand.java:859) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.queryMemtableAndSSTablesInTimestampOrder(SinglePartitionReadCommand.java:744) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.queryMemtableAndDiskInternal(SinglePartitionReadCommand.java:515) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.queryMemtableAndDisk(SinglePartitionReadCommand.java:492) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.SinglePartitionReadCommand.queryStorage(SinglePartitionReadCommand.java:358) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.db.ReadCommand.executeLocally(ReadCommand.java:397) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.service.StorageProxy$LocalReadRunnable.runMayThrow(StorageProxy.java:1801) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.service.StorageProxy$DroppableRunnable.run(StorageProxy.java:2486) ~[apache-cassandra-3.9.jar:3.9]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) ~[na:1.8.0_121]
at org.apache.cassandra.concurrent.AbstractLocalAwareExecutorService$FutureTask.run(AbstractLocalAwareExecutorService.java:164) ~[apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.concurrent.AbstractLocalAwareExecutorService$LocalSessionFutureTask.run(AbstractLocalAwareExecutorService.java:136) [apache-cassandra-3.9.jar:3.9]
at org.apache.cassandra.concurrent.SEPWorker.run(SEPWorker.java:109) [apache-cassandra-3.9.jar:3.9]
at java.lang.Thread.run(Thread.java:745) [na:1.8.0_121]
Cassandra version is [cqlsh 5.0.1 | Cassandra 3.9 | CQL spec 3.4.2 | Native protocol v4]
So the userMapper can map a ResultSet of users, but getting a single user fails. The userId I try to fetch exists in the user table. It is also possible to save a new user into the db using the userMapper without failure.
I don't know if this is somehow related to having the UDT Address in the User class. Tables / mappers without UDT classes are working fine.
EDIT:
As Marko Švaljek suggested, I tried the query on the command line:
cqlsh> SELECT * FROM cTest.user where user_id=567378a9-8533-4d1c-80a8-71bf4b77189e;
ReadFailure: Error from server: code=1300 [Replica(s) failed to execute read] message="Operation failed - received 0 responses and 1 failures" info={'failures': 1, 'received_responses': 0, 'required_responses': 1, 'consistency': 'ONE'}
Looks like the same error as with the Java client.
SELECT * FROM cTest.user works fine.
EDIT 2:
This is a single-instance environment.
nodetool status
Datacenter: datacenter1
=======================
Status=Up/Down
|/ State=Normal/Leaving/Joining/Moving
-- Address Load Tokens Owns Host ID Rack
UN 127.0.0.1 354.4 KiB 256 ? 33490146-da36-4359-bb24-42854bdb3c26 rack1
Note: Non-system keyspaces don't have the same replication settings, effective ownership information is meaningless
What's the reason for this error and how do I fix it? Thank you for your support.

Catch user exception in remote service at caller level

I am running multiple services in an Ignite cluster which depend on each other.
I'd like to catch (user-defined) exceptions at the caller level when I call a remote service function. See the example below, based on the Service example in the docs for 1.7.
MyUserException.java
package com.example.testing;
public class MyUserException extends Throwable {}
MyCounterService.java
package com.example.testing;
public interface MyCounterService {
int increment() throws MyUserException;
}
MyCounterServiceImpl.java (Error condition is ignite.cluster().forYoungest())
package com.example.testing;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteServices;
import org.apache.ignite.Ignition;
import org.apache.ignite.resources.IgniteInstanceResource;
import org.apache.ignite.services.Service;
import org.apache.ignite.services.ServiceContext;
public class MyCounterServiceImpl implements MyCounterService, Service {
@IgniteInstanceResource
private Ignite ignite;
private int value = 0;
public int increment() throws MyUserException {
if ((value % 2) == 0) {
throw new MyUserException();
} else {
value++;
}
return value;
}
public static void main(String [] args) {
Ignite ignite = Ignition.start();
IgniteServices svcs = ignite.services(ignite.cluster().forYoungest());
svcs.deployNodeSingleton("MyCounterService", new MyCounterServiceImpl());
}
@Override
public void cancel(ServiceContext ctx) {
System.out.println("Service cancelled");
}
@Override
public void init(ServiceContext ctx) throws Exception {
System.out.println("Service initialized");
}
@Override
public void execute(ServiceContext ctx) throws Exception {
System.out.println("Service running");
}
}
MyCallerService.java
package com.example.testing;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteException;
import org.apache.ignite.Ignition;
import org.apache.ignite.resources.IgniteInstanceResource;
import org.apache.ignite.services.Service;
import org.apache.ignite.services.ServiceContext;
public class MyCallerService implements Service {
@IgniteInstanceResource
private Ignite ignite;
private Boolean stopped;
public void run() {
stopped = false;
MyCounterService service = ignite.services().serviceProxy("MyCounterService", MyCounterService.class, false);
while (!stopped)
{
try {
Thread.sleep(500);
service.increment();
} catch (MyUserException e) {
System.out.println("Got exception");
//e.printStackTrace();
} catch (InterruptedException e) {
//e.printStackTrace();
}
catch (IgniteException e) {
System.out.println("Got critial exception");
// would print the actual user exception
//e.getCause().getCause().getCause().printStackTrace();
break;
}
}
}
public static void main(String [] args) {
Ignite ignite = Ignition.start();
ignite.services(ignite.cluster().forYoungest()).deployNodeSingleton("MyCallerService", new MyCallerService());
}
@Override
public void cancel(ServiceContext ctx) {
stopped = true;
}
@Override
public void init(ServiceContext ctx) throws Exception {
}
@Override
public void execute(ServiceContext ctx) throws Exception {
run();
}
}
The exception is not being caught at the caller level. Instead, these exceptions show up in the console. How do I catch and handle the exceptions properly when a service function is called?
Output of MyCounterServiceImpl
[18:23:23] Ignite node started OK (id=c82df19c)
[18:23:23] Topology snapshot [ver=1, servers=1, clients=0, CPUs=4, heap=3.5GB]
Service initialized
Service running
[18:23:27] Topology snapshot [ver=2, servers=2, clients=0, CPUs=4, heap=7.0GB]
Nov 17, 2016 6:23:28 PM org.apache.ignite.logger.java.JavaLogger error
SEVERE: Failed to execute job [jobId=82580537851-3c0a354f-69b5-496c-af10-ee789a5387c3, ses=GridJobSessionImpl [ses=GridTaskSessionImpl [taskName=o.a.i.i.processors.service.GridServiceProxy$ServiceProxyCallable, dep=LocalDeployment [super=GridDeployment [ts=1479403401422, depMode=SHARED, clsLdr=sun.misc.Launcher$AppClassLoader@1d44bcfa, clsLdrId=4fe60537851-c82df19c-cdff-43ef-b7b6-e8485231629a, userVer=0, loc=true, sampleClsName=java.lang.String, pendingUndeploy=false, undeployed=false, usage=0]], taskClsName=o.a.i.i.processors.service.GridServiceProxy$ServiceProxyCallable, sesId=72580537851-3c0a354f-69b5-496c-af10-ee789a5387c3, startTime=1479403408961, endTime=9223372036854775807, taskNodeId=3c0a354f-69b5-496c-af10-ee789a5387c3, clsLdr=sun.misc.Launcher$AppClassLoader@1d44bcfa, closed=false, cpSpi=null, failSpi=null, loadSpi=null, usage=1, fullSup=false, subjId=3c0a354f-69b5-496c-af10-ee789a5387c3, mapFut=IgniteFuture [orig=GridFutureAdapter [resFlag=0, res=null, startTime=1479403408960, endTime=0, ignoreInterrupts=false, state=INIT]]], jobId=82580537851-3c0a354f-69b5-496c-af10-ee789a5387c3]]
class org.apache.ignite.IgniteException: null
at org.apache.ignite.internal.processors.closure.GridClosureProcessor$C2V2.execute(GridClosureProcessor.java:2009)
at org.apache.ignite.internal.processors.job.GridJobWorker$2.call(GridJobWorker.java:509)
at org.apache.ignite.internal.util.IgniteUtils.wrapThreadLoader(IgniteUtils.java:6521)
at org.apache.ignite.internal.processors.job.GridJobWorker.execute0(GridJobWorker.java:503)
at org.apache.ignite.internal.processors.job.GridJobWorker.body(GridJobWorker.java:456)
at org.apache.ignite.internal.util.worker.GridWorker.run(GridWorker.java:110)
at org.apache.ignite.internal.processors.job.GridJobProcessor.processJobExecuteRequest(GridJobProcessor.java:1161)
at org.apache.ignite.internal.processors.job.GridJobProcessor$JobExecutionListener.onMessage(GridJobProcessor.java:1766)
at org.apache.ignite.internal.managers.communication.GridIoManager.invokeListener(GridIoManager.java:1238)
at org.apache.ignite.internal.managers.communication.GridIoManager.processRegularMessage0(GridIoManager.java:866)
at org.apache.ignite.internal.managers.communication.GridIoManager.access$1700(GridIoManager.java:106)
at org.apache.ignite.internal.managers.communication.GridIoManager$5.run(GridIoManager.java:829)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.reflect.InvocationTargetException
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.ignite.internal.processors.service.GridServiceProxy$ServiceProxyCallable.call(GridServiceProxy.java:392)
at org.apache.ignite.internal.processors.closure.GridClosureProcessor$C2V2.execute(GridClosureProcessor.java:2006)
... 14 more
Caused by: com.example.testing.MyUserException
at com.example.testing.MyCounterServiceImpl.increment(MyCounterServiceImpl.java:19)
... 20 more
Output of MyCallerService
[18:23:28] Ignite node started OK (id=3c0a354f)
[18:23:28] Topology snapshot [ver=2, servers=2, clients=0, CPUs=4, heap=7.0GB]
Nov 17, 2016 6:23:28 PM org.apache.ignite.logger.java.JavaLogger error
SEVERE: Failed to obtain remote job result policy for result from ComputeTask.result(..) method (will fail the whole task): GridJobResultImpl [job=C2V2 [c=ServiceProxyCallable [mtdName=increment, svcName=MyCounterService, ignite=null]], sib=GridJobSiblingImpl [sesId=72580537851-3c0a354f-69b5-496c-af10-ee789a5387c3, jobId=82580537851-3c0a354f-69b5-496c-af10-ee789a5387c3, nodeId=c82df19c-cdff-43ef-b7b6-e8485231629a, isJobDone=false], jobCtx=GridJobContextImpl [jobId=82580537851-3c0a354f-69b5-496c-af10-ee789a5387c3, timeoutObj=null, attrs={}], node=TcpDiscoveryNode [id=c82df19c-cdff-43ef-b7b6-e8485231629a, addrs=[0:0:0:0:0:0:0:1%lo, 127.0.0.1, 172.18.22.52], sockAddrs=[/0:0:0:0:0:0:0:1%lo:47500, /127.0.0.1:47500, /172.18.22.52:47500], discPort=47500, order=1, intOrder=1, lastExchangeTime=1479403407847, loc=false, ver=1.7.0#20160801-sha1:383273e3, isClient=false], ex=class o.a.i.IgniteException: null, hasRes=true, isCancelled=false, isOccupied=true]
class org.apache.ignite.IgniteException: Remote job threw user exception (override or implement ComputeTask.result(..) method if you would like to have automatic failover for this exception).
at org.apache.ignite.compute.ComputeTaskAdapter.result(ComputeTaskAdapter.java:101)
at org.apache.ignite.internal.processors.task.GridTaskWorker$4.apply(GridTaskWorker.java:946)
at org.apache.ignite.internal.processors.task.GridTaskWorker$4.apply(GridTaskWorker.java:939)
at org.apache.ignite.internal.util.IgniteUtils.wrapThreadLoader(IgniteUtils.java:6553)
at org.apache.ignite.internal.processors.task.GridTaskWorker.result(GridTaskWorker.java:939)
at org.apache.ignite.internal.processors.task.GridTaskWorker.onResponse(GridTaskWorker.java:810)
at org.apache.ignite.internal.processors.task.GridTaskProcessor.processJobExecuteResponse(GridTaskProcessor.java:995)
at org.apache.ignite.internal.processors.task.GridTaskProcessor$JobMessageListener.onMessage(GridTaskProcessor.java:1220)
at org.apache.ignite.internal.managers.communication.GridIoManager.invokeListener(GridIoManager.java:1238)
at org.apache.ignite.internal.managers.communication.GridIoManager.processRegularMessage0(GridIoManager.java:866)
at org.apache.ignite.internal.managers.communication.GridIoManager.access$1700(GridIoManager.java:106)
at org.apache.ignite.internal.managers.communication.GridIoManager$5.run(GridIoManager.java:829)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: class org.apache.ignite.IgniteException: null
at org.apache.ignite.internal.processors.closure.GridClosureProcessor$C2V2.execute(GridClosureProcessor.java:2009)
at org.apache.ignite.internal.processors.job.GridJobWorker$2.call(GridJobWorker.java:509)
at org.apache.ignite.internal.util.IgniteUtils.wrapThreadLoader(IgniteUtils.java:6521)
at org.apache.ignite.internal.processors.job.GridJobWorker.execute0(GridJobWorker.java:503)
at org.apache.ignite.internal.processors.job.GridJobWorker.body(GridJobWorker.java:456)
at org.apache.ignite.internal.util.worker.GridWorker.run(GridWorker.java:110)
at org.apache.ignite.internal.processors.job.GridJobProcessor.processJobExecuteRequest(GridJobProcessor.java:1161)
at org.apache.ignite.internal.processors.job.GridJobProcessor$JobExecutionListener.onMessage(GridJobProcessor.java:1766)
... 7 more
Caused by: java.lang.reflect.InvocationTargetException
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.ignite.internal.processors.service.GridServiceProxy$ServiceProxyCallable.call(GridServiceProxy.java:392)
at org.apache.ignite.internal.processors.closure.GridClosureProcessor$C2V2.execute(GridClosureProcessor.java:2006)
... 14 more
Caused by: com.example.testing.MyUserException
at com.example.testing.MyCounterServiceImpl.increment(MyCounterServiceImpl.java:19)
... 20 more
Got critial exception
Apparently this is a bug that is yet to be resolved:
https://issues.apache.org/jira/browse/IGNITE-4298
I think the exception must be thrown over to the caller node. Could you please provide a full code example? Also, it is strange that on the node which hosts the service the exception had a null value.
UPD:
Could you please also add the log? Because for me everything works as expected: I caught MyUserException and have the "Got exception" message in the log.
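Until that issue is resolved, one workaround on the caller side is to walk the cause chain of the caught IgniteException, as the commented-out e.getCause().getCause().getCause() in MyCallerService already hints. A minimal sketch; findCause is a hypothetical helper, not an Ignite API:
// Hypothetical helper: searches the cause chain for an exception of the given type.
static <T extends Throwable> T findCause(Throwable t, Class<T> type) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
        if (type.isInstance(cur))
            return type.cast(cur);
    }
    return null;
}
In the catch (IgniteException e) block, MyUserException userEx = findCause(e, MyUserException.class); would then recover the user exception when it is present, and the caller can rethrow e otherwise.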

How to store multi-column data in Apache Geode?

I am new to Apache Geode and I am trying a sample program to store data like:
empid:col1:col2
1:10:15
I have written a sample program, but at runtime it gives an error like "Error registering instantiator on pool:". Going through the logs I can see the record has been inserted into the region, but at query time I am also getting the following error:
Result : false
startCount : 0
endCount : 20
Message : A ClassNotFoundException was thrown while trying to deserialize cached value.
Sharing the complete code below.
DataEntry.java
package com.apache.geode;
import java.util.Map.Entry;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.client.ClientCache;
import com.gemstone.gemfire.cache.client.ClientCacheFactory;
import com.gemstone.gemfire.cache.client.ClientRegionShortcut;
import com.gemstone.gemfire.cache.query.FunctionDomainException;
import com.gemstone.gemfire.cache.query.NameResolutionException;
import com.gemstone.gemfire.cache.query.QueryInvocationTargetException;
import com.gemstone.gemfire.cache.query.TypeMismatchException;
public class DataEntry {
public static void main(String[] args) throws FunctionDomainException,TypeMismatchException, NameResolutionException, QueryInvocationTargetException {
ClientCache cache = new ClientCacheFactory().addPoolLocator(
"10.77.17.17", 10334).create();
Region<String, CustomerValue> customer = cache
.<String, CustomerValue> createClientRegionFactory(
ClientRegionShortcut.CACHING_PROXY)
.setValueConstraint(CustomerValue.class)
.setKeyConstraint(String.class).create("custRegion");
CustomerValue customerValue = new CustomerValue(10, 15);
customer.put("1", customerValue);
System.out.println("successfully Put customer object into the cache");
for (Entry<String, CustomerValue> entry : customer.entrySet()) {
System.out.format("key = %s, value = %s\n", entry.getKey(),
entry.getValue());
}
cache.close();
}
}
CustomerValue.java
package com.apache.geode;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import com.gemstone.gemfire.DataSerializable;
import com.gemstone.gemfire.Instantiator;
public class CustomerValue implements DataSerializable{
private static final long serialVersionUID = -5524295054253565345L;
private int points_5A;
private int points_10A;
static {
Instantiator.register(new Instantiator(CustomerValue.class, 45) {
public DataSerializable newInstance() {
return new CustomerValue();
}
});
}
public CustomerValue()
{
}
public CustomerValue(int points_5A,int points_10A)
{
this.points_10A=points_10A;
this.points_5A=points_5A;
}
public int getPoints_5A() {
return points_5A;
}
public void setPoints_5A(int points_5a) {
points_5A = points_5a;
}
public int getPoints_10A() {
return points_10A;
}
public void setPoints_10A(int points_10a) {
points_10A = points_10a;
}
@Override
public String toString()
{
return "customer [ 5Apoints=" + points_5A +",10Apoints=" + points_10A +"]";
}
public void fromData(DataInput in) throws IOException {
this.points_5A=in.readInt();
this.points_10A=in.readInt();
}
public void toData(DataOutput io) throws IOException {
io.writeInt(points_5A);
io.writeInt(points_10A);
}
}
output logs:
[info 2015/08/13 14:28:23.452 UTC <main> tid=0x1] Running in local mode since mcast-port was 0 and locators was empty.
[info 2015/08/13 14:28:23.635 UTC <Thread-0 StatSampler> tid=0x9] Disabling statistic archival.
[config 2015/08/13 14:28:23.881 UTC <main> tid=0x1] Pool DEFAULT started with multiuser-authentication=false
[config 2015/08/13 14:28:23.938 UTC <poolTimer-DEFAULT-3> tid=0x13] Updating membership port. Port changed from 0 to 59,982.
[warning 2015/08/13 14:28:24.176 UTC <main> tid=0x1] Error registering instantiator on pool:
com.gemstone.gemfire.cache.client.ServerOperationException: : While performing a remote registerInstantiators
at com.gemstone.gemfire.cache.client.internal.AbstractOp.processAck(AbstractOp.java:257)
at com.gemstone.gemfire.cache.client.internal.RegisterInstantiatorsOp$RegisterInstantiatorsOpImpl.processResponse(RegisterInstantiatorsOp.java:140)
at com.gemstone.gemfire.cache.client.internal.AbstractOp.processResponse(AbstractOp.java:219)
at com.gemstone.gemfire.cache.client.internal.AbstractOp.attemptReadResponse(AbstractOp.java:167)
at com.gemstone.gemfire.cache.client.internal.AbstractOp.attempt(AbstractOp.java:373)
at com.gemstone.gemfire.cache.client.internal.ConnectionImpl.execute(ConnectionImpl.java:261)
at com.gemstone.gemfire.cache.client.internal.pooling.PooledConnection.execute(PooledConnection.java:323)
at com.gemstone.gemfire.cache.client.internal.OpExecutorImpl.executeWithPossibleReAuthentication(OpExecutorImpl.java:932)
at com.gemstone.gemfire.cache.client.internal.OpExecutorImpl.execute(OpExecutorImpl.java:162)
at com.gemstone.gemfire.cache.client.internal.PoolImpl.execute(PoolImpl.java:660)
at com.gemstone.gemfire.cache.client.internal.RegisterInstantiatorsOp.execute(RegisterInstantiatorsOp.java:42)
at com.gemstone.gemfire.internal.cache.PoolManagerImpl.allPoolsRegisterInstantiator(PoolManagerImpl.java:219)
at com.gemstone.gemfire.internal.InternalInstantiator.sendRegistrationMessageToServers(InternalInstantiator.java:206)
at com.gemstone.gemfire.internal.InternalInstantiator._register(InternalInstantiator.java:161)
at com.gemstone.gemfire.internal.InternalInstantiator.register(InternalInstantiator.java:89)
at com.gemstone.gemfire.Instantiator.register(Instantiator.java:175)
at CustomerValue.<clinit>(CustomerValue.java:16)
at DataEntry.main(DataEntry.java:22)
Caused by: java.lang.ClassNotFoundException: CustomerValue$1
at com.gemstone.gemfire.internal.ClassPathLoader.forName(ClassPathLoader.java:422)
at com.gemstone.gemfire.internal.InternalDataSerializer.getCachedClass(InternalDataSerializer.java:4066)
at com.gemstone.gemfire.internal.cache.tier.sockets.command.RegisterInstantiators.cmdExecute(RegisterInstantiators.java:89)
at com.gemstone.gemfire.internal.cache.tier.sockets.BaseCommand.execute(BaseCommand.java:182)
at com.gemstone.gemfire.internal.cache.tier.sockets.ServerConnection.doNormalMsg(ServerConnection.java:787)
at com.gemstone.gemfire.internal.cache.tier.sockets.ServerConnection.doOneMessage(ServerConnection.java:914)
at com.gemstone.gemfire.internal.cache.tier.sockets.ServerConnection.run(ServerConnection.java:1159)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at com.gemstone.gemfire.internal.cache.tier.sockets.AcceptorImpl$1$1.run(AcceptorImpl.java:580)
at java.lang.Thread.run(Thread.java:745)
successfully Put customer object into the cache
key = 1, value = customer [ 5Apoints=10,10Apoints=15]
[info 2015/08/13 14:28:24.225 UTC <main> tid=0x1] GemFireCache[id = 712610161; isClosing = true; isShutDownAll = false; closingGatewayHubsByShutdownAll = false; created = Thu Aug 13 14:28:23 UTC 2015; server = false; copyOnRead = false; lockLease = 120; lockTimeout = 60]: Now closing.
[info 2015/08/13 14:28:24.277 UTC <main> tid=0x1] Resetting original MemoryPoolMXBean heap threshold bytes 0 on pool PS Old Gen
[config 2015/08/13 14:28:24.329 UTC <main> tid=0x1] Destroying connection pool DEFAULT
Your CustomerValue class needs to be on the server's classpath. Note that the class the server actually fails to load is CustomerValue$1, the anonymous Instantiator subclass created in the static block, so the deployed JAR must contain it as well. Please refer to the Geode documentation on how to deploy JARs to the server.
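A minimal sketch of a named alternative (the class name CustomerValueInstantiator is hypothetical) makes that extra class explicit instead of relying on the compiler-generated CustomerValue$1; it does not remove the need to deploy the JAR, which must still contain both this class and CustomerValue:
package com.apache.geode;
import com.gemstone.gemfire.DataSerializable;
import com.gemstone.gemfire.Instantiator;
// Hypothetical named instantiator: same registration as the anonymous class,
// but the class the server must load now has an obvious name.
public class CustomerValueInstantiator extends Instantiator {
    public CustomerValueInstantiator() {
        super(CustomerValue.class, 45);
    }
    @Override
    public DataSerializable newInstance() {
        return new CustomerValue();
    }
}
It would then be registered from CustomerValue's static block with Instantiator.register(new CustomerValueInstantiator());.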

hazelcast mapper class not found

I'm trying to run a map/reduce task on Hazelcast 3.4, but I keep getting a class-not-found exception.
I created several players and stored them in an IMap:
this.conf = new ClientConfig();
HazelcastInstance cluster = HazelcastClient.newHazelcastClient(this.conf);
Map<String, Player> mapPlayers = cluster.getMap("players");
for (int playerID = 0; playerID < 10000; playerID++) {
Player p = new Player();
mapPlayers.put(Integer.toString(playerID), p);
System.out.println("inserted player nuber " + Integer.toString(playerID));
}
After that I created the map/reduce class MRCount:
public class MRCount {
private ClientConfig conf;
public MRCount() {
this.conf = new ClientConfig();
this.conf.getNetworkConfig();
}
public void getCount() throws ExecutionException, InterruptedException {
HazelcastInstance cluster = HazelcastClient.newHazelcastClient(this.conf);
IMap<String, Player> mapPlayers = cluster.getMap("players");
KeyValueSource<String, Player> source = KeyValueSource.fromMap(mapPlayers);
JobTracker jobTracker = cluster.getJobTracker("default");
Job<String, Player> job = jobTracker.newJob(source);
ICompletableFuture<Map<String, Integer>> future = job.mapper(new MyMapper())
.reducer(new MyReducerFactory()).submit();
// future.andThen(buildCallback());
Map<String, Integer> result = future.get();
for (Map.Entry<String, Integer> e: result.entrySet()) {
System.out.println(e.getKey() + ": " + Integer.toString(e.getValue()) );
}
cluster.shutdown();
}
}
mapper class:
public class MyMapper implements Mapper<String, Player, String, Integer> {
@Override
public void map(String key, Player value, Context<String, Integer> context) {
context.emit("total", 1);
}
}
and the reducer factory:
public class MyReducerFactory implements ReducerFactory<String, Integer, Integer> {
@Override
public Reducer<Integer, Integer> newReducer(String key) {
return new MyReducer();
}
private class MyReducer extends Reducer<Integer, Integer> {
private volatile Integer result = 0;
@Override
public void reduce(Integer value) {
result += value;
}
@Override
public Integer finalizeReduce() {
return result;
}
}
}
I built my project with Maven. When I run the import code everything goes well and the players end up stored in the HZ cluster, but when I call the MRCount.getCount() function I keep getting a HazelcastSerializationException:
Exception in thread "main" java.util.concurrent.ExecutionException: com.hazelcast.nio.serialization.HazelcastSerializationException: java.lang.ClassNotFoundException: com.test.queries.models.MyMapper
at com.hazelcast.client.spi.impl.ClientCallFuture.resolveResponse(ClientCallFuture.java:214)
at com.hazelcast.client.spi.impl.ClientCallFuture.access$000(ClientCallFuture.java:53)
at com.hazelcast.client.spi.impl.ClientCallFuture$1.run(ClientCallFuture.java:286)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
at com.hazelcast.util.executor.HazelcastManagedThread.executeRun(HazelcastManagedThread.java:76)
at com.hazelcast.util.executor.HazelcastManagedThread.run(HazelcastManagedThread.java:92)
Caused by: com.hazelcast.nio.serialization.HazelcastSerializationException: java.lang.ClassNotFoundException: com.test.queries.models.MyMapper
at com.hazelcast.nio.serialization.DefaultSerializers$ObjectSerializer.read(DefaultSerializers.java:201)
at com.hazelcast.nio.serialization.StreamSerializerAdapter.read(StreamSerializerAdapter.java:44)
at com.hazelcast.nio.serialization.SerializationServiceImpl.readObject(SerializationServiceImpl.java:309)
at com.hazelcast.nio.serialization.ByteArrayObjectDataInput.readObject(ByteArrayObjectDataInput.java:439)
at com.hazelcast.mapreduce.impl.client.ClientMapReduceRequest.readData(ClientMapReduceRequest.java:226)
at com.hazelcast.mapreduce.impl.client.ClientMapReduceRequest.read(ClientMapReduceRequest.java:181)
at com.hazelcast.client.impl.client.ClientRequest.readPortable(ClientRequest.java:116)
at com.hazelcast.nio.serialization.PortableSerializer.read(PortableSerializer.java:88)
at com.hazelcast.nio.serialization.PortableSerializer.read(PortableSerializer.java:30)
at com.hazelcast.nio.serialization.StreamSerializerAdapter.toObject(StreamSerializerAdapter.java:65)
at com.hazelcast.nio.serialization.SerializationServiceImpl.toObject(SerializationServiceImpl.java:260)
at com.hazelcast.client.impl.ClientEngineImpl$ClientPacketProcessor.loadRequest(ClientEngineImpl.java:364)
at com.hazelcast.client.impl.ClientEngineImpl$ClientPacketProcessor.run(ClientEngineImpl.java:340)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
at com.hazelcast.util.executor.HazelcastManagedThread.executeRun(HazelcastManagedThread.java:76)
at com.hazelcast.util.executor.HazelcastManagedThread.run(HazelcastManagedThread.java:92)
at ------ End remote and begin local stack-trace ------.(Unknown Source)
at com.hazelcast.client.spi.impl.ClientCallFuture.resolveResponse(ClientCallFuture.java:201)
... 7 more
Caused by: java.lang.ClassNotFoundException: com.test.queries.models.MyMapper
at java.net.URLClassLoader$1.run(URLClassLoader.java:366)
at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
at com.hazelcast.nio.ClassLoaderUtil.tryLoadClass(ClassLoaderUtil.java:124)
at com.hazelcast.nio.ClassLoaderUtil.loadClass(ClassLoaderUtil.java:113)
at com.hazelcast.nio.IOUtil$1.resolveClass(IOUtil.java:113)
at java.io.ObjectInputStream.readNonProxyDesc(ObjectInputStream.java:1612)
at java.io.ObjectInputStream.readClassDesc(ObjectInputStream.java:1517)
at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1771)
at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1350)
at java.io.ObjectInputStream.readObject(ObjectInputStream.java:370)
at com.hazelcast.nio.serialization.DefaultSerializers$ObjectSerializer.read(DefaultSerializers.java:196)
at com.hazelcast.nio.serialization.StreamSerializerAdapter.read(StreamSerializerAdapter.java:44)
at com.hazelcast.nio.serialization.SerializationServiceImpl.readObject(SerializationServiceImpl.java:309)
at com.hazelcast.nio.serialization.ByteArrayObjectDataInput.readObject(ByteArrayObjectDataInput.java:439)
at com.hazelcast.mapreduce.impl.client.ClientMapReduceRequest.readData(ClientMapReduceRequest.java:226)
at com.hazelcast.mapreduce.impl.client.ClientMapReduceRequest.read(ClientMapReduceRequest.java:181)
at com.hazelcast.client.impl.client.ClientRequest.readPortable(ClientRequest.java:116)
at com.hazelcast.nio.serialization.PortableSerializer.read(PortableSerializer.java:88)
at com.hazelcast.nio.serialization.PortableSerializer.read(PortableSerializer.java:30)
at com.hazelcast.nio.serialization.StreamSerializerAdapter.toObject(StreamSerializerAdapter.java:65)
at com.hazelcast.nio.serialization.SerializationServiceImpl.toObject(SerializationServiceImpl.java:260)
at com.hazelcast.client.impl.ClientEngineImpl$ClientPacketProcessor.loadRequest(ClientEngineImpl.java:364)
at com.hazelcast.client.impl.ClientEngineImpl$ClientPacketProcessor.run(ClientEngineImpl.java:340)
... 5 more
You have to deploy all of the classes on the server side as well.
First, create a custom JAR file containing the com.test.queries.models.MyMapper class (along with the reducer classes) and place it in the Hazelcast lib folder. Then add the following classpath entries to the server .bat file and restart:
set CLASSPATH=%~dp0..\lib\hazelcast-all-3.5.2.jar
set CLASSPATH=%CLASSPATH%;..\lib\Custom-0.0.1-SNAPSHOT.jar
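To verify the deployment, here is a minimal sketch (the class name ClasspathCheck is hypothetical) that can be run with the same CLASSPATH the server .bat sets up:
public class ClasspathCheck {
    public static void main(String[] args) {
        try {
            // Succeeds only if the custom JAR is actually on this classpath
            Class.forName("com.test.queries.models.MyMapper");
            System.out.println("MyMapper is visible on this classpath");
        } catch (ClassNotFoundException e) {
            System.out.println("MyMapper is NOT on this classpath: " + e.getMessage());
        }
    }
}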
