import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
public class HBaseConnection
{
public static void main(String[] args) throws IOException
{
HBaseConfiguration hc = new HBaseConfiguration(new Configuration());
HTableDescriptor ht = new HTableDescriptor("guru99");
ht.addFamily( new HColumnDescriptor("education"));
ht.addFamily( new HColumnDescriptor("projects"));
System.out.println( "connecting" );
HBaseAdmin hba = new HBaseAdmin( hc );
System.out.println( "Creating Table" );
hba.createTable( ht );
System.out.println("Done......");
}
}
The above is the Java code I'm using to connect to HBase through its Java API, but I get this error:
Exception in thread "main" java.lang.Error: Unresolved compilation error
I cleaned the project and tried running it again. I have added all of HBase's external jar files, and I'm using HBase in pseudo-distributed mode with Hadoop. At the top of my Eclipse editor I also get this error:
The type com.google.protobuf.GeneratedMessage$Builder cannot be resolved. It is indirectly referenced from required .class files
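For reference, here is a minimal sketch of the same table creation against the newer HBase client API (assuming an HBase 1.x client; note that this alone won't resolve the protobuf error above, which usually means the protobuf-java jar is missing from the build path):
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
public class HBaseConnection
{
public static void main(String[] args) throws IOException
{
// Reads hbase-site.xml from the classpath for the connection settings
Configuration conf = HBaseConfiguration.create();
System.out.println( "connecting" );
try (Connection connection = ConnectionFactory.createConnection(conf);
Admin admin = connection.getAdmin())
{
HTableDescriptor ht = new HTableDescriptor(TableName.valueOf("guru99"));
ht.addFamily(new HColumnDescriptor("education"));
ht.addFamily(new HColumnDescriptor("projects"));
System.out.println( "Creating Table" );
admin.createTable(ht);
System.out.println("Done......");
}
}
}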
I am trying to run a Drools app. My Eclipse got corrupted, so I reinstalled it and reloaded Drools, jBPM, and Maven, and I cannot figure out why I get this error in every Drools app I run, even working demos from GitHub:
Exception in thread "main" java.lang.ExceptionInInitializerError
at org.drools.compiler.rule.builder.dialect.asm.InvokerGenerator.createStubGenerator(InvokerGenerator.java:34)
Sample code (confirmed working before I had to reinstall):
package com.jenn.DroolsDemo;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import org.drools.compiler.compiler.DroolsParserException;
import org.drools.compiler.compiler.PackageBuilder;
import org.drools.compiler.rule.builder.dialect.*;
import org.drools.core.RuleBase;
import org.drools.core.RuleBaseFactory;
import org.drools.core.WorkingMemory;
/**
 * @author Binod Suman
 * Binod Suman Academy YouTube
 */
public class DemoTest {
public static void main(String[] args) throws DroolsParserException, IOException {
DemoTest client = new DemoTest();
client.executeRule();
}
public void executeRule() throws DroolsParserException, IOException{
PackageBuilder builder = new PackageBuilder();
String ruleFile = "/offers.drl";
InputStream resourceAsStream = getClass().getResourceAsStream(ruleFile);
Reader ruleReader = new InputStreamReader(resourceAsStream);
builder.addPackageFromDrl(ruleReader);
org.drools.core.rule.Package rulePackage = builder.getPackage();
RuleBase ruleBase = RuleBaseFactory.newRuleBase();
ruleBase.addPackage(rulePackage);
WorkingMemory workingMemory = ruleBase.newStatefulSession();
PaymentOffer paymentOffer = new PaymentOffer();
paymentOffer.setChannel("paytm");
workingMemory.insert(paymentOffer);
workingMemory.fireAllRules();
System.out.println("The cashback for this payment channel "+paymentOffer.getChannel()+" is "+paymentOffer.getDiscount());
}
}
After some research, and the post above from Roddy, it is confirmed that my development partner was using a very old drools-core. I am working to get that upgraded in the code.
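For reference, a minimal sketch of the same rule execution on the modern KIE API (a sketch assuming Drools 7.x, with offers.drl packaged under src/main/resources and picked up by a default kmodule.xml; PaymentOffer is the model class from the code above):
import org.kie.api.KieServices;
import org.kie.api.runtime.KieContainer;
import org.kie.api.runtime.KieSession;
public class DemoTest {
public static void main(String[] args) {
// Boot the KIE runtime from the classpath (kmodule.xml plus compiled DRL)
KieServices ks = KieServices.Factory.get();
KieContainer kContainer = ks.getKieClasspathContainer();
KieSession kSession = kContainer.newKieSession();
PaymentOffer paymentOffer = new PaymentOffer();
paymentOffer.setChannel("paytm");
kSession.insert(paymentOffer);
kSession.fireAllRules();
kSession.dispose();
System.out.println("The cashback for this payment channel "+paymentOffer.getChannel()+" is "+paymentOffer.getDiscount());
}
}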
I get the error:
incompatible types: org.eclipse.jetty.servlet.ServletHandler cannot be converted to org.mortbay.jetty.Handler
while trying to run my code below. I'm new to Java and not sure why this is happening. Any ideas? (I'm using JDK 11, Jetty 9.3, and the IntelliJ IDE.)
package newJetty;
import newJetty.handler.PingHandler;
import org.eclipse.jetty.servlet.ServletHandler;
import org.mortbay.jetty.Handler;
import org.mortbay.jetty.Server;
/**
* Hello world!
*
*/
public class JettyServer
{
public static void main( String[] args ) throws Exception {
Server server = new Server(8080);
ServletHandler handler = new ServletHandler();
handler.addServletWithMapping(PingHandler.class, "/ping");
server.setHandler(handler);
//
server.start();
server.join();
}
}
You are importing the wrong classes: the org.mortbay.* packages belong to very old (pre-7.0) Jetty releases, while modern Jetty lives entirely under org.eclipse.jetty.*.
Remove the imports:
import org.mortbay.jetty.Handler;
import org.mortbay.jetty.Server;
and use these imports instead:
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.Handler;
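Putting it together, the corrected class would look like this (a sketch; PingHandler is your servlet from newJetty.handler and is assumed to extend HttpServlet):
package newJetty;
import newJetty.handler.PingHandler;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.ServletHandler;
public class JettyServer
{
public static void main( String[] args ) throws Exception {
Server server = new Server(8080); // listen on port 8080
ServletHandler handler = new ServletHandler();
handler.addServletWithMapping(PingHandler.class, "/ping"); // map /ping to the servlet
server.setHandler(handler);
server.start();
server.join(); // block until the server stops
}
}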
I am building a Gradle Java project using Apache Beam code (please refer below) and executing it in Eclipse Oxygen.
package com.xxxx.beam;
import java.io.IOException;
import org.apache.beam.runners.spark.SparkContextOptions;
import org.apache.beam.runners.spark.SparkPipelineResult;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.PipelineRunner;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.MapElements;
import org.apache.beam.sdk.transforms.SimpleFunction;
import org.apache.beam.sdk.values.KV;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.beam.sdk.io.FileIO;
import org.apache.beam.sdk.io.FileIO.ReadableFile;
public class ApacheBeamTestProject {
public void modelExecution(){
SparkContextOptions options = (SparkContextOptions) PipelineOptionsFactory.create();
options.setSparkMaster("xxxxxxxxx");
JavaSparkContext sc = options.getProvidedSparkContext();
JavaLinearRegressionWithSGDExample.runJavaLinearRegressionWithSGDExample(sc);
Pipeline p = Pipeline.create(options);
p.apply(FileIO.match().filepattern("hdfs://path/to/*.gz"))
// withCompression can be omitted - by default compression is detected from the filename.
.apply(FileIO.readMatches())
.apply(MapElements
// uses imports from TypeDescriptors
.via(
new SimpleFunction<ReadableFile, KV<String,String>>() {
private static final long serialVersionUID = -5715607038612883677L;
@SuppressWarnings("unused")
public KV<String,String> createKV(ReadableFile f) {
String temp = null;
try{
temp = f.readFullyAsUTF8String();
}catch(IOException e){
}
return KV.of(f.getMetadata().resourceId().toString(), temp);
}
}
))
.apply(FileIO.write())
;
SparkPipelineResult result = (SparkPipelineResult) p.run();
result.getState();
}
public static void main(String[] args) throws IOException {
System.out.println("Test log");
PipelineOptions options = PipelineOptionsFactory.create();
Pipeline p = Pipeline.create(options);
p.apply(FileIO.match().filepattern("hdfs://path/to/*.gz"))
// withCompression can be omitted - by default compression is detected from the filename.
.apply(FileIO.readMatches())
.apply(MapElements
// uses imports from TypeDescriptors
.via(
new SimpleFunction<ReadableFile, KV<String,String>>() {
private static final long serialVersionUID = -5715607038612883677L;
@SuppressWarnings("unused")
public KV<String,String> createKV(ReadableFile f) {
String temp = null;
try{
temp = f.readFullyAsUTF8String();
}catch(IOException e){
}
return KV.of(f.getMetadata().resourceId().toString(), temp);
}
}
))
.apply(FileIO.write());
p.run();
}
}
I am observing the following error when executing this project in Eclipse.
Test log
Exception in thread "main" java.lang.IllegalArgumentException: No Runner was specified and the DirectRunner was not found on the classpath.
Specify a runner by either:
Explicitly specifying a runner by providing the 'runner' property
Adding the DirectRunner to the classpath
Calling 'PipelineOptions.setRunner(PipelineRunner)' directly
at org.apache.beam.sdk.options.PipelineOptions$DirectRunner.create(PipelineOptions.java:291)
at org.apache.beam.sdk.options.PipelineOptions$DirectRunner.create(PipelineOptions.java:281)
at org.apache.beam.sdk.options.ProxyInvocationHandler.returnDefaultHelper(ProxyInvocationHandler.java:591)
at org.apache.beam.sdk.options.ProxyInvocationHandler.getDefault(ProxyInvocationHandler.java:532)
at org.apache.beam.sdk.options.ProxyInvocationHandler.invoke(ProxyInvocationHandler.java:155)
at org.apache.beam.sdk.options.PipelineOptionsValidator.validate(PipelineOptionsValidator.java:95)
at org.apache.beam.sdk.options.PipelineOptionsValidator.validate(PipelineOptionsValidator.java:49)
at org.apache.beam.sdk.PipelineRunner.fromOptions(PipelineRunner.java:44)
at org.apache.beam.sdk.Pipeline.create(Pipeline.java:150)
This project doesn't contain a pom.xml file; Gradle is set up to manage all the dependencies.
I am not sure how to fix this error. Could someone advise?
It seems that you are trying to use the DirectRunner and it is not on the classpath of your application. You can supply it by adding the beam-runners-direct-java dependency to your application:
https://mvnrepository.com/artifact/org.apache.beam/beam-runners-direct-java
EDIT (answered in a comment): you are trying to run this code on Spark, but didn't specify that in PipelineOptions. Beam tries to run the code on the DirectRunner by default, so I think this is why you get this error. Calling options.setRunner(SparkRunner.class); before creating the pipeline sets the correct runner and fixes the issue.
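A minimal sketch of that wiring (assuming the beam-runners-spark dependency is on the classpath; the master URL is a placeholder):
import org.apache.beam.runners.spark.SparkPipelineOptions;
import org.apache.beam.runners.spark.SparkRunner;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
// ...
SparkPipelineOptions options = PipelineOptionsFactory.as(SparkPipelineOptions.class);
options.setRunner(SparkRunner.class); // run on Spark instead of the DirectRunner
options.setSparkMaster("local[4]"); // placeholder master URL
Pipeline p = Pipeline.create(options);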
Downloading the beam-runners-direct-java-x.x.x.jar and adding it to the project classpath worked for me. Refer to the Maven repository linked above to download the DirectRunner jar file.
Furthermore, if you need a specific Beam runner for your project, you can pass the runner name as a program argument (e.g. --runner=DataflowRunner) and add the corresponding jar to the project classpath.
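For example, a sketch of main() that lets the runner be chosen at launch time from the program arguments:
// "--runner=DataflowRunner" (or SparkRunner, DirectRunner, ...) is then honored at startup
PipelineOptions options = PipelineOptionsFactory.fromArgs(args).withValidation().create();
Pipeline p = Pipeline.create(options);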
I have this file:
import java.io.IOException;
import java.nio.file.Paths;
import java.util.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.*;
import org.apache.hadoop.mapreduce.lib.output.*;
import org.apache.hadoop.util.*;
public class ViewCount extends Configured implements Tool {
public static void main(String args[]) throws Exception {
int res = ToolRunner.run(new ViewCount(), args);
System.exit(res);
}
public int run(String[] args) throws Exception {
//Path inputPath = new Path(args[0]);
Path inputPath = Paths.get("C:/WorkSpace/input.txt");
Path outputPath = Paths.get("C:/WorkSpace/output.txt");
Configuration conf = getConf();
Job job = new Job(conf, this.getClass().toString());
I'm trying to run the app on Windows. How can I set inputPath and outputPath? The method I use now doesn't work. Before, I had
Path inputPath = new Path(args[0]);
Path outputPath = new Path(args[1]);
and I had to go to the command line. Now I want to run the app from the IDE.
I'm getting
Required:
org.apache.hadoop.fs.Path
Found:
java.nio.file.Path
For Eclipse, you can set arguments via:
Run -> Run Configurations... -> Arguments.
It should be the same in IntelliJ.
The error tells you that it is expecting an org.apache.hadoop.fs.Path, but instead it receives a java.nio.file.Path.
This means you should change the second import of your code to org.apache.hadoop.fs.Path. IDE import suggestions can be wrong sometimes ;)
Change the import and then use the method that you already had to add the input and output paths. Those arguments are given in Eclipse by right-clicking the project -> Run As -> Run Configurations -> Arguments. The two paths should be whitespace-separated. Apply and run!
For subsequent executions, just run the project.
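Alternatively, if you want to keep hard-coded paths and skip program arguments entirely, here is a minimal sketch of run() using org.apache.hadoop.fs.Path (the input/output wiring beyond the paths is an assumption about the rest of the job):
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
// ...
public int run(String[] args) throws Exception {
Path inputPath = new Path("C:/WorkSpace/input.txt"); // hadoop Path, not java.nio.file.Path
Path outputPath = new Path("C:/WorkSpace/output"); // treated as a directory; must not already exist
Configuration conf = getConf();
Job job = Job.getInstance(conf, this.getClass().toString());
FileInputFormat.addInputPath(job, inputPath);
FileOutputFormat.setOutputPath(job, outputPath);
return job.waitForCompletion(true) ? 0 : 1;
}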
Following the Neo4j documentation, I tried the line below to create a database using the Neo4j Java API in Eclipse:
GraphDatabaseService db = new GraphDatabaseFactory().newEmbeddedDatabase("D:/Eclipse/Workplace/test");
and I also tried setting the path under the Neo4j folder ("D:\Neo4j3.0.1\workplace3"), but I got the same error:
The method newEmbeddedDatabase(File) in the type GraphDatabaseFactory is not applicable for the arguments (String)
Then I imported java.io.File and added:
File dbpath = new File("D:/Neo4j3.0.1/workplace3");
org.neo4j.graphdb.GraphDatabaseService db = dbFactory.newEmbeddedDatabase(dbpath);
After that, the two previously imported types are flagged as unused (org.neo4j.graphdb.GraphDatabaseService and org.neo4j.graphdb.Transaction), but the program compiles and prints "Done successfully" in the Eclipse console.
When I tried to connect to the database using neo4j-ce.exe, it gave me this alert:
Starting Neo4j failed: Component 'org.neo4j.server.database.LifecycleManagingDatabase@397b7f' was successfully initialized, but failed to start. Please see attached cause exception.
My code:
package com.peterlan522.neo4j.java.example;
import java.io.File;
import org.neo4j.graphdb.GraphDatabaseService;
import org.neo4j.graphdb.Transaction;
import org.neo4j.graphdb.factory.GraphDatabaseFactory;
import org.neo4j.graphdb.Node;
import org.neo4j.graphdb.Relationship;
import org.neo4j.graphdb.Label;
import org.neo4j.graphdb.RelationshipType;
public class Neo4jJavaAPIDBOperation {
public enum Tutorials implements Label { JAVA, SCALA, SQL, NEO4J }
public enum TutorialRelationships implements RelationshipType { JVM_LANGUAGES, NON_JVM_LANGUAGES }
public static void main(String[] args) {
org.neo4j.graphdb.factory.GraphDatabaseFactory dbFactory = new GraphDatabaseFactory();
File dbpath = new File("D:/Neo4j3.0.1/workplace3");
//GraphDatabaseService db = new GraphDatabaseFactory().newEmbeddedDatabase("D:/Eclipse/Workplace/test");
org.neo4j.graphdb.GraphDatabaseService db = dbFactory.newEmbeddedDatabase(dbpath);
try (org.neo4j.graphdb.Transaction tx = db.beginTx()) {
Node javaNode = db.createNode(Tutorials.JAVA);
javaNode.setProperty("TutorialID", "JAVA001");
javaNode.setProperty("Title", "Learn Java");
javaNode.setProperty("NoOfChapters", "25");
javaNode.setProperty("Status", "Completed");
Node scalaNode = db.createNode(Tutorials.SCALA);
scalaNode.setProperty("TutorialID", "SCALA001");
scalaNode.setProperty("Title", "Learn Scala");
scalaNode.setProperty("NoOfChapters", "20");
scalaNode.setProperty("Status", "Completed");
Relationship relationship = javaNode.createRelationshipTo(scalaNode, TutorialRelationships.JVM_LANGUAGES);
relationship.setProperty("Id","1234");
relationship.setProperty("OOPS","YES");
relationship.setProperty("FP","YES");
tx.success();
}
System.out.print("Done successfully");
}
}
Could anyone help with this and give an executable example? Thank you so much!
Below are the software versions:
Neo4j community version 3.0.1,
Eclipse Mars(4.5.0),
Java 1.8.0_91,
JRE System Library: JavaSE-1.8
Please see the log.txt at the link below:
https://drive.google.com/file/d/0B2xDq3--mwK4a0FoanlDengzVWs/view?usp=sharing
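One thing worth checking (a guess, not confirmed from your log): the Java program never calls db.shutdown(), and an embedded Neo4j store that is still held open, or was not closed cleanly, by another JVM cannot be started by neo4j-ce.exe. A minimal sketch of closing the store before opening it elsewhere:
GraphDatabaseService db = dbFactory.newEmbeddedDatabase(dbpath);
try (Transaction tx = db.beginTx()) {
// ... create nodes and relationships as above ...
tx.success();
} finally {
db.shutdown(); // releases the store lock so another process can open the database
}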