How to create a Couchbase bucket via the Java API?

I am using Spring Data Couchbase.
package com.CouchbaseMine.config;

import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;

import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.couchbase.config.AbstractCouchbaseConfiguration;

import com.couchbase.client.CouchbaseClient;

@Configuration
@EnableAutoConfiguration
public class CouchbaseMineCouchBaseConfig extends AbstractCouchbaseConfiguration {

    @Value("${couchbase.cluster.bucket}")
    private String bucketName;

    @Value("${couchbase.cluster.password}")
    private String password;

    @Value("${couchbase.cluster.ip}")
    private String ip;

    @Override
    protected String getBucketName() {
        List<URI> uris = new LinkedList<URI>();
        uris.add(URI.create("5x.xx.xxx.xx9"));
        CouchbaseClient client = null;
        try {
            System.err.println("-- > - > i am in ");
            client = new CouchbaseClient(uris, "default", "");
        } catch (IOException e) {
            System.err.println("IOException connection to couchbase: " + e.getMessage());
            System.exit(1);
        }
        return this.bucketName;
    }

    @Override
    protected String getBucketPassword() {
        return this.password;
    }

    @Override
    protected List<String> bootstrapHosts() {
        return Arrays.asList(this.ip);
    }
}
This is the configuration class used to establish the connection.
The application.properties file follows:
server.port=3000
couchbase.cluster.ip=5x.xx.xxx.xx9
couchbase.cluster.bucket=DHxxxar
couchbase.cluster.password=1221
Bottom line: I have created the bucket (DHxxxar) manually in Couchbase, but I need the bucket (database) to be created automatically when I run my Spring Boot application.
Any suggestions would be appreciated. Thanks in advance.

Try this:
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.CouchbaseCluster;
import com.couchbase.client.java.bucket.BucketType;
import com.couchbase.client.java.cluster.BucketSettings;
import com.couchbase.client.java.cluster.ClusterManager;
import com.couchbase.client.java.cluster.DefaultBucketSettings;

Cluster cluster = CouchbaseCluster.create("127.0.0.1");
ClusterManager clusterManager = cluster.clusterManager("Administrator", "12345");
BucketSettings bucketSettings = new DefaultBucketSettings.Builder()
        .type(BucketType.COUCHBASE)
        .name("hello")
        .quota(120) // RAM quota in MB
        .build();
clusterManager.insertBucket(bucketSettings);
More details:
https://developer.couchbase.com/documentation/server/current/sdk/java/managing-clusters.html
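If the goal is specifically to create the bucket when the Spring Boot application starts, that management call can run from the configuration class before the SDK opens the bucket. A minimal sketch, assuming SDK 2.x; the admin credentials ("Administrator"/"12345") are the same placeholders as above and must match your cluster:
import javax.annotation.PostConstruct;

import com.couchbase.client.java.CouchbaseCluster;
import com.couchbase.client.java.bucket.BucketType;
import com.couchbase.client.java.cluster.ClusterManager;
import com.couchbase.client.java.cluster.DefaultBucketSettings;

@PostConstruct
public void createBucketIfMissing() {
    CouchbaseCluster cluster = CouchbaseCluster.create(ip);
    ClusterManager clusterManager = cluster.clusterManager("Administrator", "12345");
    if (!clusterManager.hasBucket(bucketName)) { // skip creation if the bucket already exists
        clusterManager.insertBucket(new DefaultBucketSettings.Builder()
                .type(BucketType.COUCHBASE)
                .name(bucketName)
                .password(password)
                .quota(120) // RAM quota in MB
                .build());
    }
    cluster.disconnect();
}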

IgorekPotworek's answer is great for Couchbase Java SDK version 2.x.
For version 3.x, the code looks a little different:
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.manager.bucket.BucketManager;
import com.couchbase.client.java.manager.bucket.BucketSettings;

Cluster cluster = Cluster.connect("localhost", "Administrator", "password");
BucketManager bucketManager = cluster.buckets();
bucketManager.createBucket(
        BucketSettings.create("bucketName")
                .ramQuotaMB(100));
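If the application may restart against a cluster where the bucket already exists, it can be worth guarding the call; a small sketch under the same SDK 3.x assumptions:
if (!bucketManager.getAllBuckets().containsKey("bucketName")) {
    bucketManager.createBucket(BucketSettings.create("bucketName").ramQuotaMB(100));
}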


How to replicate the functionality of removing a document from a Couchbase DB Bucket identified by its ID in Spring Boot?

We are currently using Spring Boot 2.2.13 with Java 1.8-based dependencies. We are upgrading to Spring Boot 2.7.3 with Java 17 support.
The current code uses CouchbaseClient. In the newer versions we hope to migrate to, some functionality seems to have been removed. I am specifically having an issue replacing the functionality mentioned below, previously provided in com.couchbase.client.java.Bucket.
How could I replicate the functionality to remove a {@link Document} from the server identified by its ID in the newer version? Is there an alternative method that could be used for such a scenario?
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.document.JsonDocument;
import com.couchbase.client.java.document.json.JsonArray;
import com.couchbase.client.java.document.json.JsonObject;
import com.couchbase.client.java.query.N1qlQuery;
import com.couchbase.client.java.query.N1qlQueryResult;
import com.couchbase.client.java.query.N1qlQueryRow;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.Resource;
import org.springframework.core.io.ResourceLoader;
import org.springframework.core.io.support.ResourcePatternUtils;
import org.springframework.stereotype.Component;
import org.springframework.util.FileCopyUtils;
import com.acme.dto.configuration.ConfigEnvironment;
import com.acme.dto.configuration.ConfigurationStatus;
import com.acme.dto.configuration.LoadDbConfigurationResponseDTO;
import com.acme.exception.ErrorCodes;
import com.acme.exception.ServiceRuntimeException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import java.util.concurrent.TimeUnit;

@Component
@Slf4j
public class DBDocumentLoader extends DBConfigurationChain {

    @Autowired
    private ResourceLoader resourceLoader;

    @Value("${configuration.resource.db.root}")
    private String dbConfigurationRoot;

    @Autowired
    private Bucket couchbaseBucket;

    @Autowired
    private Bucket masterConfigurationBucket;

    private Bucket defaultBucket;

    int successDocumentCount;
    int failDocumentCount;

    @Override
    public DBConfigurationLogicData process(DBConfigurationLogicData logicData) {
        log.info("Load Document process START");
        defaultBucket = getDefaultBucket(logicData.getRequestDTO().getBucketName());
        if (defaultBucket == null) {
            throw new ServiceRuntimeException(ErrorCodes.Constants.DB_CONFIGURATION_NOT_SUPPORT_BUCKET);
        }
        log.info("BUCKET : " + logicData.getRequestDTO().getBucketName());
        Resource[] resources = loadResources(logicData.getRequestDTO().getEnv());
        if (resources != null && resources.length > 0) {
            log.info("Resources found. Count : " + resources.length);
            log.info("Flushing bucket " + logicData.getRequestDTO().getBucketName());
            if (!flushBucket())
                throw new ServiceRuntimeException(ErrorCodes.Constants.DB_CONFIGURATION_BUCKET_FLUSH_FAIL);
            log.info("Bucket " + logicData.getRequestDTO().getBucketName() + " flushed");
            processAvailableResources(resources);
            log.info("Success resource count : " + successDocumentCount + "\n" + "Failed resource count : " + failDocumentCount + "\n");
            setResponseDTO(logicData);
        } else {
            throw new ServiceRuntimeException(ErrorCodes.Constants.DB_CONFIGURATION_NO_RESOURCE);
        }
        return super.process(logicData);
    }

    private boolean flushBucket() {
        try {
            final String bucketName = "`" + defaultBucket.name() + "`";
            String query = "SELECT META(" + bucketName + ").id FROM " + bucketName;
            N1qlQueryResult result = defaultBucket.query(N1qlQuery.simple(query), 1, TimeUnit.MINUTES);
            final boolean isSuccess = result.finalSuccess();
            if (isSuccess && !result.allRows().isEmpty()) {
                Iterator<N1qlQueryRow> rows = result.rows();
                while (rows.hasNext()) {
                    JsonObject jsonObject = rows.next().value();
                    if (jsonObject.containsKey("id")) {
                        defaultBucket.remove(jsonObject.getString("id")); // Deprecated functionality that needs to be replaced
                    }
                }
            }
            return true;
        } catch (Exception ex) {
            log.error("Error in DBDocumentLoader-flushBucket", ex);
        }
        return false;
    }
}
Old API - https://docs.couchbase.com/sdk-api/couchbase-java-client-2.4.1/com/couchbase/client/java/Bucket.html
New API - https://docs.couchbase.com/sdk-api/couchbase-java-client/com/couchbase/client/java/Bucket.html
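For reference, a sketch of how the removal itself maps to SDK 3.x, where key-value operations moved from Bucket to Collection (the connection parameters and names here are placeholders):
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.Collection;

Cluster cluster = Cluster.connect("localhost", "Administrator", "password");
Collection collection = cluster.bucket("bucketName").defaultCollection();
collection.remove("documentId"); // removes the document by ID, like the old Bucket.remove(String)
The N1QL part of flushBucket() would similarly move to cluster.query(...), since Bucket.query(...) no longer exists in 3.x.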

Add Prometheus Metrics Endpoint to Java App Using JAX-RS

I’m trying to add a Prometheus metrics exporter to my Java app. The app is currently using javax.ws.rs to define REST endpoints.
For example:
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;

@GET
@Path("/example")
@Timed
public Response example(@QueryParam("id") Integer id) {
    return Response.ok("testing").build();
}
All the examples I found for setting up Prometheus in Java are using Spring. They suggest the following:
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import io.prometheus.client.exporter.HTTPServer;
import java.io.IOException;

@SpringBootApplication
public class App {
    public static void main(String[] args) {
        SpringApplication.run(App.class, args);
        try {
            HTTPServer server = new HTTPServer(8081);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Is there a way I can simply define a new endpoint in my current setup, for example:
@GET
@Path("/metrics")
@Timed
public Response example() {
    return Response.ok("return prom metrics here").build();
}
Without having to introduce Spring into the stack?
This can be done as follows:
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;

import javax.ws.rs.GET;
import javax.ws.rs.Path;

import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.Counter;
import io.prometheus.client.exporter.TextFormat;

CollectorRegistry registry = new CollectorRegistry();
Counter exCounter = Counter.build()
        .name("example")
        .help("Example counter") // help text is required by the client library
        .register(registry);

@GET
@Path("/metrics")
public String getMetrics() {
    Writer writer = new StringWriter();
    try {
        // Write all samples from the registry in the Prometheus text format (version 0.0.4)
        TextFormat.write004(writer, registry.metricFamilySamples());
        return writer.toString();
    } catch (IOException e) {
        return "error";
    }
}
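One detail worth adding: the Prometheus scraper expects the text exposition content type, which the client library exposes as a constant. A small addition to the sketch above:
@GET
@Path("/metrics")
@Produces(TextFormat.CONTENT_TYPE_004) // "text/plain; version=0.0.4; charset=utf-8"
public String getMetrics() { ... }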

How can I query a remote Apache Tinkerpop Graph Database with Gremlin and Java?

I haven't been able to find a comprehensive example of connecting to and then querying a remote Apache TinkerPop graph database with Gremlin and Java, and I can't quite get it to work. Can anyone that's done something like this before offer any advice?
I've set up an Azure Cosmos DB database in Graph-DB mode, which expects Gremlin queries in order to modify and access its data. I have the database host name, port, username, and password, and I'm able to execute queries, but only if I pass in a big ugly query string. I would like to be able to leverage the org.apache.tinkerpop.gremlin.structure.Graph traversal methods, but I can't quite get it working.
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.apache.tinkerpop.gremlin.driver.Result;
import org.apache.tinkerpop.gremlin.driver.ResultSet;
import org.apache.tinkerpop.gremlin.structure.Graph;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
//More imports...

@Service
public class SearchService {

    private final static Logger log = LoggerFactory.getLogger(SearchService.class);

    @Autowired
    private GraphDbConnection graphDbConnection;

    @Autowired
    private Graph graph;

    public Object workingQuery() {
        try {
            String query = "g.V('1234').outE('related').inV().both().as('v').project('vertex').by(select('v')).by(bothE().fold())";
            log.info("Submitting this Gremlin query: {}", query);
            ResultSet results = graphDbConnection.executeQuery(query);
            CompletableFuture<List<Result>> completableFutureResults = results.all();
            List<Result> resultList = completableFutureResults.get();
            Result result = resultList.get(0);
            log.info("Query result: {}", result.toString());
            return result.toString();
        } catch (Exception e) {
            log.error("Error fetching data.", e);
        }
        return null;
    }

    public Object failingQuery() {
        return graph.traversal().V(1234).outE("related").inV()
                .both().as("v").project("vertex").by("v").bothE().fold()
                .next();
        /* I get an Exception:
        "org.apache.tinkerpop.gremlin.process.remote.RemoteConnectionException:
        java.lang.RuntimeException: java.lang.RuntimeException:
        java.util.concurrent.TimeoutException: Timed out while waiting for an
        available host - check the client configuration and connectivity to the
        server if this message persists" */
    }
}
This is my configuration class:
import java.util.HashMap;
import java.util.Map;
import org.apache.tinkerpop.gremlin.driver.Cluster;
import org.apache.tinkerpop.gremlin.driver.MessageSerializer;
import org.apache.tinkerpop.gremlin.driver.remote.DriverRemoteConnection;
import org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerGremlinV2d0;
import org.apache.tinkerpop.gremlin.structure.Graph;
import org.apache.tinkerpop.gremlin.structure.util.GraphFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class GraphDbConfig {

    private final static Logger log = LoggerFactory.getLogger(GraphDbConfig.class);

    @Value("${item.graph.hostName}")
    private String hostName;

    @Value("${item.graph.port}")
    private int port;

    @Value("${item.graph.username}")
    private String username;

    @Value("${item.graph.password}")
    private String password;

    @Value("${item.graph.enableSsl}")
    private boolean enableSsl;

    @Bean
    public Graph graph() {
        Map<String, String> graphConfig = new HashMap<>();
        graphConfig.put("gremlin.graph",
                "org.apache.tinkerpop.gremlin.process.remote.RemoteGraph");
        graphConfig.put("gremlin.remoteGraph.remoteConnectionClass",
                "org.apache.tinkerpop.gremlin.driver.remote.DriverRemoteConnection");
        Graph g = GraphFactory.open(graphConfig);
        g.traversal().withRemote(DriverRemoteConnection.using(cluster()));
        return g;
    }

    @Bean
    public Cluster cluster() {
        Cluster cluster = null;
        try {
            MessageSerializer serializer = new GraphSONMessageSerializerGremlinV2d0();
            Cluster.Builder clusterBuilder = Cluster.build().addContactPoint(hostName)
                    .serializer(serializer)
                    .port(port).enableSsl(enableSsl)
                    .credentials(username, password);
            cluster = clusterBuilder.create();
        } catch (Exception e) {
            log.error("Error in connecting to host address.", e);
        }
        return cluster;
    }
}
And currently I have to define this connection component in order to send queries to the database:
import org.apache.tinkerpop.gremlin.driver.Client;
import org.apache.tinkerpop.gremlin.driver.Cluster;
import org.apache.tinkerpop.gremlin.driver.ResultSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class GraphDbConnection {

    private final static Logger log = LoggerFactory.getLogger(GraphDbConnection.class);

    @Autowired
    private Cluster cluster;

    public ResultSet executeQuery(String query) {
        Client client = connect();
        ResultSet results = client.submit(query);
        closeConnection(client);
        return results;
    }

    private Client connect() {
        Client client = null;
        try {
            client = cluster.connect();
        } catch (Exception e) {
            log.error("Error in connecting to host address.", e);
        }
        return client;
    }

    private void closeConnection(Client client) {
        client.close();
    }
}
You cannot leverage the remote API with CosmosDB yet, because it does not support Gremlin Bytecode.
https://github.com/Azure/azure-documentdb-dotnet/issues/439
https://feedback.azure.com/forums/263030-azure-cosmos-db/suggestions/33632779-support-gremlin-bytecode-to-enable-the-fluent-api
You will have to continue with strings until then. Since you are using Java, though, you could try a somewhat unadvertised feature: GroovyTranslator.
gremlin> g = EmptyGraph.instance().traversal()
==>graphtraversalsource[emptygraph[empty], standard]
gremlin> translator = GroovyTranslator.of('g')
==>translator[g:gremlin-groovy]
gremlin> translator.translate(g.V().out('knows').has('person','name','marko').asAdmin().getBytecode())
==>g.V().out("knows").has("person","name","marko")
As you can see, it takes Gremlin Bytecode and converts it into a String of Gremlin that you could submit to CosmosDB. Later, when CosmosDB supports Bytecode, you could drop the GroovyTranslator, change from EmptyGraph construction of your GraphTraversalSource, and everything should start working. To make this really seamless, you could go the extra step and write a TraversalStrategy that would do something similar to TinkerPop's RemoteStrategy. Instead of submitting Bytecode as that strategy does, you would just use GroovyTranslator and submit the string of Gremlin. That approach would make it even easier to switch over when CosmosDB supports Bytecode, because then all you would have to do is remove your custom TraversalStrategy and reconfigure your remote GraphTraversalSource in the standard way.
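A rough Java sketch of that idea (a sketch, not a drop-in: GroovyTranslator's package and return type have moved between TinkerPop releases; in the 3.2/3.3 line it lives in org.apache.tinkerpop.gremlin.groovy.jsr223 and translate() returns the Gremlin string directly):
import org.apache.tinkerpop.gremlin.driver.Client;
import org.apache.tinkerpop.gremlin.driver.ResultSet;
import org.apache.tinkerpop.gremlin.groovy.jsr223.GroovyTranslator;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.util.empty.EmptyGraph;

// Build the traversal against an EmptyGraph; nothing executes locally
GraphTraversalSource g = EmptyGraph.instance().traversal();
String gremlin = GroovyTranslator.of("g").translate(
        g.V("1234").outE("related").inV().asAdmin().getBytecode());

// Submit the generated string through the driver Client from the question's GraphDbConnection
Client client = cluster.connect();
ResultSet results = client.submit(gremlin);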

Java web services to allow large file streaming

I am building a Java web services server that needs to scale and to be highly available. Users can upload large files (~20 MB) through the services. SOAP is preferred.
My questions are: is there a web service framework that supports large file streaming? Are there any building blocks I should consider? Any good practices?
Any thoughts would be appreciated. Thanks.
If you need high performance, web services are not ideal.
You can try streaming SOAP attachments (MTOM):
http://docs.oracle.com/cd/E12840_01/wls/docs103/webserv_adv/mtom.html
An example:
http://www.mkyong.com/webservices/jax-ws/jax-ws-attachment-with-mtom/
File : ImageServer.java
//Service Endpoint Interface
package com.mkyong.ws;

import java.awt.Image;
import javax.jws.WebMethod;
import javax.jws.WebService;
import javax.jws.soap.SOAPBinding;
import javax.jws.soap.SOAPBinding.Style;

@WebService
@SOAPBinding(style = Style.RPC)
public interface ImageServer {

    //download an image from the server
    @WebMethod Image downloadImage(String name);

    //upload an image to the server
    @WebMethod String uploadImage(Image data);
}
//File : ImageServerImpl.java
package com.mkyong.ws;

import java.awt.Image;
import java.io.File;
import java.io.IOException;
import javax.imageio.ImageIO;
import javax.jws.WebService;
import javax.xml.ws.WebServiceException;
import javax.xml.ws.soap.MTOM;

//Service Implementation Bean
@MTOM
@WebService(endpointInterface = "com.mkyong.ws.ImageServer")
public class ImageServerImpl implements ImageServer {

    @Override
    public Image downloadImage(String name) {
        try {
            File image = new File("c:\\images\\" + name);
            return ImageIO.read(image);
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
    }

    @Override
    public String uploadImage(Image data) {
        if (data != null) {
            //store somewhere
            return "Upload Successful";
        }
        throw new WebServiceException("Upload Failed!");
    }
}
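To try the example outside a container, the endpoint can be published with the JAX-WS runtime bundled with JDK 8 (removed from newer JDKs); a sketch, where the class name and URL are arbitrary choices:
//File : ImageServerPublisher.java (hypothetical helper, not part of the original example)
package com.mkyong.ws;

import javax.xml.ws.Endpoint;

public class ImageServerPublisher {
    public static void main(String[] args) {
        // Publish the MTOM-enabled implementation at a local address
        Endpoint.publish("http://localhost:8888/ws/image", new ImageServerImpl());
        System.out.println("ImageServer published at http://localhost:8888/ws/image");
    }
}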

Infinispan: clustered instances not sharing cache

I've spent over two days doing nothing but trying to get Infinispan to work in a clustered environment, and it's not working. I don't want to run a separate Infinispan server; I just want to embed it in my application, which runs on a clustered GlassFish. Is that not possible? I've got a sample JSF app where you can load values into a map that's supposed to sit in the cache. I pull up one clustered instance, add the values, and they show up. But when I go to the other clustered instance, it shows the map as empty.
I know I'm doing something wrong, I just don't know what. I've been searching the internet and there is no comprehensive tutorial on how to get it to work.
Config (copied from a tutorial that supposedly shows clustering, http://www.mastertheboss.com/infinispan/infinispan-tutorial-part-2/page-2 ):
<infinispan>
    <global>
        <transport clusterName="demoCluster"/>
        <globalJmxStatistics enabled="true"/>
    </global>
    <default>
        <jmxStatistics enabled="true"/>
        <clustering mode="distribution">
            <hash numOwners="2" rehashRpcTimeout="120000"/>
            <sync/>
        </clustering>
    </default>
</infinispan>
Context listener:
package hazelcache.test;

import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import javax.servlet.annotation.WebListener;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;

@WebListener()
public class Listener implements ServletContextListener {

    EmbeddedCacheManager manager;

    @Override
    public void contextInitialized(ServletContextEvent sce) {
        try {
            manager = new DefaultCacheManager("config.xml");
            manager.start();
            sce.getServletContext().setAttribute("cacheManager", manager);
        } catch (IOException ex) {
            Logger.getLogger(Listener.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    @Override
    public void contextDestroyed(ServletContextEvent sce) {
        manager.stop();
    }
}
Bean:
package hazelcache.test;

import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import javax.faces.bean.ManagedBean;
import javax.faces.context.FacesContext;
import javax.servlet.ServletContext;
import org.infinispan.configuration.global.GlobalConfigurationBuilder;
import org.infinispan.manager.DefaultCacheManager;
import org.infinispan.manager.EmbeddedCacheManager;

@ManagedBean(name = "clusterTest")
public class ClusteredCacheBean extends CacheTestBean {

    EmbeddedCacheManager manager;

    public ClusteredCacheBean() throws IOException {
        System.out.println("Before setStuffz()");
        manager = (EmbeddedCacheManager) ((ServletContext) FacesContext.getCurrentInstance()
                .getExternalContext().getContext()).getAttribute("cacheManager");
        setStuffz(manager.getCache("stuffz"));
        System.out.println("After setStuffz()");
    }// end ClusteredCacheBean()

    private static EmbeddedCacheManager createCacheManagerProgramatically() {
        return new DefaultCacheManager(GlobalConfigurationBuilder.defaultClusteredBuilder().build());
    }

    @Override
    public String addToCache() {
        String forwardTo = null;
        manager.getCache("stuffz").put(getId(), getName());
        return forwardTo;
    }// end addToCache()

    @Override
    public List getStuffzList() {
        System.out.println("Stuffz: " + getStuffz().size());
        return new LinkedList(manager.getCache("stuffz").entrySet());
    }
}// end class ClusteredCacheBean
I really don't know what to do at this point...
A wonderful person on another forum helped me figure it out:
1) Set this JVM option: -Djava.net.preferIPv4Stack=true
asadmin> create-jvm-options --target ClusterName -Djava.net.preferIPv4Stack=true
2) Call getCache in the listener once, just to create the cache while the application is starting up:
setStuffz(manager.getCache("stuffz"));
3) Add the namespace declarations to the configuration file:
<infinispan
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="urn:infinispan:config:5.1 http://docs.jboss.org/infinispan/schemas/infinispan-config-5.1.xsd"
    xmlns="urn:infinispan:config:5.1">
Thank you, Tristan from the jBoss forums (https://community.jboss.org/community/infinispan)!
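Applied to the Listener above, point 2 amounts to touching the cache once right after start-up; a sketch of the relevant lines in contextInitialized:
manager = new DefaultCacheManager("config.xml");
manager.start();
manager.getCache("stuffz"); // create/join the clustered cache eagerly, before any request needs it
sce.getServletContext().setAttribute("cacheManager", manager);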
