jnetpcap Getting java.lang.IllegalArgumentException - java

My console output is:
Exception in thread "main" java.lang.IllegalArgumentException: size of array must be MAX_ID_COUNT size
at org.jnetpcap.packet.JScanner.loadScanners(Native Method)
at org.jnetpcap.packet.JScanner.reloadAll(JScanner.java:376)
at org.jnetpcap.packet.JScanner.<init>(JScanner.java:313)
at org.jnetpcap.packet.JScanner.<init>(JScanner.java:293)
at org.jnetpcap.packet.JPacket.getDefaultScanner(JPacket.java:621)
at org.jnetpcap.packet.JPacket.scan(JPacket.java:1094)
at org.jnetpcap.packet.JMemoryPacket.<init>(JMemoryPacket.java:387)
at org.jnetpcap.packet.JMemoryPacket.<init>(JMemoryPacket.java:440)
at ClassicPcapExample.main(ClassicPcapExample.java:128)
Code:
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import org.jnetpcap.Pcap;
import org.jnetpcap.PcapIf;
import org.jnetpcap.packet.JMemoryPacket;
import org.jnetpcap.packet.JPacket;
import org.jnetpcap.packet.PcapPacket;
import org.jnetpcap.packet.PcapPacketHandler;
import org.jnetpcap.protocol.JProtocol;
import org.jnetpcap.protocol.lan.Ethernet;
import org.jnetpcap.protocol.network.Ip4;
import org.jnetpcap.protocol.tcpip.Tcp;
public class ClassicPcapExample{
/**
* Main startup method
*
* @param args
* ignored
*/
public static void main(String[] args) {
List<PcapIf> alldevs = new ArrayList<PcapIf>(); // Will be filled with NICs
StringBuilder errbuf = new StringBuilder(); // For any error msgs
/***************************************************************************
* First get a list of devices on this system
**************************************************************************/
int r = Pcap.findAllDevs(alldevs, errbuf);
if (r == Pcap.NOT_OK || alldevs.isEmpty()) {
System.err.printf("Can't read list of devices, error is %s", errbuf
.toString());
return;
}
System.out.println("Network devices found:");
int i = 0;
for (PcapIf device : alldevs) {
String description =
(device.getDescription() != null) ? device.getDescription()
: "No description available";
System.out.printf("#%d: %s [%s]\n", i++, device.getName(), description);
}
PcapIf device = alldevs.get(0); // We know we have at least 1 device
System.out
.printf("\nChoosing '%s' on your behalf:\n",
(device.getDescription() != null) ? device.getDescription()
: device.getName());
/***************************************************************************
* Second we open up the selected device
**************************************************************************/
int snaplen = 64 * 1024; // Capture all packets, no truncation
int flags = Pcap.MODE_PROMISCUOUS; // capture all packets
int timeout = 10 * 1000; // 10 seconds in milliseconds
Pcap pcap =
Pcap.openLive(device.getName(), snaplen, flags, timeout, errbuf);
if (pcap == null) {
System.err.printf("Error while opening device for capture: "
+ errbuf.toString());
return;
}
/***************************************************************************
* Third we create a packet handler which will receive packets from the
* libpcap loop.
**************************************************************************/
PcapPacketHandler<String> jpacketHandler = new PcapPacketHandler<String>() {
public void nextPacket(PcapPacket packet, String user) {
System.out.printf("Received packet at %s caplen=%-4d len=%-4d %s\n",
new Date(packet.getCaptureHeader().timestampInMillis()),
packet.getCaptureHeader().caplen(), // Length actually captured
packet.getCaptureHeader().wirelen(), // Original length
user // User supplied object
);
}
};
/***************************************************************************
* Fourth we enter the loop and tell it to capture 10 packets. The loop
* method does a mapping of pcap.datalink() DLT value to JProtocol ID, which
* is needed by JScanner. The scanner scans the packet buffer and decodes
* the headers. The mapping is done automatically, although a variation on
* the loop method exists that allows the programmer to specify exactly
* which protocol ID to use as the data link type for this pcap interface.
**************************************************************************/
//pcap.loop(5, jpacketHandler, "jNetPcap rocks!");//GETTING AN ERROR AT THIS LINE!!
pcap.close();
}
}

I started getting the same issue when I downgraded from 1.4.r1425 to 1.3; it turned out to be caused by the 1.4.r1425 jnetpcap.dll, which I had forgotten to replace with the 1.3 version of the DLL. I would suggest you ensure that the correct version of the native library file is available (.dll for Windows, .so for Linux, as per your environment).
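As a quick sanity check (a minimal sketch; it assumes your jnetpcap build exposes the static Pcap.libVersion() method), you can start the JVM with the native library directory on the path and print the version that actually gets loaded, to confirm it matches the jar you compile against:

// Run with: java -Djava.library.path=/path/to/jnetpcap-native-libs NativeLibCheck
import org.jnetpcap.Pcap;
public class NativeLibCheck {
    public static void main(String[] args) {
        // Touching the Pcap class triggers loading of the native library; a mismatched
        // .dll/.so usually fails right here or reports an unexpected version.
        System.out.println("Native library reports: " + Pcap.libVersion());
    }
}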

Related

Java iTunes COM interface Jacob - Get PersistentIDs (PersistentIDLow and PersistentIDHigh)

I have some problems understanding the iTunes COM interface with Jacob, specifically the PersistentIDs (PersistentIDLow and PersistentIDHigh) used to identify playlists and tracks by a unique key in iTunes.
I hope someone can tell me what I'm doing wrong.
Here is my code example that reads the iTunes playlist List 1.
The example shows, in comments, my attempt to read the PersistentIDs for the playlist List 1 and for every track of the playlist.
But the Dispatch calls don't work; see the code lines with the comment "doesn't work".
Exception in thread "main" com.jacob.com.ComFailException: Can't map
name to dispid: GetObjectPersistentIDLow
Exception in thread "main" com.jacob.com.ComFailException: Can't map name to dispid: GetObjectPersistentIDHigh
public static void main(String args[]){
System.setProperty("jacob.dll.path", "C:\\Dev\\iTunesTest\\dll\\jacob-1.18-M2-x86.dll");
// build iTunes activeX component
ActiveXComponent g_iTunes = new ActiveXComponent("iTunes.Application");
// get iTunes Sources
Dispatch g_sources = Dispatch.call(g_iTunes, "Sources").toDispatch();
// determine iTunes Library Source ID
int sourceID = 0;
for (int i=1;i<=Dispatch.get(g_sources, "Count").getInt();i++){
Dispatch itemSearch = Dispatch.call(g_sources, "Item", i).toDispatch();
if (ITunesSourceKind.values()[Dispatch.get(itemSearch, "Kind").getInt()].toString().equals("ITSourceKindLibrary")){
sourceID = Dispatch.get(itemSearch, "Index").getInt();
}
itemSearch.safeRelease();
}
// get iTunes Library Source
Dispatch g_source = Dispatch.call(g_sources, "Item", sourceID).toDispatch();
// get iTunes Playlists
Dispatch g_playlists = Dispatch.get(g_source, "Playlists").toDispatch();
// get iTunes Playlist byName
Dispatch g_playlistItem = Dispatch.call(g_playlists, "ItemByName", "List 1").toDispatch();
// get iTunes Tracks for Playlist
Dispatch g_tracks = Dispatch.get(g_playlistItem, "Tracks").toDispatch();
String name = Dispatch.get(g_playlistItem, "Name").getString();
System.out.println("Playlist: " + name);
// doesn't work
System.out.println("ID Low : " + Dispatch.call(g_playlistItem, "GetObjectPersistentIDLow").getLong());
System.out.println("ID High : " + Dispatch.call(g_playlistItem, "GetObjectPersistentIDHigh").getLong());
// get every iTunes playlist track
int playlistSongCount = Dispatch.get(g_tracks, "Count").getInt();
for (int i=1; i<=playlistSongCount; i++){
// get single iTunes playlist track
Dispatch track = Dispatch.call(g_tracks, "Item", i).toDispatch();
System.out.println(" Song Name: " + Dispatch.get(track, "Name").getString());
// doesn't work
System.out.println(" ID Low : " + Dispatch.call(track, "GetObjectPersistentIDLow").getLong());
System.out.println(" ID High : " + Dispatch.call(track, "GetObjectPersistentIDHigh").getLong());
if(track != null){
track.safeRelease();
}
}
// release objects
if(g_tracks != null){
g_tracks.safeRelease();
}
if(g_playlistItem != null){
g_playlistItem.safeRelease();
}
if(g_playlists != null){
g_playlists.safeRelease();
}
if(g_source != null){
g_source.safeRelease();
}
if(g_sources != null){
g_sources.safeRelease();
}
if(g_iTunes != null){
// close iTunes
//g_iTunes.invoke("Quit");
g_iTunes.safeRelease();
}
}
I thought this should work, see reference url Java-iTunes-API
Summary:
/**
* Returns the high 32 bits of the persistent ID of the specified IITObject.
* See the documentation on IITObject for more information on persistent
* IDs.
*
* The object may be a source, playlist, or track.
*
* @param iObject
* The object to fetch the High Persistent ID.
* @return The high 32 bits of the 64-bit persistent ID.
*/
public long getITObjectPersistentIDHigh(ITObject iObject) {
Dispatch object = iObject.fetchDispatch();
return Dispatch.call(object, "GetObjectPersistentIDHigh", object)
.getLong();
}
/**
* Returns the low 32 bits of the persistent ID of the specified IITObject.
* See the documentation on IITObject for more information on persistent
* IDs.
*
* The object may be a source, playlist, or track.
*
* @param iObject
* The object to fetch the Low Persistent ID.
* @return The low 32 bits of the 64-bit persistent ID.
*/
public long getITObjectPersistentIDLow(ITObject iObject) {
Dispatch object = iObject.fetchDispatch();
return Dispatch.call(object, "GetObjectPersistentIDLow", object)
.getLong();
}
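If the goal is a single unique key per object, the two 32-bit halves can be combined into one 64-bit value with plain Java bit operations (a small sketch, independent of the Jacob calls above):

// Combine the high/low 32-bit halves of an iTunes persistent ID into one 64-bit key.
static long combinePersistentId(long high32, long low32) {
    return (high32 << 32) | (low32 & 0xFFFFFFFFL);
}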
Any idea what I'm doing wrong?
Thank you so much.
Michael

How to take data from an Arduino into a MySQL database with an average calculated

For my group's final project we are using an Arduino with a pH sensor attached. I have the supplied code from the pH sensor manufacturer, which is working great. I have communication from the Arduino to Java, and the information is displayed in the output when the code is run. The MySQL statement is correct and all the fields are added. However, when the data from the for loop is written to the database it does something like a sum and creates a large number. I was thinking of using an array list to take the data in and then perform an average. Would I need to have in.read() assigned to the array?
package Control;
import Model.fluidTest;
import java.io.*;
import java.util.*;
import com.fazecast.jSerialComm.SerialPort;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
public class ComPortReader {
/**
*
* @param args
* @throws FileNotFoundException
* @throws UnsupportedEncodingException
*/
public static void main(String[] args) throws FileNotFoundException,
UnsupportedEncodingException, ClassNotFoundException, SQLException,
IOException {
//**********************************************************************
/**
* These are preparation for the database to be connected and write data
* to database.
*
*/
PreparedStatement reading = null;
Connection con = null;
//**********************************************************************
/**
* This section of code will be used to take the information from the
* for loop and place it into a array list to perform the average of the
* ph input from the Arduino.
*
* The input is being added together while uploading to the MYSQL
* database which is producing incorrect ph level readings.
*/
// float average
//**********************************************************************
/**
* This is to open the communication port on the computer to talk to the
* input device.
*/
SerialPort comPort = SerialPort.getCommPorts()[0];
comPort.openPort();
comPort.setComPortTimeouts(SerialPort.TIMEOUT_READ_SEMI_BLOCKING, 100,
0);
InputStream in = comPort.getInputStream();
try {
for (int j = 0; j < 100; j++) {
System.out.print((char) in.read());
}
in.close();
} catch (Exception e) {
e.printStackTrace();
}
/**
* This begins the query to the MYSQL database we are connecting to the
* database using default parameters while using user defined user login
* and password.
*/
try {// Try/catch to write data to a MySQL database.
// The database user and password below are placeholders; substitute your own credentials.
con = DriverManager.getConnection(
"jdbc:mysql://localhost:3306/phlevel?relaxAutoReconnect=false&useSSL=false&relaxAutoCommit=true",
"user", "password");
String sql = "INSERT INTO phlevel.inputreadings(id,phInput,javaReading) values(?,?,?)";
reading = con.prepareStatement(sql);
reading.setInt(1, 0); // Creates a new ID field for the performed test.
reading.setFloat(2, in.read()); // Creates a new float field for the pH level
reading.setString(3, "New Reading---->"); // Creates a label for the reading
reading.executeUpdate();
// End MySQL update query
con.commit();// End connection to MySQL
reading.close();// Finish statements to MySQL
comPort.closePort(); // Close the serial port connected to the Arduino.
} catch (SQLException ex) {
ex.printStackTrace();
}
}
}
final int SAMPLE_DATA_COUNT = 100;
float averagePH = 0;
// in.read() returns a single character code, so read one text line per sample and
// parse the line as a float instead of passing the raw int to Float.parseFloat().
BufferedReader serialIn = new BufferedReader(new InputStreamReader(in));
for (int j = 0; j < SAMPLE_DATA_COUNT; j++) {
String line = serialIn.readLine();
System.out.println(line);
averagePH += Float.parseFloat(line.trim());
}
averagePH /= SAMPLE_DATA_COUNT; // Insert this value into SQL
...
reading.setFloat( 2, averagePH );
I think this line
reading.setInt(1, 0);
should not be there in the first place; the id (primary key) column should be set to AUTO_INCREMENT, and then you don't need to specify it when inserting data, as in the sketch below.
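A minimal sketch of that change, reusing the table and column names from the question (the ALTER TABLE statement is an assumption about the schema; adjust the column type to match yours):

// One-time schema change (run in MySQL):
//   ALTER TABLE phlevel.inputreadings MODIFY id INT NOT NULL AUTO_INCREMENT;
// With id auto-generated, the INSERT omits it entirely:
String sql = "INSERT INTO phlevel.inputreadings(phInput, javaReading) values(?,?)";
reading = con.prepareStatement(sql);
reading.setFloat(1, averagePH); // the averaged pH value computed above
reading.setString(2, "New Reading---->");
reading.executeUpdate();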

App Engine Backend with Google Cloud Messaging sending message to more than 1000 users

I want to send a message (e.g. "Update available") to all users (~15,000). I have implemented an App Engine backend with Google Cloud Messaging to send the message.
I have tested on 2 devices and got the message on both. But as the Google docs say, "GCM supports up to 1,000 recipients for a single message."
My question is: how do I send the same message to the remaining 14,000 users in my case? Or will the code below take care of it?
Below is the code which sends message
import com.google.android.gcm.server.Constants;
import com.google.android.gcm.server.Message;
import com.google.android.gcm.server.Result;
import com.google.android.gcm.server.Sender;
import com.google.api.server.spi.config.Api;
import com.google.api.server.spi.config.ApiNamespace;
import java.io.IOException;
import java.util.List;
import java.util.logging.Logger;
import javax.inject.Named;
import static com.example.shani.myapplication.backend.OfyService.ofy;
/**
* An endpoint to send messages to devices registered with the backend
* <p/>
* For more information, see
* https://developers.google.com/appengine/docs/java/endpoints/
* <p/>
* NOTE: This endpoint does not use any form of authorization or
* authentication! If this app is deployed, anyone can access this endpoint! If
* you'd like to add authentication, take a look at the documentation.
*/
#Api(name = "messaging", version = "v1", namespace = #ApiNamespace(ownerDomain = "backend.myapplication.shani.example.com", ownerName = "backend.myapplication.shani.example.com", packagePath = ""))
public class MessagingEndpoint {
private static final Logger log = Logger.getLogger(MessagingEndpoint.class.getName());
/**
* Api Keys can be obtained from the google cloud console
*/
private static final String API_KEY = System.getProperty("gcm.api.key");
/**
* Send to the first 10 devices (You can modify this to send to any number of devices or a specific device)
*
* @param message The message to send
*/
public void sendMessage(@Named("message") String message) throws IOException {
if (message == null || message.trim().length() == 0) {
log.warning("Not sending message because it is empty");
return;
}
// crop longer messages
if (message.length() > 1000) {
message = message.substring(0, 1000) + "[...]";
}
Sender sender = new Sender(API_KEY);
Message msg = new Message.Builder().addData("message", message).build();
List<RegistrationRecord> records = ofy().load().type(RegistrationRecord.class).limit(1000).list();
for (RegistrationRecord record : records) {
Result result = sender.send(msg, record.getRegId(), 5);
if (result.getMessageId() != null) {
log.info("Message sent to " + record.getRegId());
String canonicalRegId = result.getCanonicalRegistrationId();
if (canonicalRegId != null) {
// if the regId changed, we have to update the datastore
log.info("Registration Id changed for " + record.getRegId() + " updating to " + canonicalRegId);
record.setRegId(canonicalRegId);
ofy().save().entity(record).now();
}
} else {
String error = result.getErrorCodeName();
if (error.equals(Constants.ERROR_NOT_REGISTERED)) {
log.warning("Registration Id " + record.getRegId() + " no longer registered with GCM, removing from datastore");
// if the device is no longer registered with Gcm, remove it from the datastore
ofy().delete().entity(record).now();
} else {
log.warning("Error when sending message : " + error);
}
}
}
}
}
I know there are similar questions, but I am using Java. The questions I found use PHP at the backend, so they were not helpful to me!
Google Cloud Messaging: Send message to "all" users
Sending Push Notification on multiple devices
Is there anyone who has successfully implemented App Engine + Google Cloud Messaging in Java?
In the code line below, if I replace 1000 with 15,000, will it solve my problem?
List<RegistrationRecord> records = ofy().load().type(RegistrationRecord.class).limit(1000).list();
Please help as soon as possible, and sorry for my English. If anyone needs other details, you are welcome to ask.
Thanks for your time.
A few considerations:
1) Sending notifications to a possibly huge number of users might take significant time; consider using Task Queues to queue that work to be done "offline", outside the 60-second request limit.
2) As for the GCM limit: if you need to reach all your users but GCM allows only 1,000 at a time, just split them into batches of 1,000 and send each batch the message separately.
If you combine both recommendations you should have a fairly scalable process: query for all your users in one request, split that list, and queue sending the message to those users 1,000 at a time, as in the sketch below.
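A minimal sketch of the batching part, assuming the same RegistrationRecord entity and Objectify setup as the endpoint above (Sender.send with a list of registration IDs is the multicast variant in the gcm-server library):

// Send one multicast message per batch of at most 1000 registration ids.
List<RegistrationRecord> records = ofy().load().type(RegistrationRecord.class).list();
List<String> regIds = new ArrayList<String>();
for (RegistrationRecord record : records) {
    regIds.add(record.getRegId());
}
Sender sender = new Sender(API_KEY);
Message msg = new Message.Builder().addData("message", message).build();
for (int start = 0; start < regIds.size(); start += 1000) {
    List<String> batch = regIds.subList(start, Math.min(start + 1000, regIds.size()));
    MulticastResult result = sender.send(msg, batch, 5); // 5 retries per batch
    log.info("Sent batch of " + batch.size() + ", success count = " + result.getSuccess());
}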
Extending @jirungaray's answer, below is code for sending GCM messages to all registered users.
Here I assume that from Android you are registering each mobile device for GCM services and storing those device tokens in a database.
import java.io.IOException;
import java.util.List;
import java.util.logging.Logger;
import com.google.android.gcm.server.Message;
import com.google.android.gcm.server.MulticastResult;
import com.google.android.gcm.server.Sender;
public class GCM {
private final static Logger LOGGER = Logger.getLogger(GCM.class.getName());
private static final String API_KEY = ConstantUtil.GCM_API_KEY;
public static void doSendViaGcm(List<String> tocken,String message) throws IOException {
Sender sender = new Sender(API_KEY);
// Trim message if needed.
if (message.length() > 1000) {
message = message.substring(0, 1000) + "[...]";
}
Message msg = new Message.Builder().addData("message", message).build();
try{
MulticastResult result = sender.send(msg, tocken, 5);
}catch(Exception ex){
LOGGER.severe("error is"+ex.getMessage());
ex.printStackTrace();
}
}
}
In the above code snippet, API_KEY can be obtained from a Google console project. Here I assume that you have already created a Google console project and enabled the GCM API;
you can generate the API_KEY as follows:
your_google_console_project >> Credentials >> Create New Key >> Server
key >> enter the IP addresses you want to allow
access to the GCM API [I used 0.0.0.0/0]
Now doSendViaGcm(List<String> tocken, String message) of the GCM class performs the task of sending messages to all registered Android mobile devices.
Here List<String> tocken is the list of all device tokens to which the messages will be delivered; remember this list should not contain more than 1,000 tokens, or else the HTTP call will fail.
Hope this will help you.
Thanks.

Ways to automate svn process (using Java)

As we know, we can perform SVN operations like checkout, commit, and update using tools like TortoiseSVN.
Now I am trying to perform SVN operations like checkout, commit, and update using an Ant script (so the SVN process will be much easier).
I am trying the SVNKit SDK with their sample code, given as follows:
/*
* ====================================================================
* Copyright (c) 2004-2010 TMate Software Ltd. All rights reserved.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://svnkit.com/license.html
* If newer versions of this license are posted there, you may use a
* newer version instead, at your option.
* ====================================================================
*/
import java.util.Collection;
import java.util.Iterator;
import java.util.Set;
import org.tmatesoft.svn.core.SVNCommitInfo;
import org.tmatesoft.svn.core.SVNException;
import org.tmatesoft.svn.core.SVNLogEntry;
import org.tmatesoft.svn.core.SVNLogEntryPath;
import org.tmatesoft.svn.core.SVNURL;
import org.tmatesoft.svn.core.auth.ISVNAuthenticationManager;
import org.tmatesoft.svn.core.internal.io.dav.DAVRepositoryFactory;
import org.tmatesoft.svn.core.internal.io.fs.FSRepositoryFactory;
import org.tmatesoft.svn.core.internal.io.svn.SVNRepositoryFactoryImpl;
import org.tmatesoft.svn.core.io.ISVNEditor;
import org.tmatesoft.svn.core.io.SVNRepository;
import org.tmatesoft.svn.core.io.SVNRepositoryFactory;
import org.tmatesoft.svn.core.wc.SVNWCUtil;
/*
* The following example program demonstrates how you can use SVNRepository to
* obtain a history for a range of revisions including (for each revision): all
* changed paths, log message, the author of the commit, the timestamp when the
* commit was made. It is similar to the "svn log" command supported by the
* Subversion client library.
*
* As an example here's a part of one of the program layouts (for the default
* values):
*
* ---------------------------------------------
* revision: 1240
* author: alex
* date: Tue Aug 02 19:52:49 NOVST 2005
* log message: 0.9.0 is now trunk
*
* changed paths:
* A /trunk (from /branches/0.9.0 revision 1239)
* ---------------------------------------------
* revision: 1263
* author: sa
* date: Wed Aug 03 21:19:55 NOVST 2005
* log message: updated examples, javadoc files
*
* changed paths:
* M /trunk/doc/javadoc-files/javadoc.css
* M /trunk/doc/javadoc-files/overview.html
* M /trunk/doc/examples/src/org/tmatesoft/svn/examples/wc/StatusHandler.java
* ...
*
*/
public class History {
/*
* args parameter is used to obtain a repository location URL, a start
* revision number, an end revision number, user's account name & password
* to authenticate him to the server.
*/
public static void main(String[] args) {
/*
* Default values:
*/
String url = "svnUrl";
String name = "username";
String password = "password";
long startRevision = 0;
long endRevision = -1;//HEAD (the latest) revision
/*
* Initializes the library (it must be done before ever using the
* library itself)
*/
setupLibrary();
if (args != null) {
/*
* Obtains a repository location URL
*/
url = (args.length >= 1) ? args[0] : url;
/*
* Obtains the start point of the revisions range
*/
startRevision = (args.length >= 2) ? Long.parseLong(args[1])
: startRevision;
/*
* Obtains the end point of the revisions range
*/
endRevision = (args.length >= 3) ? Long.parseLong(args[2])
: endRevision;
/*
* Obtains an account name (will be used to authenticate the user to
* the server)
*/
name = (args.length >= 4) ? args[3] : name;
/*
* Obtains a password
*/
password = (args.length >= 5) ? args[4] : password;
}
SVNRepository repository = null;
try {
/*
* Creates an instance of SVNRepository to work with the repository.
* All user's requests to the repository are relative to the
* repository location used to create this SVNRepository.
* SVNURL is a wrapper for URL strings that refer to repository locations.
*/
repository = SVNRepositoryFactory.create(SVNURL.parseURIEncoded(url));
} catch (SVNException svne) {
/*
* Perhaps a malformed URL is the cause of this exception.
*/
System.err
.println("error while creating an SVNRepository for the location '"
+ url + "': " + svne.getMessage());
System.exit(1);
}
ISVNAuthenticationManager authManager = SVNWCUtil.createDefaultAuthenticationManager(name, password);
repository.setAuthenticationManager(authManager);
/*
* Gets the latest revision number of the repository
*/
try {
endRevision = repository.getLatestRevision();
} catch (SVNException svne) {
System.err.println("error while fetching the latest repository revision: " + svne.getMessage());
System.exit(1);
}
Collection logEntries = null;
try {
logEntries = repository.log(new String[] {""}, null,
startRevision, endRevision, true, true);
} catch (SVNException svne) {
System.out.println("error while collecting log information for '"
+ url + "': " + svne.getMessage());
System.exit(1);
}
for (Iterator entries = logEntries.iterator(); entries.hasNext();) {
/*
* gets a next SVNLogEntry
*/
SVNLogEntry logEntry = (SVNLogEntry) entries.next();
System.out.println("---------------------------------------------");
/*
* gets the revision number
*/
System.out.println("revision: " + logEntry.getRevision());
/*
* gets the author of the changes made in that revision
*/
System.out.println("author: " + logEntry.getAuthor());
/*
* gets the time moment when the changes were committed
*/
System.out.println("date: " + logEntry.getDate());
/*
* gets the commit log message
*/
System.out.println("log message: " + logEntry.getMessage());
/*
* displaying all paths that were changed in that revision; changed
* path information is represented by SVNLogEntryPath.
*/
String logMessage = "log message";
try {
ISVNEditor editor = repository.getCommitEditor( logMessage , null /*locks*/ , true /*keepLocks*/ , null /*mediator*/ );
History.copyDir(editor, "C:/svnCommitCode/src","svnurl/src", logEntry.getRevision());
} catch (SVNException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
if (logEntry.getChangedPaths().size() > 0) {
System.out.println();
System.out.println("changed paths:");
/*
* keys are changed paths
*/
Set changedPathsSet = logEntry.getChangedPaths().keySet();
for (Iterator changedPaths = changedPathsSet.iterator(); changedPaths
.hasNext();) {
/*
* obtains a next SVNLogEntryPath
*/
SVNLogEntryPath entryPath = (SVNLogEntryPath) logEntry
.getChangedPaths().get(changedPaths.next());
/*
* SVNLogEntryPath.getPath returns the changed path itself;
*
* SVNLogEntryPath.getType returns a character describing
* how the path was changed ('A' - added, 'D' - deleted or
* 'M' - modified);
*
* If the path was copied from another one (branched) then
* SVNLogEntryPath.getCopyPath &
* SVNLogEntryPath.getCopyRevision tells where it was copied
* from and what revision the origin path was at.
*/
System.out.println(" "
+ entryPath.getType()
+ " "
+ entryPath.getPath()
+ ((entryPath.getCopyPath() != null) ? " (from "
+ entryPath.getCopyPath() + " revision "
+ entryPath.getCopyRevision() + ")" : ""));
}
}
}
}
/*
* Initializes the library to work with a repository via
* different protocols.
*/
private static void setupLibrary() {
/*
* For using over http:// and https://
*/
DAVRepositoryFactory.setup();
/*
* For using over svn:// and svn+xxx://
*/
SVNRepositoryFactoryImpl.setup();
/*
* For using over file:///
*/
FSRepositoryFactory.setup();
}
private static SVNCommitInfo copyDir( ISVNEditor editor , String srcDirPath , String dstDirPath , long revision ) throws SVNException {
editor.openRoot( -1 );
editor.addDir( dstDirPath , srcDirPath , revision );
System.out.println("done--------------------");
//Closes dstDirPath.
editor.closeDir( );
//Closes the root directory.
editor.closeDir( );
return editor.closeEdit( );
}
}
I am able to get some of the output as history:
revision: 7
author: username
date: Wed Apr 23 15:47:58 2014
log message: testing
changed paths:
A /testCode/src
But I am getting the below error when I call the SVNCommitInfo copyDir() method:
org.tmatesoft.svn.core.SVNException: svn: E160013: '/testCode/!svn/bc/2/C:/svnCommitCode/src' path not found: 404 Not Found (http://svnUrl)
I am providing both the source (my local system directory path) and the destination path (the SVN directory path); what am I doing wrong here? (On SVN the same 'src' folder exists; I just want to replace it with the current local directory.)
Can anyone guide me in this context?
This solution worked for me.
First, call the method getSVNClientManager to get authenticated; it will return a clientManager which is used to get different kinds of SVN client instances to do different activities.
public SVNClientManager getSVNClientManager () throws IOException{
SVNURL url = SVNURL
.parseURIDecoded("<path to the base svn repository>");
SVNRepository repository = SVNRepositoryFactory.create(url, null);
ISVNOptions myOptions = SVNWCUtil.createDefaultOptions(true);
//provide svn username and password
//username = name used to connect to svn
//password = password used to connect to svn
ISVNAuthenticationManager myAuthManager = SVNWCUtil
.createDefaultAuthenticationManager("<username>", "<password>");
repository.setAuthenticationManager(myAuthManager);
//clientManager will be used to get different kind of svn clients instances to do different activities
//like update, commit, view diff etc.
SVNClientManager clientManager = SVNClientManager.newInstance(
myOptions, myAuthManager);
return clientManager ;
}
and then call the method commitToSvn():
public void commitToSvn(SVNClientManager clientManager)
throws SVNException {
SVNCommitClient commitClient = clientManager.getCommitClient();
File fileToCheckin = new File("LocalDir/SampleFileFolder/SampleFile1");
boolean recursive = true;
SVNCommitInfo importInfo = commitClient
.doImport(
fileToCheckin ,
SVNURL.parseURIDecoded("<path at which we want to check-in the file>"),
"testing svn kit integration", recursive);
System.out.println(importInfo.getNewRevision());
}
Similarly, we can call the export method exportFromSvn():
public void exportFromSvn(SVNClientManager clientManager) throws SVNException {
SVNUpdateClient updateClient = clientManager.getUpdateClient();
SVNURL url = SVNURL.parseURIDecoded("<svn url to export from>");
//destination path
File dstPath = new File("LocalDirNew");
//the revision number which should be looked upon for the file path
SVNRevision pegRevision = SVNRevision.create(<right svn revision number>);
//the revision number which is required to be exported.
SVNRevision revision = SVNRevision.create(<right svn revision number>);
//if there is any special character for end of line (in the file) then it is required.
//For our use case it can be null, assuming there are no special characters.
//In this case the OS-specific end-of-line style will be assumed.
String eolStyle = null;
//this would force the operation
boolean force = true;
//To what depth under a directory the export is required is determined by depth. INFINITY means the whole subtree of that directory will be exported
SVNDepth recursive = SVNDepth.INFINITY;
updateClient.doExport(url, dstPath, pegRevision, revision, eolStyle, force, recursive );
}
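To round out the checkout/commit/update workflow the question asks about, here is a minimal sketch using the same clientManager (the doCheckout/doUpdate signatures shown are the ones I believe current SVNKit versions expose; verify them against the SVNKit Javadoc for your version):

public void checkoutAndUpdate(SVNClientManager clientManager) throws SVNException {
    SVNUpdateClient updateClient = clientManager.getUpdateClient();
    SVNURL url = SVNURL.parseURIDecoded("<svn url to check out>");
    File workingCopy = new File("LocalWorkingCopy");
    // Initial checkout of the whole tree at HEAD.
    updateClient.doCheckout(url, workingCopy, SVNRevision.HEAD, SVNRevision.HEAD,
            SVNDepth.INFINITY, true /*allowUnversionedObstructions*/);
    // Later, bring the working copy up to date.
    long revision = updateClient.doUpdate(workingCopy, SVNRevision.HEAD,
            SVNDepth.INFINITY, true /*allowUnversionedObstructions*/, false /*depthIsSticky*/);
    System.out.println("Working copy is now at revision " + revision);
}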

accumulo - batchscanner: one result per range

So my general question is "Is it possible to have an Accumulo BatchScanner only pull back the first result per Range I give it?"
Now some details about my use case as there may be a better way to approach this anyway. I have data that represent messages from different systems. There can be different types of messages. My users want to be able to ask the system questions, such as "give me the most recent message of a certain type as of a certain time for all these systems".
My table layout looks like this
rowid: system_name, family: message_type, qualifier: masked_timestamp, value: message_text
The idea is that the user gives me a list of systems they care about, the type of message, and a certain timestamp. I used a masked timestamp so that the table sorts most recent first. That way, when I scan for a timestamp, the first result is the most recent prior to that time. I am using a BatchScanner because I am searching multiple systems per query. Can I make the BatchScanner fetch only the first result for each Range? I can't specify an exact key because the most recent entry may not match the datetime given by the user.
Currently, I am using the BatchScanner and ignoring all but the first result per Key. It works right now, but it seems like a waste to pull back all the data for a specific system/type over the network when I only care about the first result per system/type.
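For reference, a minimal sketch of how the masked-timestamp layout and the per-system ranges could look (Long.MAX_VALUE minus the epoch millis, zero-padded so the lexicographic sort puts the newest entry first; the names are illustrative):

import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.hadoop.io.Text;

// Build the qualifier so that newer timestamps sort first.
static String maskTimestamp(long epochMillis) {
    return String.format("%019d", Long.MAX_VALUE - epochMillis);
}

// One Range per system: start at (system, type, masked "as of" time) and cover the
// rest of that column family; the first entry returned is the most recent message
// at or before the requested time.
static Range rangeFor(String system, String messageType, long asOfMillis) {
    Key start = new Key(new Text(system), new Text(messageType), new Text(maskTimestamp(asOfMillis)));
    Key end = new Key(new Text(system), new Text(messageType)).followingKey(PartialKey.ROW_COLFAM);
    return new Range(start, true, end, false);
}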
EDIT
My attempt using the FirstEntryInRowIterator
@Test
public void testFirstEntryIterator() throws Exception
{
Connector connector = new MockInstance("inst").getConnector("user", new PasswordToken("password"));
connector.tableOperations().create("testing");
BatchWriter writer = writer(connector, "testing");
writer.addMutation(mutation("row", "fam", "qual1", "val1"));
writer.addMutation(mutation("row", "fam", "qual2", "val2"));
writer.addMutation(mutation("row", "fam", "qual3", "val3"));
writer.close();
Scanner scanner = connector.createScanner("testing", new Authorizations());
scanner.addScanIterator(new IteratorSetting(50, FirstEntryInRowIterator.class));
Key begin = new Key("row", "fam", "qual2");
scanner.setRange(new Range(begin, begin.followingKey(PartialKey.ROW_COLFAM_COLQUAL)));
int numResults = 0;
for (Map.Entry<Key, Value> entry : scanner)
{
Assert.assertEquals("qual2", entry.getKey().getColumnQualifier().toString());
numResults++;
}
Assert.assertEquals(1, numResults);
}
My goal is that the returned entry will be the ("row", "fam", "qual2", "val2") but I get 0 results. It almost seems like the Iterator is being applied before the Range maybe? I haven't dug into this yet.
This sounds like a good use case for using one of Accumulo's SortedKeyValueIterators, specifically the FirstEntryInRowIterator (contained in the accumulo-core artifact).
Create an IteratorSetting with the FirstEntryInRowIterator and add it to your BatchScanner, as in the sketch below. This will return the first Key/Value in that system_name and then stop, avoiding the overhead of your client ignoring all the other results.
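A minimal sketch of wiring that up (the table name, ranges, and thread count are placeholders):

BatchScanner scanner = connector.createBatchScanner("messages", new Authorizations(), 4);
scanner.setRanges(ranges); // one Range per system the user asked about
// Priority 50 is arbitrary; it only needs to be above the system iterators.
scanner.addScanIterator(new IteratorSetting(50, FirstEntryInRowIterator.class));
for (Map.Entry<Key, Value> entry : scanner) {
    // one entry per row (system) instead of the full message history
}
scanner.close();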
A quick modification of the FirstEntryInRowIterator might get you what you want:
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.accumulo.core.iterators;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.accumulo.core.data.ByteSequence;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.PartialKey;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.hadoop.io.Text;
public class FirstEntryInRangeIterator extends SkippingIterator implements OptionDescriber {
// options
static final String NUM_SCANS_STRING_NAME = "scansBeforeSeek";
// iterator predecessor seek options to pass through
private Range latestRange;
private Collection<ByteSequence> latestColumnFamilies;
private boolean latestInclusive;
// private fields
private Text lastRowFound;
private int numscans;
/**
* convenience method to set the option to optimize the frequency of scans vs. seeks
*/
public static void setNumScansBeforeSeek(IteratorSetting cfg, int num) {
cfg.addOption(NUM_SCANS_STRING_NAME, Integer.toString(num));
}
// this must be public for OptionDescriber
public FirstEntryInRangeIterator() {
super();
}
public FirstEntryInRangeIterator(FirstEntryInRangeIterator other, IteratorEnvironment env) {
super();
setSource(other.getSource().deepCopy(env));
}
@Override
public SortedKeyValueIterator<Key,Value> deepCopy(IteratorEnvironment env) {
return new FirstEntryInRangeIterator(this, env);
}
@Override
public void init(SortedKeyValueIterator<Key,Value> source, Map<String,String> options, IteratorEnvironment env) throws IOException {
super.init(source, options, env);
String o = options.get(NUM_SCANS_STRING_NAME);
numscans = o == null ? 10 : Integer.parseInt(o);
}
// this is only ever called immediately after getting "next" entry
@Override
protected void consume() throws IOException {
if (finished == true || lastRowFound == null)
return;
int count = 0;
while (getSource().hasTop() && lastRowFound.equals(getSource().getTopKey().getRow())) {
// try to efficiently jump to the next matching key
if (count < numscans) {
++count;
getSource().next(); // scan
} else {
// too many scans, just seek
count = 0;
// determine where to seek to, but don't go beyond the user-specified range
Key nextKey = getSource().getTopKey().followingKey(PartialKey.ROW);
if (!latestRange.afterEndKey(nextKey))
getSource().seek(new Range(nextKey, true, latestRange.getEndKey(), latestRange.isEndKeyInclusive()), latestColumnFamilies, latestInclusive);
else {
finished = true;
break;
}
}
}
lastRowFound = getSource().hasTop() ? getSource().getTopKey().getRow(lastRowFound) : null;
}
private boolean finished = true;
@Override
public boolean hasTop() {
return !finished && getSource().hasTop();
}
@Override
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
// save parameters for future internal seeks
latestRange = range;
latestColumnFamilies = columnFamilies;
latestInclusive = inclusive;
lastRowFound = null;
super.seek(range, columnFamilies, inclusive);
finished = false;
if (getSource().hasTop()) {
lastRowFound = getSource().getTopKey().getRow();
if (range.beforeStartKey(getSource().getTopKey()))
consume();
}
}
@Override
public IteratorOptions describeOptions() {
String name = "firstEntry";
String desc = "Only allows iteration over the first entry per range";
HashMap<String,String> namedOptions = new HashMap<String,String>();
namedOptions.put(NUM_SCANS_STRING_NAME, "Number of scans to try before seeking [10]");
return new IteratorOptions(name, desc, namedOptions, null);
}
@Override
public boolean validateOptions(Map<String,String> options) {
try {
String o = options.get(NUM_SCANS_STRING_NAME);
if (o != null)
Integer.parseInt(o);
} catch (Exception e) {
throw new IllegalArgumentException("bad integer " + NUM_SCANS_STRING_NAME + ":" + options.get(NUM_SCANS_STRING_NAME), e);
}
return true;
}
}
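Once this modified iterator is on the tablet servers' classpath, it is attached the same way as the stock one (a usage sketch; the priority is arbitrary):

scanner.addScanIterator(new IteratorSetting(50, FirstEntryInRangeIterator.class));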
