com.ibm.msg.client.wmq.WMQConstants cannot resolve - java

I am implementing an IBM MQ client in a Java class as follows:
import javax.jms.JMSException;
import com.ibm.msg.client.jms.JmsConnectionFactory;
import com.ibm.msg.client.jms.JmsFactoryFactory;
import com.ibm.msg.client.wmq.WMQConstants;
import javax.jms.JMSContext;
import javax.jms.Topic;
import javax.jms.Queue;
import javax.jms.JMSConsumer;
import javax.jms.Message;
import javax.jms.JMSProducer;
/*
* Implements both Subscriber and Publisher
*/
class SharedNonDurableSubscriberAndPublisher implements Runnable {
private Thread t;
private String threadName;
SharedNonDurableSubscriberAndPublisher( String name){
threadName = name;
System.out.println("Creating Thread:" + threadName );
}
/*
* Demonstrates shared non-durable subscription in JMS 2.0
*/
private void sharedNonDurableSubscriptionDemo(){
JmsConnectionFactory cf = null;
JMSContext msgContext = null;
try {
// Create Factory for WMQ JMS provider
JmsFactoryFactory ff = JmsFactoryFactory.getInstance(WMQConstants.WMQ_PROVIDER);
// Create connection factory
cf = ff.createConnectionFactory();
// Set MQ properties
cf.setStringProperty(WMQConstants.WMQ_QUEUE_MANAGER, "QM3");
cf.setIntProperty(WMQConstants.WMQ_CONNECTION_MODE, WMQConstants.WMQ_CM_BINDINGS);
// Create message context
msgContext = cf.createContext();
// Create a topic destination
Topic fifaScores = msgContext.createTopic("/FIFA2014/UPDATES");
// Create a consumer. Subscription name specified, required for sharing of subscription.
JMSConsumer msgCons = msgContext.createSharedConsumer(fifaScores, "FIFA2014SUBID");
// Loop around to receive publications
while(true){
String msgBody=null;
// Use JMS 2.0 receiveBody method as we are interested in message body only.
msgBody = msgCons.receiveBody(String.class);
if(msgBody != null){
System.out.println(threadName + " : " + msgBody);
}
}
}catch(JMSException jmsEx){
System.out.println(jmsEx);
}
}
/*
* Publisher publishes match updates like current attendance in the stadium, goal score and ball possession by teams.
*/
private void matchUpdatePublisher(){
JmsConnectionFactory cf = null;
JMSContext msgContext = null;
int nederlandsGoals = 0;
int chileGoals = 0;
int stadiumAttendence = 23231;
int switchIndex = 0;
String msgBody = "";
int nederlandsHolding = 60;
int chileHolding = 40;
try {
// Create Factory for WMQ JMS provider
JmsFactoryFactory ff = JmsFactoryFactory.getInstance(WMQConstants.WMQ_PROVIDER);
// Create connection factory
cf = ff.createConnectionFactory();
// Set MQ properties
cf.setStringProperty(WMQConstants.WMQ_QUEUE_MANAGER, "QM3");
cf.setIntProperty(WMQConstants.WMQ_CONNECTION_MODE, WMQConstants.WMQ_CM_BINDINGS);
// Create message context
msgContext = cf.createContext();
// Create a topic destination
Topic fifaScores = msgContext.createTopic("/FIFA2014/UPDATES");
// Create publisher to publish updates from stadium
JMSProducer msgProducer = msgContext.createProducer();
while(true){
// Send match updates
switch(switchIndex){
// Attendance
case 0:
msgBody ="Stadium Attendence " + stadiumAttendence;
stadiumAttendence += 314;
break;
// Goals
case 1:
msgBody ="SCORE: The Netherlands: " + nederlandsGoals + " - Chile:" + chileGoals;
break;
// Ball possession percentage
case 2:
msgBody ="Ball possession: The Netherlands: " + nederlandsHolding + "% - Chile: " + chileHolding + "%";
if((nederlandsHolding > 60) && (nederlandsHolding < 70)){
nederlandsHolding -= 2;
chileHolding += 2;
}else{
nederlandsHolding += 2;
chileHolding -= 2;
}
break;
}
// Publish and wait for two seconds to publish next update
msgProducer.send (fifaScores, msgBody);
try{
Thread.sleep(2000);
}catch(InterruptedException iex){
}
// Increment and reset the index if greater than 2
switchIndex++;
if(switchIndex > 2)
switchIndex = 0;
}
}catch(JMSException jmsEx){
System.out.println(jmsEx);
}
}
/*
* (non-Javadoc)
* @see java.lang.Runnable#run()
*/
public void run() {
// If this is a publisher thread
// Use equals() for string comparison; == compares object references
if("PUBLISHER".equals(threadName)){
matchUpdatePublisher();
}else{
// Create subscription and start receiving publications
sharedNonDurableSubscriptionDemo();
}
}
// Start thread
public void start (){
System.out.println("Starting " + threadName );
if (t == null)
{
t = new Thread (this, threadName);
t.start ();
}
}
}
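For context, the class above is driven by a main method along these lines (hypothetical, not part of the snippet as posted) that starts two subscribers sharing the subscription and one publisher:
public class SharedSubDemo {
    public static void main(String[] args) {
        // Both SUB threads share the "FIFA2014SUBID" subscription, so publications are split between them
        SharedNonDurableSubscriberAndPublisher subOne = new SharedNonDurableSubscriberAndPublisher("SUB1");
        subOne.start();
        SharedNonDurableSubscriberAndPublisher subTwo = new SharedNonDurableSubscriberAndPublisher("SUB2");
        subTwo.start();
        SharedNonDurableSubscriberAndPublisher publisher = new SharedNonDurableSubscriberAndPublisher("PUBLISHER");
        publisher.start();
    }
}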
I am new to IBM MQ and can't understand how to resolve the following imports:
import com.ibm.msg.client.jms.JmsConnectionFactory;
import com.ibm.msg.client.jms.JmsFactoryFactory;
import com.ibm.msg.client.wmq.WMQConstants;
I have resolved the other dependencies through jars. Please help with this one.

You want com.ibm.mq.allclient.jar. You can find this on disk under INSTALL_DIR/java/lib if you have a queue manager installed or the MQC8 SupportPac.
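If your build is Maven-based, the same client is also published to Maven Central under the com.ibm.mq:com.ibm.mq.allclient coordinates; a sketch of the dependency (the version is a placeholder - use the one matching your MQ level):
<dependency>
    <groupId>com.ibm.mq</groupId>
    <artifactId>com.ibm.mq.allclient</artifactId>
    <!-- placeholder version; pick the release matching your queue manager -->
    <version>9.0.0.0</version>
</dependency>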

I recently used IBM MQ classes for JMS to place messages onto queues and was successful. Please ensure you have the following jar files in your build path before compilation. You can either search for them online and download them, or, if you have installed WebSphere MQ on your PC, find them in the installation folder under (WebSphere MQ)/Java/lib:
com.ibm.mq.jmqi.jar
com.ibm.mqjms.jar
jms.jar

Related

How to use network proxy when connecting to Microsoft Azure Media Services

When I run Microsoft Azure Media Services code written in Java locally it works, but when I deploy the same code in the dev environment I am unable to access Azure and it throws java.net.HostNotFoundException.
What is the best approach to using a network proxy to connect to Azure?
Below is the code I am using, in Java with the azure-java-sdk:
import java.io.*;
import java.security.NoSuchAlgorithmException;
import java.util.EnumSet;
import com.microsoft.windowsazure.Configuration;
import com.microsoft.windowsazure.exception.ServiceException;
import com.microsoft.windowsazure.services.media.MediaConfiguration;
import com.microsoft.windowsazure.services.media.MediaContract;
import com.microsoft.windowsazure.services.media.MediaService;
import com.microsoft.windowsazure.services.media.WritableBlobContainerContract;
import com.microsoft.windowsazure.services.media.models.AccessPolicy;
import com.microsoft.windowsazure.services.media.models.AccessPolicyInfo;
import com.microsoft.windowsazure.services.media.models.AccessPolicyPermission;
import com.microsoft.windowsazure.services.media.models.Asset;
import com.microsoft.windowsazure.services.media.models.AssetFile;
import com.microsoft.windowsazure.services.media.models.AssetFileInfo;
import com.microsoft.windowsazure.services.media.models.AssetInfo;
import com.microsoft.windowsazure.services.media.models.Job;
import com.microsoft.windowsazure.services.media.models.JobInfo;
import com.microsoft.windowsazure.services.media.models.JobState;
import com.microsoft.windowsazure.services.media.models.ListResult;
import com.microsoft.windowsazure.services.media.models.Locator;
import com.microsoft.windowsazure.services.media.models.LocatorInfo;
import com.microsoft.windowsazure.services.media.models.LocatorType;
import com.microsoft.windowsazure.services.media.models.MediaProcessor;
import com.microsoft.windowsazure.services.media.models.MediaProcessorInfo;
import com.microsoft.windowsazure.services.media.models.Task;
public class HelloMediaServices
{
// Media Services account credentials configuration
private static String mediaServiceUri = "https://media.windows.net/API/";
private static String oAuthUri = "https://wamsprodglobal001acs.accesscontrol.windows.net/v2/OAuth2-13";
private static String clientId = "account name";
private static String clientSecret = "account key";
private static String scope = "urn:WindowsAzureMediaServices";
private static MediaContract mediaService;
// Encoder configuration
private static String preferedEncoder = "Media Encoder Standard";
private static String encodingPreset = "H264 Multiple Bitrate 720p";
public static void main(String[] args)
{
try {
// Set up the MediaContract object to call into the Media Services account
Configuration configuration = MediaConfiguration.configureWithOAuthAuthentication(
mediaServiceUri, oAuthUri, clientId, clientSecret, scope);
mediaService = MediaService.create(configuration);
// Upload a local file to an Asset
AssetInfo uploadAsset = uploadFileAndCreateAsset("BigBuckBunny.mp4");
System.out.println("Uploaded Asset Id: " + uploadAsset.getId());
// Transform the Asset
AssetInfo encodedAsset = encode(uploadAsset);
System.out.println("Encoded Asset Id: " + encodedAsset.getId());
// Create the Streaming Origin Locator
String url = getStreamingOriginLocator(encodedAsset);
System.out.println("Origin Locator URL: " + url);
System.out.println("Sample completed!");
} catch (ServiceException se) {
System.out.println("ServiceException encountered.");
System.out.println(se.toString());
} catch (Exception e) {
System.out.println("Exception encountered.");
System.out.println(e.toString());
}
}
private static AssetInfo uploadFileAndCreateAsset(String fileName)
throws ServiceException, FileNotFoundException, NoSuchAlgorithmException {
WritableBlobContainerContract uploader;
AssetInfo resultAsset;
AccessPolicyInfo uploadAccessPolicy;
LocatorInfo uploadLocator = null;
// Create an Asset
resultAsset = mediaService.create(Asset.create().setName(fileName).setAlternateId("altId"));
System.out.println("Created Asset " + fileName);
// Create an AccessPolicy that provides Write access for 15 minutes
uploadAccessPolicy = mediaService
.create(AccessPolicy.create("uploadAccessPolicy", 15.0, EnumSet.of(AccessPolicyPermission.WRITE)));
// Create a Locator using the AccessPolicy and Asset
uploadLocator = mediaService
.create(Locator.create(uploadAccessPolicy.getId(), resultAsset.getId(), LocatorType.SAS));
// Create the Blob Writer using the Locator
uploader = mediaService.createBlobWriter(uploadLocator);
File file = new File("BigBuckBunny.mp4");
// The local file that will be uploaded to your Media Services account
InputStream input = new FileInputStream(file);
System.out.println("Uploading " + fileName);
// Upload the local file to the asset
uploader.createBlockBlob(fileName, input);
// Inform Media Services about the uploaded files
mediaService.action(AssetFile.createFileInfos(resultAsset.getId()));
System.out.println("Uploaded Asset File " + fileName);
mediaService.delete(Locator.delete(uploadLocator.getId()));
mediaService.delete(AccessPolicy.delete(uploadAccessPolicy.getId()));
return resultAsset;
}
// Create a Job that contains a Task to transform the Asset
private static AssetInfo encode(AssetInfo assetToEncode)
throws ServiceException, InterruptedException {
// Retrieve the list of Media Processors that match the name
ListResult<MediaProcessorInfo> mediaProcessors = mediaService
.list(MediaProcessor.list().set("$filter", String.format("Name eq '%s'", preferedEncoder)));
// Use the latest version of the Media Processor
MediaProcessorInfo mediaProcessor = null;
for (MediaProcessorInfo info : mediaProcessors) {
if (null == mediaProcessor || info.getVersion().compareTo(mediaProcessor.getVersion()) > 0) {
mediaProcessor = info;
}
}
System.out.println("Using Media Processor: " + mediaProcessor.getName() + " " + mediaProcessor.getVersion());
// Create a task with the specified Media Processor
String outputAssetName = String.format("%s as %s", assetToEncode.getName(), encodingPreset);
String taskXml = "<taskBody><inputAsset>JobInputAsset(0)</inputAsset>"
+ "<outputAsset assetCreationOptions=\"0\"" // AssetCreationOptions.None
+ " assetName=\"" + outputAssetName + "\">JobOutputAsset(0)</outputAsset></taskBody>";
Task.CreateBatchOperation task = Task.create(mediaProcessor.getId(), taskXml)
.setConfiguration(encodingPreset).setName("Encoding");
// Create the Job; this automatically schedules and runs it.
Job.Creator jobCreator = Job.create()
.setName(String.format("Encoding %s to %s", assetToEncode.getName(), encodingPreset))
.addInputMediaAsset(assetToEncode.getId()).setPriority(2).addTaskCreator(task);
JobInfo job = mediaService.create(jobCreator);
String jobId = job.getId();
System.out.println("Created Job with Id: " + jobId);
// Check to see if the Job has completed
checkJobStatus(jobId);
// Done with the Job
// Retrieve the output Asset
ListResult<AssetInfo> outputAssets = mediaService.list(Asset.list(job.getOutputAssetsLink()));
return outputAssets.get(0);
}
public static String getStreamingOriginLocator(AssetInfo asset) throws ServiceException {
// Get the .ISM AssetFile
ListResult<AssetFileInfo> assetFiles = mediaService.list(AssetFile.list(asset.getAssetFilesLink()));
AssetFileInfo streamingAssetFile = null;
for (AssetFileInfo file : assetFiles) {
if (file.getName().toLowerCase().endsWith(".ism")) {
streamingAssetFile = file;
break;
}
}
AccessPolicyInfo originAccessPolicy;
LocatorInfo originLocator = null;
// Create a 30-day readonly AccessPolicy
double durationInMinutes = 60 * 24 * 30;
originAccessPolicy = mediaService.create(
AccessPolicy.create("Streaming policy", durationInMinutes, EnumSet.of(AccessPolicyPermission.READ)));
// Create a Locator using the AccessPolicy and Asset
originLocator = mediaService
.create(Locator.create(originAccessPolicy.getId(), asset.getId(), LocatorType.OnDemandOrigin));
// Create a Smooth Streaming base URL
return originLocator.getPath() + streamingAssetFile.getName() + "/manifest";
}
private static void checkJobStatus(String jobId) throws InterruptedException, ServiceException {
boolean done = false;
JobState jobState = null;
while (!done) {
// Sleep for 5 seconds
Thread.sleep(5000);
// Query the updated Job state
jobState = mediaService.get(Job.get(jobId)).getState();
System.out.println("Job state: " + jobState);
if (jobState == JobState.Finished || jobState == JobState.Canceled || jobState == JobState.Error) {
done = true;
}
}
}
}
I verified that the following code works through the Fiddler proxy. Thanks to the "How to capture HTTPS with Fiddler, in Java" post, which gave me hints:
System.setProperty("http.proxyHost", "127.0.0.1");
System.setProperty("https.proxyHost", "127.0.0.1");
System.setProperty("http.proxyPort", "8888");
System.setProperty("https.proxyPort", "8888");
System.setProperty("javax.net.ssl.trustStore", "C:\\Program Files\\Java\\jdk1.8.0_102\\bin\\FiddlerKeyStore");
System.setProperty("javax.net.ssl.trustStorePassword", "mypassword");
For others who face this issue like me: you can connect to Azure Media Services through a network proxy by using the code below.
// Set up the MediaContract object to call into the Media Services account
Configuration configuration = MediaConfiguration.configureWithOAuthAuthentication(
mediaServiceUri, oAuthUri, clientId, clientSecret, scope);
configuration.getProperties().put(Configuration.PROPERTY_HTTP_PROXY_HOST, "Hostvalue");
configuration.getProperties().put(Configuration.PROPERTY_HTTP_PROXY_PORT, "Portvalue");
configuration.getProperties().put(Configuration.PROPERTY_HTTP_PROXY_SCHEME, "http");
MediaContract mediaService = MediaService.create(configuration);
Now use the mediaService to perform other operations.
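As a quick smoke test that the proxy settings are picked up (a sketch, assuming the credentials above are valid), you could list the assets in the account; if the proxy is wrong this fails with a connection error instead of returning:
// Hypothetical connectivity check through the proxy-configured MediaContract
ListResult<AssetInfo> assets = mediaService.list(Asset.list());
System.out.println("Assets in account: " + assets.size());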

CometD doesn't start Jetty server in Java multithreading

I'm using CometD 3.0.2 in a Java multithreaded application, following this tutorial. Everything works fine when I create a service channel. I have created an object of my socket connection class, which also starts 10 Java threads, in the CometD initializer class:
public class Initializer extends GenericServlet
{
private MainConnect connect;
@Override
public void init() throws ServletException
{
connect = new MainConnect();
// Retrieve the CometD service instantiated by AnnotationCometdServlet
StockPriceService service = (StockPriceService)getServletContext().getAttribute(StockPriceService.class.getName());
// Register the service as a listener of the emitter
connect.getListeners().add(service);
System.out.println("Service Added!");
//Initiate the Threads
connect.InitiateConnection();
}
My socket class gets initiated and the Java threads are running as well. However, the Jetty server never gets started here.
My service class:
@Service
public class StockPriceService implements StockPriceEmitter.Listener
{
@Inject
private BayeuxServer bayeuxServer;
@Session
private LocalSession sender;
public void onUpdates(List<StockPriceEmitter.Update> updates)
{
for (StockPriceEmitter.Update update : updates)
{
// Create the channel name using the stock symbol
String channelName = "/stock/" + update.getSymbol().toLowerCase(Locale.ENGLISH);
// Initialize the channel, making it persistent and lazy
bayeuxServer.createIfAbsent(channelName, new ConfigurableServerChannel.Initializer()
{
public void configureChannel(ConfigurableServerChannel channel)
{
channel.setPersistent(true);
channel.setLazy(true);
}
});
// Convert the Update business object to a CometD-friendly format
Map<String, Object> data = new HashMap<String, Object>(4);
data.put("symbol", update.getSymbol());
data.put("oldValue", update.getOldValue());
data.put("newValue", update.getNewValue());
// Publish to all subscribers
ServerChannel channel = bayeuxServer.getChannel(channelName);
channel.publish(sender, data, null);
System.out.println("Service hit!!");
}
}
}
The Java thread's run method:
if (workerID == 0) {
try {
System.out.println("Running");
Random random = new Random();
List<Update> updates = new ArrayList<Update>();
// Randomly choose how many stocks to update
int howMany = random.nextInt(symbols.size()) + 1;
for (int i = 0; i < howMany; ++i)
{
// Randomly choose which one to update
int which = random.nextInt(symbols.size());
String symbol = symbols.get(which); // use the randomly chosen index
float oldValue = values.get(symbol);
// Randomly choose how much to update
boolean sign = random.nextBoolean();
float howMuch = random.nextFloat();
float newValue = oldValue + (sign ? howMuch : -howMuch);
// Store the new value
values.put(symbol, newValue);
updates.add(new Update(symbol, oldValue, newValue));
// System.out.println("Updates from Emitter Class: " + updates);
}
// Notify the listeners
for (Listener listener : MainConnect.getListeners())
{
System.out.println("Listners value: " + listener);
listener.onUpdates(updates);
}
// Randomly choose how long for the next update
// We use a max delay of 1 second to simulate a high rate of updates
long howLong = random.nextInt(1000);
scheduler.schedule(this, howLong, TimeUnit.MILLISECONDS);
Thread.sleep(3000);
} catch (InterruptedException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
Overall scenario: I'm implementing a server that uses sockets to receive a connection from client A and then receive data from clients. I need this data to stream to client B. The Jetty server doesn't start when I initiate the socket class / open a socket connection.
Any information is much appreciated.

How to register an agent from one platform to a different platform located remotely in JADE?

I have two PCs on which I am running agents. Both are connected by LAN (or Wi-Fi). I want these agents to communicate. One of the ways I found is giving the agent's full address. Below is the code snippet:
AID a = new AID("A@192.168.14.51:1099/JADE", AID.ISGUID);
a.addAddresses("http://192.168.14.51:7778/acc");
msg.addReceiver(a);
send(msg);
However, once I start agents on one platform, I want the agents on the other platform to be able to register services in its yellow pages so that I can search for the appropriate agent from a list of them. I looked but could not find anything about it. Please give me suggestions on how I can achieve this.
Well, you are looking for DF federation. As far as I understand, it is nothing but 'connecting' DFs.
There is an example in the yellowPages package in the 'jade all examples' folder. It creates register, subscriber, searcher and subDF agents. The register agent registers an agent with some property and the other agents do their jobs. SubDF creates a child DF, which involves DF federation. For you, I modified the code as follows:
The next three agents run on port 1099:
1)
package examples.yellowPages;
import jade.core.Agent;
import jade.core.AID;
import jade.domain.DFService;
import jade.domain.FIPAException;
import jade.domain.FIPANames;
import jade.domain.FIPAAgentManagement.DFAgentDescription;
import jade.domain.FIPAAgentManagement.ServiceDescription;
import jade.domain.FIPAAgentManagement.Property;
/**
This example shows how to register an application specific service in the Yellow Pages
catalogue managed by the DF Agent so that other agents can dynamically discover it.
In this case in particular we register a "Weather-forecast" service for
Italy. The name of this service is specified as a command line argument.
@author Giovanni Caire - TILAB
*/
public class DFRegisterAgent extends Agent {
protected void setup() {
String serviceName = "unknown";
// Read the name of the service to register as an argument
Object[] args = getArguments();
if (args != null && args.length > 0) {
serviceName = (String) args[0];
}
// Register the service
System.out.println("Agent "+getLocalName()+" registering service \""+serviceName+"\" of type \"weather-forecast\"");
try {
DFAgentDescription dfd = new DFAgentDescription();
dfd.setName(getAID());
ServiceDescription sd = new ServiceDescription();
sd.setName(serviceName);
sd.setType("weather-forecast");
// Agents that want to use this service need to "know" the weather-forecast-ontology
sd.addOntologies("weather-forecast-ontology");
// Agents that want to use this service need to "speak" the FIPA-SL language
sd.addLanguages(FIPANames.ContentLanguage.FIPA_SL);
sd.addProperties(new Property("country", "Italy"));
dfd.addServices(sd);
DFService.register(this, dfd);
}
catch (FIPAException fe) {
fe.printStackTrace();
}
}
}
2)
package examples.yellowPages;
import jade.core.Agent;
import jade.core.AID;
import jade.domain.DFService;
import jade.domain.FIPAException;
import jade.domain.FIPANames;
import jade.domain.FIPAAgentManagement.DFAgentDescription;
import jade.domain.FIPAAgentManagement.ServiceDescription;
import jade.domain.FIPAAgentManagement.SearchConstraints;
import jade.util.leap.Iterator;
/**
This example shows how to search for services provided by other agents
and advertised in the Yellow Pages catalogue managed by the DF agent.
In this case in particular we search for agents providing a
"Weather-forecast" service.
@author Giovanni Caire - TILAB
*/
public class DFSearchAgent extends Agent {
protected void setup() {
// Search for services of type "weather-forecast"
System.out.println("Agent "+getLocalName()+" searching for services of type \"weather-forecast\"");
try {
// Build the description used as template for the search
DFAgentDescription template = new DFAgentDescription();
ServiceDescription templateSd = new ServiceDescription();
templateSd.setType("weather-forecast");
template.addServices(templateSd);
SearchConstraints sc = new SearchConstraints();
// We want to receive 10 results at most
sc.setMaxResults(new Long(10));
DFAgentDescription[] results = DFService.search(this, template, sc);
if (results.length > 0) {
System.out.println("Agent "+getLocalName()+" found the following weather-forecast services:");
for (int i = 0; i < results.length; ++i) {
DFAgentDescription dfd = results[i];
AID provider = dfd.getName();
// The same agent may provide several services; we are only interested
// in the weather-forcast one
Iterator it = dfd.getAllServices();
while (it.hasNext()) {
ServiceDescription sd = (ServiceDescription) it.next();
if (sd.getType().equals("weather-forecast")) {
System.out.println("- Service \""+sd.getName()+"\" provided by agent "+provider.getName());
}
}
}
}
else {
System.out.println("Agent "+getLocalName()+" did not find any weather-forecast service");
}
}
catch (FIPAException fe) {
fe.printStackTrace();
}
}
}
3)
package examples.yellowPages;
import jade.core.Agent;
import jade.core.AID;
import jade.domain.DFService;
import jade.domain.FIPAException;
import jade.domain.FIPANames;
import jade.domain.FIPAAgentManagement.DFAgentDescription;
import jade.domain.FIPAAgentManagement.ServiceDescription;
import jade.domain.FIPAAgentManagement.Property;
import jade.domain.FIPAAgentManagement.SearchConstraints;
import jade.proto.SubscriptionInitiator;
import jade.lang.acl.ACLMessage;
import jade.util.leap.Iterator;
/**
This example shows how to subscribe to the DF agent in order to be notified
each time a given service is published in the yellow pages catalogue.
In this case in particular we want to be informed whenever a service of type
"Weather-forecast" for Italy becomes available.
@author Giovanni Caire - TILAB
*/
public class DFSubscribeAgent extends Agent {
protected void setup() {
// Build the description used as template for the subscription
DFAgentDescription template = new DFAgentDescription();
ServiceDescription templateSd = new ServiceDescription();
templateSd.setType("weather-forecast");
templateSd.addProperties(new Property("country", "Italy"));
template.addServices(templateSd);
SearchConstraints sc = new SearchConstraints();
// We want to receive 10 results at most
sc.setMaxResults(new Long(10));
addBehaviour(new SubscriptionInitiator(this, DFService.createSubscriptionMessage(this, getDefaultDF(), template, sc)) {
protected void handleInform(ACLMessage inform) {
System.out.println("Agent "+getLocalName()+": Notification received from DF");
try {
DFAgentDescription[] results = DFService.decodeNotification(inform.getContent());
if (results.length > 0) {
for (int i = 0; i < results.length; ++i) {
DFAgentDescription dfd = results[i];
AID provider = dfd.getName();
// The same agent may provide several services; we are only interested
// in the weather-forcast one
Iterator it = dfd.getAllServices();
while (it.hasNext()) {
ServiceDescription sd = (ServiceDescription) it.next();
if (sd.getType().equals("weather-forecast")) {
System.out.println("Weather-forecast service for Italy found:");
System.out.println("- Service \""+sd.getName()+"\" provided by agent "+provider.getName());
}
}
}
}
System.out.println();
}
catch (FIPAException fe) {
fe.printStackTrace();
}
}
} );
}
}
4) This is the last one. It creates a DF and federates it with the parent DF, i.e. DF federation is done. I ran this on port 1331. Remember to change the IP addresses. (You can run an agent on a different port by using -local-port 1331.)
Remember to run the previous agents before this one.
You can put it in a different Eclipse project and run it.
import jade.core.*;
import jade.core.behaviours.*;
import jade.domain.FIPAAgentManagement.*;
import jade.domain.FIPAException;
import jade.domain.DFService;
import jade.domain.FIPANames;
import jade.util.leap.Iterator;
/**
This is an example of an agent that plays the role of a sub-df by
automatically registering with a parent DF.
Notice that exactly the same might be done by using the GUI of the DF.
<p>
This SUBDF inherits all the functionalities of the default DF, including
its GUI.
@author Giovanni Rimassa - Universita` di Parma
@version $Date: 2003-12-03 17:57:03 +0100 (mer, 03 dic 2003) $ $Revision: 4638 $
*/
public class SubDF2 extends jade.domain.df {
public void setup() {
// Input df name
int len = 0;
byte[] buffer = new byte[1024];
try {
// AID parentName = getDefaultDF();
AID parentName = new AID("df@10.251.216.135:1099/JADE");
parentName.addAddresses("http://NikhilChilwant:7778/acc");
//Execute the setup of jade.domain.df which includes all the default behaviours of a df
//(i.e. register, unregister,modify, and search).
super.setup();
//Use this method to modify the current description of this df.
setDescriptionOfThisDF(getDescription());
//Show the default Gui of a df.
super.showGui();
DFService.register(this,parentName,getDescription());
addParent(parentName,getDescription());
System.out.println("Agent: " + getName() + " federated with default df.");
DFAgentDescription template = new DFAgentDescription();
ServiceDescription templateSd = new ServiceDescription();
templateSd.setType("weather-forecast");
templateSd.addProperties(new Property("country", "Italy"));
template.addServices(templateSd);
SearchConstraints sc = new SearchConstraints();
// We want to receive 10 results at most
sc.setMaxResults(new Long(10));
DFAgentDescription[] results = DFService.search(this,parentName, template, sc);
/* if (results.length > 0) {*/
System.out.println("SUB DF ***Agent "+getLocalName()+" found the following weather-forecast services:");
for (int i = 0; i < results.length; ++i) {
DFAgentDescription dfd = results[i];
AID provider = dfd.getName();
// The same agent may provide several services; we are only interested
// in the weather-forcast one
Iterator it = dfd.getAllServices();
while (it.hasNext()) {
ServiceDescription sd = (ServiceDescription) it.next();
if (sd.getType().equals("weather-forecast")) {
System.out.println("- Service \""+sd.getName()+"\" provided by agent "+provider.getName());
}
}
}/*}*/
String serviceName = "unknown2";
DFAgentDescription dfd = new DFAgentDescription();
dfd.setName(getAID());
ServiceDescription sd = new ServiceDescription();
sd.setName(serviceName);
sd.setType("weather-forecast2");
// Agents that want to use this service need to "know" the weather-forecast-ontology
sd.addOntologies("weather-forecast-ontology2");
// Agents that want to use this service need to "speak" the FIPA-SL language
sd.addLanguages(FIPANames.ContentLanguage.FIPA_SL);
sd.addProperties(new Property("country2", "Italy2"));
dfd.addServices(sd);
DFService.register(this, parentName,dfd);
}catch(FIPAException fe){fe.printStackTrace();}
}
private DFAgentDescription getDescription()
{
DFAgentDescription dfd = new DFAgentDescription();
dfd.setName(getAID());
ServiceDescription sd = new ServiceDescription();
sd.setName(getLocalName() + "-sub-df");
sd.setType("fipa-df");
sd.addProtocols(FIPANames.InteractionProtocol.FIPA_REQUEST);
sd.addOntologies("fipa-agent-management");
sd.setOwnership("JADE");
dfd.addServices(sd);
return dfd;
}
}
After running the code you can see that the subDF agent is able to find an agent which is registered with its federated DF.
You can also download the complete code here: http://tinyurl.com/Agent-on-different-platforms
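A note on launching (hedged, since classpaths vary): the main platform could be started with java -cp <jade-jars> jade.Boot -gui -local-port 1099, and the federating one with java -cp <jade-jars> jade.Boot -gui -local-port 1331 subdf:SubDF2, using JADE's name:Class agent-specifier syntax.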

Samples with JmDNS

I've been able to get the samples that come with JmDNS to compile and run; however, I can't get any of the classes to discover my services.
I'm running a Windows environment with multiple PCs running VNC, SSH and Apache, and I've been trying to get JmDNS to discover at least one of these...
What I ideally want is to be able to detect all running VNC servers on my network. Is there some sort of client and server pairing where I can only discover a service if I've registered it using JmDNS?
Any help getting some results out of the samples will be appreciated, the documentation isn't much help.
import java.io.IOException;
import java.util.logging.ConsoleHandler;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.jmdns.JmDNS;
import javax.jmdns.ServiceEvent;
import javax.jmdns.ServiceListener;
/**
* Sample Code for Service Discovery using JmDNS and a ServiceListener.
* <p>
* Run the main method of this class. It listens for HTTP services and lists all changes on System.out.
*
* @author Werner Randelshofer
*/
public class DiscoverServices {
static class SampleListener implements ServiceListener {
@Override
public void serviceAdded(ServiceEvent event) {
System.out.println("Service added : " + event.getName() + "." + event.getType());
}
@Override
public void serviceRemoved(ServiceEvent event) {
System.out.println("Service removed : " + event.getName() + "." + event.getType());
}
@Override
public void serviceResolved(ServiceEvent event) {
System.out.println("Service resolved: " + event.getInfo());
}
}
/**
* @param args
* the command line arguments
*/
public static void main(String[] args) {
try {
// Activate these lines to see log messages of JmDNS
boolean log = false;
if (log) {
Logger logger = Logger.getLogger(JmDNS.class.getName());
ConsoleHandler handler = new ConsoleHandler();
logger.addHandler(handler);
logger.setLevel(Level.FINER);
handler.setLevel(Level.FINER);
}
final JmDNS jmdns = JmDNS.create();
String type = "_http._tcp.local.";
if(args.length > 0) {
type = args[0];
}
jmdns.addServiceListener(type, new SampleListener());
System.out.println("Press q and Enter, to quit");
int b;
while ((b = System.in.read()) != -1 && (char) b != 'q') {
/* Stub */
}
jmdns.close();
System.out.println("Done");
} catch (IOException e) {
e.printStackTrace();
}
}
}
To discover a specific type of service, you need to know the correct service type name; check out the DNS SRV (RFC 2782) service types:
String bonjourServiceType = "_http._tcp.local.";
bonjourService = JmDNS.create();
bonjourService.addServiceListener(bonjourServiceType, bonjourServiceListener);
ServiceInfo[] serviceInfos = bonjourService.list(bonjourServiceType);
for (ServiceInfo info : serviceInfos) {
System.out.println("## resolve service " + info.getName() + " : " + info.getURL());
}
bonjourService.close();
For VNC, use _rfb._tcp.local.
For SSH, use _ssh._tcp.local.
For Apache, use _http._tcp.local.
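On the "client and server pairing" point: you do not have to register a service with JmDNS in order to discover it - the listener above sees anything announced over mDNS/Bonjour on the network. Plain Windows installs of VNC, SSH and Apache typically do not announce themselves, which is the usual reason nothing shows up. If you want to advertise a service of your own for other JmDNS instances to find, a minimal sketch (service name, port and TXT payload are made up) is:
import javax.jmdns.JmDNS;
import javax.jmdns.ServiceInfo;

public class RegisterService {
    public static void main(String[] args) throws Exception {
        JmDNS jmdns = JmDNS.create();
        // Advertise a hypothetical HTTP service on port 8080
        ServiceInfo info = ServiceInfo.create("_http._tcp.local.", "my-test-service", 8080, "path=/index.html");
        jmdns.registerService(info);
        System.out.println("Service registered - press Enter to exit");
        System.in.read();
        jmdns.close(); // also unregisters the service
    }
}
Run this on one machine and the DiscoverServices sample on another (or the same one), and the service should appear in the listener output.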

Is there a useDirtyFlag option for Tomcat 6 cluster configuration?

In Tomcat 5.0.x you had the ability to set useDirtyFlag="false" to force replication of the session after every request rather than checking for set/removeAttribute calls.
<Cluster className="org.apache.catalina.cluster.tcp.SimpleTcpCluster"
managerClassName="org.apache.catalina.cluster.session.SimpleTcpReplicationManager"
expireSessionsOnShutdown="false"
useDirtyFlag="false"
doClusterLog="true"
clusterLogName="clusterLog"> ...
The comments in the server.xml stated this may be used to make the following work:
<%
HashMap map = (HashMap)session.getAttribute("map");
map.put("key","value");
%>
i.e. you can change the state of an object that has already been put in the session and be sure that this object will still be replicated to the other nodes in the cluster.
According to the Tomcat 6 documentation you only have two "Manager" options - DeltaManager & BackupManager ... neither of these seems to allow this option or anything like it. In my testing the default setup:
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
where you get the DeltaManager by default, it's definitely behaving as useDirtyFlag="true" (as I'd expect).
So my question is - is there an equivalent in Tomcat 6?
Looking at the source I can see a manager implementation "org.apache.catalina.ha.session.SimpleTcpReplicationManager" which does have the useDirtyFlag, but the javadoc comments state it is "Tomcat Session Replication for Tomcat 4.0" ... I don't know if this is OK to use - I'm guessing not, as it's not mentioned in the main cluster configuration documentation.
I posted essentially the same question on the tomcat-users mailing list, and the responses, along with some information in the Tomcat Bugzilla ([43866]), led me to the following conclusions:
There is no equivalent to the useDirtyFlag; if you're putting mutable (i.e. changing) objects in the session you need a custom-coded solution.
A Tomcat ClusterValve seems to be an effective place for this solution - plug into the cluster mechanism and manipulate attributes to make it appear to the DeltaManager that all attributes in the session have changed. This forces replication of the entire session.
Step 1: Write the ForceReplicationValve (extends ValveBase implements ClusterValve)
I won't include the whole class but the key bit of logic (taking out the logging and instanceof checking):
@Override
public void invoke(Request request, Response response)
throws IOException, ServletException {
getNext().invoke(request, response);
Session session = request.getSessionInternal();
HttpSession deltaSession = (HttpSession) session;
for (Enumeration<String> names = deltaSession.getAttributeNames();
names.hasMoreElements(); ) {
String name = names.nextElement();
deltaSession.setAttribute(name, deltaSession.getAttribute(name));
}
}
Step 2: Alter the cluster config (in conf/server.xml)
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"
channelSendOptions="8">
<Valve className="org.apache.catalina.ha.tcp.ForceReplicationValve"/>
<Valve className="org.apache.catalina.ha.tcp.ReplicationValve"
filter=".*\.gif;.*\.jpg;.*\.png;.*\.js;.*\.htm;.*\.html;.*\.txt;.*\.css;"/>
<Valve className="org.apache.catalina.ha.session.JvmRouteBinderValve"/>
<ClusterListener className="org.apache.catalina.ha.session.JvmRouteSessionIDBinderListener"/>
<ClusterListener className="org.apache.catalina.ha.session.ClusterSessionListener"/>
</Cluster>
Replication of the session to all cluster nodes will now happen after every request.
Aside: Note the channelSendOptions setting. This replaces the replicationMode=asynchronous/synchronous/pooled from Tomcat 5.0.x. See the cluster documentation for the possible int values.
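For example, a sketch of a synchronous-with-acknowledge configuration (assuming the Tribes Channel constants 2 = USE_ACK, 4 = SYNCHRONIZED_ACK, 8 = ASYNCHRONOUS, so 2 + 4 = 6):
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"
         channelSendOptions="6">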
Appendix: Full Valve source as requested
package org.apache.catalina.ha.tcp;
import java.io.IOException;
import java.util.Enumeration;
import java.util.LinkedList;
import java.util.List;
import javax.servlet.ServletException;
import javax.servlet.http.HttpSession;
import org.apache.catalina.Lifecycle;
import org.apache.catalina.LifecycleException;
import org.apache.catalina.LifecycleListener;
import org.apache.catalina.Session;
import org.apache.catalina.connector.Request;
import org.apache.catalina.connector.Response;
import org.apache.catalina.ha.CatalinaCluster;
import org.apache.catalina.ha.ClusterValve;
import org.apache.catalina.ha.session.ReplicatedSession;
import org.apache.catalina.ha.session.SimpleTcpReplicationManager;
import org.apache.catalina.util.LifecycleSupport;
//import org.apache.catalina.util.StringManager;
import org.apache.catalina.valves.ValveBase;
/**
* <p>With the {@link SimpleTcpReplicationManager} effectively deprecated, this allows
* mutable objects to be replicated in the cluster by forcing the "dirty" status on
* every request.</p>
*
* @author Jon Brisbin (via post on tomcat-users http://markmail.org/thread/rdo3drcir75dzzrq)
* @author Kevin Jansz
*/
public class ForceReplicationValve extends ValveBase implements Lifecycle, ClusterValve {
private static org.apache.juli.logging.Log log =
org.apache.juli.logging.LogFactory.getLog( ForceReplicationValve.class );
@SuppressWarnings("hiding")
protected static final String info = "org.apache.catalina.ha.tcp.ForceReplicationValve/1.0";
// this could be used if ForceReplicationValve messages were setup
// in org/apache/catalina/ha/tcp/LocalStrings.properties
//
// /**
// * The StringManager for this package.
// */
// @SuppressWarnings("hiding")
// protected static StringManager sm =
// StringManager.getManager(Constants.Package);
/**
* Not actually required but this must implement {@link ClusterValve} to
* be allowed to be added to the Cluster.
*/
private CatalinaCluster cluster = null ;
/**
* Also not really required, implementing {@link Lifecycle} to allow
* initialisation and shutdown to be logged.
*/
protected LifecycleSupport lifecycle = new LifecycleSupport(this);
/**
* Default constructor
*/
public ForceReplicationValve() {
super();
if (log.isInfoEnabled()) {
log.info(getInfo() + ": created");
}
}
@Override
public String getInfo() {
return info;
}
@Override
public void invoke(Request request, Response response) throws IOException,
ServletException {
getNext().invoke(request, response);
Session session = null;
try {
session = request.getSessionInternal();
} catch (Throwable e) {
log.error(getInfo() + ": Unable to perform replication request.", e);
}
String context = request.getContext().getName();
String task = request.getPathInfo();
if(task == null) {
task = request.getRequestURI();
}
if (session != null) {
if (log.isDebugEnabled()) {
log.debug(getInfo() + ": [session=" + session.getId() + ", instanceof=" + session.getClass().getName() + ", context=" + context + ", request=" + task + "]");
}
if (session instanceof ReplicatedSession) {
// it's a SimpleTcpReplicationManager - can just set to dirty
((ReplicatedSession) session).setIsDirty(true);
if (log.isDebugEnabled()) {
log.debug(getInfo() + ": [session=" + session.getId() + ", context=" + context + ", request=" + task + "] marked DIRTY");
}
} else {
// for everything else - cycle all attributes
List cycledNames = new LinkedList();
// in a cluster where the app is <distributable/> this should be
// org.apache.catalina.ha.session.DeltaSession - implements HttpSession
HttpSession deltaSession = (HttpSession) session;
for (Enumeration<String> names = deltaSession.getAttributeNames(); names.hasMoreElements(); ) {
String name = names.nextElement();
deltaSession.setAttribute(name, deltaSession.getAttribute(name));
cycledNames.add(name);
}
if (log.isDebugEnabled()) {
log.debug(getInfo() + ": [session=" + session.getId() + ", context=" + context + ", request=" + task + "] cycled attributes=" + cycledNames);
}
}
} else {
String id = request.getRequestedSessionId();
log.warn(getInfo() + ": [session=" + id + ", context=" + context + ", request=" + task + "] Session not available, unable to send session over cluster.");
}
}
/*
* ClusterValve methods - implemented to ensure this valve is not ignored by Cluster
*/
public CatalinaCluster getCluster() {
return cluster;
}
public void setCluster(CatalinaCluster cluster) {
this.cluster = cluster;
}
/*
* Lifecycle methods - currently implemented just for logging startup
*/
/**
* Add a lifecycle event listener to this component.
*
* @param listener The listener to add
*/
public void addLifecycleListener(LifecycleListener listener) {
lifecycle.addLifecycleListener(listener);
}
/**
* Get the lifecycle listeners associated with this lifecycle. If this
* Lifecycle has no listeners registered, a zero-length array is returned.
*/
public LifecycleListener[] findLifecycleListeners() {
return lifecycle.findLifecycleListeners();
}
/**
* Remove a lifecycle event listener from this component.
*
* @param listener The listener to remove
*/
public void removeLifecycleListener(LifecycleListener listener) {
lifecycle.removeLifecycleListener(listener);
}
public void start() throws LifecycleException {
lifecycle.fireLifecycleEvent(START_EVENT, null);
if (log.isInfoEnabled()) {
log.info(getInfo() + ": started");
}
}
public void stop() throws LifecycleException {
lifecycle.fireLifecycleEvent(STOP_EVENT, null);
if (log.isInfoEnabled()) {
log.info(getInfo() + ": stopped");
}
}
}
Many thanks to kevinjansz for providing the source for ForceReplicationValve.
I adjusted it for Tomcat 7; here it is if anyone needs it:
package org.apache.catalina.ha.tcp;
import java.io.IOException;
import java.util.Enumeration;
import java.util.LinkedList;
import java.util.List;
import javax.servlet.ServletException;
import javax.servlet.http.HttpSession;
import org.apache.catalina.Lifecycle;
import org.apache.catalina.LifecycleException;
import org.apache.catalina.LifecycleListener;
import org.apache.catalina.Session;
import org.apache.catalina.connector.Request;
import org.apache.catalina.connector.Response;
import org.apache.catalina.ha.CatalinaCluster;
import org.apache.catalina.ha.ClusterValve;
import org.apache.catalina.util.LifecycleSupport;
import org.apache.catalina.valves.ValveBase;
import org.apache.catalina.LifecycleState;
// import org.apache.tomcat.util.res.StringManager;
/**
* <p>With the {@link SimpleTcpReplicationManager} effectively deprecated, this allows
* mutable objects to be replicated in the cluster by forcing the "dirty" status on
* every request.</p>
*
* @author Jon Brisbin (via post on tomcat-users http://markmail.org/thread/rdo3drcir75dzzrq)
* @author Kevin Jansz
*/
public class ForceReplicationValve extends ValveBase implements Lifecycle, ClusterValve {
private static org.apache.juli.logging.Log log =
org.apache.juli.logging.LogFactory.getLog( ForceReplicationValve.class );
@SuppressWarnings("hiding")
protected static final String info = "org.apache.catalina.ha.tcp.ForceReplicationValve/1.0";
// this could be used if ForceReplicationValve messages were setup
// in org/apache/catalina/ha/tcp/LocalStrings.properties
//
// /**
// * The StringManager for this package.
// */
// @SuppressWarnings("hiding")
// protected static StringManager sm =
// StringManager.getManager(Constants.Package);
/**
* Not actually required but this must implement {@link ClusterValve} to
* be allowed to be added to the Cluster.
*/
private CatalinaCluster cluster = null;
/**
* Also not really required, implementing {@link Lifecycle} to allow
* initialisation and shutdown to be logged.
*/
protected LifecycleSupport lifecycle = new LifecycleSupport(this);
/**
* Default constructor
*/
public ForceReplicationValve() {
super();
if (log.isInfoEnabled()) {
log.info(getInfo() + ": created");
}
}
@Override
public String getInfo() {
return info;
}
@Override
public void invoke(Request request, Response response) throws IOException,
ServletException {
getNext().invoke(request, response);
Session session = null;
try {
session = request.getSessionInternal();
} catch (Throwable e) {
log.error(getInfo() + ": Unable to perform replication request.", e);
}
String context = request.getContext().getName();
String task = request.getPathInfo();
if(task == null) {
task = request.getRequestURI();
}
if (session != null) {
if (log.isDebugEnabled()) {
log.debug(getInfo() + ": [session=" + session.getId() + ", instanceof=" + session.getClass().getName() + ", context=" + context + ", request=" + task + "]");
}
//cycle all attributes
List<String> cycledNames = new LinkedList<String>();
// in a cluster where the app is <distributable/> this should be
// org.apache.catalina.ha.session.DeltaSession - implements HttpSession
HttpSession deltaSession = (HttpSession) session;
for (Enumeration<String> names = deltaSession.getAttributeNames(); names.hasMoreElements(); ) {
String name = names.nextElement();
deltaSession.setAttribute(name, deltaSession.getAttribute(name));
cycledNames.add(name);
}
if (log.isDebugEnabled()) {
log.debug(getInfo() + ": [session=" + session.getId() + ", context=" + context + ", request=" + task + "] cycled attributes=" + cycledNames);
}
} else {
String id = request.getRequestedSessionId();
log.warn(getInfo() + ": [session=" + id + ", context=" + context + ", request=" + task + "] Session not available, unable to send session over cluster.");
}
}
/*
* ClusterValve methods - implemented to ensure this valve is not ignored by Cluster
*/
public CatalinaCluster getCluster() {
return cluster;
}
public void setCluster(CatalinaCluster cluster) {
this.cluster = cluster;
}
/*
* Lifecycle methods - currently implemented just for logging startup
*/
/**
* Add a lifecycle event listener to this component.
*
* @param listener The listener to add
*/
public void addLifecycleListener(LifecycleListener listener) {
lifecycle.addLifecycleListener(listener);
}
/**
* Get the lifecycle listeners associated with this lifecycle. If this
* Lifecycle has no listeners registered, a zero-length array is returned.
*/
public LifecycleListener[] findLifecycleListeners() {
return lifecycle.findLifecycleListeners();
}
/**
* Remove a lifecycle event listener from this component.
*
* @param listener The listener to remove
*/
public void removeLifecycleListener(LifecycleListener listener) {
lifecycle.removeLifecycleListener(listener);
}
protected synchronized void startInternal() throws LifecycleException {
setState(LifecycleState.STARTING);
if (log.isInfoEnabled()) {
log.info(getInfo() + ": started");
}
}
protected synchronized void stopInternal() throws LifecycleException {
setState(LifecycleState.STOPPING);
if (log.isInfoEnabled()) {
log.info(getInfo() + ": stopped");
}
}
}
