Extract count based on column data from a CSV file in Java

I have my Java code and TestData.csv (input file) below, and my expected output is also shown below, but the code does not produce the count I expect. I have tried a lot. Does anyone have any idea about this? Any help is appreciated. Based on the column data, I want the count for a particular value.
package com;

import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Arrays;
import com.opencsv.CSVWriter;
import com.opencsv.CSVReader;
import java.time.format.DateTimeFormatter;
import java.time.LocalDateTime;

public class TestDataProcess {

    public static void main(String args[]) throws IOException {
        processData();
    }

    public static void processData() {
        String[] trafficDetails;
        int locColumnPosition, subCcolumnPosition, j, i, msgTypePosition, k, m, trafficLevelPosition;
        String masterCSVFile, dayFolderPath;
        String[] countryID = { "LOC1" };
        String[] subID = { "S1" };
        String[] mType = { "MSG1" };
        String[] trafficLevel = { "1", "2", "3" };
        String columnNameLocation = "CountryID";
        String columnNameSubsystem = "SubID";
        String columnNameMsgType = "Type";
        String columnNameAlrmLevel = "TrafficLevel";
        masterCSVFile = "D:\\TestData.csv";
        dayFolderPath = "D:\\output\\";
        DateTimeFormatter dtf = DateTimeFormatter.ofPattern("dd_MM_yyyy");
        LocalDateTime now = LocalDateTime.now();
        System.out.println(dtf.format(now));
        int count = 0;
        for (i = 0; i < countryID.length; i++) {
            count = 0;
            for (j = 0; j < subID.length; j++) {
                count = 0;
                String locaIdSubsysId = dtf.format(now) + "_" + countryID[i] + "_" + subID[j] + ".csv";
                try (CSVWriter csvWriter = new CSVWriter(new FileWriter(dayFolderPath + locaIdSubsysId, true));
                        CSVReader csvReader = new CSVReader(new FileReader(masterCSVFile))) {
                    trafficDetails = csvReader.readNext(); // header row
                    csvWriter.writeNext(trafficDetails);
                    locColumnPosition = getHeaderLocation(trafficDetails, columnNameLocation);
                    subCcolumnPosition = getHeaderLocation(trafficDetails, columnNameSubsystem);
                    msgTypePosition = getHeaderLocation(trafficDetails, columnNameMsgType);
                    trafficLevelPosition = getHeaderLocation(trafficDetails, columnNameAlrmLevel);
                    while ((trafficDetails = csvReader.readNext()) != null && locColumnPosition > -1
                            && subCcolumnPosition > -1) {
                        for (k = 0; k < mType.length; k++) {
                            for (m = 0; m < trafficLevel.length; m++) {
                                if (trafficDetails[locColumnPosition].matches(countryID[i])
                                        && trafficDetails[subCcolumnPosition].matches(subID[j])
                                        && trafficDetails[trafficLevelPosition].matches(trafficLevel[m])
                                        && trafficDetails[msgTypePosition].matches(mType[k])) {
                                    count = count + 1;
                                    csvWriter.writeNext(trafficDetails);
                                }
                            }
                        }
                    }
                } catch (Exception ee) {
                    ee.printStackTrace();
                }
            }
        }
    }

    public static int getHeaderLocation(String[] headers, String columnName) {
        return Arrays.asList(headers).indexOf(columnName);
    }
}

You can achieve that by using a Map that stores the traffic level as its key and all the matching rows from your CSV file in a List as its value. Then just print the size of each List.
See the following example and have a look at the code comments:
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class ExampleMain {

    public static void main(String[] args) {
        // create a Path object from the path to your file
        Path csvFilePath = Paths.get("Y:\\our\\path\\to\\file.csv");
        // create a data structure that stores data rows per traffic level
        Map<Integer, List<DataRow>> dataRowsPerTrafficLevel = new TreeMap<Integer, List<DataRow>>();
        try {
            // read all the lines of the file
            List<String> lines = Files.readAllLines(csvFilePath);
            // iterate all the lines, skipping the header line
            for (int i = 1; i < lines.size(); i++) {
                // split the lines by the separator (WHICH MAY DIFFER FROM THE ONE USED HERE)
                String[] lineValues = lines.get(i).split(",");
                // store the value from column 6 (index 5) as the traffic level
                int trafficLevel = Integer.valueOf(lineValues[5]);
                // if the map already contains this key, just add the next data row
                if (dataRowsPerTrafficLevel.containsKey(trafficLevel)) {
                    DataRow dataRow = new DataRow();
                    dataRow.subId = lineValues[1];
                    dataRow.countryId = lineValues[2];
                    dataRow.type = lineValues[3];
                    dataRowsPerTrafficLevel.get(trafficLevel).add(dataRow);
                } else {
                    /* otherwise create a list, then a data row, add it to the list and put it in
                     * the map along with the new key
                     */
                    List<DataRow> dataRows = new ArrayList<DataRow>();
                    DataRow dataRow = new DataRow();
                    dataRow.subId = lineValues[1];
                    dataRow.countryId = lineValues[2];
                    dataRow.type = lineValues[3];
                    dataRows.add(dataRow);
                    dataRowsPerTrafficLevel.put(trafficLevel, dataRows);
                }
            }
            // print the result
            dataRowsPerTrafficLevel.forEach((trafficLevel, dataRows) -> {
                System.out.println("For TrafficLevel " + trafficLevel + " there are " + dataRows.size()
                        + " data rows in the csv file");
            });
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /*
     * small holder class that just holds the values of columns 3, 4 and 5.
     * If you want to have distinct values, make this one a full POJO implementing Comparable
     */
    static class DataRow {
        String subId;
        String countryId;
        String type;
    }
}
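As a side note, the if/else branch that creates the list can be collapsed with Map.computeIfAbsent (Java 8+). A minimal sketch of the loop body, under the same column-layout assumptions as above:

// same parsing as above: traffic level in column 6 (index 5)
int trafficLevel = Integer.valueOf(lineValues[5]);
DataRow dataRow = new DataRow();
dataRow.subId = lineValues[1];
dataRow.countryId = lineValues[2];
dataRow.type = lineValues[3];
// creates the list on first use, then always adds to it
dataRowsPerTrafficLevel.computeIfAbsent(trafficLevel, k -> new ArrayList<>()).add(dataRow);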

Related

Import Data as Array from TXT File in Java

I want to import data like this from a .txt file using Java.
I want to import it as an array. The class for the data looks like this:
class SolidStateDrive {
    String brand;
    String model;
    int capacityInGB;
}
So if the array from the txt file were hardcoded, it would look like this:
ssd[0].brand = Samsung;
ssd[1].brand = Adata;
ssd[0].model = Evo970;
ssd[1].model = SU650;
ssd[0].capacityInGB = 512;
ssd[1].capacityInGB = 240;
The problem is that when I try to read the .txt file, it can only store one record, from the first line. If there is more than one, it throws an ArrayIndexOutOfBoundsException.
I'm using a while loop so that it keeps looping as long as the next line is not null. This is my code:
int n = 0;
SolidStateDrive[] ssd = new SolidStateDrive[n+1];
try {
    BufferedReader br = new BufferedReader(new FileReader("SSD.txt"));
    String line = null;
    while ((line = br.readLine()) != null) {
        String tmp[] = line.split("\t");
        ssd[n] = new SolidStateDrive();
        ssd[n].brand = tmp[0];
        ssd[n].model = tmp[1];
        ssd[n].capacityInGB = Integer.parseInt(tmp[2]);
        n++;
    }
    br.close();
} catch(IOException ex) {
    ex.printStackTrace();
}
Update: I already tried this, but it doesn't work either:
SolidStateDrive[] ssd = new SolidStateDrive[2];
For the complete code in that file: pastebin
The problem is in the line that splits the string: String tmp[] = line.split("\t");.
Take line one from the txt file, ssd[0].brand = Samsung;: since it contains no tab, the output of the split is the same as the input, ssd[0].brand = Samsung;.
while ((line = br.readLine()) != null) {
    String tmp[] = line.split("\t");
    ssd[n] = new SolidStateDrive();
    ssd[n].brand = tmp[0];
    ssd[n].model = tmp[1];
    ssd[n].capacityInGB = Integer.parseInt(tmp[2]);
    n++;
}
So tmp[] will contain only tmp[0] = ssd[0].brand = Samsung;.
When you try to access tmp[1] you will get:
Exception in thread "main" java.lang.ArrayIndexOutOfBoundsException: Index 1 out of bounds for length 1
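You can verify this with a minimal standalone check (the input line is hypothetical, just to illustrate the split behavior):

String line = "ssd[0].brand = Samsung;"; // contains no tab character
String[] tmp = line.split("\t");
System.out.println(tmp.length); // prints 1: the whole line is the only element
System.out.println(tmp[0]);     // prints the unchanged input line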
The solution to your problem:
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

public class ReadData {

    public static void main(String[] str) {
        readData();
    }

    public static void readData() {
        List<SolidStateDrive> ssd = new ArrayList<>();
        try (BufferedReader br = new BufferedReader(new FileReader("SSD.txt"))) {
            ssd = br.lines().map(s -> {
                String[] tmp = s.split("\\s+");
                return new SolidStateDrive(tmp[0], tmp[1], Integer.parseInt(tmp[2]));
            }).collect(Collectors.toList());
        } catch (IOException ex) {
            ex.printStackTrace();
        }
        ssd.stream().forEach(System.out::println);
    }
}

class SolidStateDrive {
    String brand;
    String model;
    int capacityInGB;

    public SolidStateDrive(String brand, String model, int capacityInGB) {
        super();
        this.brand = brand;
        this.model = model;
        this.capacityInGB = capacityInGB;
    }

    @Override
    public String toString() {
        return "SolidStateDrive [brand=" + brand + ", model=" + model + ", capacityInGB=" + capacityInGB + "]";
    }
}
The ArrayIndexOutOfBoundsException occurs in the code you posted on pastebin; you have a for loop that runs n+1 times, which is always one more than the number of SSDs in the array. This is the offending for loop:
for (int j = 0; j <= n; j++) {
    System.out.println(j + "\t" + ssd[j].brand + "\t" + ssd[j].model + "\t" + ssd[j].capacityInGB);
}
To fix it, just change the <= to <, so the loop goes up to, but not including, n, since you started it at 0, not 1. It should look like this:
for (int j = 0; j < n; j++) {
    System.out.println(j + "\t" + ssd[j].brand + "\t" + ssd[j].model + "\t" + ssd[j].capacityInGB);
}

Hadoop MapReduce job creates too-large intermediate files

I wrote a MapReduce program, but when I try to run it on Hadoop it cannot succeed, because it generates so much intermediate data that I get an error message saying the node has no more space on it. It then tries the second node, but the result is the same. I would like to process two text files of approximately ~60k lines.
I have tried:
- enabling snappy compression, but it didn't help;
- adding more space, so the two nodes have 50 GB of storage each.
Since neither of these helped, maybe the problem is with the code, not with the setup.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class FirstMapper extends Mapper<LongWritable, Text, Text, Text> {

    enum POS_TAG {
        CC, CD, DT, EX,
        FW, IN, JJ, JJR,
        JJS, LS, MD, NN,
        NNS, NNP, NNPS, PDT,
        WDT, WP, POS, PRP,
        PRP$, RB, RBR, RBS,
        RP, SYM, TO, UH,
        VB, VBD, VBG, VBN,
        VBP, VBZ, WP$, WRB
    }

    private static final List<String> tags = Stream.of(POS_TAG.values())
            .map(Enum::name)
            .collect(Collectors.toList());

    private static final int MAX_NGRAM = 5;

    private static String[][] cands = {
            new String[3],
            new String[10],
            new String[32],
            new String[10]
    };

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        Configuration conf = context.getConfiguration();
        String location = conf.get("job.cands.path");
        if (location != null) {
            BufferedReader br = null;
            try {
                FileSystem fs = FileSystem.get(conf);
                Path path = new Path(location);
                if (fs.exists(path)) {
                    FSDataInputStream fis = fs.open(path);
                    br = new BufferedReader(new InputStreamReader(fis));
                    String line;
                    int i = 0;
                    while ((line = br.readLine()) != null) {
                        String[] splitted = line.split(" ");
                        cands[i] = splitted;
                        i++;
                    }
                }
            } catch (IOException e) {
                //
            } finally {
                br.close();
            }
        }
    }

    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String[] tokens = value.toString().split(" ");
        int m = tokens.length;
        for (int n = 2; n <= MAX_NGRAM; n++) {
            for (int s = 0; s <= m - n; s++) {
                for (int i = 0; i < cands[n - 2].length; i++) {
                    List<String> pattern = new ArrayList<>();
                    List<String> metWords = new ArrayList<>();
                    for (int j = 0; j <= n - 1; j++) {
                        String[] pair = tokens[s + j].split("/");
                        String word = pair[0];
                        String pos = pair[1];
                        char c = cands[n - 2][i].charAt(j);
                        addToPattern(word, pos, c, pattern);
                        if (c > 0 && tags.contains(pos)) {
                            metWords.add(word);
                        }
                    }
                    if (metWords.isEmpty()) {
                        metWords.add("_NONE");
                    }
                    Text resultKey = new Text(pattern.toString() + ";" + metWords.toString());
                    context.write(resultKey, new Text(key.toString()));
                }
            }
        }
    }

    public void addToPattern(String word, String pos, char c, List<String> pattern) {
        switch (c) {
            case 'w':
                pattern.add(word);
                break;
            case 'p':
                pattern.add(pos);
                break;
            default:
                pattern.add("_WC_");
                break;
        }
    }
}

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Main {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("job.cands.path", "/user/thelfter/pwp");
        Job job1 = Job.getInstance(conf, "word pattern1");
        job1.setJarByClass(Main.class);
        job1.setMapperClass(FirstMapper.class);
        job1.setCombinerClass(FirstReducer.class);
        job1.setReducerClass(FirstReducer.class);
        job1.setMapOutputKeyClass(Text.class);
        job1.setMapOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job1, new Path(args[0]));
        FileOutputFormat.setOutputPath(job1, new Path("/user/thelfter/output"));
        System.exit(job1.waitForCompletion(true) ? 0 : 1);
    }
}
If you're using YARN, then the NodeManager's disk space is controlled by yarn.nodemanager.local-dirs in your yarn-site.xml file, so whatever that points to needs to have enough disk space.
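For reference, a minimal sketch of that setting in yarn-site.xml; the directory paths here are placeholders, so point them at volumes with enough free space:

<property>
  <!-- comma-separated list of local directories where intermediate data is written -->
  <name>yarn.nodemanager.local-dirs</name>
  <value>/mnt/disk1/yarn/local,/mnt/disk2/yarn/local</value>
</property>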

Counting the number of occurrences of each word in a PDF file in Java

I am making a Java program using PDFBox that reads a PDF file and counts how many times each word appears in it, but for some reason nothing appears when I run the program. I expect it to print each word and the number of occurrences of that word next to it. Thanks in advance.
Here is my code:
package lab8;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;
import java.util.Scanner;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.text.PDFTextStripper;

public class Extractor {

    public static void main(String[] args) throws FileNotFoundException {
        Map<String, Integer> frequencies = new TreeMap<String, Integer>();
        PDDocument pd;
        File input = new File("C:\\Users\\Ammar\\Desktop\\Application.pdf");
        Scanner in = new Scanner(input);
        try {
            pd = PDDocument.load(input);
            PDFTextStripper stripper = new PDFTextStripper();
            stripper.setEndPage(20);
            String text = stripper.getText(pd);
            while (in.hasNext()) {
                String word = clean(in.next());
                if (word != "") {
                    Integer count = frequencies.get(word);
                    if (count == null) {
                        count = 1;
                    } else {
                        count = count + 1;
                    }
                    frequencies.put(word, count);
                }
            }
            for (String key : frequencies.keySet()) {
                System.out.println(key + ": " + frequencies.get(key));
            }
            if (pd != null) {
                pd.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    private static String clean(String s) {
        String r = "";
        for (int i = 0; i < s.length(); i++) {
            char c = s.charAt(i);
            if (Character.isLetter(c)) {
                r = r + c;
            }
        }
        return r.toLowerCase();
    }
}
I have tried to resolve the logic. The original code scans the raw PDF file with Scanner instead of the text extracted by PDFTextStripper, which is likely why nothing useful was printed; the version below works on the extracted text itself.
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;
import org.apache.pdfbox.pdmodel.PDDocument;
import org.apache.pdfbox.text.PDFTextStripper;

public class Extractor {

    public static void main(String[] args) throws FileNotFoundException {
        Map<String, Integer> wordFrequencies = new TreeMap<String, Integer>();
        Map<Character, Integer> charFrequencies = new TreeMap<Character, Integer>();
        PDDocument pd;
        File input = new File("C:\\Users\\Ammar\\Desktop\\Application.pdf");
        try {
            pd = PDDocument.load(input);
            PDFTextStripper stripper = new PDFTextStripper();
            stripper.setEndPage(20);
            String text = stripper.getText(pd);
            for (int i = 0; i < text.length(); i++) {
                char c = text.charAt(i);
                int count = charFrequencies.get(c) != null ? charFrequencies.get(c) + 1 : 1;
                charFrequencies.put(c, count);
            }
            String[] texts = text.split(" ");
            for (String txt : texts) {
                int count = wordFrequencies.get(txt) != null ? wordFrequencies.get(txt) + 1 : 1;
                wordFrequencies.put(txt, count);
            }
            System.out.println("Printing the number of words");
            for (String key : wordFrequencies.keySet()) {
                System.out.println(key + ": " + wordFrequencies.get(key));
            }
            System.out.println("Printing the number of characters");
            for (char charKey : charFrequencies.keySet()) {
                System.out.println(charKey + ": " + charFrequencies.get(charKey));
            }
            if (pd != null) {
                pd.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Try this code. If there is still some problem that you are not able to resolve, I can try to help.
In your code you can also use a StringTokenizer object, passing it your extracted text:
StringTokenizer st = new StringTokenizer(stripper.getText(pd));
Then loop while st.hasMoreTokens() and fetch each word with String word = clean(st.nextToken());. This also works fine.
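Put together, a minimal sketch of that loop, reusing the clean method and the frequencies map from the question (and assuming import java.util.StringTokenizer):

StringTokenizer st = new StringTokenizer(stripper.getText(pd));
while (st.hasMoreTokens()) {
    String word = clean(st.nextToken());
    if (!word.isEmpty()) { // compare content, not references as word != "" does
        frequencies.merge(word, 1, Integer::sum); // increment this word's count
    }
}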

Compare two ArrayLists and print data of the merged ArrayList based on a condition

The two files that I use are
1. Details.txt
Code|Account No.|City
100051001|999001|Delhi
200051003|999002|Kanpur
180051012|999003|Jammu
2. Sal.txt
Code|Amount
100051001|100
200051001|200
180051012|123
Output should be
100051001 999001 Amount 100 INR Delhi
180051012 999003 Amount 123 INR Jammu
The code that I have tried is as follows:
//Code to get the output
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;

public class compareArrayList {

    public static void main(String[] args) throws FileNotFoundException {
        String credit_file_name = "C:/transfer/Sal.txt";
        String file_branches = "C:/transfer/Details.txt";
        BufferedReader reader0;
        BufferedReader reader1;
        ArrayList<String> branch_list = new ArrayList<String>();
        ArrayList<String> credit_file = new ArrayList<String>();
        try {
            reader0 = new BufferedReader(new FileReader(file_branches));
            reader1 = new BufferedReader(new FileReader(credit_file_name));
            String data = "";
            String acct_num = "";
            String city = "";
            String amount1 = "";
            String line0 = reader0.readLine();
            String line1 = reader1.readLine();
            while (line0 != null) {
                branch_list.add(line0);
                line0 = reader0.readLine();
            }
            Collections.sort(branch_list);
            reader0.close();
            while (line1 != null) {
                credit_file.add(line1);
                line1 = reader1.readLine();
            }
            Collections.sort(credit_file);
            reader1.close();
            for (int i = 0; i < branch_list.size(); i++) {
                for (int j = 0; j < credit_file.size(); j++) {
                    int firstIndex0 = branch_list.get(i).indexOf('|', 1);
                    int secondIndex0 = branch_list.get(i).indexOf('|', firstIndex0 + 1);
                    int firstIndex1 = credit_file.get(j).indexOf('|', 1);
                    acct_num = branch_list.get(i).substring(firstIndex0 + 1, secondIndex0);
                    city = branch_list.get(i).substring(secondIndex0 + 1, branch_list.get(i).length());
                    amount1 = credit_file.get(j).substring(firstIndex1 + 1, credit_file.get(j).length());
                    if (branch_list.get(i).substring(0, 9).equals(credit_file.get(j).substring(0, 9)))
                        data = branch_list.get(i).substring(0, 9) + " " + acct_num + " Amount " + amount1 + " INR " + city;
                }
                System.out.println(data);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
The output that I am getting is as follows:
100051001 999001 Amount 100 INR Delhi
180051012 999003 Amount 123 INR Jammu
180051012 999003 Amount 123 INR Jammu
There are duplicate records in it.
You can use a map to eliminate the duplicate values. I have assumed that acct_num is unique, so the modified code is:
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class compareArrayList {

    public static void main(String[] args) throws FileNotFoundException {
        String credit_file_name = "C:/transfer/Sal.txt";
        String file_branches = "C:/transfer/Details.txt";
        BufferedReader reader0;
        BufferedReader reader1;
        ArrayList<String> branch_list = new ArrayList<String>();
        ArrayList<String> credit_file = new ArrayList<String>();
        try {
            reader0 = new BufferedReader(new FileReader(file_branches));
            reader1 = new BufferedReader(new FileReader(credit_file_name));
            String data = "";
            Long acct_num = null;
            String city = "";
            String amount1 = "";
            String line0 = reader0.readLine();
            String line1 = reader1.readLine();
            Map<Long, String> maps = new HashMap<>();
            while (line0 != null) {
                branch_list.add(line0);
                line0 = reader0.readLine();
            }
            Collections.sort(branch_list);
            reader0.close();
            while (line1 != null) {
                credit_file.add(line1);
                line1 = reader1.readLine();
            }
            Collections.sort(credit_file);
            reader1.close();
            for (int i = 0; i < branch_list.size(); i++) {
                for (int j = 0; j < credit_file.size(); j++) {
                    int firstIndex0 = branch_list.get(i).indexOf('|', 1);
                    int secondIndex0 = branch_list.get(i).indexOf('|', firstIndex0 + 1);
                    int firstIndex1 = credit_file.get(j).indexOf('|', 1);
                    acct_num = new Long(branch_list.get(i).substring(firstIndex0 + 1, secondIndex0));
                    city = branch_list.get(i).substring(secondIndex0 + 1, branch_list.get(i).length());
                    amount1 = credit_file.get(j).substring(firstIndex1 + 1, credit_file.get(j).length());
                    if (branch_list.get(i).substring(0, 9).equals(credit_file.get(j).substring(0, 9)))
                        data = branch_list.get(i).substring(0, 9) + " " + acct_num + " Amount " + amount1 + " INR " + city;
                    maps.put(acct_num, data);
                }
                //System.out.println(data);
            }
            for (String value : maps.values())
                System.out.println(value);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
This is the approach I would take to solve this type of problem:
- Use a library that can read CSV files, such as supercsv or opencsv, to read the files and convert them into Java objects.
- Create a Java class that can represent your data.
- Create a map and add the items from the first file with acct_num as the key.
- Iterate through the items from the second file and update the map, as in the sketch after this list.
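A minimal sketch of that map-based join, using plain String.split instead of a CSV library to stay self-contained; the file paths, the Record holder class, and the column layout follow the question and are otherwise assumptions:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class JoinFiles {

    // simple holder for one row of Details.txt
    static class Record {
        String code, acctNum, city;

        Record(String code, String acctNum, String city) {
            this.code = code;
            this.acctNum = acctNum;
            this.city = city;
        }
    }

    public static void main(String[] args) throws IOException {
        List<String> details = Files.readAllLines(Paths.get("C:/transfer/Details.txt"));
        List<String> sal = Files.readAllLines(Paths.get("C:/transfer/Sal.txt"));

        // key the Details.txt rows by code; a map keeps each code at most once
        Map<String, Record> byCode = new LinkedHashMap<>();
        for (String line : details.subList(1, details.size())) { // skip header
            String[] f = line.split("\\|");
            byCode.put(f[0], new Record(f[0], f[1], f[2]));
        }

        // walk Sal.txt once and print only the codes present in both files
        for (String line : sal.subList(1, sal.size())) { // skip header
            String[] f = line.split("\\|");
            Record r = byCode.get(f[0]);
            if (r != null) {
                System.out.println(r.code + " " + r.acctNum + " Amount " + f[1] + " INR " + r.city);
            }
        }
    }
}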

Prediction method giving an error in Java-Weka integration

package demo_thesis;

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import weka.classifiers.Classifier;
import weka.classifiers.Evaluation;
import weka.classifiers.evaluation.NominalPrediction;
import weka.classifiers.rules.DecisionTable;
import weka.classifiers.rules.PART;
import weka.classifiers.trees.DecisionStump;
import weka.classifiers.trees.J48;
import weka.core.FastVector;
import weka.core.Instances;

public class WekaTest {

    public static BufferedReader readDataFile(String filename) {
        BufferedReader inputReader = null;
        try {
            inputReader = new BufferedReader(new FileReader(filename));
        } catch (FileNotFoundException ex) {
            System.err.println("File not found: " + filename);
        }
        return inputReader;
    }

    public static Evaluation classify(Classifier model,
            Instances trainingSet, Instances testingSet) throws Exception {
        Evaluation evaluation = new Evaluation(trainingSet);
        model.buildClassifier(trainingSet);
        evaluation.evaluateModel(model, testingSet);
        return evaluation;
    }

    public static double calculateAccuracy(FastVector predictions) {
        double correct = 0;
        for (int i = 0; i < predictions.size(); i++) {
            NominalPrediction np = (NominalPrediction) predictions.elementAt(i);
            if (np.predicted() == np.actual()) {
                correct++;
            }
        }
        return 100 * correct / predictions.size();
    }

    public static Instances[][] crossValidationSplit(Instances data, int numberOfFolds) {
        Instances[][] split = new Instances[2][numberOfFolds];
        for (int i = 0; i < numberOfFolds; i++) {
            split[0][i] = data.trainCV(numberOfFolds, i);
            split[1][i] = data.testCV(numberOfFolds, i);
        }
        return split;
    }

    public static void main(String[] args) throws Exception {
        BufferedReader datafile = readDataFile("C:\\Users\\user\\Desktop\\demo_thesis\\src\\input_file\\weather.txt");
        Instances data = new Instances(datafile);
        data.setClassIndex(data.numAttributes() - 1);

        // Do 10-split cross validation
        Instances[][] split = crossValidationSplit(data, 10);

        // Separate split into training and testing arrays
        Instances[] trainingSplits = split[0];
        Instances[] testingSplits = split[1];

        // Use a set of classifiers
        Classifier[] models = {
                new J48(), // a decision tree
                new PART(),
                new DecisionTable(), // decision table majority classifier
                new DecisionStump() // one-level decision tree
        };

        // Run for each model
        for (int j = 0; j < models.length; j++) {
            // Collect every group of predictions for current model in a FastVector
            FastVector predictions = new FastVector();

            // For each training-testing split pair, train and test the classifier
            for (int i = 0; i < trainingSplits.length; i++) {
                Evaluation validation = classify(models[j], trainingSplits[i], testingSplits[i]);
                predictions.appendElements(validation.predictions());

                // Uncomment to see the summary for each training-testing pair.
                System.out.println(models[j].toString());
            }

            // Calculate overall accuracy of current classifier on all splits
            double accuracy = calculateAccuracy(predictions);

            // Print current classifier's name and accuracy in a complicated,
            // but nice-looking way.
            System.out.println("Accuracy of " + models[j].getClass().getSimpleName() + ": "
                    + String.format("%.2f%%", accuracy)
                    + "\n---------------------------------");
        }
    }
}
I have added the Weka jar file to a Java package and used this code, but there is an error on the line predictions.appendElements(validation.predictions()); saying "cannot find symbol, symbol: method predictions()". I am using NetBeans IDE 8.2 and JDK 1.8. I have tried many ways but cannot solve it. How should I fix this error?
