Remove duplicate strings from an ArrayList<Object> - Java

My program opens a file and saves its words together with their byte offset from the beginning of the file. However, the file contains many duplicate words that I don't want, and I also want the list to be in alphabetical order. The problem is that when I fix the order, the duplicates get messed up, and vice versa. Here is my code:
import java.io.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Set;

class MyMain {
    public static void main(String[] args) throws IOException {
        ArrayList<DictPage> listOfWords = new ArrayList<DictPage>();
        LinkedList<Page> Eurethrio = new LinkedList<Page>();
        File file = new File("C:\\Kennedy.txt");
        BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(file)));
        // This will reference one line at a time...
        String line = null;
        int line_count = 0;
        int byte_count;
        int total_byte_count = 0;
        int fromIndex;
        int kat = 0;
        while ((line = br.readLine()) != null) {
            line_count++;
            fromIndex = 0;
            String[] tokens = line.split(",\\s+|\\s*\\\"\\s*|\\s+|\\.\\s*|\\s*\\:\\s*");
            String line_rest = line;
            for (int i = 1; i <= tokens.length; i++) {
                byte_count = line_rest.indexOf(tokens[i - 1]);
                //if ( tokens[i-1].length() != 0)
                //System.out.println("\n(line:" + line_count + ", word:" + i + ", start_byte:" + (total_byte_count + fromIndex) + "' word_length:" + tokens[i-1].length() + ") = " + tokens[i-1]);
                fromIndex = fromIndex + byte_count + 1 + tokens[i - 1].length();
                if (fromIndex < line.length())
                    line_rest = line.substring(fromIndex);
                if (!listOfWords.contains(tokens[i - 1])) { // don't store the same word twice
                    //listOfWords.add(tokens[i-1]);
                    listOfWords.add(new DictPage(tokens[i - 1], kat));
                    kat++;
                }
                Eurethrio.add(new Page("Kennedy", fromIndex));
            }
            total_byte_count += fromIndex;
            Eurethrio.add(new Page("Kennedy", total_byte_count));
        }
        Set<DictPage> hs = new HashSet<DictPage>();
        hs.addAll(listOfWords);
        listOfWords.clear();
        listOfWords.addAll(hs);
        if (listOfWords.size() > 0) {
            Collections.sort(listOfWords, new Comparator<DictPage>() {
                @Override
                public int compare(final DictPage object1, final DictPage object2) {
                    return object1.getWord().compareTo(object2.getWord());
                }
            });
        }
        // Print the words...
        for (int i = 0; i < listOfWords.size(); i++) {
            System.out.println("" + listOfWords.get(i).getWord() + " " + listOfWords.get(i).getPage());
        }
        for (int i = 0; i < Eurethrio.size(); i++) {
            System.out.println("" + Eurethrio.get(i).getFile() + " " + Eurethrio.get(i).getBytes());
        }
    }
}

Use a TreeSet instead of the ArrayList, and you'll get ordering and no duplicates automatically.

In the first place, why are you using an ArrayList to store your list of words?
ArrayList<DictPage> listOfWords = new ArrayList<DictPage>();
You should use a Set (such as HashSet or TreeSet) to store your words if you don't want duplicates.
Set<DictPage> listOfWords = new HashSet<DictPage>(); // no duplicates, but not sorted
Or
Set<DictPage> listOfWords = new TreeSet<DictPage>(); // no duplicates and sorted as well
This would make sure that your collection of words does not contain any duplicates.
And if you want them sorted straight away, TreeSet makes that easier.
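For example, a rough sketch of the TreeSet approach (assuming DictPage is the question's class with a getWord() accessor and a (String, int) constructor; a Comparator is passed in because DictPage does not appear to implement Comparable; TreeSet and Comparator come from java.util):
// Sketch only: DictPage is assumed to be the class from the question.
Set<DictPage> listOfWords = new TreeSet<DictPage>(new Comparator<DictPage>() {
    @Override
    public int compare(DictPage o1, DictPage o2) {
        return o1.getWord().compareTo(o2.getWord());
    }
});
listOfWords.add(new DictPage("kennedy", 0));
listOfWords.add(new DictPage("kennedy", 1)); // ignored: "kennedy" is already present
// Iterating over listOfWords now yields the words in alphabetical order, without duplicates.
Note that for the HashSet variant to actually drop duplicates, DictPage would also need equals() and hashCode() implementations based on the word.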

Use this:
public void stripDuplicatesFromFile(String filename) {
    try {
        BufferedReader reader = new BufferedReader(new FileReader(filename));
        Set<String> lines = new HashSet<String>();
        String line;
        while ((line = reader.readLine()) != null) {
            lines.add(line);
        }
        reader.close();
        BufferedWriter writer = new BufferedWriter(new FileWriter(filename));
        for (String unique : lines) {
            writer.write(unique);
            writer.newLine();
        }
        writer.close();
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
}
It takes a file path as input, finds duplicate lines and removes them. But if you have a large file, do not use this. I'm using this method on a very small .txt file (a kind of log file where order is not important).

Related

Sorting .csv IDs in natural order in Java

I'm trying to write some code that will take in a list of IDs (numbers and letters) from a .csv file and output them to a new file with the IDs in "natural order". My files compile, but I am getting the error:
java.lang.NumberFormatException: For input string: "Alpha"
I think the issue is that I am not accounting for both number and letter values in the .csv file. What am I doing wrong? Sorry if my variable IDs are confusing...
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;

public class IdReader {
    public static String CSV_FILE_PATH = "/Users/eringray/Desktop/idreader/idData.csv";

    public static void main(String[] args) {
        try {
            BufferedReader br = new BufferedReader(new FileReader(CSV_FILE_PATH));
            BufferedWriter bw = new BufferedWriter(new FileWriter(CSV_FILE_PATH + ".tsv"));
            ArrayList<String> textIds = new ArrayList<>();
            ArrayList<Integer> numberIds = new ArrayList<>();
            String line = "";
            while ((line = br.readLine()) != null) {
                String[] values = line.split(" ");
                if (values.length == 1) {
                    String idAsString = values[0];
                    try {
                        int id = Integer.parseInt(idAsString);
                        numberIds.add(id);
                    } catch (NumberFormatException e) {
                        textIds.add(idAsString);
                    }
                }
            }
            Collections.sort(textIds);
            Collections.sort(numberIds);
            for (int i = 0; i < textIds.size(); i++) {
                String stu = textIds.get(i);
                String lineText = stu.toString();
                bw.write(lineText);
                bw.newLine();
            }
            for (int i = 0; i < numberIds.size(); i++) {
                int numValues = numberIds.get(i);
                bw.write(numValues);
                bw.newLine();
            }
            br.close();
            bw.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
The exception is coming from this line:
int id = Integer.parseInt(idAsString);
Clearly "Alpha" is not an integer, so it will throw a NumberFormatException. In cases where you encounter Strings that cannot be converted into numbers, you can either skip them or throw an exception.
Update
// Use two separate lists, one for maintaining numbers and the other for text
ArrayList<String> textIds = new ArrayList<>();
ArrayList<Integer> numberIds = new ArrayList<>();
String line = "";
while ((line = br.readLine()) != null) {
    String[] values = line.split(" ");
    if (values.length == 1) {
        String idAsString = values[0];
        try {
            // Parse the value. If successful, it means it was a number. Add it to the integer list.
            int id = Integer.parseInt(idAsString);
            numberIds.add(id);
        } catch (NumberFormatException e) {
            // If not successful, it means it was a string.
            textIds.add(idAsString);
        }
    }
}
// In the end, sort both lists
Collections.sort(textIds);
Collections.sort(numberIds);
for (int i = 0; i < textIds.size(); i++) {
    String stu = textIds.get(i);
    bw.write(stu);
    bw.newLine();
}
for (int i = 0; i < numberIds.size(); i++) {
    int numValues = numberIds.get(i);
    bw.write(numValues + "");
    bw.newLine();
}
br.close();
bw.close();
I am not including the code for writing this data to a new file; I hope you can do that.
Sample Input
4
6
33
2
5632
23454
Alpha
So after running my code:
numberIds will have [2, 4, 6, 33, 5632, 23454]
textIds will have ["Alpha"]
The NumberFormatException occurs because of alphanumeric values in the input.
Please use the isNumeric(str) method from the Apache Commons Lang StringUtils API (https://commons.apache.org/proper/commons-lang/javadocs/api-2.6/org/apache/commons/lang/StringUtils.html) to verify whether the input is numeric, and convert it to int only if it is.
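For example, a rough sketch of that check (assuming Commons Lang 2.x is on the classpath and reusing the variable names from the question's loop):
import org.apache.commons.lang.StringUtils;

// Inside the read loop from the question:
String idAsString = values[0];
if (!idAsString.isEmpty() && StringUtils.isNumeric(idAsString)) {
    // purely numeric, e.g. "5632" (isNumeric alone returns true for "", hence the extra check)
    numberIds.add(Integer.parseInt(idAsString));
} else {
    // contains letters, e.g. "Alpha"
    textIds.add(idAsString);
}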

Wrong value while getting the triple duplicate

I have developed Java code that takes a text file as input, selects the duplicate words, and writes them to a new text file. Now I need it to select words duplicated three times in a row, but I cannot get it right. Below is my Java code:
import java.util.*;
import java.io.*;

public class CheckDuplicate {
    public static void main(String[] args) throws Exception {
        FileReader file1 = new FileReader("/home/goutam/workspace/DuplicateWord/clean_2014.txt");
        BufferedReader reader1 = new BufferedReader(file1);
        File f = new File("Reduplication.txt");
        FileWriter fw = new FileWriter(f);
        String line = reader1.readLine();
        while (line != null) {
            String[] arr = line.split(" ");
            if (arr.length > 1) {
                for (int i = 0; i < arr.length; i++) {
                    if (i < arr.length - 1) {
                        int cmp = arr[i].compareTo(arr[i + 1]);
                        if (cmp == 0) {
                            fw.write(arr[i].toString());
                            fw.write("\n");
                        }
                    }
                }
            }
            line = reader1.readLine();
        }
        reader1.close();
        file1.close();
    }
}
Your code doesn't work because you're only considering adjacent elements.
Instead of nested loops, you can achieve what you want easily using a Map with the String as the key and an integer count as the value.
When you first encounter a string, you insert it with a value of 1.
When you see a string that's already in the map, you simply increment its value.
Then you can iterate over the entries and pick the keys whose count is at least what you want.
I highly recommend using the debugger; it helps you better understand the flow of your program.
This should do the job. Note: I did not compile or test it, but at least it should give you some direction.
public void findRepeatingWords(int atLeastNRepetitions) throws IOException {
    try (BufferedReader reader1 = new BufferedReader(new FileReader("/home/goutam/workspace/DuplicateWord/clean_2014.txt"))) {
        // There are libraries that can do this, but yeah... doing it old style here.
        // Note that AtomicInteger is used only as a convenience to save some lines of code,
        // not for atomic operations.
        Map<String, AtomicInteger> m = new LinkedHashMap<String, AtomicInteger>() {
            @Override
            public AtomicInteger get(Object key) {
                AtomicInteger cnt = super.get(key);
                if (cnt == null) {
                    cnt = new AtomicInteger(0);
                    super.put(key, cnt);
                }
                return cnt;
            }
        };
        String line = reader1.readLine();
        while (line != null) {
            // Note we use \\W here, which means non-word character (e.g. spaces, tabs, punctuation, ...)
            String[] arr = line.split("\\W");
            for (String word : arr) {
                m.get(word).incrementAndGet();
            }
            line = reader1.readLine();
        }
        writeRepeatedWords(atLeastNRepetitions, m);
    }
}

private void writeRepeatedWords(int atLeastNRepetitions, Map<String, AtomicInteger> m) throws IOException {
    File f = new File("Reduplication.txt");
    try (PrintWriter pw = new PrintWriter(new FileWriter(f))) {
        for (Map.Entry<String, AtomicInteger> entry : m.entrySet()) {
            if (entry.getValue().get() >= atLeastNRepetitions) {
                pw.println(entry.getKey());
            }
        }
    }
}
Here is what you are searching for. I have implemented it using a LinkedHashMap, and the code is generic: you can select not only double or triple occurrences, but any number of repetitions.
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;

public class A3 {
    public static void main(String[] args) throws IOException {
        BufferedReader reader1 = new BufferedReader(new java.io.FileReader("src/Source/A3_data"));
        PrintWriter duplicatewriter = new PrintWriter("src/Source/A3_out_double", "UTF-8");
        PrintWriter tripleduplicatewriter = new PrintWriter("src/Source/A3_out_tripple", "UTF-8");
        LinkedHashMap<String, Integer> map = new LinkedHashMap<>();
        String line = reader1.readLine();
        while (line != null) {
            String[] words = line.split(" ");
            int count = 0;
            while (count < words.length) {
                String key = words[count];
                Integer value = map.getOrDefault(key, 0) + 1;
                map.put(key, value);
                count++;
            }
            line = reader1.readLine();
        }
        for (Entry<String, Integer> entry : map.entrySet()) {
            if (entry.getValue() == 2)
                duplicatewriter.println(entry.getKey());
            if (entry.getValue() == 3)
                tripleduplicatewriter.println(entry.getKey());
        }
        duplicatewriter.close();
        tripleduplicatewriter.close();
    }
}
Since you want the items to appear 3 times in a row, I modified my code to achieve your goal:
public static void main(String[] args) throws Exception {
    FileReader file1 = new FileReader("/home/goutam/workspace/DuplicateWord/clean_2014.txt");
    BufferedReader reader1 = new BufferedReader(file1);
    File f = new File("Reduplication.txt");
    FileWriter fw = new FileWriter(f);
    String line = reader1.readLine();
    while (line != null) {
        String[] arr = line.split(" ");
        if (arr.length > 1) {
            for (int i = 0; i < arr.length; i++) {
                if (i < arr.length - 2) { // change from length-1 to length-2
                    int cmp = arr[i].compareTo(arr[i + 1]);
                    if (cmp == 0) {
                        if (arr[i + 1].equals(arr[i + 2])) { // keep comparing the next 2 items
                            System.out.println(arr[i].toString() + "\n");
                            fw.write(arr[i].toString());
                            fw.write("\n");
                        }
                    }
                }
            }
        }
        line = reader1.readLine();
    }
    reader1.close();
    file1.close();
}
Try this. This code prints a word if its count is at least 3; you can use any number.
public static void getStringTripple(String a) {
    String s[] = a.split(" ");
    List<String> asList = Arrays.asList(s);
    Set<String> mySet = new HashSet<String>(asList);
    for (String ss : mySet) {
        if (Collections.frequency(asList, ss) >= 3)
            System.out.println(ss + " " + Collections.frequency(asList, ss));
    }
}
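For example, a hypothetical call (the method works on a single whitespace-separated string and only prints its result):
// "the" occurs three times in this string, so it is printed together with its count.
getStringTripple("the cat saw the dog chase the cat");
// prints: the 3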

Read a file into Map<Integer, ArrayList<Double>>

I saw some similar questions, but mine is a little different.
I define a
Map<Integer, ArrayList<Double>> fl;
My input .txt file:
1 0.56 0.57 0.73 ..
2 2.3 3.50 ...
9 4.98 0.99 ..
How do I read the file into the map fl?
Thanks!
Use a Scanner: first call Scanner.nextInt(), which will give you the first integer.
Then call Scanner.nextLine(), which will give you all the remaining doubles in the line as a String. Split it and parse each token to a double.
Repeat the same till the end of the file.
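A rough sketch of that Scanner approach (the file name input.txt is just an assumption):
import java.io.File;
import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Scanner;

public class ScannerRead {
    public static void main(String[] args) throws FileNotFoundException {
        Map<Integer, ArrayList<Double>> fl = new HashMap<>();
        try (Scanner sc = new Scanner(new File("input.txt"))) {
            while (sc.hasNextInt()) {
                int key = sc.nextInt();             // first integer on the line
                String rest = sc.nextLine().trim(); // remainder of the line
                ArrayList<Double> values = new ArrayList<>();
                for (String token : rest.split("\\s+")) {
                    if (!token.isEmpty()) {
                        values.add(Double.valueOf(token));
                    }
                }
                fl.put(key, values);
            }
        }
        System.out.println(fl);
    }
}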
Here's a try.
I've compiled and run the code.
Make sure the input file is in the same directory as your project if you use an IDE -- this only applies if you do not modify the path below.
package fileread;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;

public class FileRead {
    private static HashMap<Integer, ArrayList<Double>> map = new HashMap<>();
    private static BufferedReader reader;

    public static void main(String[] args) {
        try {
            reader = new BufferedReader(new FileReader("input"));
            // or reader = new BufferedReader(new FileReader("C:\\full-path-to-your-file"));
            String line;
            while ((line = reader.readLine()) != null) {
                String[] tokens = line.split(" ");
                Integer i;
                ArrayList<Double> list = new ArrayList<>();
                i = Integer.valueOf(tokens[0]);
                for (int j = 1; j < tokens.length; j++)
                    list.add(Double.valueOf(tokens[j]));
                map.put(i, list);
            }
        } catch (IOException ex) {
            // break execution
        } finally {
            if (reader != null)
                try {
                    reader.close();
                } catch (IOException ex) {
                    // don't break :)
                }
        }
        for (Integer i : map.keySet()) {
            ArrayList<Double> l = map.get(i);
            System.out.print("Line " + i + ": ");
            for (Double d : l)
                System.out.print(d + " ");
            System.out.println();
        }
    }
}
The code for parsing the file and populating the map should look like this:
try {
    BufferedReader bReader = new BufferedReader(new FileReader(new File("c:/input.txt")));
    String line = "";
    Map<Integer, ArrayList<Double>> fl = new HashMap<Integer, ArrayList<Double>>();
    while ((line = bReader.readLine()) != null) {
        String[] strArray = line.split(" ");
        // the first token is the key, the remaining tokens are the values
        int key = Integer.valueOf(strArray[0]);
        ArrayList<Double> value = new ArrayList<Double>();
        for (int i = 1; i < strArray.length; i++) {
            value.add(Double.valueOf(strArray[i]));
        }
        fl.put(key, value);
    }
    bReader.close();
} catch (Exception e) {
    e.printStackTrace();
}

Java: file I/O

I have two text files with data in the following formats.
The data.txt file is in this format:
A 10
B 20
C 15
The data1.txt file is in the format (start node, end node, distance):
A B 5
A C 10
B C 20
I am trying to implement a search strategy. For that I need to load the data from data.txt and ONLY the start node and end node from data1.txt (i.e. I don't need the distance). I need to store this information in a stack, as I think it would be the best data structure for implementing greedy search.
Actually, I am not sure how to get started with file I/O to read these files and store the data in arrays to implement greedy search, so I would highly appreciate any starting ideas on how to proceed.
I am new to this, so please bear with me. Any help is much appreciated. Thanks.
EDIT:
Here is what I have so far:
String heuristic_file = "data.txt";
try {
    FileReader inputHeuristic = new FileReader(heuristic_file);
    BufferedReader bufferReader = new BufferedReader(inputHeuristic);
    String line;
    while ((line = bufferReader.readLine()) != null) {
        System.out.println(line);
    }
    bufferReader.close();
} catch (Exception e) {
    System.out.println("Error reading file " + e.getMessage());
}
My approach doesn't differ fundamentally from the others. Please note the try/catch/finally blocks: always put the closing statements into the finally block, so the opened file is guaranteed to be closed even if an exception is thrown while reading the file.
The part between the two //[...] markers could surely be done more efficiently. Maybe read the whole file in one go and then parse the text backwards, searching for a line break? Maybe a stream API supports setting the reading position; I honestly don't know, as I haven't needed that so far.
I chose the verbose initialization of the BufferedReader because it lets you specify the expected encoding of the file. In your case it doesn't matter, since your files do not contain symbols outside the standard ASCII range, but I believe it's a semi-best-practice.
Before you ask: r.close() takes care of closing the underlying InputStreamReader and FileInputStream in the right order, until all readers and streams are closed.
public static void readDataFile(String dir, String file1, String file2)
    throws IOException
{
    File datafile1 = new File(dir, file1);
    File datafile2 = new File(dir, file2);
    if (datafile1.exists())
    {
        BufferedReader r = null;
        try
        {
            r = new BufferedReader(
                new InputStreamReader(
                    new FileInputStream(datafile1),
                    "UTF-8"
                )
            );
            String row;
            Stack<Object[]> s = new Stack<Object[]>();
            String[] pair;
            Integer datapoint;
            while((row = r.readLine()) != null)
            {
                if (row != null && row.trim().length() > 0)
                {
                    // You could use " " instead of "\\s",
                    // but the latter regular-expression
                    // shorthand character class will
                    // split the row on tab symbols, too
                    pair = row.split("\\s");
                    if (pair != null && pair.length == 2)
                    {
                        datapoint = null;
                        try
                        {
                            datapoint = Integer.parseInt(pair[1], 10);
                        }
                        catch(NumberFormatException f) { }
                        // Later you can validate data pairs
                        // by using
                        // if (s.pop()[1] != null)
                        s.add(new Object[] { pair[0], datapoint });
                    }
                }
            }
        }
        catch (UnsupportedEncodingException e1) { }
        catch (FileNotFoundException e2) { }
        catch (IOException e3) { }
        finally
        {
            if (r != null) r.close();
        }
    }
    // Do something similar with datafile2
    if (datafile2.exists())
    {
        // [...do the same as in the first try/catch block...]
        String firstrow = null, lastrow = null;
        String row = null;
        int i = 0;
        do
        {
            lastrow = row;
            row = r.readLine();
            if (i == 0)
                firstrow = row;
            i++;
        } while(row != null);
        // [...parse firstrow and lastrow into a datastructure...]
    }
}
Use split:
while ((line = bufferReader.readLine()) != null) {
    String[] tokens = line.split(" ");
    System.out.println(line + " -> [" + tokens[0] + "]" + "[" + tokens[1] + "][" + tokens[2] + "]");
}
If you must have this in an array, you can use the following:
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;

public class NodeTest {
    public static void main(String[] args) throws ParseException {
        try {
            File first = new File("data.txt");
            File second = new File("data1.txt");
            Node[] nodes1 = getNodes(first);
            Node[] nodes2 = getNodes(second);
            print(nodes1);
            print(nodes2);
        } catch (Exception e) {
            System.out.println("Error reading file " + e.getMessage());
        }
    }

    public static final void print(Node[] nodes) {
        System.out.println("======================");
        for (Node node : nodes) {
            System.out.println(node);
        }
        System.out.println("======================");
    }

    public static final Node[] getNodes(File file) throws IOException {
        FileReader inputHeuristic = new FileReader(file);
        BufferedReader bufferReader = new BufferedReader(inputHeuristic);
        String line;
        List<Node> list = new ArrayList<Node>();
        while ((line = bufferReader.readLine()) != null) {
            String[] tokens = line.split(" ");
            list.add(new Node(tokens[0], tokens[1]));
        }
        bufferReader.close();
        return list.toArray(new Node[list.size()]);
    }
}

class Node {
    String start;
    String end;

    public Node(String start, String end) {
        this.start = start;
        this.end = end;
    }

    public String toString() {
        return "[" + start + "][" + end + "]";
    }
}
Something like this?
HashSet<String> nodes = new HashSet<String>();
try (BufferedReader br = new BufferedReader(new FileReader("data.txt"))) {
    String line = br.readLine();
    while (line != null) {
        String[] l = line.split(" ");
        nodes.add(l[0]);
        line = br.readLine();
    }
}
try (BufferedReader br = new BufferedReader(new FileReader("data1.txt"))) {
    String line = br.readLine();
    while (line != null) {
        String[] l = line.split(" ");
        if (nodes.contains(l[0]) || nodes.contains(l[1])) {
            // Do whatever you want ...
        }
        line = br.readLine();
    }
}

Checking the elements of a set for multiple integers

I want to change the form of a given graph. The graph is in the form: userID, number of followers, follower1, follower2, ..., followerN, delimiter '---', userID2, etc.
I have to replace the followers with place values from a second file of the form
ID1 place1
ID2 place2
....
by matching the IDs.
Thus, I want to check each time whether a follower's ID exists in a set.
Both my graph and the set in which I look up the follower IDs are huge.
Is there a more efficient way than the one I give below?
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.FileWriter;
import java.io.InputStreamReader;
import java.util.*;

public class Changer {
    public static void main(String[] args) {
        Set set = new HashSet();
        int[][] users = new int[61578415][];
        try {
            FileInputStream fstream2 = new FileInputStream(args[0]);
            DataInputStream in2 = new DataInputStream(fstream2);
            BufferedReader br2 = new BufferedReader(new InputStreamReader(in2));
            String strLine2;
            while ((strLine2 = br2.readLine()) != null) {
                set.add(strLine2);
            }
            in2.close();
            fstream2.close();
        } catch (Exception e) {
            System.err.println("Error: " + e.getMessage() + "!\n" + e.toString() + "!\n");
            e.printStackTrace();
            System.exit(-1);
        }
        try {
            FileInputStream fstream = new FileInputStream("InputGraph.txt");
            DataInputStream in = new DataInputStream(fstream);
            BufferedReader br = new BufferedReader(new InputStreamReader(in));
            String strLine;
            int flag = 0;
            int pos = 0;
            FileWriter fstream3 = new FileWriter("OutputGraph.txt");
            BufferedWriter out = new BufferedWriter(fstream3);
            int currentUser = 0, counter = 0;
            int theNum = 0;
            while ((strLine = br.readLine()) != null) {
                if (strLine.equals("---")) {
                    if (counter != pos) {
                        System.out.println("Error reading graph");
                        System.out.println("For:" + currentUser);
                        System.exit(-1);
                    }
                    flag = 0;
                    pos = 0;
                    continue;
                }
                theNum = Integer.parseInt(strLine);
                if (flag == 0) {
                    out.write("---" + "\n");
                    out.write("" + theNum);
                    out.write("\n");
                    currentUser = theNum;
                    flag += 1;
                } else if (flag == 1) {
                    counter = theNum;
                    users[currentUser] = new int[counter];
                    flag += 1;
                    out.write("" + theNum + "\n");
                } else {
                    users[currentUser][pos] = theNum;
                    ++pos;
                    Iterator it = set.iterator();
                    while (it.hasNext()) {
                        Object element = it.next();
                        String[] arr = (String.valueOf(element)).split(" ");
                        if (Integer.parseInt(arr[0]) == theNum) {
                            theNum = Integer.parseInt(arr[1]);
                            break;
                        }
                    }
                    out.write("" + theNum);
                    out.write("\n");
                }
            }
            in.close();
            out.close();
        } catch (Exception e) {
            System.err.println("Error: " + e.getMessage());
        }
        System.out.println("Graph has been read");
        System.gc();
        System.gc();
        System.out.println("Finished");
    }
}
It would be more efficient to do the for loop over intersection inside, so that you don't split and parse so much:
Iterator it = set.iterator();
while (it.hasNext()) {
    Object element = it.next();
    String[] arr = (String.valueOf(element)).split(" ");
    int arr0 = Integer.parseInt(arr[0]);
    int arr1 = Integer.parseInt(arr[1]);
    for (int integer : intersection) {
        if (arr0 == integer) {
            out.write(integer + " " + arr1 + "\n");
        }
    }
}
But this changes the order in which write will be called.
However, I suspect you might benefit from loading the set into (or just replacing it with) a HashMap or SparseArray. It's just hard to tell given the info you have provided.
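For example, a rough sketch of the HashMap idea, assuming each line of the lookup file is "ID place" as described in the question (the file name places.txt is illustrative):
// Sketch: load the "ID place" file into a map once, then each follower lookup
// is O(1) instead of a scan over the whole set.
Map<Integer, Integer> idToPlace = new HashMap<Integer, Integer>();
BufferedReader lookup = new BufferedReader(new FileReader("places.txt"));
String row;
while ((row = lookup.readLine()) != null) {
    String[] arr = row.split(" ");
    idToPlace.put(Integer.parseInt(arr[0]), Integer.parseInt(arr[1]));
}
lookup.close();

// Later, inside the graph-rewriting loop, instead of iterating over the set:
Integer place = idToPlace.get(theNum);
if (place != null) {
    theNum = place;
}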
For Integer detection, you can use a comparison with instanceof, but you must use objects and not primitives, e.g.:
Integer a = Integer.parseInt("12345");
if (a instanceof Integer) {
    System.out.println("We have an integer!");
}
Another way for Integer detection is to override the equals method.
