Create Lucene Index in database using JdbcDirectory - java

I have a problem with MySQL and PostgreSQL 9.2. This is the error (the French PostgreSQL message translates to "Large Objects should not be used in auto-commit mode"):
org.apache.lucene.store.jdbc.JdbcStoreException: Failed to execute sql [insert into LUCENE_INDEX_TABLE (name_, value_, size_, lf_, deleted_) values ( ?, ?, ?, current_timestamp, ? )]; nested exception is org.postgresql.util.PSQLException: Les Large Objects ne devraient pas être utilisés en mode auto-commit.
org.postgresql.util.PSQLException: Les Large Objects ne devraient pas être utilisés en mode auto-commit.
at org.postgresql.largeobject.LargeObjectManager.createLO(LargeObjectManager.java:239)
at org.postgresql.largeobject.LargeObjectManager.createLO(LargeObjectManager.java:226)
at org.postgresql.jdbc2.AbstractJdbc2Statement.setBlob(AbstractJdbc2Statement.java:3048)
at org.apache.lucene.store.jdbc.index.AbstractJdbcIndexOutput$1.fillPrepareStatement(AbstractJdbcIndexOutput.java:55)
at org.apache.lucene.store.jdbc.support.JdbcTemplate.executeUpdate(JdbcTemplate.java:174)
at org.apache.lucene.store.jdbc.index.AbstractJdbcIndexOutput.close(AbstractJdbcIndexOutput.java:47)
at org.apache.lucene.store.jdbc.index.RAMAndFileJdbcIndexOutput.close(RAMAndFileJdbcIndexOutput.java:81)
at org.apache.lucene.util.IOUtils.close(IOUtils.java:141)
at org.apache.lucene.index.FieldsWriter.close(FieldsWriter.java:139)
at org.apache.lucene.index.StoredFieldsWriter.flush(StoredFieldsWriter.java:55)
at org.apache.lucene.index.DocFieldProcessor.flush(DocFieldProcessor.java:59)
at org.apache.lucene.index.DocumentsWriter.flush(DocumentsWriter.java:581)
at org.apache.lucene.index.IndexWriter.doFlush(IndexWriter.java:3587)
at org.apache.lucene.index.IndexWriter.prepareCommit(IndexWriter.java:3376)
at org.apache.lucene.index.IndexWriter.commitInternal(IndexWriter.java:3485)
at org.apache.lucene.index.IndexWriter.commit(IndexWriter.java:3467)
at org.apache.lucene.index.IndexWriter.commit(IndexWriter.java:3451)
at test.lucene.chaima.JDBCIndexer.addIndex(JDBCIndexer.java:137)
at test.lucene.chaima.JDBCIndexer.index(JDBCIndexer.java:92)
at test.lucene.chaima.JDBCIndexer.createAndBuildIndex(JDBCIndexer.java:78)
at test.lucene.chaima.JDBCIndexer.buildIndex(JDBCIndexer.java:69)
at test.lucene.chaima.JDBCIndexer.main(JDBCIndexer.java:172)
org.apache.lucene.store.jdbc.JdbcStoreException: Failed to execute sql [insert into LUCENE_INDEX_TABLE (name_, value_, size_, lf_, deleted_) values ( ?, ?, ?, current_timestamp, ? )]; nested exception is org.postgresql.util.PSQLException: Les Large Objects ne devraient pas être utilisés en mode auto-commit.
org.postgresql.util.PSQLException: Les Large Objects ne devraient pas être utilisés en mode auto-commit.
at org.postgresql.largeobject.LargeObjectManager.createLO(LargeObjectManager.java:239)
at org.postgresql.largeobject.LargeObjectManager.createLO(LargeObjectManager.java:226)
at org.postgresql.jdbc2.AbstractJdbc2Statement.setBlob(AbstractJdbc2Statement.java:3048)
at org.apache.lucene.store.jdbc.index.AbstractJdbcIndexOutput$1.fillPrepareStatement(AbstractJdbcIndexOutput.java:55)
at org.apache.lucene.store.jdbc.support.JdbcTemplate.executeUpdate(JdbcTemplate.java:174)
at org.apache.lucene.store.jdbc.index.AbstractJdbcIndexOutput.close(AbstractJdbcIndexOutput.java:47)
at org.apache.lucene.store.jdbc.index.RAMAndFileJdbcIndexOutput.close(RAMAndFileJdbcIndexOutput.java:81)
at org.apache.lucene.store.ChecksumIndexOutput.close(ChecksumIndexOutput.java:61)
at org.apache.lucene.index.SegmentInfos.finishCommit(SegmentInfos.java:863)
at org.apache.lucene.index.IndexWriter.finishCommit(IndexWriter.java:3501)
at org.apache.lucene.index.IndexWriter.commitInternal(IndexWriter.java:3490)
at org.apache.lucene.index.IndexWriter.closeInternal(IndexWriter.java:1873)
at org.apache.lucene.index.IndexWriter.close(IndexWriter.java:1812)
at org.apache.lucene.index.IndexWriter.close(IndexWriter.java:1776)
at test.lucene.chaima.JDBCIndexer.index(JDBCIndexer.java:102)
at test.lucene.chaima.JDBCIndexer.createAndBuildIndex(JDBCIndexer.java:78)
at test.lucene.chaima.JDBCIndexer.buildIndex(JDBCIndexer.java:69)
at test.lucene.chaima.JDBCIndexer.main(JDBCIndexer.java:172)
org.apache.lucene.index.IndexNotFoundException: no segments* file found in test.lucene.chaima.MyJDBCDirectory#9506dc4 lockFactory=null: files: [write.lock]
at org.apache.lucene.index.SegmentInfos$FindSegmentsFile.run(SegmentInfos.java:667)
at org.apache.lucene.index.DirectoryReader.open(DirectoryReader.java:72)
at org.apache.lucene.index.IndexReader.open(IndexReader.java:256)
at test.lucene.chaima.JDBCSearcher.search(JDBCSearcher.java:56)
at test.lucene.chaima.JDBCIndexer.buildIndex(JDBCIndexer.java:70)
at test.lucene.chaima.JDBCIndexer.main(JDBCIndexer.java:172)
I have found many solutions, but none of them solved my problem.
Please, if anyone can help me, I need a solution.
Thanks.
Here is the source code of my application. I have three classes:
MyJDBCDirectory.java
package test.lucene.chaima;
import java.io.IOException;
import javax.sql.DataSource;
import org.apache.lucene.store.jdbc.JdbcDirectory;
import org.apache.lucene.store.jdbc.JdbcDirectorySettings;
import org.apache.lucene.store.jdbc.JdbcStoreException;
import org.apache.lucene.store.jdbc.dialect.Dialect;
import org.apache.lucene.store.jdbc.support.JdbcTable;
/**
* The Class MyJDBCDirectory.
*
* @author prabhat.jha
*/
public class MyJDBCDirectory extends JdbcDirectory {
/**
* Instantiates a new my jdbc directory.
*
* @param dataSource
* the data source
* @param dialect
* the dialect
* @param settings
* the settings
* @param tableName
* the table name
*/
public MyJDBCDirectory(DataSource dataSource, Dialect dialect, JdbcDirectorySettings settings, String tableName) {
super(dataSource, dialect, settings, tableName);
}
/**
* Instantiates a new my jdbc directory.
*
* @param dataSource the data source
* @param dialect the dialect
* @param tableName the table name
*/
public MyJDBCDirectory(DataSource dataSource, Dialect dialect, String tableName) {
super(dataSource, dialect, tableName);
}
/**
* Instantiates a new my jdbc directory.
*
* @param dataSource the data source
* @param settings the settings
* @param tableName the table name
* @throws JdbcStoreException the jdbc store exception
*/
public MyJDBCDirectory(DataSource dataSource, JdbcDirectorySettings settings, String tableName) throws JdbcStoreException {
super(dataSource, settings, tableName);
}
/**
* Instantiates a new my jdbc directory.
*
* @param dataSource the data source
* @param table the table
*/
public MyJDBCDirectory(DataSource dataSource, JdbcTable table) {
super(dataSource, table);
}
/**
* Instantiates a new my jdbc directory.
*
* @param dataSource the data source
* @param tableName the table name
* @throws JdbcStoreException the jdbc store exception
*/
public MyJDBCDirectory(DataSource dataSource, String tableName) throws JdbcStoreException {
super(dataSource, tableName);
}
/**
* (non-Javadoc).
*
* @return the string[]
* @throws IOException Signals that an I/O exception has occurred.
* @see org.apache.lucene.store.Directory#listAll()
*/
@Override
public String[] listAll() throws IOException {
return super.list();
}
}
JDBCDatabaseUtil.java
package test.lucene.chaima;
import java.sql.Connection;
import java.sql.SQLException;
import javax.sql.DataSource;
import org.postgresql.ds.PGPoolingDataSource;
import org.postgresql.ds.PGSimpleDataSource;
//import com.mysql.jdbc.jdbc2.optional.MysqlDataSource;
/**
* The Class JDBCDatabaseUtil.
* @author prabhat.jha
*/
public class JDBCDatabaseUtil {
/**
* Gets the data source.
*
* @return the data source
*/
public static DataSource getDataSource() {
PGSimpleDataSource dataSource = new PGSimpleDataSource();
dataSource.setUser("postgres");
dataSource.setPassword("root");
dataSource.setDatabaseName("postgres");
/*MysqlDataSource dataSource = new MysqlDataSource();
dataSource.setUser("root");
dataSource.setPassword("root");
dataSource.setEmulateLocators(true);
dataSource.setUrl("jdbc:mysql://localhost:3306/lucene?emulateLocators=true&useUnicode=true&characterEncoding=UTF-8&useFastDateParsing=false");
*/
return dataSource;
}
/**
* Gets the connection.
*
* @return the connection
* @throws SQLException the SQL exception
*/
public static Connection getConnection() throws SQLException {
//getDataSource().getConnection().setAutoCommit(false);
return getDataSource().getConnection();
}
}
JDBCIndexer.java
package test.lucene.chaima;
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.jdbc.JdbcDirectory;
import org.apache.lucene.store.jdbc.dialect.PostgreSQLDialect;
import org.apache.lucene.util.Version;
/**
* The Class JDBCIndexer.
*
* @author chaima
*/
public class JDBCIndexer {
/** The jdbc directory. */
private Directory jdbcDirectory = null;
/**
* Instantiates a new jDBC indexer.
*
* @param jdbcDirectory
* the jdbc directory
*/
public JDBCIndexer(Directory jdbcDirectory) {
super();
this.jdbcDirectory = jdbcDirectory;
}
/**
* Gets the jdbc directory.
*
* @return the jdbc directory
*/
public Directory getJdbcDirectory() {
if (jdbcDirectory == null) {
throw new IllegalStateException("Index not yet build, rerun indexing");
}
return jdbcDirectory;
}
/**
* Sets the jdbc directory.
*
* @param jdbcDirectory
* the new jdbc directory
*/
public void setJdbcDirectory(Directory jdbcDirectory) {
this.jdbcDirectory = jdbcDirectory;
}
/**
* Builds the index.
*/
public void buildIndex() {
createAndBuildIndex();
}
/**
* Creates the and build index.
*/
private void createAndBuildIndex() {
createIndexTable();
index();
}
/**
* Index.
*/
private void index() {
Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_36);
IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Version.LUCENE_36, analyzer);
IndexWriter indexWriter = null;
try {
indexWriter = new IndexWriter(getJdbcDirectory(), analyzer,true, IndexWriter.MaxFieldLength.UNLIMITED);
Boolean locked=indexWriter.isLocked(jdbcDirectory);
addIndex(indexWriter);
} catch (CorruptIndexException e) {
e.printStackTrace();
} catch (LockObtainFailedException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
if (indexWriter != null) {
try {
indexWriter.close();
} catch (CorruptIndexException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
indexWriter = null;
}
}
}
}
/**
* Adds the documents read from the users table to the index.
*
* @param indexWriter
* the index writer
*/
private void addIndex(IndexWriter indexWriter) throws CorruptIndexException, IOException {
try {
Connection connection = JDBCDatabaseUtil.getConnection();
connection.setAutoCommit(false);
String query = "SELECT id, name, lastname FROM users";
PreparedStatement pstmt = connection.prepareStatement(query);
ResultSet resultSet = pstmt.executeQuery();
while (resultSet.next()) {
Document document = new Document();
document.add(new Field("id", String.valueOf(resultSet.getInt(1)), Field.Store.YES, Field.Index.ANALYZED));
document.add(new Field("name", String.valueOf(resultSet.getString(2)), Field.Store.YES, Field.Index.ANALYZED));
document.add(new Field("lastname", String.valueOf(resultSet.getString(3)), Field.Store.YES, Field.Index.ANALYZED));
indexWriter.addDocument(document);
indexWriter.commit();
}
indexWriter.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
/**
* Creates the index table.
*/
private void createIndexTable() {
if (this.jdbcDirectory == null) {
setJdbcDirectory(new MyJDBCDirectory(JDBCDatabaseUtil.getDataSource(), new PostgreSQLDialect(), "LUCENE_INDEX_TABLE"));
}
try {
/**
* No need to manually create index table, create method will
* automatically create it.
*/
boolean existe= ((JdbcDirectory) getJdbcDirectory()).tableExists();
if(existe)
System.out.println("table existe");
else{
System.out.println("table non existe");
((JdbcDirectory) getJdbcDirectory()).create();
}
} catch (IOException e) {
e.printStackTrace();
}
}
public static void main(String[] args) {
new JDBCIndexer(null).buildIndex();
}
}
And the users table:
CREATE TABLE users
(
id integer NOT NULL,
name character(20),
lastname character(20),
CONSTRAINT pk_id PRIMARY KEY (id)
)
WITH (
OIDS=FALSE
);
ALTER TABLE users
OWNER TO postgres;
And the jars:
commons-logging-1.0.4.jar
compass-2.2.0.jar
lucene-core-3.6.1.jar
postgresql-9.2-1002.jdbc4.jar

I have created my own JdbcDirectory implementation, though it relies on Java EE 6 to take advantage of the @Singleton annotation. The code is too involved to paste into a Stack Overflow post, and it still has a few limitations. The key one is that you cannot do multiple operations in a single transaction using multiple threads, because of the database locking semantics.
https://github.com/trajano/doxdb/tree/jdbc-directory-example/doxdb-ejb/src/main/java/net/trajano/doxdb/search/lucene
Looking at your implementation, it seems you are also keeping the deleted "files", probably because that causes less fragmentation in the database store, whereas mine removes the record itself.
I have tagged a version that I am working with, which seems stable enough for my test loads. Feel free to make comments or suggestions on it.

Compass is defunct, and I believe the last version of Lucene it explicitly supports is 2.4.1.
I'd recommend you either upgrade to Elasticsearch or downgrade Lucene.
You could also take a look at the blog post "Create Lucene Index in database using JdbcDirectory", which attempts to create a version of JdbcDirectory compatible with Lucene 3.6. It would likely be much better to avoid JdbcDirectory altogether.
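If you do stay on Compass's JdbcDirectory for now, the PSQLException itself points at the fix: PostgreSQL's Large Object API (used for the BLOB column) refuses to run on a connection in auto-commit mode. Compass ships a DataSource wrapper for exactly this case. Below is an untested sketch of the idea, assuming the TransactionAwareDataSourceProxy class that compass-2.2.0.jar provides in org.apache.lucene.store.jdbc.datasource; verify the class and package names against your jar before relying on it:

import javax.sql.DataSource;
import org.apache.lucene.store.jdbc.datasource.TransactionAwareDataSourceProxy;
import org.postgresql.ds.PGSimpleDataSource;

public class JDBCDatabaseUtil {

    public static DataSource getDataSource() {
        PGSimpleDataSource target = new PGSimpleDataSource();
        target.setUser("postgres");
        target.setPassword("root");
        target.setDatabaseName("postgres");
        // Wrap the real DataSource so JdbcDirectory receives connections
        // with auto-commit disabled and commits them itself, which is what
        // PostgreSQL Large Objects require.
        return new TransactionAwareDataSourceProxy(target);
    }
}

Note that the commented-out line getDataSource().getConnection().setAutoCommit(false); in the original JDBCDatabaseUtil would not help even if re-enabled: it disables auto-commit on a throwaway connection and then returns a brand-new connection to the caller.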

Related

How can I execute a native query to the database from any class in a Java SpringBoot project?

I'm trying to drop a table when deleting a record in the database, but it gives me the following error:
Error logging in: Request processing failed; nested exception is javax.persistence.TransactionRequiredException: Executing an update/delete query
I have read a couple of articles and even some questions on Stack Overflow, including this one ("Question about the error"), but none of the answers are working. The one I thought might help the most was adding the @Transactional annotation, which I put over the method executeDropTable(), but it gives me the same error. This is my code:
package com.ssc.test.cb3.service;
import com.ssc.test.cb3.dto.ReportRequestDTO;
import com.ssc.test.cb3.dto.mapper.ReportRequestMapper;
import com.ssc.test.cb3.repository.ReportRequestRepository;
import java.io.File;
import java.util.List;
import org.springframework.stereotype.Service;
import com.ssc.test.cb3.model.ReportRequest;
import com.ssc.test.cb3.repository.ReportTableRepository;
import java.util.Map;
import javax.persistence.EntityManager;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.transaction.annotation.Transactional;
/**
* Class to prepare the services to be dispatched to the database upon request.
*
* @author ssc
*/
@Service
@RequiredArgsConstructor
@Slf4j
public class ReportRequestService {
private final ReportRequestRepository reportRequestRepository;
private final EntityManager entityManager;
private final ReportTableRepository reportTableRepository;
private static String SERVER_LOCATION = "D:\\JavaProjectsNetBeans\\sscb3Test\\src\\main\\resources\\";
/**
* Function to delete a report from the database
*
* @param id from the report request object, identifying the specific
* report
*/
public void delete(int id) {
ReportRequest reportRequest = reportRequestRepository.findById(id).orElse(null);
if (reportRequest == null || reportRequest.getStatus() == 1) {
log.error("It was not possible to delete the selected report as it hasn't been processed yet or it was not found");
} else {
String fileName = reportRequest.getFileName();
reportRequestRepository.deleteById(id);
log.info("The report request {} was successfully deleted", id);
new File(SERVER_LOCATION + reportRequest.getFileName()).delete(); // Delete file
log.info("The file {} was successfully deleted from the server", fileName);
// DROP created tables with file name without extension
executeDropTable(fileName);
log.info("The table {} was successfully deleted from the data base", fileName);
}
}
/**
* Service to drop report-request tables created in the database when a
* report request is generated and served for download. This method
* is called when a user deletes, in the front-end, a report request in
* finished status.
*
* @param tableName the name of the table that was created in the
* database
*/
@Transactional
public void executeDropTable(String tableName) {
int substract = 4;
tableName = tableName.substring(0, tableName.length() - substract);
System.out.println("Table name: " + tableName);
String query = "DROP TABLE :tableName"; // IF EXISTS
entityManager.createNativeQuery(query)
.setParameter("tableName", tableName)
.executeUpdate();
}
}
Can anyone please help me to sort this out?
A native query literally means "execute this SQL statement on the database", but you are trying to use JPQL-style named-parameter binding for a table name, and identifiers cannot be bound as parameters.
Your SQL string is invalid; try:
String query = "DROP TABLE " + tableName;
entityManager.createNativeQuery(query).executeUpdate();
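For illustration, here is a hedged sketch of the corrected method. The substring(0, length - 4) is kept from the original code on the assumption that it strips a dot-plus-three-letter file extension, and the character whitelist is an addition, since a concatenated identifier is an SQL injection risk:

@Transactional
public void executeDropTable(String tableName) {
    // Strip the file extension, as the original code does (assumes a ".xls"-style suffix).
    String name = tableName.substring(0, tableName.length() - 4);
    // Identifiers cannot be bound as JDBC parameters, so validate before concatenating.
    if (!name.matches("[A-Za-z0-9_]+")) {
        throw new IllegalArgumentException("Unsafe table name: " + name);
    }
    entityManager.createNativeQuery("DROP TABLE " + name).executeUpdate();
}

Also keep in mind that @Transactional only takes effect when the method is invoked through the Spring proxy; calling executeDropTable() from delete() inside the same class bypasses the proxy and is another common cause of TransactionRequiredException.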

My code won't print out the data within my database

/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package ai_assignment;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.text.ParseException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
*
* @author ADAM KENYON
*/
public class AI_Assignment {
String connectString = "D:\\Users\\ADAM KENYON\\Documents\\NetBeansProjects\\AI_Assignment\\Database\\AI_assignment";
private static Object ex;
/**
* @param args the command line arguments
* @throws java.text.ParseException
*/
public static void main(String[] args) throws ParseException {
// TODO code application logic here
AI_Assignment assignment = new AI_Assignment();
assignment.database();
}
public void database() throws ParseException {
try {
Connection con;
Class.forName("org.hsqldb.jdbcDriver");
con = DriverManager.getConnection("jdbc:hsqldb:file:" + connectString, // filenames
"", // username
""); // password
try (Statement statement = con.createStatement()) {
ResultSet rs = statement.executeQuery("SELECT * FROM TEST");
while (rs.next()) {
int ID = rs.getInt("ID");
System.out.print(ID);
}
statement.close();
con.close();
}
} catch (SQLException | ClassNotFoundException SQLException) {
Logger.getLogger(AI_Assignment.class.getName()).log(Level.SEVERE, null, SQLException);
}
}
}
So far this is what I have, and it all compiles, but none of the data gets printed to the output. Any help?
I was following an online tutorial and I guess I did something wrong, but I don't know what. My guess is that the connectString variable is wrong; I'm going to mess with that while I wait for responses.
EDIT: The code is now up to date, and this is the error that gets thrown:
"Nov 25, 2015 5:35:13 AM ai_assignment.AI_Assignment database
SEVERE: null
java.lang.ClassNotFoundException: jdbcDriver"
Based on the most recent exception listed in the question, you are having problems loading the JDBC driver. Checking HSQLDB's documentation, you should replace Class.forName("org.hsqldb.jdbcDriver"); with Class.forName("org.hsqldb.jdbc.JDBCDriver");
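As a side note, hsqldb.jar is a JDBC 4 driver, so it registers itself and the Class.forName call can usually be dropped entirely. A minimal sketch (note that HSQLDB's default account is usually "SA" with an empty password, which may also be worth checking here):

// JDBC 4 drivers self-register via META-INF/services, so no Class.forName is needed:
Connection con = DriverManager.getConnection(
        "jdbc:hsqldb:file:" + connectString, "SA", "");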

JavaEE + socialauth library, what to store after login

I am using the socialauth library following this tutorial:
https://github.com/3pillarlabs/socialauth/wiki/Getting-Started-with-implementing-SocialAuth
Everything works fine; I just do not understand where/what to store after the end of step 3. I mean, I do not want to force the user to log in on every click. I tried to figure this out from the examples, but I could not.
Here is what I have:
@WebServlet("/success")
public class AfterOAuth extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
try {
// get the auth provider manager from session
SocialAuthManager manager = (SocialAuthManager) req.getSession().getAttribute("authManager");
// call connect method of manager which returns the provider object.
// Pass request parameter map while calling connect method.
Map<String, String> paramsMap = SocialAuthUtil.getRequestParametersMap(req);
AuthProvider provider = manager.connect(paramsMap);
// get profile
Profile p = provider.getUserProfile();
// you can obtain profile information
resp.getOutputStream().print(p.getFirstName());
// OK, everything is fine; now, what should I store in my Session?
} catch (Exception e) {
throw new ServletException(e);
}
}
}
OK, I found a solution by using the provided CDI class: I simply rewrote the init() and servlet sections like so:
package com.test.oauth;
import java.io.Serializable;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.List;
import java.util.Properties;
import javax.enterprise.context.SessionScoped;
import javax.faces.context.ExternalContext;
import javax.faces.context.FacesContext;
import javax.inject.Named;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import config.KicEngineRootRessourceLoader;
import org.apache.log4j.Logger;
import org.brickred.socialauth.AuthProvider;
import org.brickred.socialauth.AuthProviderFactory;
import org.brickred.socialauth.Contact;
import org.brickred.socialauth.Profile;
import org.brickred.socialauth.SocialAuthConfig;
import org.brickred.socialauth.SocialAuthManager;
import org.brickred.socialauth.util.SocialAuthUtil;
/**
* Created by kic on 19.02.15.
*/
@Named("socialauth")
@SessionScoped
public class SocialAuth implements Serializable {
/**
* Serial version UID generated by Eclipse
*/
private static final long serialVersionUID = 1789108831048043099L;
private static final Logger log = Logger.getLogger( SocialAuth.class);
private String id;
private Profile profile;
private AuthProvider provider;
private String status;
private String viewUrl;
private SocialAuthManager manager;
private SocialAuthConfig config;
public void init() {
id = null;
provider = null;
config = new SocialAuthConfig().getDefault();
try {
Properties oauth = new Properties();
KicEngineRootRessourceLoader.loadProperties(oauth, "oauth_consumer");
config.load(oauth);
manager = new SocialAuthManager();
manager.setSocialAuthConfig(config);
} catch (Exception e) {
e.printStackTrace();
}
}
public SocialAuth() {
init();
}
public String getId() {
return id;
}
/**
* Sets the authentication provider. It is mandatory to do this before
* calling login
*
* @param id
* Can either have values facebook, foursquare, google, hotmail,
* linkedin, myspace, twitter, yahoo OR an OpenID URL
*/
public void setId(final String id) {
this.id = id;
}
/**
* Sets the view URL to which the user will be redirected after
* authentication
*
* @param viewUrl
* Relative URL of the view, for example "/openid.xhtml"
*/
public void setViewUrl(final String viewUrl) {
this.viewUrl = viewUrl;
}
/**
* Gets the relative URL of the view to which user will be redirected after
* authentication
*
* @return relative URL of the view
*/
public String getViewUrl() {
return viewUrl;
}
/**
* This is the most important action. It redirects the browser to an
* appropriate URL which will be used for authentication with the provider
* you set using setId()
*
* @throws Exception
*/
public void login(HttpServletRequest req, HttpServletResponse resp) throws Exception {
//String url = manager.getAuthenticationUrl(req.getParameter("provider"), successUrl);
String returnToUrl = viewUrl;
String url = manager.getAuthenticationUrl(id, returnToUrl);
// Store in session
req.getSession().setAttribute("authManager", manager);
// redirect
log.info("Redirecting to:" + url);
resp.sendRedirect(url);
}
/**
* Verifies the user when the external provider redirects back to our
* application
*
* @throws Exception
*/
public void connect(HttpServletRequest request) throws Exception {
provider = manager.connect(SocialAuthUtil.getRequestParametersMap(request));
profile= provider.getUserProfile();
}
/**
* Reinitializes the bean
*/
public void logout() {
init();
}
/**
* Returns the Profile information for the user. Should be called only after
* loginImmediately()
*
* @return Profile of the user
*/
public Profile getProfile() {
return profile;
}
/**
* Status of the user to be updated on a provider like Facebook or Twitter.
* Remember this will not give us the current status of the user
*
* @return status message to be updated
*/
public String getStatus() {
return status;
}
/**
* Status of the user to be updated on a provider like Facebook or Twitter.
* To actually update the status, call updateStatus action.
*
* @param status
*/
public void setStatus(final String status) {
this.status = status;
}
/**
* Updates the status on the given provider. Exception will be thrown if the
* provider does not provide this facility
*/
public void updateStatus() throws Exception {
provider.updateStatus(status);
}
/**
* Gets the list of contacts available from the provider. This may be used
* to import contacts of any user in your web application from your chosen
* provider like Gmail, Yahoo or Hotmail
*
* @return list of contacts
*/
public List<Contact> getContactList() throws Exception {
return provider.getContactList();
}
/**
* Retrieves the user profile from the provider.
*
* @return Profile object containing the profile information.
* @throws Exception
*/
public Profile getUserProfile() throws Exception {
return provider.getUserProfile();
}
}
Now I can simply use @Inject SocialAuth wherever needed.
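For example, a callback servlet along the lines of the AfterOAuth one above could then shrink to something like this (a sketch; it assumes the session-scoped bean above is injectable into the servlet via CDI):

@WebServlet("/success")
public class AfterOAuth extends HttpServlet {

    @Inject
    private SocialAuth socialAuth; // session-scoped, so state survives across requests

    @Override
    protected void doGet(HttpServletRequest req, HttpServletResponse resp)
            throws ServletException, IOException {
        try {
            socialAuth.connect(req); // verify the provider callback once
            resp.getOutputStream().print(socialAuth.getProfile().getFirstName());
        } catch (Exception e) {
            throw new ServletException(e);
        }
    }
}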

Integrating Pentaho Reporting with Java web application

I am trying to integrate my Pentaho-generated reports with a Java application. My reports are based on OLAP data and written using MDX queries. I found an example on a blog and used it as a foundation. My code is:
package org.pentaho.reporting.engine.classic.samples;
import in.nic.spaconsole.ServletContextProvider;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.util.Map;
import java.util.HashMap;
import org.pentaho.reporting.engine.classic.core.DataFactory;
import org.pentaho.reporting.engine.classic.core.MasterReport;
import org.pentaho.reporting.engine.classic.core.ReportProcessingException;
import org.pentaho.reporting.libraries.resourceloader.Resource;
import org.pentaho.reporting.libraries.resourceloader.ResourceException;
import org.pentaho.reporting.libraries.resourceloader.ResourceManager;
/**
* Generates a report in the following scenario:
* <ol>
* <li>The report definition file is a .prpt file which will be loaded and parsed
* <li>The data factory is a simple JDBC data factory using HSQLDB
* <li>There are no runtime report parameters used
* </ol>
*/
public class Sample1 extends AbstractReportGenerator
{
/**
* Default constructor for this sample report generator
*/
public Sample1()
{
}
/**
* Returns the report definition which will be used to generate the report. In this case, the report will be
* loaded and parsed from a file contained in this package.
*
* @return the loaded and parsed report definition to be used in report generation.
*/
public MasterReport getReportDefinition(String reportPath)
{
ResourceManager manager = new ResourceManager();
manager.registerDefaults();
try {
Resource res = manager.createDirectly(new URL(reportPath),
MasterReport.class);
MasterReport report = (MasterReport) res.getResource();
return report;
} catch (Exception e) {
e.printStackTrace();
}
return null;
}
/**
* Returns the data factory which will be used to generate the data used during report generation. In this example,
* we will return null since the data factory has been defined in the report definition.
*
* @return the data factory used with the report generator
*/
public DataFactory getDataFactory()
{
return null;
}
/**
* Returns the set of runtime report parameters. This sample report uses the following three parameters:
* <ul>
* <li><b>Report Title</b> - The title text on the top of the report</li>
* <li><b>Customer Names</b> - an array of customer names to show in the report</li>
* <li><b>Col Headers BG Color</b> - the background color for the column headers</li>
* </ul>
*
* @return <code>null</code> indicating the report generator does not use any report parameters
*/
public Map<String, Object> getReportParameters()
{
final Map parameters = new HashMap<String, Object>();
parameters.put("stday", 28);
parameters.put("styear", 2012);
parameters.put("stmonth", 10);
parameters.put("eday", 28);
parameters.put("eyear", 2012);
parameters.put("emonth", 10);
parameters.put("Sitesearch","india.gov.in");
parameters.put("firstResult",1);
parameters.put("lastResult", 100);
parameters.put("Pagenumber",1);
return parameters;
}
/**
* Simple command line application that will generate a PDF version of the report. In this report,
* the report definition has already been created with the Pentaho Report Designer application and
* it located in the same package as this class. The data query is located in that report definition
* as well, and there are a few report-modifying parameters that will be passed to the engine at runtime.
* <p/>
* The output of this report will be a PDF file located in the current directory and will be named
* <code>SimpleReportGeneratorExample.pdf</code>.
*
* @param args none
* @throws IOException indicates an error writing to the filesystem
* @throws ReportProcessingException indicates an error generating the report
*/
public static void main(String[] args) throws IOException, ReportProcessingException
{
final File outputFilenamehtml = new File(Sample1.class.getSimpleName() + ".html");
final File outputFilenamepdf = new File(Sample1.class.getSimpleName() + ".pdf");
// Generate the report
// new Sample1().generateReport(AbstractReportGenerator.OutputType.PDF, outputFilenamepdf);
// System.err.println("Generated the report [" + outputFilenamepdf.getAbsolutePath() + "]");
// new Sample1().generateReport(AbstractReportGenerator.OutputType.HTML, outputFilenamehtml);
// Output the location of the file
//System.err.println("Generated the report111 [" + outputFilenamehtml.getAbsolutePath() + "]");
}
public void report(String Path) throws IllegalArgumentException, ReportProcessingException, IOException {
// TODO Auto-generated method stub
// final File outputFilenamehtml = new File(Sample1.class.getSimpleName() + ".html");
final File outputFilenamepdf = new File(Sample1.class.getSimpleName() + ".pdf");
// Generate the report
new Sample1().generateReport(AbstractReportGenerator.OutputType.PDF, outputFilenamepdf,Path);
System.err.println("Generated the report [" + outputFilenamepdf.getAbsolutePath() + "]");
// new Sample1().generateReport(AbstractReportGenerator.OutputType.HTML, outputFilenamehtml,Path);
// Output the location of the file
// System.err.println("Generated the report111 [" + outputFilenamehtml.getAbsolutePath() + "]");
}
}
AbstractReportGenerator.java
/*
* This program is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License, version 2.1 as published by the Free Software
* Foundation.
*
* You should have received a copy of the GNU Lesser General Public License along with this
* program; if not, you can obtain a copy at http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
* or from the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* Copyright (c) 2009 Pentaho Corporation.. All rights reserved.
*/
package org.pentaho.reporting.engine.classic.samples;
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Map;
import org.pentaho.reporting.engine.classic.core.ClassicEngineBoot;
import org.pentaho.reporting.engine.classic.core.DataFactory;
import org.pentaho.reporting.engine.classic.core.MasterReport;
import org.pentaho.reporting.engine.classic.core.ReportProcessingException;
import org.pentaho.reporting.engine.classic.core.layout.output.AbstractReportProcessor;
import org.pentaho.reporting.engine.classic.core.modules.output.pageable.base.PageableReportProcessor;
import org.pentaho.reporting.engine.classic.core.modules.output.pageable.pdf.PdfOutputProcessor;
import org.pentaho.reporting.engine.classic.core.modules.output.table.base.FlowReportProcessor;
import org.pentaho.reporting.engine.classic.core.modules.output.table.base.StreamReportProcessor;
import org.pentaho.reporting.engine.classic.core.modules.output.table.html.AllItemsHtmlPrinter;
import org.pentaho.reporting.engine.classic.core.modules.output.table.html.FileSystemURLRewriter;
import org.pentaho.reporting.engine.classic.core.modules.output.table.html.HtmlOutputProcessor;
import org.pentaho.reporting.engine.classic.core.modules.output.table.html.HtmlPrinter;
import org.pentaho.reporting.engine.classic.core.modules.output.table.html.StreamHtmlOutputProcessor;
import org.pentaho.reporting.engine.classic.core.modules.output.table.xls.FlowExcelOutputProcessor;
import org.pentaho.reporting.libraries.repository.ContentLocation;
import org.pentaho.reporting.libraries.repository.DefaultNameGenerator;
import org.pentaho.reporting.libraries.repository.stream.StreamRepository;
/**
* This is the base class used with the report generation examples. It contains the actual <code>embedding</code>
* of the reporting engine and report generation. All example embedded implementations will need to extend this class
* and perform the following:
* <ol>
* <li>Implement the <code>getReportDefinition()</code> method and return the report definition (how the report
* definition is generated is up to the implementing class).
* <li>Implement the <code>getTableDataFactory()</code> method and return the data factory to be used (how
* this is created is up to the implementing class).
* <li>Implement the <code>getReportParameters()</code> method and return the set of report parameters to be used.
* If no report parameters are required, then this method can simply return <code>null</code>
* </ol>
*/
public abstract class AbstractReportGenerator
{
/**
* The supported output types for this sample
*/
public static enum OutputType
{
PDF, EXCEL, HTML
}
/**
* Performs the basic initialization required to generate a report
*/
public AbstractReportGenerator()
{
// Initialize the reporting engine
ClassicEngineBoot.getInstance().start();
}
/**
* Returns the report definition used by this report generator. If this method returns <code>null</code>,
* the report generation process will throw a <code>NullPointerException</code>.
*
* @return the report definition used by this report generator
*/
public abstract MasterReport getReportDefinition(String Path);
/**
* Returns the data factory used by this report generator. If this method returns <code>null</code>,
* the report generation process will use the data factory used in the report definition.
*
* @return the data factory used by this report generator
*/
public abstract DataFactory getDataFactory();
/**
* Returns the set of parameters that will be passed to the report generation process. If there are no parameters
* required for report generation, this method may return either an empty or a <code>null</code> <code>Map</code>
*
* @return the set of report parameters to be used by the report generation process, or <code>null</code> if no
* parameters are required.
*/
public abstract Map<String, Object> getReportParameters();
/**
* Generates the report in the specified <code>outputType</code> and writes it into the specified
* <code>outputFile</code>.
*
* @param outputType the output type of the report (PDF, EXCEL, HTML)
* @param outputFile the file into which the report will be written
* @throws IllegalArgumentException indicates the required parameters were not provided
* @throws IOException indicates an error opening the file for writing
* @throws ReportProcessingException indicates an error generating the report
*/
public void generateReport(final OutputType outputType,File outputFile,String Path)
throws IllegalArgumentException, IOException, ReportProcessingException
{
if (outputFile == null)
{
throw new IllegalArgumentException("The output file was not specified");
}
OutputStream outputStream = null;
try
{
// Open the output stream
outputStream = new BufferedOutputStream(new FileOutputStream(outputFile));
// Generate the report to this output stream
generateReport(outputType, outputStream,Path);
}
finally
{
if (outputStream != null)
{
outputStream.close();
}
}
}
/**
* Generates the report in the specified <code>outputType</code> and writes it into the specified
* <code>outputStream</code>.
* <p/>
* It is the responsibility of the caller to close the <code>outputStream</code>
* after this method is executed.
*
* @param outputType the output type of the report (PDF, EXCEL, HTML)
* @param outputStream the stream into which the report will be written
* @throws IllegalArgumentException indicates the required parameters were not provided
* @throws ReportProcessingException indicates an error generating the report
*/
public void generateReport(final OutputType outputType, OutputStream outputStream,String Path)
throws IllegalArgumentException, ReportProcessingException
{
if (outputStream == null)
{
throw new IllegalArgumentException("The output stream was not specified");
}
// Get the report and data factory
final MasterReport report = getReportDefinition(Path);
final DataFactory dataFactory = getDataFactory();
// Set the data factory for the report
if (dataFactory != null)
{
report.setDataFactory(dataFactory);
}
// Add any parameters to the report
final Map<String, Object> reportParameters = getReportParameters();
if (null != reportParameters)
{
for (String key : reportParameters.keySet())
{
report.getParameterValues().put(key, reportParameters.get(key));
}
}
// Prepare to generate the report
AbstractReportProcessor reportProcessor = null;
try
{
// Create the report processor for the specified output type
switch (outputType)
{
case PDF:
{
final PdfOutputProcessor outputProcessor =
new PdfOutputProcessor(report.getConfiguration(), outputStream, report.getResourceManager());
reportProcessor = new PageableReportProcessor(report, outputProcessor);
break;
}
case EXCEL:
{
final FlowExcelOutputProcessor target =
new FlowExcelOutputProcessor(report.getConfiguration(), outputStream, report.getResourceManager());
reportProcessor = new FlowReportProcessor(report, target);
break;
}
case HTML:
{
final StreamRepository targetRepository = new StreamRepository(outputStream);
final ContentLocation targetRoot = targetRepository.getRoot();
final HtmlOutputProcessor outputProcessor = new StreamHtmlOutputProcessor(report.getConfiguration());
final HtmlPrinter printer = new AllItemsHtmlPrinter(report.getResourceManager());
printer.setContentWriter(targetRoot, new DefaultNameGenerator(targetRoot, "index", "html"));
printer.setDataWriter(null, null);
printer.setUrlRewriter(new FileSystemURLRewriter());
outputProcessor.setPrinter(printer);
reportProcessor = new StreamReportProcessor(report, outputProcessor);
break;
}
}
// Generate the report
reportProcessor.processReport();
}
finally
{
if (reportProcessor != null)
{
reportProcessor.close();
}
}
}
}
And I have a controller that invokes this Sample1 class:
@POST
@Path("/get/reportDisplay")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.TEXT_HTML)
public Response exportReport(final ReportBean reportBean,
@Context final HttpServletRequest request,
@Context final HttpServletResponse response,
@Context final ServletContext context) throws IOException,
ParseException, InstantiationException, IllegalAccessException,
ClassNotFoundException, SQLException, JRException, IllegalArgumentException,ReportProcessingException {
String reportPath ="file://" +context.getRealPath("anor_admin.prpt");
Sample1 sample=new Sample1();
sample.report(reportPath);
return Response.status(200).build();
}
//End of Methods
} //End of class
But I am getting errors and am not able to preview my reports. Please help me with this.
I've successfully embedded the Report Engine in a web application, but I had to fix some classpath errors in order to get it to work.
Be sure your WAR includes report-engine-classic.core.jar and the lib*.jar files from pentaho-library.
You'll need extra dependencies if you use charts.
What kind of errors are you getting?

How to write from one database to another using SPRING-Batch w/out translating the resultset into a list of temporary objects or a map?

Steps:
Read "Select x, y, z from TABLE_1" from Database1 into a ResultSet.
Pass the ResultSet to a Writer.
Write all records returned by the ResultSet to TABLE_2 in Database2.
Requirement:
Do not create any unused Objects to hold the data after reading from the ResultSet. (i.e. no Table1.class)
Use as much pre-built functionality as possible from the SPRING-Batch framework.
No DB Link.
NOTE: Class names for me to reference are enough to get me on the right path.
Assuming you use JdbcPagingItemReader and JdbcBatchItemWriter, you can use (see the sketch below):
the ColumnMapRowMapper from spring-jdbc
a self-implemented ItemSqlParameterSourceProvider
Your wish to save on memory allocations is clear, but think twice about whether maximum optimization is worth the side effects and problems.
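For illustration, an untested sketch of that pairing as two bean definitions in a @Configuration class; the table names, columns, sort key, and page size are placeholders taken from the question:

import java.util.Map;
import javax.sql.DataSource;
import org.springframework.batch.item.database.JdbcBatchItemWriter;
import org.springframework.batch.item.database.JdbcPagingItemReader;
import org.springframework.batch.item.database.support.SqlPagingQueryProviderFactoryBean;
import org.springframework.context.annotation.Bean;
import org.springframework.jdbc.core.ColumnMapRowMapper;
import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;

@Bean
public JdbcPagingItemReader<Map<String, Object>> reader(DataSource database1) throws Exception {
    SqlPagingQueryProviderFactoryBean provider = new SqlPagingQueryProviderFactoryBean();
    provider.setDataSource(database1);
    provider.setSelectClause("SELECT x, y, z");
    provider.setFromClause("FROM TABLE_1");
    provider.setSortKey("x"); // paging needs a unique sort column

    JdbcPagingItemReader<Map<String, Object>> reader = new JdbcPagingItemReader<>();
    reader.setDataSource(database1);
    reader.setQueryProvider(provider.getObject());
    reader.setRowMapper(new ColumnMapRowMapper()); // each row becomes a Map<column, value>
    reader.setPageSize(100);
    return reader;
}

@Bean
public JdbcBatchItemWriter<Map<String, Object>> writer(DataSource database2) {
    JdbcBatchItemWriter<Map<String, Object>> writer = new JdbcBatchItemWriter<>();
    writer.setDataSource(database2);
    writer.setSql("INSERT INTO TABLE_2 (x, y, z) VALUES (:x, :y, :z)");
    // Each row map is exposed to the named parameters via a MapSqlParameterSource.
    writer.setItemSqlParameterSourceProvider(item -> new MapSqlParameterSource(item));
    return writer;
}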
First of all, if you just want to read rows from table A and write them to table B without any transformation of the data, then Spring Batch is not the best choice. You might want Spring Batch in this scenario if you need to retry (using RetryTemplate) when some exception occurs during writing, or to skip certain exceptions (e.g. DataIntegrityViolationException, to ignore duplicate entries), as in the step sketch below.
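If retry/skip is your reason for reaching for Spring Batch, the fault-tolerant step configuration looks roughly like this (a sketch against the Java-config API; the exception choices and limits are placeholders):

@Bean
public Step copyStep(StepBuilderFactory steps,
        ItemReader<Map<String, Object>> reader,
        ItemWriter<Map<String, Object>> writer) {
    return steps.get("copyStep")
            .<Map<String, Object>, Map<String, Object>>chunk(100)
            .reader(reader)
            .writer(writer)
            .faultTolerant()
            .retryLimit(3).retry(DeadlockLoserDataAccessException.class)
            .skipLimit(10).skip(DataIntegrityViolationException.class) // ignore duplicate entries
            .build();
}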
So what you can do (though it is not a very good approach) is use flyweight objects: the object you return to the framework is always the same, but it is filled with new contents each time (the code is not tested; take it AS IS):
package org.epo.lifesciences.chepo.service;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import javax.sql.DataSource;
import org.springframework.batch.item.ExecutionContext;
import org.springframework.batch.item.ItemStreamException;
import org.springframework.batch.item.support.AbstractItemStreamItemReader;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.jdbc.datasource.DataSourceUtils;
import org.springframework.jdbc.support.JdbcUtils;
public class FlyweightItemReader extends AbstractItemStreamItemReader<Object[]> {
@Autowired
private DataSource dataSource;
/*
* State objects
*/
private Connection con;
private Statement stmt;
private ResultSet rs;
private Object[] row;
/**
* @see org.springframework.batch.item.ItemStreamSupport#open(org.springframework.batch.item.ExecutionContext)
*/
@Override
public void open(ExecutionContext executionContext) throws ItemStreamException {
row = null;
con = DataSourceUtils.getConnection(dataSource);
try {
stmt = con.createStatement();
rs = stmt.executeQuery("some sql");
}
catch (SQLException e) {
DataSourceUtils.releaseConnection(con, dataSource);
throw new ItemStreamException(e);
}
}
/**
* @see org.springframework.batch.item.ItemStreamSupport#close()
*/
@Override
public void close() {
// Close the JDBC resources first, then hand the connection back to Spring;
// releaseConnection also closes it when it is not transaction-bound, so a
// separate closeConnection call is unnecessary.
JdbcUtils.closeResultSet(rs);
JdbcUtils.closeStatement(stmt);
DataSourceUtils.releaseConnection(con, dataSource);
}
/**
* @see org.springframework.batch.item.ItemReader#read()
*/
public Object[] read() throws SQLException {
if (!rs.next()) {
// End of result set is reached:
return null;
}
ResultSetMetaData rsmd = rs.getMetaData();
int columnCount = rsmd.getColumnCount();
if (row == null && columnCount > 0) {
// Create the flyweight:
row = new Object[columnCount];
}
// Copy all column values to flyweight:
for (int i = 1; i <= columnCount; i++) {
row[i - 1] = JdbcUtils.getResultSetValue(rs, i);
}
return row;
}
}
Be aware of the limits of this approach: it works only if your commit interval (chunk size) is 1 (otherwise you end up with N identical objects in a batch) and only if your reader has prototype scope (because it is stateful); a step-scoped bean definition, as sketched below, achieves this.
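For the prototype-scope caveat, the usual Spring Batch idiom is a step-scoped bean definition, roughly:

@Bean
@StepScope // each step execution gets its own stateful reader instance
public FlyweightItemReader flyweightItemReader() {
    return new FlyweightItemReader();
}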
