myBATIS foreach hitting limit of 1000 - java

Here's what MyBatis has in its own documentation for foreach:
<select id="selectPostIn" resultType="domain.blog.Post">
SELECT *
FROM POST P
WHERE ID in
<foreach item="item" index="index" collection="list"
open="(" separator="," close=")">
#{item}
</foreach>
</select>
However, if the list contains over 1000 items and you're using an Oracle DB, you get this exception:
java.sql.SQLSyntaxErrorException: ORA-01795: maximum number of expressions in a list is 1000
What can I do to fix this so it works with more than 1000 elements?

I'm not sure if this is the most elegant solution or not, but here's what I did:
<select id="selectPostIn" resultType="domain.blog.Post">
SELECT *
FROM POST P
WHERE ID in
<trim suffixOverrides=" OR ID IN ()">
<foreach item="item" index="index" collection="list"
open="(" close=")">
<if test="index != 0">
<choose>
<when test="index % 1000 == 999">) OR ID IN (</when>
<otherwise>,</otherwise>
</choose>
</if>
#{item}
</foreach>
</trim>
</select>
Explanation
Let's start with the foreach. We want to surround it in ( and ). Between most elements we want commas, except that every thousand elements we want to close the list and OR it with another one. That's what the choose/when/otherwise construct handles. We don't want either of those before the first element, hence the if that the choose sits inside. Finally, the foreach body inserts the actual #{item}.
The outer trim is just so that, if we have exactly 1000 elements for example, we don't end with OR ID IN (), which would be invalid. ((), specifically, is the invalid part. That's a syntax error in SQL, not an empty list like I hoped it would be.)
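To make the shape concrete: for a list of 1,500 ids, the foreach above renders SQL along these lines (a sketch, with literal placeholder ids standing in for the bind parameters):
SELECT *
FROM POST P
WHERE ID in (id_0, id_1, ..., id_998)
OR ID IN (id_999, ..., id_1499)
Each IN list stays below Oracle's 1000-expression cap, and the OR of the lists is equivalent to one big IN.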

Based on the answer above, we tried a DELETE query whose IN clause contains more than 1000 records. The following query works:
<delete id="delete" parameterType="Map">
DELETE FROM Employee
where
emp_id = #{empId}
<foreach item="deptId" index="index" collection="ids" open="AND DEPT_ID NOT IN (" close=")">
<if test="index != 0">
<choose>
<when test="index % 1000 == 999">) AND DEPT_ID NOT IN (</when>
<otherwise>,</otherwise>
</choose>
</if>
#{deptId}
</foreach>
</delete>
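Note the connector: because this is NOT IN, the chunks are joined with AND rather than the OR used in the plain IN case above; excluding an id from the whole set means excluding it from every chunk. Rendered, the statement comes out roughly like this sketch (placeholder ids):
DELETE FROM Employee
where
emp_id = ?
AND DEPT_ID NOT IN (1, 2, ..., 999)
AND DEPT_ID NOT IN (1000, ..., 1500)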

A MyBatis plugin that partitions the oversized parameters, runs the query once per batch, and then combines the results:
@Intercepts({
@Signature(type = Executor.class, method = "query", args = {MappedStatement.class, Object.class, RowBounds.class, ResultHandler.class}),
@Signature(type = Executor.class, method = "query", args = {MappedStatement.class, Object.class, RowBounds.class, ResultHandler.class, CacheKey.class, BoundSql.class})}
)
public class BigSizeParamQueryPlugin implements Interceptor {
private final int singleBatchSize;
private static final HeavyParamContext NO_BIG_PARAM = new HeavyParamContext();
public BigSizeParamQueryPlugin() {
this.singleBatchSize = 1000;
}
public BigSizeParamQueryPlugin(Integer singleBatchSize) {
if (singleBatchSize < 500) {
throw new IllegalArgumentException("batch size less than 500 is not recommended");
}
this.singleBatchSize = singleBatchSize;
}
@Override
public Object intercept(Invocation invocation) throws Throwable {
Object[] args = invocation.getArgs();
Object parameter = args[1];
if (parameter instanceof MapperMethod.ParamMap && RowBounds.DEFAULT == args[2]) {
MapperMethod.ParamMap paramMap = (MapperMethod.ParamMap) parameter;
if (MapUtils.isNotEmpty(paramMap)) {
try {
HeavyParamContext context = findHeavyParam(paramMap);
if (context.hasHeavyParam()) {
QueryExecutor queryExecutor = new QueryExecutor(invocation, context);
return queryExecutor.query();
}
} catch (Throwable e) {
log.warn("BigSizeParamQueryPlugin process error", e);
return invocation.proceed();
}
}
}
return invocation.proceed();
}
private class QueryExecutor {
private final MappedStatement ms;
private final Map<String, Object> paramMap;
private final RowBounds rowBounds;
private final ResultHandler resultHandler;
private final Executor executor;
private final List<Object> finalResult;
private final Iterator<HeavyParam> heavyKeyIter;
public QueryExecutor(Invocation invocation, HeavyParamContext context) {
Object[] args = invocation.getArgs();
this.ms = (MappedStatement) args[0];
this.paramMap = context.getParameter();
this.rowBounds = (RowBounds) args[2];
this.resultHandler = (ResultHandler) args[3];
this.executor = (Executor) invocation.getTarget();
List<HeavyParam> heavyParams = context.getHeavyParams();
this.finalResult = new ArrayList<>(heavyParams.size() * singleBatchSize);
this.heavyKeyIter = heavyParams.iterator();
}
public Object query() throws SQLException {
while (heavyKeyIter.hasNext()) {
HeavyParam currKey = heavyKeyIter.next();
List<List<Object>> param = partitionParam(currKey.getParam());
doQuery(currKey, param);
}
return finalResult;
}
private void doQuery(HeavyParam currKey, List<List<Object>> param) throws SQLException {
if (!heavyKeyIter.hasNext()) {
for (List<Object> currentParam : param) {
updateParamMap(currKey, currentParam);
List<Object> oneBatchResult = executor.query(ms, paramMap, rowBounds, resultHandler);
finalResult.addAll(oneBatchResult);
}
return;
} else {
HeavyParam nextKey = heavyKeyIter.next();
log.warn("got multiple heavy keys [{}], batchSize[{}]", nextKey.shadowHeavyKeys, nextKey.getParam().size());
List<List<Object>> nextParam = partitionParam(nextKey.getParam());
for (List<Object> currParam : param) {
updateParamMap(currKey, currParam);
doQuery(nextKey, nextParam);
}
}
}
private void updateParamMap(HeavyParam currKey, List<Object> param) {
for (String shadowKey : currKey.getShadowHeavyKeys()) {
paramMap.put(shadowKey, param);
}
}
}
private HeavyParamContext findHeavyParam(Map<String, Object> parameterMap) {
List<Map.Entry<String, Object>> heavyKeys = doFindHeavyParam(parameterMap);
if (heavyKeys == null) {
return BigSizeParamQueryPlugin.NO_BIG_PARAM;
} else {
HeavyParamContext result = new HeavyParamContext();
List<HeavyParam> heavyParams;
if (heavyKeys.size() == 1) {
heavyParams = buildSingleHeavyParam(heavyKeys);
} else {
heavyParams = buildMultiHeavyParam(heavyKeys);
}
result.setHeavyParams(heavyParams);
result.setParameter(new HashMap<>(parameterMap));
return result;
}
}
private List<HeavyParam> buildSingleHeavyParam(List<Map.Entry<String, Object>> heavyKeys) {
Map.Entry<String, Object> single = heavyKeys.get(0);
return Collections.singletonList(new HeavyParam((Collection) single.getValue(), Collections.singletonList(single.getKey())));
}
private List<List<Object>> partitionParam(Object o) {
Collection c = (Collection) o;
List res;
if (c instanceof List) {
res = (List) c.stream().distinct().collect(Collectors.toList());
} else {
res = new ArrayList(c);
}
return Lists.partition(res, singleBatchSize);
}
private List<HeavyParam> buildMultiHeavyParam(List<Map.Entry<String, Object>> heavyKeys) {
// when a heavy key is used multiple times in the XML, its parameter names will differ.
TreeMap<Collection, List<String>> params = new TreeMap<>(new Comparator<Collection>() {
@Override
public int compare(Collection o1, Collection o2) {
// FIXME: workable, but has corner cases.
return CollectionUtils.isEqualCollection(o1, o2) ? 0 : o1.hashCode() - o2.hashCode();
}
});
for (Map.Entry<String, Object> keyEntry : heavyKeys) {
String key = keyEntry.getKey();
List<String> keys = params.computeIfAbsent((Collection) keyEntry.getValue(), k -> new ArrayList<>(1));
keys.add(key);
}
List<HeavyParam> hps = new ArrayList<>(params.size());
for (Map.Entry<Collection, List<String>> heavyEntry : params.entrySet()) {
List<String> shadowKeys = heavyEntry.getValue();
hps.add(new HeavyParam(heavyEntry.getKey(), shadowKeys));
}
return hps;
}
private List<Map.Entry<String, Object>> doFindHeavyParam(Map<String, Object> parameterMap) {
List<Map.Entry<String, Object>> result = null;
for (Map.Entry<String, Object> p : parameterMap.entrySet()) {
if (p != null) {
Object value = p.getValue();
if (value != null && value instanceof Collection) {
int size = CollectionUtils.size(value);
if (size > singleBatchSize) {
if (result == null) {
result = new ArrayList<>(1);
}
result.add(p);
}
}
}
}
return result;
}
@Getter
@Setter
private static class HeavyParamContext {
private Boolean hasHeavyParam;
private List<HeavyParam> heavyParams;
private Map<String, Object> parameter;
public Boolean hasHeavyParam() {
return heavyParams != null;
}
}
@Data
@AllArgsConstructor
@NoArgsConstructor
private class HeavyParam {
private Collection param;
private List<String> shadowHeavyKeys;
}
@Override
public Object plugin(Object o) {
return Plugin.wrap(o, this);
}
@Override
public void setProperties(Properties properties) {
}
}
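If you go this route, the interceptor still has to be registered like any other MyBatis plugin, for example in mybatis-config.xml (the package below is a placeholder; adjust it to wherever the class above lives):
<plugins>
<plugin interceptor="com.example.plugin.BigSizeParamQueryPlugin"/>
</plugins>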

Related

How to retrieve tables which exist in a PDF using AWS Textract in Java

I found the article below for doing this in Python:
https://docs.aws.amazon.com/textract/latest/dg/examples-export-table-csv.html
I also used the article below to extract text:
https://docs.aws.amazon.com/textract/latest/dg/detecting-document-text.html
But that article only helped to get the text. I also used the Block method block.getBlockType(), but no block returned its type as "CELL" even though there are tables in the image/PDF.
Help me find a Java library similar to "boto3" that extracts all the tables.
What I did: I created models for each dataset in the JSON response, and I can use these models to build a table view in JSF.
public static List<TableModel> getTablesFromTextract(TextractModel textractModel) {
List<TableModel> tables = null;
try {
if (textractModel != null) {
tables = new ArrayList<>();
List<BlockModel> tableBlocks = new ArrayList<>();
Map<String, BlockModel> blockMap = new HashMap<>();
for (BlockModel block : textractModel.getBlocks()) {
if (block.getBlockType().equals("TABLE")) {
tableBlocks.add(block);
}
blockMap.put(block.getId(), block);
}
for (BlockModel blockModel : tableBlocks) {
Map<Long, Map<Long, String>> rowMap = new HashMap<>();
for (RelationshipModel relationship : blockModel.getRelationships()) {
if (relationship.getType().equals("CHILD")) {
for (String id : relationship.getIds()) {
BlockModel cell = blockMap.get(id);
if (cell.getBlockType().equals("CELL")) {
long rowIndex = cell.getRowIndex();
long columnIndex = cell.getColumnIndex();
if (!rowMap.containsKey(rowIndex)) {
rowMap.put(rowIndex, new HashMap<>());
}
Map<Long, String> columnMap = rowMap.get(rowIndex);
columnMap.put(columnIndex, getCellText(cell, blockMap));
}
}
}
}
tables.add(new TableModel(blockModel, rowMap));
}
System.out.println("row Map " + tables.toString());
}
} catch (Exception e) {
LOG.error("Could not get table from textract model", e);
}
return tables;
}
private static String getCellText(BlockModel cell, Map<String, BlockModel> blockMap) {
String text = "";
try {
if (cell != null
&& CollectionUtils.isNotEmpty(cell.getRelationships())) {
for (RelationshipModel relationship : cell.getRelationships()) {
if (relationship.getType().equals("CHILD")) {
for (String id : relationship.getIds()) {
BlockModel word = blockMap.get(id);
if (word.getBlockType().equals("WORD")) {
text += word.getText() + " ";
} else if (word.getBlockType().equals("SELECTION_ELEMENT")) {
if (word.getSelectionStatus().equals("SELECTED")) {
text += "X ";
}
}
}
}
}
}
} catch (Exception e) {
LOG.error("Could not get cell text of table", e);
}
return text;
}
TableModel to create the view from:
public class TableModel {
private BlockModel table;
private Map<Long, Map<Long, String>> rowMap;
public TableModel(BlockModel table, Map<Long, Map<Long, String>> rowMap) {
this.table = table;
this.rowMap = rowMap;
}
public BlockModel getTable() {
return table;
}
public void setTable(BlockModel table) {
this.table = table;
}
public Map<Long, Map<Long, String>> getRowMap() {
return rowMap;
}
public void setRowMap(Map<Long, Map<Long, String>> rowMap) {
this.rowMap = rowMap;
}
@Override
public String toString() {
return table.getId() + " - " + rowMap.toString();
}
}
I have something similar:
public class AnalyzeDocument {
public DocumentModel startProcess(byte[] content) {
Region region = Region.EU_WEST_2;
TextractClient textractClient = TextractClient.builder().region(region)
.credentialsProvider(EnvironmentVariableCredentialsProvider.create()).build();
return analyzeDoc(textractClient, content);
}
public DocumentModel analyzeDoc(TextractClient textractClient, byte[] content) {
try {
SdkBytes sourceBytes = SdkBytes.fromByteArray(content);
Util util = new Util();
Document myDoc = Document.builder().bytes(sourceBytes).build();
List<FeatureType> featureTypes = new ArrayList<FeatureType>();
featureTypes.add(FeatureType.FORMS);
featureTypes.add(FeatureType.TABLES);
AnalyzeDocumentRequest analyzeDocumentRequest = AnalyzeDocumentRequest.builder().featureTypes(featureTypes)
.document(myDoc).build();
AnalyzeDocumentResponse analyzeDocument = textractClient.analyzeDocument(analyzeDocumentRequest);
List<Block> docInfo = analyzeDocument.blocks();
// util.displayBlockInfo(docInfo);
PageModel pageModel = util.getTableResults(docInfo);
DocumentModel documentModel = new DocumentModel();
documentModel.getPages().add(pageModel);
Iterator<Block> blockIterator = docInfo.iterator();
while (blockIterator.hasNext()) {
Block block = blockIterator.next();
log.debug("The block type is " + block.blockType().toString());
}
return documentModel;
} catch (TextractException e) {
System.err.println(e.getMessage());
}
return null;
}
}
and this is the util file:
public PageModel getTableResults(List<Block> blocks) {
List<Block> tableBlocks = new ArrayList<>();
Map<String, Block> blockMap = new HashMap<>();
for (Block block : blocks) {
blockMap.put(block.id(), block);
if (block.blockType().equals(BlockType.TABLE)) {
tableBlocks.add(block);
log.debug("added table: " + block.text());
}
}
PageModel page = new PageModel();
if (tableBlocks.size() == 0) {
return null;
}
int i = 0;
for (Block table : tableBlocks) {
page.getTables().add(generateTable(table, blockMap, i++));
}
return page;
}
private TableModel generateTable(Block table, Map<String, Block> blockMap, int index) {
TableModel model = new TableModel();
Map<Integer, Map<Integer, String>> rows = getRowsColumnsMap(table, blockMap);
model.setTableId("Table_" + index);
for (Map.Entry<Integer, Map<Integer, String>> entry : rows.entrySet()) {
RowModel rowModel = new RowModel();
Map<Integer, String> value = entry.getValue();
for (int i = 0; i < value.size(); i++) {
rowModel.getCells().add(value.get(i));
}
model.getRows().add(rowModel);
}
return model;
}
private Map<Integer, Map<Integer, String>> getRowsColumnsMap(Block block, Map<String, Block> blockMap) {
Map<Integer, Map<Integer, String>> rows = new HashMap<>();
for (Relationship relationship : block.relationships()) {
if (relationship.type().equals(RelationshipType.CHILD)) {
for (String childId : relationship.ids()) {
Block cell = blockMap.get(childId);
if (cell != null) {
int rowIndex = cell.rowIndex();
int colIndex = cell.columnIndex();
if (rows.get(rowIndex) == null) {
Map<Integer, String> row = new HashMap<>();
rows.put(rowIndex, row);
}
rows.get(rowIndex).put(colIndex, getText(cell, blockMap));
}
}
}
}
return rows;
}
public String getText(Block block, Map<String, Block> blockMap) {
String text = "";
if (block.relationships() != null && block.relationships().size() > 0) {
for (Relationship relationship : block.relationships()) {
if (relationship.type().equals(RelationshipType.CHILD)) {
for (String childId : relationship.ids()) {
Block wordBlock = blockMap.get(childId);
if (wordBlock != null && wordBlock.blockType() != null) {
if (wordBlock.blockType().equals(BlockType.WORD)) {
text += wordBlock.text() + " ";
}
}
}
}
}
}
return text;
}
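For completeness, a small sketch that flattens one of the tables built above into CSV lines. It assumes only the getRows()/getCells() accessors that generateTable already uses, so treat it as illustrative glue code rather than part of the Textract API:
public String toCsv(TableModel model) {
StringBuilder sb = new StringBuilder();
for (RowModel row : model.getRows()) {
// cells were added in column order by generateTable
sb.append(String.join(",", row.getCells())).append('\n');
}
return sb.toString();
}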

How to group inside Flink with my model

I'm using Flink with Java to build my recommendation system using our own logic.
So I have a dataset:
[user] [item]
100 1
100 2
100 3
100 4
100 5
200 1
200 2
200 3
200 6
300 1
300 6
400 7
So I map everything to a tuple:
DataSet<Tuple3<Long, Long, Integer>> csv = text.flatMap(new LineSplitter()).groupBy(0, 1).reduceGroup(new GroupReduceFunction<Tuple2<Long, Long>, Tuple3<Long, Long, Integer>>() {
@Override
public void reduce(Iterable<Tuple2<Long, Long>> iterable, Collector<Tuple3<Long, Long, Integer>> collector) throws Exception {
Long customerId = 0L;
Long itemId = 0L;
Integer count = 0;
for (Tuple2<Long, Long> item : iterable) {
customerId = item.f0;
itemId = item.f1;
count = count + 1;
}
collector.collect(new Tuple3<>(customerId, itemId, count));
}
});
Then I gather each customer and its items into an ArrayList:
DataSet<CustomerItems> customerItems = csv.groupBy(0).reduceGroup(new GroupReduceFunction<Tuple3<Long, Long, Integer>, CustomerItems>() {
@Override
public void reduce(Iterable<Tuple3<Long, Long, Integer>> iterable, Collector<CustomerItems> collector) throws Exception {
ArrayList<Long> newItems = new ArrayList<>();
Long customerId = 0L;
for (Tuple3<Long, Long, Integer> item : iterable) {
customerId = item.f0;
newItems.add(item.f1);
}
collector.collect(new CustomerItems(customerId, newItems));
}
});
Now I need to get all "similar" customers, but after trying a lot of things nothing works.
The logic would be:
for ci : CustomerItems
c1 = ci.customerId
for ci2 : CustomerItems
c2 = ci2.customerId
if c1 != c2
if ci2.getItems() has any item inside ci.getItems()
collector.collect(new Tuple2<c1, c2>)
I tried it using reduce, but I can't iterate over the iterator twice (a loop inside a loop).
Can anyone help me?
You can cross the dataset with itself and basically insert your logic 1:1 into a cross function (excluding the two loops, since the cross does that for you), as sketched below.
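A minimal sketch of that suggestion, assuming the CustomerItems POJO from the question. A CrossFunction has to emit one value per pair, so the similarity check travels as a boolean flag and gets filtered afterwards (Collections is java.util.Collections, FilterFunction is org.apache.flink.api.common.functions.FilterFunction):
DataSet<Tuple3<Long, Long, Boolean>> similarPairs = customerItems.cross(customerItems)
.with(new CrossFunction<CustomerItems, CustomerItems, Tuple3<Long, Long, Boolean>>() {
@Override
public Tuple3<Long, Long, Boolean> cross(CustomerItems c1, CustomerItems c2) {
// similar = different customers sharing at least one item
boolean similar = !c1.customerId.equals(c2.customerId)
&& !Collections.disjoint(c1.items, c2.items);
return new Tuple3<>(c1.customerId, c2.customerId, similar);
}
})
.filter(new FilterFunction<Tuple3<Long, Long, Boolean>>() {
@Override
public boolean filter(Tuple3<Long, Long, Boolean> pair) {
return pair.f2;
}
});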
I solved the problem, but I needed to group and reduce after the cross. I don't know if it is the best method. Can anyone suggest something?
The result is here:
package org.myorg.quickstart;
import org.apache.flink.api.common.functions.CrossFunction;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.GroupReduceFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.util.Collector;
import java.io.Serializable;
import java.util.ArrayList;
public class UserRecommendation {
public static void main(String[] args) throws Exception {
final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
// read the file with the dataset
DataSet<String> text = env.readTextFile("/Users/paulo/Downloads/dataset.csv");
// create a tuple of: customer | item | count
DataSet<Tuple3<Long, Long, Integer>> csv = text.flatMap(new LineFieldSplitter()).groupBy(0, 1).reduceGroup(new GroupReduceFunction<Tuple2<Long, Long>, Tuple3<Long, Long, Integer>>() {
@Override
public void reduce(Iterable<Tuple2<Long, Long>> iterable, Collector<Tuple3<Long, Long, Integer>> collector) throws Exception {
Long customerId = 0L;
Long itemId = 0L;
Integer count = 0;
for (Tuple2<Long, Long> item : iterable) {
customerId = item.f0;
itemId = item.f1;
count = count + 1;
}
collector.collect(new Tuple3<>(customerId, itemId, count));
}
});
// group the customer's items inside the customer
final DataSet<CustomerItems> customerItems = csv.groupBy(0).reduceGroup(new GroupReduceFunction<Tuple3<Long, Long, Integer>, CustomerItems>() {
@Override
public void reduce(Iterable<Tuple3<Long, Long, Integer>> iterable, Collector<CustomerItems> collector) throws Exception {
ArrayList<Long> newItems = new ArrayList<>();
Long customerId = 0L;
for (Tuple3<Long, Long, Integer> item : iterable) {
customerId = item.f0;
newItems.add(item.f1);
}
collector.collect(new CustomerItems(customerId, newItems));
}
});
// get all the items of customers that belong to a similar user
DataSet<CustomerItems> ci = customerItems.cross(customerItems).with(new CrossFunction<CustomerItems, CustomerItems, CustomerItems>() {
@Override
public CustomerItems cross(CustomerItems customerItems, CustomerItems customerItems2) throws Exception {
if (!customerItems.customerId.equals(customerItems2.customerId)) {
boolean has = false;
for (Long item : customerItems2.items) {
if (customerItems.items.contains(item)) {
has = true;
break;
}
}
if (has) {
for (Long item : customerItems2.items) {
if (!customerItems.items.contains(item)) {
customerItems.ritems.add(item);
}
}
}
}
return customerItems;
}
}).groupBy(new KeySelector<CustomerItems, Long>() {
@Override
public Long getKey(CustomerItems customerItems) throws Exception {
return customerItems.customerId;
}
}).reduceGroup(new GroupReduceFunction<CustomerItems, CustomerItems>() {
@Override
public void reduce(Iterable<CustomerItems> iterable, Collector<CustomerItems> collector) throws Exception {
CustomerItems c = new CustomerItems();
for (CustomerItems current : iterable) {
c.customerId = current.customerId;
for (Long item : current.ritems) {
if (!c.ritems.contains(item)) {
c.ritems.add(item);
}
}
}
collector.collect(c);
}
});
ci.first(100).print();
System.out.println(ci.count());
}
public static class CustomerItems implements Serializable {
public Long customerId;
public ArrayList<Long> items = new ArrayList<>();
public ArrayList<Long> ritems = new ArrayList<>();
public CustomerItems() {
}
public CustomerItems(Long customerId, ArrayList<Long> items) {
this.customerId = customerId;
this.items = items;
}
@Override
public String toString() {
StringBuilder itemsData = new StringBuilder();
if (items != null) {
for (Long item : items) {
if (itemsData.length() == 0) {
itemsData.append(item);
} else {
itemsData.append(", ").append(item);
}
}
}
StringBuilder ritemsData = new StringBuilder();
if (ritems != null) {
for (Long item : ritems) {
if (ritemsData.length() == 0) {
ritemsData.append(item);
} else {
ritemsData.append(", ").append(item);
}
}
}
return String.format("[ID: %d, Items: %s, RItems: %s]", customerId, itemsData, ritemsData);
}
}
public static final class LineFieldSplitter implements FlatMapFunction<String, Tuple2<Long, Long>> {
@Override
public void flatMap(String value, Collector<Tuple2<Long, Long>> out) {
// normalize and split the line
String[] tokens = value.split("\t");
if (tokens.length > 1) {
out.collect(new Tuple2<>(Long.valueOf(tokens[0]), Long.valueOf(tokens[1])));
}
}
}
}
Link to the gist:
https://gist.github.com/prsolucoes/b406ae98ea24120436954967e37103f6

Elasticsearch retrieves data from the wrong index

I do not know why Elasticsearch does not use the appropriate values for the index and type. It needs to retrieve data from index = auctions and type = auctions, as declared in the model:
AuctionIndex.java:
@Document(indexName = "auctions", type = "auctions")
public class AuctionIndex {
@Id
private String id;
private Long cat;
private Long tcat;
private String curr;
private Long price;
private Long start_date;
private Long end_date;
private String title;
private String pow;
private String woj;
private String loc;
private String cat_name;
private Long catdec;
private Long uid;
private Long qty;
...getters and setters...
}
This code works when downloading data as follows:
public Map searchByIndexParams(AuctionIndexSearchParams searchParams, Pageable pageable) {
Map response = new HashMap();
NativeSearchQuery searchQuery = this.getSearchQuery(searchParams, pageable).build();
final FacetedPage<AuctionIndex> search = auctionIndexRepository.search(searchQuery);
List<AuctionIndex> content = search.getContent();
response.put("content", content.stream().map(row -> {
return Auction.builder()
.cat(row.getCat())
.item(Long.parseLong(row.getId()))
.endts(row.getEnd_date())
.startts(row.getStart_date())
.loc(row.getLoc())
.pow(row.getPow())
.woj(row.getWoj())
.price(row.getPrice())
.qty(row.getQty())
.title(row.getTitle())
.user(row.getUid())
.catName(row.getCat_name())
.build();
}).collect(Collectors.toList()));
response.put("first", search.isFirst());
response.put("last", search.isLast());
response.put("number", search.getNumber());
response.put("numberOfElements", search.getNumberOfElements());
response.put("size", search.getSize());
response.put("sort", search.getSort());
response.put("totalElements", search.getTotalElements());
response.put("totalPages", search.getTotalPages());
return response;
}
And this is how I download all the records:
public Map findAllByIndexParams(AuctionIndexSearchParams searchParams, Pageable pageable) {
List rows = new ArrayList();
Map response = new HashMap();
final List<FilterBuilder> filters = Lists.newArrayList();
final NativeSearchQueryBuilder searchQuery = new NativeSearchQueryBuilder().withQuery(matchAllQuery());
Optional.ofNullable(searchParams.getCategoryId()).ifPresent(v -> filters.add(boolFilter().must(termFilter("cat", v))));
Optional.ofNullable(searchParams.getCurrency()).ifPresent(v -> filters.add(boolFilter().must(termFilter("curr", v))));
Optional.ofNullable(searchParams.getTreeCategoryId()).ifPresent(v -> filters.add(boolFilter().must(termFilter("tcat", v))));
Optional.ofNullable(searchParams.getUid()).ifPresent(v -> filters.add(boolFilter().must(termFilter("uid", v))));
final BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
//access for many uids
if (searchParams.getUids() != null) {
if (searchParams.getItemId() != null || searchParams.getTitle() != null) {
Optional.ofNullable(searchParams.getUids().split(",")).ifPresent(v -> {
filters.add(boolFilter().must(termsFilter("uid", v)));
});
} else {
for (String user : searchParams.getUids().split(",")) {
boolQueryBuilder.should(queryStringQuery(user).field("uid"));
}
}
}
//access for many categories
if (searchParams.getCategories() != null) {
Optional.ofNullable(searchParams.getCategories().split(",")).ifPresent(v -> {
filters.add(boolFilter().must(termsFilter("cat", v)));
});
}
if (searchParams.getItemId() != null) {
boolQueryBuilder.must(queryStringQuery(searchParams.getItemId()).field("_id"));
}
if (Optional.ofNullable(searchParams.getTitle()).isPresent()) {
boolQueryBuilder.must(queryStringQuery(searchParams.getTitle()).analyzeWildcard(true).field("title"));
}
if (searchParams.getLoc() != null) {
filters.add(boolFilter().must(termsFilter("loc", searchParams.getLoc())));
// boolQueryBuilder.must(queryStringQuery(searchParams.getLoc()).field("loc"));
}
if (searchParams.getPow() != null) {
filters.add(boolFilter().must(termsFilter("pow", searchParams.getPow())));
// boolQueryBuilder.must(queryStringQuery(searchParams.getPow()).field("pow"));
}
if (searchParams.getWoj() != null) {
filters.add(boolFilter().must(termsFilter("woj", searchParams.getWoj())));
// boolQueryBuilder.must(queryStringQuery(searchParams.getWoj()).field("woj"));
}
if (searchParams.getCatdec() != null) {
boolQueryBuilder.must(queryStringQuery(String.valueOf(searchParams.getCatdec())).field("catdec"));
}
if (Optional.ofNullable(searchParams.getStartDateFrom()).isPresent()
|| Optional.ofNullable(searchParams.getStartDateTo()).isPresent()) {
filters.add(rangeFilter("start_date").from(searchParams.getStartDateFrom()).to(searchParams.getStartDateTo()));
}
if (Optional.ofNullable(searchParams.getEndDateFrom()).isPresent()
|| Optional.ofNullable(searchParams.getEndDateTo()).isPresent()) {
filters.add(rangeFilter("end_date").from(searchParams.getEndDateFrom()).to(searchParams.getEndDateTo()));
}
if (Optional.ofNullable(searchParams.getPriceFrom()).isPresent()
|| Optional.ofNullable(searchParams.getPriceTo()).isPresent()) {
filters.add(rangeFilter("price").from(searchParams.getPriceFrom()).to(searchParams.getPriceTo()));
}
searchQuery.withQuery(boolQueryBuilder);
FilterBuilder[] filterArr = new FilterBuilder[filters.size()];
filterArr = filters.toArray(filterArr);
searchQuery.withFilter(andFilter(filterArr));
if (searchParams.getOrderBy() != null && searchParams.getOrderDir() != null) {
if (searchParams.getOrderDir().toLowerCase().equals("asc")) {
searchQuery.withSort(SortBuilders.fieldSort(searchParams.getOrderBy()).order(SortOrder.ASC));
} else {
searchQuery.withSort(SortBuilders.fieldSort(searchParams.getOrderBy()).order(SortOrder.DESC));
}
}
String scrollId = searchTemplate.scan(searchQuery.build(), 100000, false);
System.out.println(scrollId);
Page<AuctionIndex> page = searchTemplate.scroll(scrollId, 500000, AuctionIndex.class);
System.out.println(page.getTotalElements());
if (page.hasContent()) {
while (true) {
for (AuctionIndex hit : page.getContent()) {
Auction row = Auction.builder()
.cat(hit.getCat())
.item(Long.parseLong(hit.getId()))
.endts(hit.getEnd_date())
.startts(hit.getStart_date())
.loc(hit.getLoc())
.pow(hit.getPow())
.woj(hit.getWoj())
.price(hit.getPrice())
.qty(hit.getQty())
.title(hit.getTitle())
.user(hit.getUid())
.catName(hit.getCat_name())
.build();
rows.add(row);
}
page = searchTemplate.scroll(scrollId, 500000, AuctionIndex.class);
if (page.hasContent() == false) {
break;
}
}
}
response.put("content", rows);
return response;
}
AuctionService.java:
private AuctionRepository auctionRepository;
private AuctionIndexRepository auctionIndexRepository;
@Autowired
public AuctionService(AuctionRepository auctionRepository, AuctionIndexRepository auctionIndexRepository) {
this.auctionRepository = auctionRepository;
this.auctionIndexRepository = auctionIndexRepository;
}
@Autowired
private ElasticsearchTemplate searchTemplate;
AuctionIndexRepository.java:
public interface AuctionIndexRepository extends ElasticsearchRepository<AuctionIndex, Integer> {
}
AuctionRepository.java:
@Repository
public class AuctionRepository {
private final AerospikeClient aerospikeClient;
@Autowired
public AuctionRepository(AerospikeClient aerospikeClient) {
this.aerospikeClient = aerospikeClient;
}
/**
*
* @param auctionId
* @param transactionIndexId
* @return
*/
public Map findTransactionAuctionById(Long auctionId, String transactionIndexId) {
final Statement stmt = new Statement();
stmt.setNamespace(NAMESPACE_ALLEK);
stmt.setSetName(SET_U);
final Map<String, Object> aMap = findAuctionUserInSetA(auctionId);
final Key uKey = new Key(NAMESPACE_ALLEK, SET_U, aMap.get("u") + "_" + aMap.get("p"));
final Object uRecord = aerospikeClient.execute(null, uKey, NAMESPACE_ALLEK, FUN_FIND_U_ITEM, Value.get(auctionId));
return parseTransactionAuction((HashMap) uRecord, auctionId, transactionIndexId);
}
/**
*
* @param r
* @return
*/
private Map parseTransactionAuction(HashMap r, Long auctionId, String transactionIndexId) {
return parseTransactionAuction(new Record(r, 0, 0), auctionId, transactionIndexId);
}
/**
*
* @param r record from Aerospike
* @return
*/
private Map parseTransactionAuction(Record r, Long auctionId, String transactionIndexId) {
Map response = new HashMap();
final Object recordTrans = r.getValue("t");
final ArrayList<HashMap> trans = Optional.ofNullable(recordTrans).isPresent() ? (ArrayList<HashMap>) recordTrans : new ArrayList<>();
Object qty = 0;
Object price = 0;
for (HashMap hit : trans) {
if (transactionIndexId.equals(auctionId + "_" + hit.get("buyer") + "_" + hit.get("ts"))) {
qty = hit.get("qty");
price = hit.get("price");
break;
}
}
response.put("qty", qty);
response.put("price", price);
response.put("startts", r.getLong("startts"));
response.put("endts", r.getLong("endts"));
response.put("qty_auction", r.getLong("qty"));
return response;
}
public AuctionRaw findAuctionRawById(Long auctionId) {
final Statement stmt = new Statement();
stmt.setNamespace(NAMESPACE_ALLEK);
stmt.setSetName(SET_U);
final Map<String, Object> aMap = findAuctionUserInSetA(auctionId);
final Key uKey = new Key(NAMESPACE_ALLEK, SET_U, aMap.get("u") + "_" + aMap.get("p"));
final Object uRecord = aerospikeClient.execute(null, uKey, NAMESPACE_ALLEK, FUN_FIND_U_ITEM, Value.get(auctionId));
return parseAuctionRaw((HashMap) uRecord);
}
private AuctionRaw parseAuctionRaw(HashMap r) {
return parseAuctionRaw(new Record(r, 0, 0));
}
private AuctionRaw parseAuctionRaw(Record r) {
return AuctionRaw.builder()
.cat(r.getLong("cat"))
.len(r.getInt("len"))
.start(r.getLong("start"))
.build();
}
public Auction findAuctionById(Long auctionId) {
final Statement stmt = new Statement();
stmt.setNamespace(NAMESPACE_ALLEK);
stmt.setSetName(SET_U);
final Map<String, Object> aMap = findAuctionUserInSetA(auctionId);
final Key uKey = new Key(NAMESPACE_ALLEK, SET_U, aMap.get("u") + "_" + aMap.get("p"));
final Object uRecord = aerospikeClient.execute(null, uKey, NAMESPACE_ALLEK, FUN_FIND_U_ITEM, Value.get(auctionId));
return parseAuction((HashMap) uRecord);
}
public Map<String, Object> findAuctionUserInSetA(Long auctionId) {
final Statement stmt = new Statement();
stmt.setNamespace(NAMESPACE_ALLEK);
stmt.setSetName(SET_U);
final Key aKey = new Key(NAMESPACE_ALLEK, SET_A, Value.get(auctionId / 1024));
final Map<String, Object> aMap = (Map<String, Object>) aerospikeClient.execute(null, aKey, NAMESPACE_ALLEK, FUN_FIND_A_ITEM, Value.get(auctionId));
return aMap;
}
public List<Auction> findAuctionByUserId(Long userId) {
final Statement stmt = new Statement();
stmt.setNamespace(NAMESPACE_ALLEK);
stmt.setSetName(SET_U);
stmt.setFilters(Filter.equal("u", userId));
final RecordSet records = aerospikeClient.query(null, stmt);
return StreamSupport.stream(records.spliterator(), true)
.flatMap(l -> {
final ArrayList<HashMap> auctionsFromRecord = (ArrayList<HashMap>) l.record.getValue("v");
return Optional.ofNullable(auctionsFromRecord).isPresent() ? auctionsFromRecord.stream() : Stream.<HashMap>empty();
})
.map(r -> parseAuction(r))
.collect(Collectors.toList());
}
private Auction parseAuction(HashMap r) {
return parseAuction(new Record(r, 0, 0));
}
private Auction parseAuction(Record r) {
// final Object recordTrans = r.getValue("t");
// final ArrayList<HashMap> trans = Optional.ofNullable(recordTrans).isPresent() ? (ArrayList<HashMap>) recordTrans : new ArrayList<>();
// final List<Transaction> transactions = trans.stream()
// .map(m -> {
// HashMap recordComment = (HashMap) m.get("c");
// Comment comment = null;
// if (recordComment != null && recordComment.size() > 0) {
// comment = Comment.builder()
// .id((Long) recordComment.get("id"))
// .ts((Long) recordComment.get("ts"))
// .text((String) recordComment.get("text"))
// .type((Long) recordComment.get("type"))
// .build();
// }
// return Transaction.builder()
// .ts((Long) m.get("ts"))
// .qty((Long) m.get("qty"))
// .price((Long) m.get("price"))
// .c(comment)
// .buyer((Long) m.get("buyer"))
// .build();
// })
// .collect(Collectors.toList());
return Auction.builder()
.item(r.getLong("item"))
.startts(r.getLong("startts"))
.endts(r.getLong("endts"))
.user(r.getLong("user"))
.qty(r.getLong("qty"))
.price(r.getLong("price"))
.title(r.getString("title"))
.cat(r.getLong("cat"))
// .tcat(r.getLong("tcat"))
// .curr(r.getString("curr"))
.loc(r.getString("loc"))
.woj(r.getString("woj"))
.pow(r.getString("pow"))
.catName(r.getString("cat_name"))
// .t(transactions)
// .len(r.getInt("len"))
// .detSt(r.getLong("det_st"))
// .detLen(r.getLong("det_len"))
// .start(r.getLong("start"))
.build();
}
}
I do not know why, but the scroll retrieves the data from the old index = allek and type = auctions.
How do I know it is the old index? Because the result from the old index is about 16k records (there is simply more data there, and other fields than in the new index), while the new index contains only about 400 records.
My question is: why is this happening, and what should I change so that the scroll uses index = auctions and type = auctions?
I am asking for your help; I have no idea why this is happening.
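One hedged guess, based only on the code shown: searchTemplate.scan(searchQuery.build(), ...) is called without the entity class, so the template has no way to read the index and type off the @Document annotation on AuctionIndex; if the query itself carries no indices, the scroll may resolve against a different (older) index. If that is the cause, pinning them on the builder should fix it:
final NativeSearchQueryBuilder searchQuery = new NativeSearchQueryBuilder()
.withQuery(matchAllQuery())
.withIndices("auctions")
.withTypes("auctions");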

Java Hibernate Transformer AliasToBeanNestedResultTransformer

I have a query like this: I pass the student ID, and I need some fields from Student, some from its parent, and some fields from the parent's Address (this is the main problem I am facing). I am using the AliasToBeanNestedResultTransformer by Sami Andoni.
Here is its implementation: CODE
Here is my code:
public List<Student>searchForStudent(Integer studentId)
{
Projection p=Projections.projectionList().create()
.add(Projections.property("name"), "name") // the student name: works OK
.add(Projections.property("lastname"), "lastname") // the student last name: works OK
.add(Projections.property("age"), "age") // the student age: works OK
.add(Projections.property("p.phone"), "parent.phone") // the parent phone: works OK
.add(Projections.property("address.state").as("parent.Address.state")); // I need a field from address.state; here is the problem...
Session session = ......
Criteria like = session.createCriteria(Student.class).add(prepareForSelect())//some filters..
.createAlias("parent","p")//the parent of the student. a student have one parent
.createAlias("parent.Address","address")//the address of the parent.... a parent have one address.
.setProjection(p)
.setResultTransformer(new AliasToBeanNestedResultTransformer(Student.class));
List<Student>results=like.list();
return results;
}
It throws:
Exception in thread "main" org.hibernate.PropertyAccessException: IllegalArgumentException occurred while calling setter of com.generic.model.Parent.Address
FYI, it is some type mismatch. I have done some tracing in Sami's code and I see this:
[MyState]
[Address]
It seems that Hibernate is returning a String state ("MyState" in this case) while the transformer expects an Address object; that is the type mismatch.
Any help is hugely appreciated.
Thanks a lot.
I have improved Sami Andoni's class; maybe it solves your issue:
package com.alutiiq.develop.promanagesys.core.util;
import java.lang.reflect.Field;
import java.lang.reflect.ParameterizedType;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.hibernate.HibernateException;
import org.hibernate.property.PropertyAccessor;
import org.hibernate.property.PropertyAccessorFactory;
import org.hibernate.property.Setter;
import org.hibernate.transform.AliasToBeanResultTransformer;
import org.hibernate.transform.AliasedTupleSubsetResultTransformer;
import org.hibernate.transform.ResultTransformer;
/**
* Helps to transform aliases with nested aliases.
*
* @author Miguel Resendiz
*
*/
public class AliasToBeanNestedResultTransformer extends
AliasedTupleSubsetResultTransformer {
private static final long serialVersionUID = -8047276133980128266L;
private static final int TUPLE_INDEX = 0;
private static final int ALIASES_INDEX = 1;
private static final int FIELDNAME_INDEX = 2;
private static final PropertyAccessor accessor = PropertyAccessorFactory
.getPropertyAccessor("property");
private final Class<?> resultClass;
private Object[] entityTuples;
private String[] entityAliases;
private Map<String, Class<?>> fieldToClass = new HashMap<String, Class<?>>();
private Map<String, List<?>> subEntities = new HashMap<String, List<?>>();
private List<String> nestedAliases = new ArrayList<String>();
private Map<String, Class<?>> listFields = new HashMap<String, Class<?>>();
public boolean isTransformedValueATupleElement(String[] aliases,
int tupleLength) {
return false;
}
public AliasToBeanNestedResultTransformer(Class<?> resultClass) {
this.resultClass = resultClass;
}
public Object transformTuple(Object[] tuple, String[] aliases) {
handleSubEntities(tuple, aliases);
cleanParams(tuple, aliases);
ResultTransformer rootTransformer = new AliasToBeanResultTransformer(
resultClass);
Object root = rootTransformer.transformTuple(entityTuples,
entityAliases);
loadSubEntities(root);
cleanMaps();
return root;
}
private void handleSubEntities(Object[] tuple, String[] aliases)
throws HibernateException {
String fieldName = "";
String aliasName = "";
try {
for (int i = 0; i < aliases.length; i++) {
String alias = aliases[i];
if (alias.contains(".")) {
String[] sp = alias.split("\\.");
StringBuilder aliasBuilder = new StringBuilder();
for (int j = 0; j < sp.length; j++) {
if (j == 0) {
fieldName = sp[j];
} else {
aliasBuilder.append(sp[j]);
aliasBuilder.append(".");
}
}
aliasName = aliasBuilder.substring(0,
aliasBuilder.length() - 1);
nestedAliases.add(alias);
manageEntities(fieldName, aliasName, tuple[i]);
}
}
} catch (NoSuchFieldException e) {
throw new HibernateException("Could not instantiate resultclass: "
+ resultClass.getName() + " for field name: " + fieldName
+ " and alias name:" + aliasName);
}
}
private Class<?> findClass(String fieldName) throws NoSuchFieldException,
SecurityException {
if (fieldToClass.containsKey(fieldName)) {
return fieldToClass.get(fieldName);
} else {
Class<?> subclass = resultClass.getDeclaredField(fieldName)
.getType();
if (subclass.equals(List.class) || subclass.equals(Set.class)) {
if (subclass.equals(List.class)) {
listFields.put(fieldName, LinkedList.class);
} else {
listFields.put(fieldName, HashSet.class);
}
Field field = resultClass.getDeclaredField(fieldName);
ParameterizedType genericType = (ParameterizedType) field
.getGenericType();
subclass = (Class<?>) genericType.getActualTypeArguments()[0];
}
fieldToClass.put(fieldName, subclass);
return subclass;
}
}
@SuppressWarnings("unchecked")
private void manageEntities(String fieldName, String aliasName,
Object tupleValue) throws NoSuchFieldException, SecurityException {
Class<?> subclass = findClass(fieldName);
if (!subEntities.containsKey(fieldName)) {
List<Object> list = new ArrayList<Object>();
list.add(new ArrayList<Object>());
list.add(new ArrayList<String>());
list.add(FIELDNAME_INDEX, subclass);
subEntities.put(fieldName, list);
}
((List<Object>) subEntities.get(fieldName).get(TUPLE_INDEX))
.add(tupleValue);
((List<String>) subEntities.get(fieldName).get(ALIASES_INDEX))
.add(aliasName);
}
private void cleanParams(Object[] tuple, String[] aliases) {
entityTuples = new Object[aliases.length - nestedAliases.size()];
entityAliases = new String[aliases.length - nestedAliases.size()];
for (int j = 0, i = 0; j < aliases.length; j++) {
if (!nestedAliases.contains(aliases[j])) {
entityTuples[i] = tuple[j];
entityAliases[i] = aliases[j];
++i;
}
}
}
@SuppressWarnings({ "unchecked", "rawtypes" })
private void loadSubEntities(Object root) throws HibernateException {
try {
for (String fieldName : subEntities.keySet()) {
Class<?> subclass = (Class<?>) subEntities.get(fieldName).get(
FIELDNAME_INDEX);
ResultTransformer subclassTransformer = new AliasToBeanNestedResultTransformer(
subclass);
Object subObject = subclassTransformer.transformTuple(
((List<Object>) subEntities.get(fieldName).get(0))
.toArray(),
((List<Object>) subEntities.get(fieldName).get(1))
.toArray(new String[0]));
Setter setter = accessor.getSetter(resultClass, fieldName);
if (listFields.containsKey(fieldName)) {
Class<?> collectionClass = listFields.get(fieldName);
Collection subObjectList = (Collection) collectionClass
.newInstance();
subObjectList.add(subObject);
setter.set(root, subObjectList, null);
} else {
setter.set(root, subObject, null);
}
}
} catch (Exception e) {
throw new HibernateException(e);
}
}
private void cleanMaps() {
fieldToClass = new HashMap<String, Class<?>>();
subEntities = new HashMap<String, List<?>>();
nestedAliases = new ArrayList<String>();
listFields = new HashMap<String, Class<?>>();
}
}
I hope it helps you.
--------------edit 07/25/15---------------
To group nested lists:
public List<? extends Entity<?>> cleanList(
List<? extends Entity<?>> resultList) throws DataException {
List<Entity<?>> entities = new ArrayList<Entity<?>>();
Entity<?> current = null;
try {
for (Entity<?> entity : resultList) {
if (entity.getId() == null) {
continue;
}
if (current == null) {
current = entity;
continue;
}
if (current.getId().equals(entity.getId())) {
append(current, entity);
} else {
entities.add(current);
current = entity;
}
}
if (current != null) {
entities.add(current);
}
cleanSubList(entities);
return entities;
} catch (Exception e) {
throw new DataException(e);
}
}
@SuppressWarnings({ "rawtypes", "unchecked" })
public Set<? extends Entity<?>> cleanList(
Set<? extends Entity<?>> resultList) throws DataException {
List listToClean = new LinkedList();
listToClean.addAll(resultList);
listToClean = cleanList(listToClean);
resultList.clear();
resultList.addAll(listToClean);
return resultList;
}
@SuppressWarnings({ "unchecked", "rawtypes" })
private void append(Entity<?> current, Entity<?> next)
throws IllegalArgumentException, IllegalAccessException {
Field[] fields = current.getClass().getDeclaredFields();
for (Field field : fields) {
if (field.getType().equals(List.class)) {
field.setAccessible(true);
List valueNext = (List) field.get(next);
List valueCurrent = (List) field.get(current);
if (valueNext != null) {
if (valueCurrent != null) {
valueCurrent.addAll(valueNext);
field.set(current, valueCurrent);
} else {
field.set(current, valueNext);
}
}
} else if (field.getType().equals(Set.class)) {
field.setAccessible(true);
Set valueNext = (Set) field.get(next);
Set valueCurrent = (Set) field.get(current);
if (valueNext != null) {
if (valueCurrent != null) {
valueCurrent.addAll(valueNext);
field.set(current, valueCurrent);
} else {
field.set(current, valueNext);
}
}
}
}
}
@SuppressWarnings({ "rawtypes", "unchecked" })
private void cleanSubList(List<? extends Entity<?>> listToClean)
throws IllegalArgumentException, IllegalAccessException,
DataException {
for (Entity<?> entity : listToClean) {
Field[] fields = entity.getClass().getDeclaredFields();
for (Field field : fields) {
if (field.getType().equals(List.class)) {
field.setAccessible(true);
List valueToClean = (List) field.get(entity);
// Throw a thread
if (valueToClean != null) {
valueToClean = cleanList(valueToClean);
field.set(entity, valueToClean);
}
} else if (field.getType().equals(Set.class)) {
field.setAccessible(true);
Set valueToClean = (Set) field.get(entity);
// Throw a thread
if (valueToClean != null) {
valueToClean = cleanList(valueToClean);
field.set(entity, valueToClean);
}
}
}
}
}
To speed up the process, I suggest spawning a thread in the main process.
This is my Entity interface:
package com.alutiiq.develop.promanagesys.common.entity;
import java.io.Serializable;
/**
* Entity for Hibernate communications.
*
* @author Miguel Resendiz
*
* @param <I>
* Primary key type
*/
public interface Entity<I extends Serializable> extends Serializable {
/**
* Enables the possibility to write generic queries using the primary key.
*
* @return primary key value for the entity
*/
I getId();
void setId(I id);
void setId(String id);
}
Usage example:
criteria.setResultTransformer(new AliasToBeanNestedResultTransformer(
entityClass));
List<Project> projects = criteria.list();
projects = (List<Project>) cleanList(projects);

How to intercept and change sql query dynamically in mybatis

I use MyBatis to perform SQL queries in my project. I need to intercept the SQL query before execution to apply some changes dynamically. I've read about @Intercepts, like this:
@Intercepts({@Signature(type = Executor.class, method = "query", args = {...})})
public class ExamplePlugin implements Interceptor {
public Object intercept(Invocation invocation) throws Throwable {
return invocation.proceed();
}
public Object plugin(Object target) {
return Plugin.wrap(target, this);
}
public void setProperties(Properties properties) {
}
}
And it really does intercept executions, but there is no way to change the SQL query, since the corresponding field is not writable. Should I manually build a new instance of the whole object just to replace the SQL query? Where is the right place to intercept query execution and change it dynamically? Thanks.
I hope this will help you:
@Intercepts({ @Signature(type = Executor.class, method = "query", args = {
MappedStatement.class, Object.class, RowBounds.class,
ResultHandler.class
})
})
public class SelectCountSqlInterceptor2 implements Interceptor
{
public static String COUNT = "_count";
private static int MAPPED_STATEMENT_INDEX = 0;
private static int PARAMETER_INDEX = 1;
@Override
public Object intercept(Invocation invocation) throws Throwable
{
processCountSql(invocation.getArgs());
return invocation.proceed();
}
@SuppressWarnings("rawtypes")
private void processCountSql(final Object[] queryArgs)
{
if (queryArgs[PARAMETER_INDEX] instanceof Map)
{
Map parameter = (Map) queryArgs[PARAMETER_INDEX];
if (parameter.containsKey(COUNT))
{
MappedStatement ms = (MappedStatement) queryArgs[MAPPED_STATEMENT_INDEX];
BoundSql boundSql = ms.getBoundSql(parameter);
String sql = ms.getBoundSql(parameter).getSql().trim();
BoundSql newBoundSql = new BoundSql(ms.getConfiguration(),
getCountSQL(sql), boundSql.getParameterMappings(),
boundSql.getParameterObject());
MappedStatement newMs = copyFromMappedStatement(ms,
new OffsetLimitInterceptor.BoundSqlSqlSource(newBoundSql));
queryArgs[MAPPED_STATEMENT_INDEX] = newMs;
}
}
}
// see: MapperBuilderAssistant
@SuppressWarnings({ "unchecked", "rawtypes" })
private MappedStatement copyFromMappedStatement(MappedStatement ms,
SqlSource newSqlSource)
{
Builder builder = new MappedStatement.Builder(ms.getConfiguration(), ms
.getId(), newSqlSource, ms.getSqlCommandType());
builder.resource(ms.getResource());
builder.fetchSize(ms.getFetchSize());
builder.statementType(ms.getStatementType());
builder.keyGenerator(ms.getKeyGenerator());
// setStatementTimeout()
builder.timeout(ms.getTimeout());
// setParameterMap()
builder.parameterMap(ms.getParameterMap());
// setStatementResultMap()
List<ResultMap> resultMaps = new ArrayList<ResultMap>();
String id = "-inline";
if (ms.getResultMaps() != null)
{
id = ms.getResultMaps().get(0).getId() + "-inline";
}
ResultMap resultMap = new ResultMap.Builder(null, id, Long.class,
new ArrayList()).build();
resultMaps.add(resultMap);
builder.resultMaps(resultMaps);
builder.resultSetType(ms.getResultSetType());
// setStatementCache()
builder.cache(ms.getCache());
builder.flushCacheRequired(ms.isFlushCacheRequired());
builder.useCache(ms.isUseCache());
return builder.build();
}
private String getCountSQL(String sql)
{
String lowerCaseSQL = sql.toLowerCase().replace("\n", " ").replace("\t", " ");
int index = lowerCaseSQL.indexOf(" order ");
if (index != -1)
{
sql = sql.substring(0, index);
}
return "SELECT COUNT(*) from ( select 1 as col_c " + sql.substring(lowerCaseSQL.indexOf(" from ")) + " ) cnt";
}
@Override
public Object plugin(Object target)
{
return Plugin.wrap(target, this);
}
@Override
public void setProperties(Properties properties)
{
}
}
You may consider using a string template library (e.g. Velocity, Handlebars, Mustache) to help you.
To date, there is even MyBatis-Velocity (http://mybatis.github.io/velocity-scripting/) to help you do scripting for the SQL.
Depending on the changes you want to make, you may want to use the dynamic SQL feature of MyBatis 3, sketched below.
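For instance, this small fragment from the MyBatis 3 documentation builds the WHERE clause dynamically (Blog and its columns are the documentation's example schema, not yours):
<select id="findActiveBlogLike" resultType="Blog">
SELECT * FROM BLOG
<where>
<if test="state != null">state = #{state}</if>
<if test="title != null">AND title LIKE #{title}</if>
</where>
</select>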
