Example usage of org.apache.carbondata.core.indexstore.blockletindex.IndexWrapper from the Apache CarbonData project:
the getDataBlocks method of the AbstractQueryExecutor class.
/**
 * Returns the block index wrapper(s) on which the query will get executed.
 *
 * <p>Groups the query model's pruned {@code TableBlockInfo} entries by carbondata
 * file path, reading a file's footer lazily (at most once per path) only when the
 * cached blocklet detail info is missing or incomplete, and builds one
 * {@code SegmentProperties} — plus the filter expression and projection columns —
 * per distinct file path. One {@code IndexWrapper} is created per file path.
 *
 * @param queryModel query model carrying the pruned table block infos
 * @return list with one {@code AbstractIndex} (an {@code IndexWrapper}) per distinct file path
 * @throws IOException if reading a carbondata file footer fails
 */
private List<AbstractIndex> getDataBlocks(QueryModel queryModel) throws IOException {
// LinkedHashMap so the per-file grouping preserves the original block ordering:
// file path -> all TableBlockInfo entries belonging to that file
Map<String, List<TableBlockInfo>> listMap = new LinkedHashMap<>();
// this is introduced to handle the case when CACHE_LEVEL=BLOCK and there are few other indexes
// like lucene, Bloom created on the table. In that case all the indexes will do blocklet
// level pruning and blockInfo entries will be repeated with different blockletIds;
// caching the footer per file path ensures each footer is read from disk at most once
Map<String, DataFileFooter> filePathToFileFooterMapping = new HashMap<>();
// SegmentProperties (and the derived filter/projection setup) is also built once per file path
Map<String, SegmentProperties> filePathToSegmentPropertiesMap = new HashMap<>();
for (TableBlockInfo blockInfo : queryModel.getTableBlockInfos()) {
// group the current block under its file path, creating the bucket on first sight
List<TableBlockInfo> tableBlockInfos = listMap.get(blockInfo.getFilePath());
if (tableBlockInfos == null) {
tableBlockInfos = new ArrayList<>();
listMap.put(blockInfo.getFilePath(), tableBlockInfos);
}
SegmentProperties segmentProperties = filePathToSegmentPropertiesMap.get(blockInfo.getFilePath());
BlockletDetailInfo blockletDetailInfo = blockInfo.getDetailInfo();
// read the file footer if the cached blocklet detail info is absent or incomplete, or if
// min/max based pruning was used — NOTE(review): this condition is visibly item "3." of a
// longer numbered comment in the upstream source (the other cases presumably cover
// CACHE_LEVEL=BLOCK and legacy-store blocks); confirm against the original file
// 3. CACHE_LEVEL is BLOCKLET but filter column min/max is not cached in driver
if (null == blockletDetailInfo || blockletDetailInfo.getBlockletInfo() == null || blockletDetailInfo.isUseMinMaxForPruning()) {
if (null != blockletDetailInfo) {
// seed the block offset from the cached footer offset so the footer can be located
blockInfo.setBlockOffset(blockletDetailInfo.getBlockFooterOffset());
}
DataFileFooter fileFooter = filePathToFileFooterMapping.get(blockInfo.getFilePath());
// a footer already attached to the block info takes precedence over the per-path cache
if (null != blockInfo.getDataFileFooter()) {
fileFooter = blockInfo.getDataFileFooter();
}
if (null == fileFooter) {
// clear the detail info before the read — presumably so readMetadataFile reads the
// footer from the physical file instead of any cached info; TODO confirm
blockInfo.setDetailInfo(null);
fileFooter = CarbonUtil.readMetadataFile(blockInfo);
// for non-transactional tables the column unique ids in the footer schema must be
// regenerated from the column names before use
if (!queryModel.getTable().isTransactionalTable()) {
QueryUtil.updateColumnUniqueIdForNonTransactionTable(fileFooter.getColumnInTable());
}
filePathToFileFooterMapping.put(blockInfo.getFilePath(), fileFooter);
if (null == blockletDetailInfo) {
// rebuild the detail info from the freshly read footer
blockletDetailInfo = QueryUtil.getBlockletDetailInfo(fileFooter, blockInfo);
}
blockInfo.setCarbonDataFileWrittenVersion(fileFooter.getCarbonDataFileWrittenVersion());
// restore the detail info cleared above (now guaranteed non-null)
blockInfo.setDetailInfo(blockletDetailInfo);
}
if (null == segmentProperties) {
// first block of this file path: derive segment schema from the footer and set up
// the filter expression and projection columns for that schema
segmentProperties = new SegmentProperties(fileFooter.getColumnInTable());
createFilterExpression(queryModel, segmentProperties);
updateColumns(queryModel, fileFooter.getColumnInTable(), blockInfo.getFilePath());
filePathToSegmentPropertiesMap.put(blockInfo.getFilePath(), segmentProperties);
}
// split the footer's blocklet infos into per-blocklet TableBlockInfo entries
readAndFillBlockletInfo(tableBlockInfos, blockInfo, blockletDetailInfo, fileFooter);
} else {
// cached detail info is complete — no footer read needed; build the per-path
// SegmentProperties from the cached column schemas instead of the footer
if (null == segmentProperties) {
segmentProperties = new SegmentProperties(blockInfo.getDetailInfo().getColumnSchemas());
createFilterExpression(queryModel, segmentProperties);
updateColumns(queryModel, blockInfo.getDetailInfo().getColumnSchemas(), blockInfo.getFilePath());
filePathToSegmentPropertiesMap.put(blockInfo.getFilePath(), segmentProperties);
}
tableBlockInfos.add(blockInfo);
}
}
// wrap each file's block list (with its SegmentProperties) into one index entry
List<AbstractIndex> indexList = new ArrayList<>();
for (List<TableBlockInfo> tableBlockInfos : listMap.values()) {
indexList.add(new IndexWrapper(tableBlockInfos, filePathToSegmentPropertiesMap.get(tableBlockInfos.get(0).getFilePath())));
}
return indexList;
}
Related topic on the original page: Aggregations.