Use of org.apache.carbondata.core.datastore.BlockIndexStore in project carbondata by apache.
The class AbstractQueryExecutor, method initQuery.
/**
 * Fills the executor properties based on the query model: parses the
 * query model, extracts the block details, and populates the query
 * properties accordingly.
 *
 * @param queryModel query model to initialize from
 */
protected void initQuery(QueryModel queryModel) throws IOException {
  StandardLogService.setThreadName(StandardLogService.getPartitionID(
      queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableName()),
      queryModel.getQueryId());
  LOGGER.info("Query will be executed on table: "
      + queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableName());
  // add executor service for query execution
  queryProperties.executorService = Executors.newCachedThreadPool();
  // initialize the statistics recorder; a copy-on-write list is used
  // internally to handle concurrent access
  queryProperties.queryStatisticsRecorder =
      CarbonTimeStatisticsFactory.createExecutorRecorder(queryModel.getQueryId());
  queryModel.setStatisticsRecorder(queryProperties.queryStatisticsRecorder);
  QueryStatistic queryStatistic = new QueryStatistic();
  // sort the block infos so that blocks are loaded in sorted order,
  // which is required for query execution
  Collections.sort(queryModel.getTableBlockInfos());
  if (queryModel.getTableBlockInfos().get(0).getDetailInfo() != null) {
    List<AbstractIndex> indexList = new ArrayList<>();
    Map<String, List<TableBlockInfo>> listMap = new LinkedHashMap<>();
    for (TableBlockInfo blockInfo : queryModel.getTableBlockInfos()) {
      List<TableBlockInfo> tableBlockInfos = listMap.get(blockInfo.getFilePath());
      if (tableBlockInfos == null) {
        tableBlockInfos = new ArrayList<>();
        listMap.put(blockInfo.getFilePath(), tableBlockInfos);
      }
      BlockletDetailInfo blockletDetailInfo = blockInfo.getDetailInfo();
      // if the blocklet information is missing, read it from the block file
      if (blockletDetailInfo.getBlockletInfo() == null) {
        readAndFillBlockletInfo(blockInfo, tableBlockInfos, blockletDetailInfo);
      } else {
        tableBlockInfos.add(blockInfo);
      }
    }
    for (List<TableBlockInfo> tableBlockInfos : listMap.values()) {
      indexList.add(new IndexWrapper(tableBlockInfos));
    }
    queryProperties.dataBlocks = indexList;
  } else {
    // get the table blocks from the executor B-tree cache
    CacheProvider cacheProvider = CacheProvider.getInstance();
    BlockIndexStore<TableBlockUniqueIdentifier, AbstractIndex> cache =
        (BlockIndexStore) cacheProvider.createCache(CacheType.EXECUTOR_BTREE);
    // remove the invalid table blocks, i.e. blocks that have been deleted
    // or compacted
    cache.removeTableBlocks(queryModel.getInvalidSegmentIds(),
        queryModel.getAbsoluteTableIdentifier());
    List<TableBlockUniqueIdentifier> tableBlockUniqueIdentifiers =
        prepareTableBlockUniqueIdentifier(queryModel.getTableBlockInfos(),
            queryModel.getAbsoluteTableIdentifier());
    cache.removeTableBlocksIfHorizontalCompactionDone(queryModel);
    queryProperties.dataBlocks = cache.getAll(tableBlockUniqueIdentifiers);
  }
  queryStatistic.addStatistics(QueryStatisticsConstants.LOAD_BLOCKS_EXECUTOR,
      System.currentTimeMillis());
  queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
  // collect the data type of every projected measure; this is later used
  // to select the aggregator
  int measureCount = queryModel.getProjectionMeasures().size();
  int currentIndex = 0;
  DataType[] dataTypes = new DataType[measureCount];
  for (ProjectionMeasure carbonMeasure : queryModel.getProjectionMeasures()) {
    dataTypes[currentIndex] = carbonMeasure.getMeasure().getDataType();
    currentIndex++;
  }
  queryProperties.measureDataTypes = dataTypes;
  // aggregation is executed in the following order:
  // 1. aggregate dimension expressions
  // 2. expressions
  // 3. query measures
  // so calculate the start index of the expressions and of the measure columns
  queryProperties.filterMeasures = new HashSet<>();
  queryProperties.complexFilterDimension = new HashSet<>();
  QueryUtil.getAllFilterDimensions(queryModel.getFilterExpressionResolverTree(),
      queryProperties.complexFilterDimension, queryProperties.filterMeasures);
  CarbonTable carbonTable = queryModel.getTable();
  TableProvider tableProvider = new SingleTableProvider(carbonTable);
  queryStatistic = new QueryStatistic();
  // mapping from dictionary column unique id to its dictionary, used to
  // resolve the actual column data
  queryProperties.columnToDictionaryMapping =
      QueryUtil.getDimensionDictionaryDetail(queryModel.getProjectionDimensions(),
          queryProperties.complexFilterDimension, queryModel.getAbsoluteTableIdentifier(),
          tableProvider);
  queryStatistic.addStatistics(QueryStatisticsConstants.LOAD_DICTIONARY,
      System.currentTimeMillis());
  queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
  queryModel.setColumnToDictionaryMapping(queryProperties.columnToDictionaryMapping);
}
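The else branch above is where BlockIndexStore actually comes into play: the executor-side B-tree cache is obtained from the CacheProvider, stale entries are evicted, and the blocks are resolved through getAll. Below is a minimal standalone sketch of that cache interaction; the class and method names are hypothetical, introduced only for illustration, and the import locations are assumptions that should be checked against the CarbonData version at hand.

import java.io.IOException;
import java.util.List;

import org.apache.carbondata.core.cache.CacheProvider;
import org.apache.carbondata.core.cache.CacheType;
import org.apache.carbondata.core.datastore.BlockIndexStore;
import org.apache.carbondata.core.datastore.block.AbstractIndex;
import org.apache.carbondata.core.datastore.block.TableBlockUniqueIdentifier;
import org.apache.carbondata.core.scan.model.QueryModel;

// Hypothetical helper, not part of CarbonData: mirrors the else branch of
// initQuery above to show the BlockIndexStore lookup pattern in isolation.
public final class ExecutorBlockCacheSketch {

  static List<AbstractIndex> lookUpBlocks(QueryModel queryModel,
      List<TableBlockUniqueIdentifier> identifiers) throws IOException {
    CacheProvider cacheProvider = CacheProvider.getInstance();
    BlockIndexStore<TableBlockUniqueIdentifier, AbstractIndex> cache =
        (BlockIndexStore) cacheProvider.createCache(CacheType.EXECUTOR_BTREE);
    // evict blocks of segments invalidated by delete or compaction, so that
    // stale entries are never returned for this query
    cache.removeTableBlocks(queryModel.getInvalidSegmentIds(),
        queryModel.getAbsoluteTableIdentifier());
    // getAll returns cached blocks directly and loads the missing ones
    return cache.getAll(identifiers);
  }
}

Because the cache is shared across queries on the executor, evicting invalid segments before the lookup is what keeps deleted or compacted data from leaking into new query results.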
Use of org.apache.carbondata.core.datastore.BlockIndexStore in project carbondata by apache.
The class CacheProviderTest, method driverExecutorCacheConfTest.
/**
 * Tests the driver and executor LRU memory configuration.
 *
 * @throws IOException
 * @throws NoSuchFieldException
 * @throws IllegalAccessException
 */
@Test
public void driverExecutorCacheConfTest()
    throws IOException, NoSuchFieldException, IllegalAccessException {
  // get cache provider instance
  CacheProvider cacheProvider = CacheProvider.getInstance();
  cacheProvider.dropAllCache();
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.IS_DRIVER_INSTANCE, "true");
  Cache<TableSegmentUniqueIdentifier, SegmentTaskIndexStore> driverCache =
      cacheProvider.createCache(CacheType.DRIVER_BTREE);
  // read the private lruCache field of the driver cache via reflection
  Field carbonLRUCacheField = SegmentTaskIndexStore.class.getDeclaredField("lruCache");
  carbonLRUCacheField.setAccessible(true);
  CarbonLRUCache carbonLRUCache = (CarbonLRUCache) carbonLRUCacheField.get(driverCache);
  Field lruCacheMemorySizeField = CarbonLRUCache.class.getDeclaredField("lruCacheMemorySize");
  lruCacheMemorySizeField.setAccessible(true);
  long lruCacheMemorySize = (long) lruCacheMemorySizeField.get(carbonLRUCache);
  String driverCacheSize = CarbonProperties.getInstance()
      .getProperty(CarbonCommonConstants.CARBON_MAX_DRIVER_LRU_CACHE_SIZE);
  // the configured size is in MB; the field holds bytes
  assertEquals(1024 * 1024 * Integer.parseInt(driverCacheSize), lruCacheMemorySize);
  // drop cache
  cacheProvider.dropAllCache();
  // validation test for the executor memory
  CarbonProperties.getInstance().addProperty(CarbonCommonConstants.IS_DRIVER_INSTANCE, "false");
  Cache<TableBlockUniqueIdentifier, BlockIndexStore> executorCache =
      cacheProvider.createCache(CacheType.EXECUTOR_BTREE);
  // lruCache is declared in the common superclass of BlockIndexStore
  carbonLRUCacheField = BlockIndexStore.class.getSuperclass().getDeclaredField("lruCache");
  carbonLRUCacheField.setAccessible(true);
  carbonLRUCache = (CarbonLRUCache) carbonLRUCacheField.get(executorCache);
  lruCacheMemorySizeField = CarbonLRUCache.class.getDeclaredField("lruCacheMemorySize");
  lruCacheMemorySizeField.setAccessible(true);
  lruCacheMemorySize = (long) lruCacheMemorySizeField.get(carbonLRUCache);
  String executorCacheSize = CarbonProperties.getInstance()
      .getProperty(CarbonCommonConstants.CARBON_MAX_EXECUTOR_LRU_CACHE_SIZE);
  assertEquals(1024 * 1024 * Integer.parseInt(executorCacheSize), lruCacheMemorySize);
  cacheProvider.dropAllCache();
}
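The two assertions confirm that the configured sizes are interpreted in MB and converted to bytes, and that the IS_DRIVER_INSTANCE flag selects between the driver and executor limits. Below is a minimal sketch of setting those properties up front; the class is hypothetical and the literal size values are arbitrary examples, while the property constants and CacheProvider calls are the ones used in the test above.

import org.apache.carbondata.core.cache.CacheProvider;
import org.apache.carbondata.core.constants.CarbonCommonConstants;
import org.apache.carbondata.core.util.CarbonProperties;

// Hypothetical setup snippet, not part of CarbonData itself.
public final class LruCacheConfigSketch {
  public static void main(String[] args) {
    CarbonProperties props = CarbonProperties.getInstance();
    // sizes are interpreted in MB, as the assertions above show
    props.addProperty(CarbonCommonConstants.CARBON_MAX_DRIVER_LRU_CACHE_SIZE, "32");
    props.addProperty(CarbonCommonConstants.CARBON_MAX_EXECUTOR_LRU_CACHE_SIZE, "16");
    // mark this JVM as a driver so DRIVER_BTREE caches use the driver limit
    props.addProperty(CarbonCommonConstants.IS_DRIVER_INSTANCE, "true");
    // drop any caches created under earlier settings; the test above does the
    // same between its driver and executor checks, because the LRU size is
    // captured when the cache instance is created
    CacheProvider.getInstance().dropAllCache();
  }
}

Dropping all caches between the two halves of the test matters because CacheProvider reuses the cache instance per cache type; without the drop, the executor check would see the cache created under the driver configuration.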