Usage of org.apache.carbondata.core.datastore.block.TableBlockUniqueIdentifier in project carbondata (Apache): class BlockIndexStoreTest, method testLoadAndGetTaskIdToSegmentsMapForSingleSegment.
@Test
public void testLoadAndGetTaskIdToSegmentsMapForSingleSegment() throws IOException {
  File file = getPartFile();
  // NOTE(review): "loclhost" looks like a typo for "localhost"; kept as-is since it is
  // only a locality hint for the test block and changing it could affect test intent.
  TableBlockInfo info =
      new TableBlockInfo(file.getAbsolutePath(), 0, "0", new String[] { "loclhost" },
          file.length(), ColumnarFormatVersion.V1);
  CarbonTableIdentifier carbonTableIdentifier =
      new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "t3", "1");
  AbsoluteTableIdentifier absoluteTableIdentifier =
      new AbsoluteTableIdentifier("/src/test/resources", carbonTableIdentifier);
  try {
    List<TableBlockUniqueIdentifier> tableBlockInfoList =
        getTableBlockUniqueIdentifierList(Arrays.asList(new TableBlockInfo[] { info }),
            absoluteTableIdentifier);
    // Loading a single block must yield exactly one index entry.
    List<AbstractIndex> loadAndGetBlocks = cache.getAll(tableBlockInfoList);
    assertTrue(loadAndGetBlocks.size() == 1);
  } catch (Exception e) {
    // Fail with the original cause attached instead of a bare assertTrue(false),
    // which would discard the exception message and stack trace.
    throw new AssertionError("loading single-segment block index failed", e);
  }
  // Clean up the cache entry for the loaded segment so other tests start fresh.
  List<String> segmentIds = new ArrayList<>();
  segmentIds.add(info.getSegmentId());
  cache.removeTableBlocks(segmentIds, absoluteTableIdentifier);
}
Usage of org.apache.carbondata.core.datastore.block.TableBlockUniqueIdentifier in project carbondata (Apache): class CacheProvider, method createDictionaryCacheForGivenType.
/**
 * This method will create the cache for given cache type and register it in
 * {@code cacheTypeToCacheMap}.
 *
 * @param cacheType type of cache (reverse/forward dictionary, executor/driver btree)
 * @param carbonStorePath store path handed to the concrete cache implementation
 */
private void createDictionaryCacheForGivenType(CacheType cacheType, String carbonStorePath) {
  Cache cacheObject = null;
  if (cacheType.equals(CacheType.REVERSE_DICTIONARY)) {
    cacheObject = new ReverseDictionaryCache<DictionaryColumnUniqueIdentifier, Dictionary>(
        carbonStorePath, carbonLRUCache);
  } else if (cacheType.equals(CacheType.FORWARD_DICTIONARY)) {
    cacheObject = new ForwardDictionaryCache<DictionaryColumnUniqueIdentifier, Dictionary>(
        carbonStorePath, carbonLRUCache);
  } else if (cacheType.equals(CacheType.EXECUTOR_BTREE)) {
    // Fixed: static members were previously accessed through the instance
    // variable (cacheType.EXECUTOR_BTREE), which is misleading and bug-prone.
    cacheObject = new BlockIndexStore<TableBlockUniqueIdentifier, AbstractIndex>(
        carbonStorePath, carbonLRUCache);
  } else if (cacheType.equals(CacheType.DRIVER_BTREE)) {
    cacheObject = new SegmentTaskIndexStore(carbonStorePath, carbonLRUCache);
  }
  // NOTE(review): an unrecognized cacheType stores a null mapping here; callers
  // presumably only pass the four known types — confirm before tightening.
  cacheTypeToCacheMap.put(cacheType, cacheObject);
}
Usage of org.apache.carbondata.core.datastore.block.TableBlockUniqueIdentifier in project carbondata (Apache): class BlockIndexStore, method clearAccessCount.
@Override
public void clearAccessCount(List<TableBlockUniqueIdentifier> keys) {
  for (TableBlockUniqueIdentifier tableBlockUniqueIdentifier : keys) {
    // Guard against entries already evicted from the LRU cache; the previous
    // code dereferenced the result unconditionally and would NPE on a miss.
    SegmentTaskIndexWrapper cacheable = (SegmentTaskIndexWrapper) lruCache
        .get(tableBlockUniqueIdentifier.getUniqueTableBlockName());
    if (cacheable != null) {
      // NOTE(review): casting to SegmentTaskIndexWrapper inside BlockIndexStore
      // looks copy-pasted from SegmentTaskIndexStore — confirm the cached value
      // type for block-level keys before relying on this cast.
      cacheable.clear();
    }
  }
}
Usage of org.apache.carbondata.core.datastore.block.TableBlockUniqueIdentifier in project carbondata (Apache): class AbstractQueryExecutor, method initQuery.
/**
 * Below method will be used to fill the executor properties based on query
 * model it will parse the query model and get the detail and fill it in
 * query properties
 *
 * @param queryModel query model describing the table, blocks, projections and filters
 * @throws IOException if reading blocklet info from block files fails
 */
protected void initQuery(QueryModel queryModel) throws IOException {
// Tag the current thread with partition id + query id so log lines are attributable.
StandardLogService.setThreadName(StandardLogService.getPartitionID(queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableName()), queryModel.getQueryId());
LOGGER.info("Query will be executed on table: " + queryModel.getAbsoluteTableIdentifier().getCarbonTableIdentifier().getTableName());
// add executor service for query execution
queryProperties.executorService = Executors.newCachedThreadPool();
// Initializing statistics list to record the query statistics
// creating copy on write to handle concurrent scenario
queryProperties.queryStatisticsRecorder = CarbonTimeStatisticsFactory.createExecutorRecorder(queryModel.getQueryId());
queryModel.setStatisticsRecorder(queryProperties.queryStatisticsRecorder);
QueryStatistic queryStatistic = new QueryStatistic();
// sort the block info
// so block will be loaded in sorted order this will be required for
// query execution
Collections.sort(queryModel.getTableBlockInfos());
// Two loading paths: if detail info is already attached to the (sorted-first) block,
// build indexes directly from the block infos; otherwise go through the LRU cache.
if (queryModel.getTableBlockInfos().get(0).getDetailInfo() != null) {
List<AbstractIndex> indexList = new ArrayList<>();
// LinkedHashMap keeps file-path groups in the sorted block order established above.
Map<String, List<TableBlockInfo>> listMap = new LinkedHashMap<>();
for (TableBlockInfo blockInfo : queryModel.getTableBlockInfos()) {
List<TableBlockInfo> tableBlockInfos = listMap.get(blockInfo.getFilePath());
if (tableBlockInfos == null) {
tableBlockInfos = new ArrayList<>();
listMap.put(blockInfo.getFilePath(), tableBlockInfos);
}
BlockletDetailInfo blockletDetailInfo = blockInfo.getDetailInfo();
// the blocklet information from block file
if (blockletDetailInfo.getBlockletInfo() == null) {
// Detail info lacks blocklet metadata: read it from the block file itself.
readAndFillBlockletInfo(blockInfo, tableBlockInfos, blockletDetailInfo);
} else {
tableBlockInfos.add(blockInfo);
}
}
// One IndexWrapper per physical file (group of block infos sharing a file path).
for (List<TableBlockInfo> tableBlockInfos : listMap.values()) {
indexList.add(new IndexWrapper(tableBlockInfos));
}
queryProperties.dataBlocks = indexList;
} else {
// get the table blocks
CacheProvider cacheProvider = CacheProvider.getInstance();
BlockIndexStore<TableBlockUniqueIdentifier, AbstractIndex> cache = (BlockIndexStore) cacheProvider.createCache(CacheType.EXECUTOR_BTREE);
// remove the invalid table blocks, block which is deleted or compacted
cache.removeTableBlocks(queryModel.getInvalidSegmentIds(), queryModel.getAbsoluteTableIdentifier());
List<TableBlockUniqueIdentifier> tableBlockUniqueIdentifiers = prepareTableBlockUniqueIdentifier(queryModel.getTableBlockInfos(), queryModel.getAbsoluteTableIdentifier());
// Evict stale entries first so getAll below reloads blocks affected by compaction.
cache.removeTableBlocksIfHorizontalCompactionDone(queryModel);
queryProperties.dataBlocks = cache.getAll(tableBlockUniqueIdentifiers);
}
queryStatistic.addStatistics(QueryStatisticsConstants.LOAD_BLOCKS_EXECUTOR, System.currentTimeMillis());
queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
// calculating the total number of aggeragted columns
int measureCount = queryModel.getProjectionMeasures().size();
int currentIndex = 0;
DataType[] dataTypes = new DataType[measureCount];
for (ProjectionMeasure carbonMeasure : queryModel.getProjectionMeasures()) {
// adding the data type and aggregation type of all the measure this
// can be used
// to select the aggregator
dataTypes[currentIndex] = carbonMeasure.getMeasure().getDataType();
currentIndex++;
}
queryProperties.measureDataTypes = dataTypes;
// as aggregation will be executed in following order
// 1.aggregate dimension expression
// 2. expression
// 3. query measure
// so calculating the index of the expression start index
// and measure column start index
queryProperties.filterMeasures = new HashSet<>();
queryProperties.complexFilterDimension = new HashSet<>();
// Collects every dimension/measure referenced by the filter tree into the two sets.
QueryUtil.getAllFilterDimensions(queryModel.getFilterExpressionResolverTree(), queryProperties.complexFilterDimension, queryProperties.filterMeasures);
CarbonTable carbonTable = queryModel.getTable();
TableProvider tableProvider = new SingleTableProvider(carbonTable);
queryStatistic = new QueryStatistic();
// dictionary column unique column id to dictionary mapping
// which will be used to get column actual data
queryProperties.columnToDictionaryMapping = QueryUtil.getDimensionDictionaryDetail(queryModel.getProjectionDimensions(), queryProperties.complexFilterDimension, queryModel.getAbsoluteTableIdentifier(), tableProvider);
queryStatistic.addStatistics(QueryStatisticsConstants.LOAD_DICTIONARY, System.currentTimeMillis());
queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
queryModel.setColumnToDictionaryMapping(queryProperties.columnToDictionaryMapping);
}
Usage of org.apache.carbondata.core.datastore.block.TableBlockUniqueIdentifier in project carbondata (Apache): class BlockIndexStore, method getAll.
/**
 * The method takes list of tableblocks as input and load them in btree lru cache
 * and returns the list of data blocks meta. Loading is parallelized across a
 * fixed-size thread pool whose size comes from the NUM_CORES carbon property.
 *
 * @param tableBlocksInfos List of unique table blocks
 * @return List<AbstractIndex> loaded index for each input block, in input order
 * @throws IndexBuilderException if loading is interrupted
 */
@Override
public List<AbstractIndex> getAll(List<TableBlockUniqueIdentifier> tableBlocksInfos)
    throws IndexBuilderException {
  AbstractIndex[] loadedBlock = new AbstractIndex[tableBlocksInfos.size()];
  int numberOfCores = 1;
  try {
    numberOfCores = Integer.parseInt(CarbonProperties.getInstance()
        .getProperty(CarbonCommonConstants.NUM_CORES,
            CarbonCommonConstants.NUM_CORES_DEFAULT_VAL));
  } catch (NumberFormatException e) {
    // Misconfigured NUM_CORES property: fall back to the documented default.
    numberOfCores = Integer.parseInt(CarbonCommonConstants.NUM_CORES_DEFAULT_VAL);
  }
  ExecutorService executor = Executors.newFixedThreadPool(numberOfCores);
  List<Future<AbstractIndex>> blocksList = new ArrayList<Future<AbstractIndex>>();
  for (TableBlockUniqueIdentifier tableBlockUniqueIdentifier : tableBlocksInfos) {
    blocksList.add(executor.submit(new BlockLoaderThread(tableBlockUniqueIdentifier)));
  }
  // shutdown the executor gracefully and wait until all the task is finished
  executor.shutdown();
  try {
    // NOTE(review): the boolean result is ignored, so a load exceeding one hour
    // proceeds silently with possibly-unfinished futures (fillLoadedBlocks will
    // then block on Future.get) — consider surfacing the timeout explicitly.
    executor.awaitTermination(1, TimeUnit.HOURS);
  } catch (InterruptedException e) {
    // Restore the interrupt status before translating: swallowing it would hide
    // the interruption from callers further up the stack.
    Thread.currentThread().interrupt();
    throw new IndexBuilderException(e);
  }
  // fill the block which were not loaded before to loaded blocks array
  fillLoadedBlocks(loadedBlock, blocksList);
  return Arrays.asList(loadedBlock);
}
Aggregations