use of org.apache.carbondata.core.datastore.block.BlockInfo in project carbondata by apache.
the class BlockIndexStore method removeTableBlocksIfHorizontalCompactionDone.
/**
 * Removes table blocks at the executor level if horizontal compaction is done.
 *
 * @param queryModel query model carrying the invalid block details per segment
 */
public void removeTableBlocksIfHorizontalCompactionDone(QueryModel queryModel) {
  // get the invalid segments block details
  Map<String, UpdateVO> invalidBlocksVO = queryModel.getInvalidBlockVOForSegmentId();
  if (!invalidBlocksVO.isEmpty()) {
    UpdateVO updateMetadata;
    Iterator<Map.Entry<String, UpdateVO>> itr = invalidBlocksVO.entrySet().iterator();
    String blockTimestamp = null;
    while (itr.hasNext()) {
      Map.Entry<String, UpdateVO> entry = itr.next();
      TableSegmentUniqueIdentifier tableSegmentUniqueIdentifier =
          new TableSegmentUniqueIdentifier(queryModel.getAbsoluteTableIdentifier(), entry.getKey());
      List<BlockInfo> blockInfos =
          segmentIdToBlockListMap.get(tableSegmentUniqueIdentifier.getUniqueTableSegmentIdentifier());
      if (null != blockInfos) {
        for (BlockInfo blockInfo : blockInfos) {
          // the block timestamp is the suffix of the block unique name, after the last '-'
          blockTimestamp = blockInfo.getBlockUniqueName().substring(
              blockInfo.getBlockUniqueName().lastIndexOf('-') + 1,
              blockInfo.getBlockUniqueName().length());
          updateMetadata = entry.getValue();
          if (CarbonUpdateUtil.isMaxQueryTimeoutExceeded(Long.parseLong(blockTimestamp))) {
            Long blockTimeStamp = Long.parseLong(blockTimestamp);
            // evict the cached block if its timestamp falls inside the horizontally compacted window
            if (blockTimeStamp > updateMetadata.getFactTimestamp()
                && (updateMetadata.getUpdateDeltaStartTimestamp() != null
                    && blockTimeStamp < updateMetadata.getUpdateDeltaStartTimestamp())) {
              String lruCacheKey =
                  getLruCacheKey(queryModel.getAbsoluteTableIdentifier(), blockInfo);
              lruCache.remove(lruCacheKey);
            }
          }
        }
      }
    }
  }
}
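Read in isolation, the eviction check above reduces to two small steps: parse the block timestamp from the suffix of the block unique name, then evict only when that timestamp falls strictly between the fact timestamp and the update-delta start timestamp. The following is a minimal standalone sketch of that rule; the helper names and the assumed name format are illustrative, not carbondata API.

  // Simplified, standalone sketch of the eviction rule used above.
  // extractBlockTimestamp and isStaleBlock are hypothetical helper names, and the
  // block unique name format (timestamp as the suffix after the last '-') is assumed.
  final class HorizontalCompactionEvictionSketch {

    // parses the timestamp suffix of a block unique name
    static long extractBlockTimestamp(String blockUniqueName) {
      return Long.parseLong(blockUniqueName.substring(blockUniqueName.lastIndexOf('-') + 1));
    }

    // a cached block is stale only when its timestamp lies after the fact load
    // but before the start of the update delta written by horizontal compaction
    static boolean isStaleBlock(long blockTimestamp, long factTimestamp, Long updateDeltaStartTimestamp) {
      return blockTimestamp > factTimestamp
          && updateDeltaStartTimestamp != null
          && blockTimestamp < updateDeltaStartTimestamp;
    }
  }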
use of org.apache.carbondata.core.datastore.block.BlockInfo in project carbondata by apache.
the class BlockIndexStore method get.
/**
 * Loads the block metadata into the B-Tree LRU cache and returns it.
 *
 * @param tableBlockUniqueIdentifier uniquely identifies the block
 * @return the block's B-Tree metadata
 */
@Override
public AbstractIndex get(TableBlockUniqueIdentifier tableBlockUniqueIdentifier) throws IOException {
  TableBlockInfo tableBlockInfo = tableBlockUniqueIdentifier.getTableBlockInfo();
  BlockInfo blockInfo = new BlockInfo(tableBlockInfo);
  String lruCacheKey =
      getLruCacheKey(tableBlockUniqueIdentifier.getAbsoluteTableIdentifier(), blockInfo);
  AbstractIndex tableBlock = (AbstractIndex) lruCache.get(lruCacheKey);
  // if the block is not loaded
  if (null == tableBlock) {
    // check whether a lock object is already present in the block info lock map
    Object blockInfoLockObject = blockInfoLock.get(blockInfo);
    // if not, a lock object will be added for this block info
    if (null == blockInfoLockObject) {
      synchronized (blockInfoLock) {
        // check the block info lock map again; if the lock object is
        // still not present, add one
        blockInfoLockObject = blockInfoLock.get(blockInfo);
        if (null == blockInfoLockObject) {
          blockInfoLockObject = new Object();
          blockInfoLock.put(blockInfo, blockInfoLockObject);
        }
      }
    }
    // acquire the lock for this particular block info
    synchronized (blockInfoLockObject) {
      // check again whether the block is present, so that concurrent queries
      // do not load the same block more than once
      tableBlock = (AbstractIndex) lruCache.get(
          getLruCacheKey(tableBlockUniqueIdentifier.getAbsoluteTableIdentifier(), blockInfo));
      // if the block is still not present, load it
      if (null == tableBlock) {
        tableBlock = loadBlock(tableBlockUniqueIdentifier);
        fillSegmentIdToBlockListMap(tableBlockUniqueIdentifier.getAbsoluteTableIdentifier(), blockInfo);
      }
    }
  } else {
    tableBlock.incrementAccessCount();
  }
  return tableBlock;
}
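The locking above is a per-key double-checked pattern: a lock object is registered lazily for each BlockInfo, and the LRU cache is re-checked under that lock so concurrent queries load a given block only once; loadBlock then drops the lock object again. A minimal generic sketch of the same pattern is shown below; LoadingStore and loadValue are hypothetical names, not part of carbondata.

  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.ConcurrentMap;

  // Generic sketch of the per-key double-checked locking used by BlockIndexStore.get.
  abstract class LoadingStore<K, V> {
    private final ConcurrentMap<K, V> cache = new ConcurrentHashMap<>();
    private final ConcurrentMap<K, Object> keyLocks = new ConcurrentHashMap<>();

    V get(K key) {
      V value = cache.get(key);                 // fast path: already loaded
      if (value != null) {
        return value;
      }
      Object keyLock = keyLocks.computeIfAbsent(key, k -> new Object());
      synchronized (keyLock) {                  // only one loader per key at a time
        value = cache.get(key);                 // re-check under the per-key lock
        if (value == null) {
          value = loadValue(key);               // expensive load happens at most once per key
          cache.put(key, value);
          keyLocks.remove(key);                 // mirrors blockInfoLock.remove(...) in loadBlock
        }
      }
      return value;
    }

    // placeholder for the real loading work (e.g. building the block's B-Tree from its footer)
    protected abstract V loadValue(K key);
  }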
use of org.apache.carbondata.core.datastore.block.BlockInfo in project carbondata by apache.
the class BlockIndexStore method invalidate.
/**
 * Removes the entry from the cache.
 *
 * @param tableBlockUniqueIdentifier uniquely identifies the block to evict
 */
@Override
public void invalidate(TableBlockUniqueIdentifier tableBlockUniqueIdentifier) {
  BlockInfo blockInfo = new BlockInfo(tableBlockUniqueIdentifier.getTableBlockInfo());
  lruCache.remove(
      getLruCacheKey(tableBlockUniqueIdentifier.getAbsoluteTableIdentifier(), blockInfo));
}
use of org.apache.carbondata.core.datastore.block.BlockInfo in project carbondata by apache.
the class BlockIndexStore method loadBlock.
private AbstractIndex loadBlock(TableBlockUniqueIdentifier tableBlockUniqueIdentifier) throws IOException {
  AbstractIndex tableBlock = new BlockIndex();
  BlockInfo blockInfo = new BlockInfo(tableBlockUniqueIdentifier.getTableBlockInfo());
  String lruCacheKey =
      getLruCacheKey(tableBlockUniqueIdentifier.getAbsoluteTableIdentifier(), blockInfo);
  checkAndLoadTableBlocks(tableBlock, tableBlockUniqueIdentifier, lruCacheKey);
  // once the block is loaded, remove its lock object from the block info lock map;
  // subsequent gets will find the block in the cache and never reach this path again
  blockInfoLock.remove(blockInfo);
  return tableBlock;
}
use of org.apache.carbondata.core.datastore.block.BlockInfo in project carbondata by apache.
the class AbstractDataFileFooterConverter method getIndexInfo.
/**
 * Below method will be used to get the index info from the index file.
 *
 * @param filePath file path of the index file
 * @param tableBlockInfoList list of table block info to be filled from the index
 * @return list of data file footers (the index info)
 * @throws IOException problem while reading the index file
 */
public List<DataFileFooter> getIndexInfo(String filePath, List<TableBlockInfo> tableBlockInfoList)
    throws IOException {
  CarbonIndexFileReader indexReader = new CarbonIndexFileReader();
  List<DataFileFooter> dataFileFooters = new ArrayList<DataFileFooter>();
  try {
    // open the reader
    indexReader.openThriftReader(filePath);
    // read the index header
    org.apache.carbondata.format.IndexHeader readIndexHeader = indexReader.readIndexHeader();
    List<ColumnSchema> columnSchemaList = new ArrayList<ColumnSchema>();
    List<org.apache.carbondata.format.ColumnSchema> table_columns =
        readIndexHeader.getTable_columns();
    // convert the thrift column schemas to wrapper column schemas
    for (int i = 0; i < table_columns.size(); i++) {
      columnSchemaList.add(thriftColumnSchmeaToWrapperColumnSchema(table_columns.get(i)));
    }
    // get the segment info
    SegmentInfo segmentInfo = getSegmentInfo(readIndexHeader.getSegment_info());
    BlockletIndex blockletIndex = null;
    int counter = 0;
    int index = 0;
    DataFileFooter dataFileFooter = null;
    // read the block info from the file
    while (indexReader.hasNext()) {
      BlockIndex readBlockIndexInfo = indexReader.readBlockIndexInfo();
      blockletIndex = getBlockletIndex(readBlockIndexInfo.getBlock_index());
      dataFileFooter = new DataFileFooter();
      TableBlockInfo tableBlockInfo = tableBlockInfoList.get(index);
      // consume the entry only when the block's part number matches the running counter
      if (Integer.parseInt(CarbonTablePath.DataFileUtil.getPartNo(tableBlockInfo.getFilePath()))
          == counter++) {
        tableBlockInfo.setBlockOffset(readBlockIndexInfo.getOffset());
        tableBlockInfo.setVersion(
            ColumnarFormatVersion.valueOf((short) readIndexHeader.getVersion()));
        int blockletSize = getBlockletSize(readBlockIndexInfo);
        tableBlockInfo.getBlockletInfos().setNoOfBlockLets(blockletSize);
        dataFileFooter.setBlockletIndex(blockletIndex);
        dataFileFooter.setColumnInTable(columnSchemaList);
        dataFileFooter.setNumberOfRows(readBlockIndexInfo.getNum_rows());
        dataFileFooter.setBlockInfo(new BlockInfo(tableBlockInfo));
        dataFileFooter.setSegmentInfo(segmentInfo);
        dataFileFooters.add(dataFileFooter);
        // stop once every supplied table block info has been filled
        if (++index == tableBlockInfoList.size()) {
          break;
        }
      }
    }
  } finally {
    indexReader.closeThriftReader();
  }
  return dataFileFooters;
}
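The while loop above pairs index entries with the caller-supplied TableBlockInfo list by part number: an entry is consumed only when the next block's part number matches the running counter, and reading stops once every supplied block has been filled. The stripped-down sketch below isolates that pairing logic; BlockSlot and the entry type E are hypothetical stand-ins, not carbondata types.

  import java.util.Iterator;
  import java.util.List;

  // Stripped-down sketch of the part-number pairing done in getIndexInfo.
  final class IndexPairingSketch {

    interface BlockSlot<E> {
      int partNo();              // part number parsed from the block's file path
      void fillFrom(E entry);    // copy offset, version, row count, ... from the index entry
    }

    static <E> void pair(Iterator<E> indexEntries, List<BlockSlot<E>> slots) {
      int counter = 0;
      int index = 0;
      while (indexEntries.hasNext()) {
        E entry = indexEntries.next();
        BlockSlot<E> slot = slots.get(index);
        // an index entry is consumed only when the next slot's part number
        // matches the running counter (mirrors getPartNo(...) == counter++ above)
        if (slot.partNo() == counter++) {
          slot.fillFrom(entry);
          if (++index == slots.size()) {
            break;               // every requested block has been filled
          }
        }
      }
    }
  }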