use of org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMap in project carbondata by apache.
the class BlockletDataMapIndexStore method loadAndGetDataMap.
/**
 * Below method will be used to load the blocklet data map for the given index file.
 * One segment may have multiple tasks, so each task's index file is loaded as a
 * separate data map under a segment-level lock and added to the LRU cache.
 *
 * @return the loaded and initialized BlockletDataMap
 * @throws IOException
 */
private BlockletDataMap loadAndGetDataMap(TableBlockIndexUniqueIdentifier identifier,
    SegmentIndexFileStore indexFileStore, Map<String, BlockMetaInfo> blockMetaInfoMap)
    throws IOException, MemoryException {
  String uniqueTableSegmentIdentifier = identifier.getUniqueTableSegmentIdentifier();
  Object lock = segmentLockMap.get(uniqueTableSegmentIdentifier);
  if (lock == null) {
    lock = addAndGetSegmentLock(uniqueTableSegmentIdentifier);
  }
  BlockletDataMap dataMap;
  synchronized (lock) {
    dataMap = new BlockletDataMap();
    dataMap.init(new BlockletDataMapModel(
        identifier.getIndexFilePath() + CarbonCommonConstants.FILE_SEPARATOR
            + identifier.getIndexFileName(),
        indexFileStore.getFileData(identifier.getIndexFileName()),
        blockMetaInfoMap, identifier.getSegmentId()));
    lruCache.put(identifier.getUniqueTableSegmentIdentifier(), dataMap, dataMap.getMemorySize());
  }
  return dataMap;
}
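The snippet above relies on a per-segment lock so that concurrent loads of the same segment are serialized while different segments can still load in parallel. Below is a minimal, self-contained sketch of that locking pattern; the class, the map names and the placeholder load are hypothetical and only illustrate the idea, not CarbonData's actual implementation.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical sketch of the per-segment lock pattern used above: one lock
// object per segment key, created lazily, so concurrent loads of the same
// segment are serialized while other segments load in parallel.
class SegmentLoadSketch {
  private final Map<String, Object> segmentLockMap = new ConcurrentHashMap<>();
  private final Map<String, Object> cache = new ConcurrentHashMap<>();

  Object loadAndGet(String segmentKey) {
    // computeIfAbsent plays the role of addAndGetSegmentLock in the code above.
    Object lock = segmentLockMap.computeIfAbsent(segmentKey, k -> new Object());
    synchronized (lock) {
      // Load, then publish to the cache, all under the segment lock.
      Object loaded = new Object(); // stands in for BlockletDataMap.init(...)
      cache.put(segmentKey, loaded);
      return loaded;
    }
  }
}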
use of org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMap in project carbondata by apache.
the class BlockletDataMapIndexStore method getAll.
@Override
public List<BlockletDataMap> getAll(
    List<TableBlockIndexUniqueIdentifier> tableSegmentUniqueIdentifiers) throws IOException {
  List<BlockletDataMap> blockletDataMaps = new ArrayList<>(tableSegmentUniqueIdentifiers.size());
  List<TableBlockIndexUniqueIdentifier> missedIdentifiers = new ArrayList<>();
  // Get the datamaps for each indexfile from cache.
  try {
    for (TableBlockIndexUniqueIdentifier identifier : tableSegmentUniqueIdentifiers) {
      BlockletDataMap ifPresent = getIfPresent(identifier);
      if (ifPresent != null) {
        blockletDataMaps.add(ifPresent);
      } else {
        missedIdentifiers.add(identifier);
      }
    }
    if (missedIdentifiers.size() > 0) {
      SegmentIndexFileStore indexFileStore = new SegmentIndexFileStore();
      Set<String> filesRead = new HashSet<>();
      for (TableBlockIndexUniqueIdentifier identifier : missedIdentifiers) {
        Map<String, BlockMetaInfo> blockMetaInfoMap =
            getBlockMetaInfoMap(identifier, indexFileStore, filesRead);
        blockletDataMaps.add(loadAndGetDataMap(identifier, indexFileStore, blockMetaInfoMap));
      }
    }
  } catch (Throwable e) {
    for (BlockletDataMap dataMap : blockletDataMaps) {
      dataMap.clear();
    }
    throw new IOException("Problem in loading segment blocks.", e);
  }
  return blockletDataMaps;
}
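The method above follows a common batch-lookup shape: serve hits from the cache, collect the misses, load those, and if anything fails release whatever was already loaded so a partial batch does not leak memory. A minimal hedged sketch of that shape, with hypothetical types and a placeholder load, follows.

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical sketch of the getAll() shape above: cache hits first, then
// batch-load the misses, rolling back on any failure.
class BatchLookupSketch {
  private final Map<String, byte[]> cache = new HashMap<>();

  List<byte[]> getAll(List<String> keys) throws IOException {
    List<byte[]> results = new ArrayList<>(keys.size());
    List<String> missed = new ArrayList<>();
    try {
      for (String key : keys) {
        byte[] hit = cache.get(key);
        if (hit != null) {
          results.add(hit);
        } else {
          missed.add(key);
        }
      }
      for (String key : missed) {
        byte[] loaded = new byte[0]; // stands in for loadAndGetDataMap(...)
        cache.put(key, loaded);
        results.add(loaded);
      }
    } catch (Throwable e) {
      // Release everything loaded so far; the real code calls dataMap.clear().
      results.clear();
      throw new IOException("Problem in loading the batch.", e);
    }
    return results;
  }
}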
use of org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMap in project carbondata by apache.
the class BlockletDataMapIndexStore method get.
@Override
public BlockletDataMap get(TableBlockIndexUniqueIdentifier identifier) throws IOException {
  String lruCacheKey = identifier.getUniqueTableSegmentIdentifier();
  BlockletDataMap dataMap = (BlockletDataMap) lruCache.get(lruCacheKey);
  if (dataMap == null) {
    try {
      SegmentIndexFileStore indexFileStore = new SegmentIndexFileStore();
      Set<String> filesRead = new HashSet<>();
      Map<String, BlockMetaInfo> blockMetaInfoMap =
          getBlockMetaInfoMap(identifier, indexFileStore, filesRead);
      dataMap = loadAndGetDataMap(identifier, indexFileStore, blockMetaInfoMap);
    } catch (MemoryException e) {
      LOGGER.error("memory exception when loading datamap: " + e.getMessage());
      throw new RuntimeException(e.getMessage(), e);
    }
  }
  return dataMap;
}
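For context, here is a hedged caller-side sketch of how the get and clearAccessCount methods shown on this page might be used together. The surrounding class and the pruning step are invented, and the sketch assumes the CarbonData types on this page are already imported; only standard-library imports are written out.

import java.io.IOException;
import java.util.Collections;

// Hypothetical caller: fetch (or load) one data map, use it, then release
// it via clearAccessCount() once the work is done.
class SingleLookupSketch {
  void useDataMap(BlockletDataMapIndexStore indexStore,
      TableBlockIndexUniqueIdentifier identifier) throws IOException {
    BlockletDataMap dataMap = indexStore.get(identifier);
    try {
      // ... prune blocklets with dataMap here ...
    } finally {
      indexStore.clearAccessCount(Collections.singletonList(identifier));
    }
  }
}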
use of org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMap in project carbondata by apache.
the class BlockletDataMapIndexStore method clearAccessCount.
/**
* The method clears the access count of table segments
*
* @param tableSegmentUniqueIdentifiers
*/
@Override
public void clearAccessCount(List<TableBlockIndexUniqueIdentifier> tableSegmentUniqueIdentifiers) {
  for (TableBlockIndexUniqueIdentifier identifier : tableSegmentUniqueIdentifiers) {
    BlockletDataMap cacheable =
        (BlockletDataMap) lruCache.get(identifier.getUniqueTableSegmentIdentifier());
    cacheable.clear();
  }
}
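Finally, a hedged sketch of the release pattern above: look each key up in the cache and free the resources its entry holds. The types and the extra null check guarding against already-evicted entries are additions of this sketch, not part of the code shown.

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical sketch of the release pattern above: for each key, find the
// cached entry and free whatever resources it holds.
class CacheReleaseSketch {
  interface Releasable { void clear(); }

  private final Map<String, Releasable> lruCache = new ConcurrentHashMap<>();

  void clearAccessCount(List<String> keys) {
    for (String key : keys) {
      Releasable cached = lruCache.get(key);
      if (cached != null) { // guard for entries already evicted (sketch only)
        cached.clear();
      }
    }
  }
}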