Usage of org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache in the Apache CarbonData project.
Taken from the class BlockletDeleteDeltaCacheLoader, method loadDeleteDeltaFileDataToCache.
/**
 * Loads (or refreshes) the blocklet-level delete-delta cache for this loader's blocklet.
 * <p>
 * If no cache exists yet on the blocklet node, a fresh one is built from all delete-delta
 * files. If a cache already exists, it is rebuilt only when
 * {@link SegmentUpdateStatusManager#getTimestampForRefreshCache} reports a newer timestamp;
 * otherwise the existing cache is kept. In either case the (possibly unchanged) cache is
 * stored back on the blocklet node.
 */
public void loadDeleteDeltaFileDataToCache() {
  SegmentUpdateStatusManager segmentUpdateStatusManager =
      new SegmentUpdateStatusManager(absoluteIdentifier);
  BlockletLevelDeleteDeltaDataCache deleteDeltaDataCache =
      blockletNode.getDeleteDeltaDataCache();
  if (null == deleteDeltaDataCache) {
    // No cache yet: build one from scratch (null on failure, matching previous behavior).
    deleteDeltaDataCache = buildDeleteDeltaCache(segmentUpdateStatusManager, null);
  } else {
    // Cache present: a non-null timestamp means the delta files changed and a refresh is due.
    String cacheTimeStamp = segmentUpdateStatusManager
        .getTimestampForRefreshCache(blockletID, deleteDeltaDataCache.getCacheTimeStamp());
    if (null != cacheTimeStamp) {
      BlockletLevelDeleteDeltaDataCache refreshedCache =
          buildDeleteDeltaCache(segmentUpdateStatusManager, cacheTimeStamp);
      // On a failed refresh keep serving the stale cache rather than dropping it.
      if (null != refreshedCache) {
        deleteDeltaDataCache = refreshedCache;
      }
    }
  }
  blockletNode.setDeleteDeltaDataCache(deleteDeltaDataCache);
}

/**
 * Reads the delete-delta data for this blocklet from all delta files and wraps it in a
 * new cache instance.
 *
 * @param segmentUpdateStatusManager manager used to read delta files and timestamps
 * @param existingTimestamp timestamp of the current cache, or null when building fresh
 * @return the new cache, or null if the delta files could not be read
 */
private BlockletLevelDeleteDeltaDataCache buildDeleteDeltaCache(
    SegmentUpdateStatusManager segmentUpdateStatusManager, String existingTimestamp) {
  try {
    int[] deleteDeltaFileData =
        segmentUpdateStatusManager.getDeleteDeltaDataFromAllFiles(blockletID);
    return new BlockletLevelDeleteDeltaDataCache(deleteDeltaFileData,
        segmentUpdateStatusManager.getTimestampForRefreshCache(blockletID, existingTimestamp));
  } catch (Exception e) {
    // Best-effort load: a missing/unreadable delta file is tolerated, but include the
    // cause in the message instead of silently discarding it as before.
    LOGGER.debug("Unable to retrieve delete delta files: " + e);
    return null;
  }
}
Usage of org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache in the Apache CarbonData project.
Taken from the class DictionaryBasedResultCollector, method collectData.
/**
 * Collects up to {@code batchSize} rows from the scanned result, each row holding the
 * decoded dimension values followed by measure values, and tracks how many rows were
 * produced so the caller's limit is honored. Rows flagged in the delete-delta cache
 * are skipped (they still advance the scan but are not emitted).
 */
@Override
public List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize) {
  List<Object[]> collectedRows = new ArrayList<>(batchSize);
  BlockletLevelDeleteDeltaDataCache deleteDeltaDataCache =
      scannedResult.getDeleteDeltaDataCache();
  int collected = 0;
  while (scannedResult.hasNext() && collected < batchSize) {
    Object[] outputRow = new Object[queryDimensions.length + queryMeasures.length];
    if (isDimensionExists) {
      int[] surrogateKeys = scannedResult.getDictionaryKeyIntegerArray();
      byte[][] noDictionaryKeys = scannedResult.getNoDictionaryKeyArray();
      byte[][] complexKeys = scannedResult.getComplexTypeKeyArray();
      // Reset the per-row column cursors before filling dimension data.
      dictionaryColumnIndex = 0;
      noDictionaryColumnIndex = 0;
      complexTypeColumnIndex = 0;
      for (int dim = 0; dim < queryDimensions.length; dim++) {
        fillDimensionData(scannedResult, surrogateKeys, noDictionaryKeys, complexKeys,
            comlexDimensionInfoMap, outputRow, dim);
      }
    } else {
      // No dimensions projected: still move the scan cursor forward.
      scannedResult.incrementCounter();
    }
    // Emit the row only if it is not marked deleted for the current row id.
    boolean deleted = null != deleteDeltaDataCache
        && deleteDeltaDataCache.contains(scannedResult.getCurrentRowId());
    if (!deleted) {
      fillMeasureData(scannedResult, outputRow);
      collectedRows.add(outputRow);
      collected++;
    }
  }
  return collectedRows;
}
Usage of org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache in the Apache CarbonData project.
Taken from the class RestructureBasedDictionaryResultCollector, method collectData.
/**
 * This method will add a record both key and value to list object
 * it will keep track of how many record is processed, to handle limit scenario.
 * Restructure-aware variant: dimensions added after the block was written are filled
 * with their default values instead of being read from the block. Rows flagged in the
 * delete-delta cache are skipped.
 */
@Override
public List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize) {
// scan the record and add to list
List<Object[]> listBasedResult = new ArrayList<>(batchSize);
int rowCounter = 0;
int[] surrogateResult;
byte[][] noDictionaryKeys;
byte[][] complexTypeKeyArray;
BlockletLevelDeleteDeltaDataCache deleteDeltaDataCache = scannedResult.getDeleteDeltaDataCache();
Map<Integer, GenericQueryType> comlexDimensionInfoMap = tableBlockExecutionInfos.getComlexDimensionInfoMap();
while (scannedResult.hasNext() && rowCounter < batchSize) {
// Row layout: dimension values first, measures after (ordering applied via order[]).
Object[] row = new Object[queryDimensions.length + queryMeasures.length];
if (isDimensionExists) {
surrogateResult = scannedResult.getDictionaryKeyIntegerArray();
noDictionaryKeys = scannedResult.getNoDictionaryKeyArray();
complexTypeKeyArray = scannedResult.getComplexTypeKeyArray();
// Per-row cursors into the dictionary / no-dictionary / complex key arrays.
dictionaryColumnIndex = 0;
noDictionaryColumnIndex = 0;
complexTypeColumnIndex = 0;
for (int i = 0; i < queryDimensions.length; i++) {
// fill default value in case the dimension does not exist in the current block
if (!dimensionInfo.getDimensionExists()[i]) {
if (dictionaryEncodingArray[i] || directDictionaryEncodingArray[i]) {
row[order[i]] = dimensionInfo.getDefaultValues()[i];
// Skip the dictionary-key slot this (missing) column would have occupied.
dictionaryColumnIndex++;
} else {
// NOTE(review): noDictionaryColumnIndex is NOT advanced here, unlike the
// dictionary path above — presumably correct because missing no-dictionary
// columns have no slot in noDictionaryKeys; confirm against fillDimensionData.
row[order[i]] = dimensionInfo.getDefaultValues()[i];
}
continue;
}
fillDimensionData(scannedResult, surrogateResult, noDictionaryKeys, complexTypeKeyArray, comlexDimensionInfoMap, row, i);
}
} else {
// No dimensions projected: still advance the scan cursor for this row.
scannedResult.incrementCounter();
}
// Skip rows marked deleted in the delete-delta cache (they are not emitted and
// do not count toward batchSize).
if (null != deleteDeltaDataCache && deleteDeltaDataCache.contains(scannedResult.getCurrentRowId())) {
continue;
}
fillMeasureData(scannedResult, row);
listBasedResult.add(row);
rowCounter++;
}
return listBasedResult;
}
Usage of org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache in the Apache CarbonData project.
Taken from the class RawBasedResultCollector, method collectData.
/**
 * Collects up to {@code batchSize} raw (undecoded) rows from the scanned result,
 * counting emitted rows so the caller's limit is respected. Rows present in the
 * delete-delta cache are consumed from the scan but not emitted.
 */
@Override
public List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize) {
  List<Object[]> collectedRows = new ArrayList<>(batchSize);
  QueryMeasure[] measures = tableBlockExecutionInfos.getQueryMeasures();
  BlockletLevelDeleteDeltaDataCache deleteDeltaDataCache =
      scannedResult.getDeleteDeltaDataCache();
  int emitted = 0;
  while (scannedResult.hasNext() && emitted < batchSize) {
    // Pull the next row's raw key/measure data into the collector's state.
    scanResultAndGetData(scannedResult);
    boolean deleted = null != deleteDeltaDataCache
        && deleteDeltaDataCache.contains(scannedResult.getCurrentRowId());
    if (!deleted) {
      prepareRow(scannedResult, collectedRows, measures);
      emitted++;
    }
  }
  return collectedRows;
}
Usage of org.apache.carbondata.core.cache.update.BlockletLevelDeleteDeltaDataCache in the Apache CarbonData project.
Taken from the class RestructureBasedRawResultCollector, method collectData.
/**
 * Restructure-aware raw collector: gathers up to {@code batchSize} raw rows, skipping
 * rows flagged in the delete-delta cache, and — for blocks written before a schema
 * change — pads the dictionary / no-dictionary key arrays with values for the newly
 * added columns before the row is prepared.
 */
@Override
public List<Object[]> collectData(AbstractScannedResult scannedResult, int batchSize) {
  List<Object[]> collectedRows = new ArrayList<>(batchSize);
  QueryMeasure[] measures = tableBlockExecutionInfos.getActualQueryMeasures();
  BlockletLevelDeleteDeltaDataCache deleteDeltaDataCache =
      scannedResult.getDeleteDeltaDataCache();
  int emitted = 0;
  while (scannedResult.hasNext() && emitted < batchSize) {
    // Pull the next row's raw key/measure data into the collector's state.
    scanResultAndGetData(scannedResult);
    boolean deleted = null != deleteDeltaDataCache
        && deleteDeltaDataCache.contains(scannedResult.getCurrentRowId());
    if (!deleted) {
      // re-fill dictionary and no dictionary key arrays for the newly added columns
      if (dimensionInfo.isDictionaryColumnAdded()) {
        dictionaryKeyArray = fillDictionaryKeyArrayWithLatestSchema(dictionaryKeyArray);
      }
      if (dimensionInfo.isNoDictionaryColumnAdded()) {
        noDictionaryKeyArray = fillNoDictionaryKeyArrayWithLatestSchema(noDictionaryKeyArray);
      }
      prepareRow(scannedResult, collectedRows, measures);
      emitted++;
    }
  }
  return collectedRows;
}
Aggregations