Use of org.apache.carbondata.core.indexstore.row.DataMapRow in project carbondata by apache:
class BlockletDataMap, method addMinMax.
/**
 * Builds a nested DataMapRow holding one min value per column, using the child
 * schemas of the given struct schema.
 *
 * @param minMaxLen       value sizes per column; its length drives how many values are copied
 * @param carbonRowSchema struct schema describing the min/max row layout
 * @param minValues       serialized min value for each column
 * @return a row populated with the given min values at consecutive ordinals
 */
private DataMapRow addMinMax(int[] minMaxLen, CarbonRowSchema carbonRowSchema, byte[][] minValues) {
CarbonRowSchema[] childSchemas = ((CarbonRowSchema.StructCarbonRowSchema) carbonRowSchema).getChildSchemas();
DataMapRow minMaxRow = new DataMapRowImpl(childSchemas);
// copy each column's min value into the next ordinal of the nested row
int ordinal = 0;
for (int col = 0; col < minMaxLen.length; col++) {
minMaxRow.setByteArray(minValues[col], ordinal);
ordinal++;
}
return minMaxRow;
}
Use of org.apache.carbondata.core.indexstore.row.DataMapRow in project carbondata by apache:
class BlockletDataMap, method prune.
/**
 * Returns the blocklets of this datamap that may satisfy the given filter.
 * With a null filter every stored blocklet is returned; otherwise each row's
 * min/max values are checked against the filter executer before inclusion.
 *
 * @param filterExp         filter to prune with, or null for no pruning
 * @param segmentProperties segment metadata used to build the filter executer
 * @return blocklets that may contain matching data (never null)
 */
private List<Blocklet> prune(FilterResolverIntf filterExp, SegmentProperties segmentProperties) {
// empty store: nothing can match
if (unsafeMemoryDMStore.getRowCount() == 0) {
return new ArrayList<>();
}
List<Blocklet> result = new ArrayList<>();
int totalRows = unsafeMemoryDMStore.getRowCount();
if (filterExp == null) {
// no filter: every stored blocklet qualifies
for (int rowId = 0; rowId < totalRows; rowId++) {
DataMapRow row = unsafeMemoryDMStore.getUnsafeRow(rowId).convertToSafeRow();
result.add(createBlocklet(row, row.getShort(BLOCKLET_ID_INDEX)));
}
return result;
}
// Remove B-tree jump logic as start and end key prepared is not
// correct for old store scenarios
FilterExecuter filterExecuter = FilterUtil.getFilterExecuterTree(filterExp, segmentProperties, null);
for (int rowId = 0; rowId < totalRows; rowId++) {
DataMapRow row = unsafeMemoryDMStore.getUnsafeRow(rowId).convertToSafeRow();
int blockletId = row.getShort(BLOCKLET_ID_INDEX);
String filePath = new String(row.getByteArray(FILE_PATH_INDEX), CarbonCommonConstants.DEFAULT_CHARSET_CLASS);
// min/max based pruning: keep the blocklet only if the filter could match it
boolean mayMatch = addBlockBasedOnMinMaxValue(filterExecuter, getMinMaxValue(row, MAX_VALUES_INDEX), getMinMaxValue(row, MIN_VALUES_INDEX), filePath, blockletId);
if (mayMatch) {
result.add(createBlocklet(row, blockletId));
}
}
return result;
}
Use of org.apache.carbondata.core.indexstore.row.DataMapRow in project carbondata by apache:
class BlockletDataMap, method getMinMaxValue.
/**
 * Extracts the serialized min (or max) values stored as a nested row at the
 * given ordinal of the supplied row.
 *
 * @param row   datamap row containing a nested min/max struct row
 * @param index ordinal of the nested row (e.g. MIN_VALUES_INDEX / MAX_VALUES_INDEX)
 * @return one byte array per column of the nested row
 */
private byte[][] getMinMaxValue(DataMapRow row, int index) {
DataMapRow nested = row.getRow(index);
int columnCount = nested.getColumnCount();
byte[][] values = new byte[columnCount][];
for (int col = 0; col < columnCount; col++) {
values[col] = nested.getByteArray(col);
}
return values;
}
Use of org.apache.carbondata.core.indexstore.row.DataMapRow in project carbondata by apache:
class BlockletDataMap, method loadToUnsafeBlock.
/**
 * Load information for the block. It is the case that can happen only for old stores
 * where blocklet information is not available in the index file. So load only block
 * information and read blocklet information in the executor.
 *
 * Builds one index row per block, writes it into the unsafe store, and folds the
 * block's min/max into the task-level summary row. The ordinal sequence below must
 * match the schema returned by unsafeMemoryDMStore.getSchema() exactly.
 *
 * @param fileFooter        footer of the carbondata file (index keys, min/max, row count)
 * @param segmentProperties provides per-column value sizes for min/max handling
 * @param filePath          path of the block's data file (stored in the row)
 * @param summaryRow        task-level summary row; created here if null and returned
 * @param blockMetaInfo     block locations and size
 * @return the (possibly newly created) summary row carrying task-level min/max
 */
private DataMapRowImpl loadToUnsafeBlock(DataFileFooter fileFooter, SegmentProperties segmentProperties, String filePath, DataMapRowImpl summaryRow, BlockMetaInfo blockMetaInfo) {
int[] minMaxLen = segmentProperties.getColumnsValueSize();
BlockletIndex blockletIndex = fileFooter.getBlockletIndex();
CarbonRowSchema[] schema = unsafeMemoryDMStore.getSchema();
// Add one row to maintain task level min max for segment pruning
if (summaryRow == null) {
summaryRow = new DataMapRowImpl(unsafeMemorySummaryDMStore.getSchema());
}
DataMapRow row = new DataMapRowImpl(schema);
int ordinal = 0;
int taskMinMaxOrdinal = 0;
// add start key as index key
row.setByteArray(blockletIndex.getBtreeIndex().getStartKey(), ordinal++);
BlockletMinMaxIndex minMaxIndex = blockletIndex.getMinMaxIndex();
byte[][] minValues = updateMinValues(minMaxIndex.getMinValues(), minMaxLen);
byte[][] maxValues = updateMaxValues(minMaxIndex.getMaxValues(), minMaxLen);
// update min max values in case of old store
// NOTE(review): both calls pass (maxValues, minValues) with only the boolean
// differing — presumably the flag selects which side is updated; confirm against
// CarbonUtil.updateMinMaxValues before changing.
byte[][] updatedMinValues = CarbonUtil.updateMinMaxValues(fileFooter, maxValues, minValues, true);
byte[][] updatedMaxValues = CarbonUtil.updateMinMaxValues(fileFooter, maxValues, minValues, false);
// nested min-values row goes at the current ordinal
row.setRow(addMinMax(minMaxLen, schema[ordinal], updatedMinValues), ordinal);
// compute and set task level min values
addTaskMinMaxValues(summaryRow, minMaxLen, unsafeMemorySummaryDMStore.getSchema()[taskMinMaxOrdinal], updatedMinValues, TASK_MIN_VALUES_INDEX, true);
ordinal++;
taskMinMaxOrdinal++;
// nested max-values row goes at the next ordinal
row.setRow(addMinMax(minMaxLen, schema[ordinal], updatedMaxValues), ordinal);
// compute and set task level max values
addTaskMinMaxValues(summaryRow, minMaxLen, unsafeMemorySummaryDMStore.getSchema()[taskMinMaxOrdinal], updatedMaxValues, TASK_MAX_VALUES_INDEX, false);
ordinal++;
// total row count of the block
row.setInt((int) fileFooter.getNumberOfRows(), ordinal++);
// add file path
byte[] filePathBytes = filePath.getBytes(CarbonCommonConstants.DEFAULT_CHARSET_CLASS);
row.setByteArray(filePathBytes, ordinal++);
// add pages (0: page count unknown at block level for old stores)
row.setShort((short) 0, ordinal++);
// add version number
row.setShort(fileFooter.getVersionId().number(), ordinal++);
// add schema updated time
row.setLong(fileFooter.getSchemaUpdatedTimeStamp(), ordinal++);
// add blocklet info (empty: not available in old-store index files)
row.setByteArray(new byte[0], ordinal++);
// offset of the block inside its file
row.setLong(fileFooter.getBlockInfo().getTableBlockInfo().getBlockOffset(), ordinal++);
try {
setLocations(blockMetaInfo.getLocationInfo(), row, ordinal);
ordinal++;
// for relative blocklet id. Value is -1 because in case of old store blocklet info will
// not be present in the index file and in that case we will not know the total number of
// blocklets
row.setShort((short) -1, ordinal++);
// store block size
row.setLong(blockMetaInfo.getSize(), ordinal);
unsafeMemoryDMStore.addIndexRowToUnsafe(row);
} catch (Exception e) {
throw new RuntimeException(e);
}
return summaryRow;
}
Use of org.apache.carbondata.core.indexstore.row.DataMapRow in project carbondata by apache:
class BlockletDataMap, method getDetailedBlocklet.
/**
 * Resolves a blocklet by its id, which is the numeric row position inside the
 * unsafe datamap store.
 *
 * @param blockletId decimal string index of the row to read
 * @return the extended blocklet built from that row
 */
public ExtendedBlocklet getDetailedBlocklet(String blockletId) {
int rowIndex = Integer.parseInt(blockletId);
DataMapRow row = unsafeMemoryDMStore.getUnsafeRow(rowIndex).convertToSafeRow();
int storedBlockletId = row.getShort(BLOCKLET_ID_INDEX);
return createBlocklet(row, storedBlockletId);
}
Aggregations