Use of org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk in project carbondata by apache.
The class RowLevelRangeLessThanEqualFilterExecuterImpl, method applyFilter:
@Override
public BitSetGroup applyFilter(BlocksChunkHolder blockChunkHolder)
    throws FilterUnsupportedException, IOException {
  // select all rows if the dimension does not exist in the current block
  if (!isDimensionPresentInCurrentBlock[0]) {
    int numberOfRows = blockChunkHolder.getDataBlock().nodeSize();
    return FilterUtil.createBitSetGroupWithDefaultValue(
        blockChunkHolder.getDataBlock().numberOfPages(), numberOfRows, true);
  }
  int blockIndex =
      segmentProperties.getDimensionOrdinalToBlockMapping().get(dimensionBlocksIndex[0]);
  // read the raw dimension chunk lazily and cache it in the holder
  if (null == blockChunkHolder.getDimensionRawDataChunk()[blockIndex]) {
    blockChunkHolder.getDimensionRawDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
        .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
  }
  DimensionRawColumnChunk rawColumnChunk =
      blockChunkHolder.getDimensionRawDataChunk()[blockIndex];
  BitSetGroup bitSetGroup = new BitSetGroup(rawColumnChunk.getPagesCount());
  for (int i = 0; i < rawColumnChunk.getPagesCount(); i++) {
    if (rawColumnChunk.getMinValues() != null) {
      // scan the page only if its min value can satisfy the "<=" filter
      if (isScanRequired(rawColumnChunk.getMinValues()[i], this.filterRangeValues)) {
        BitSet bitSet = getFilteredIndexes(rawColumnChunk.convertToDimColDataChunk(i),
            rawColumnChunk.getRowCount()[i]);
        bitSetGroup.setBitSet(bitSet, i);
      }
    } else {
      // no page statistics available, fall back to scanning the page
      BitSet bitSet = getFilteredIndexes(rawColumnChunk.convertToDimColDataChunk(i),
          rawColumnChunk.getRowCount()[i]);
      bitSetGroup.setBitSet(bitSet, i);
    }
  }
  return bitSetGroup;
}
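To make the page-pruning check above concrete, here is a minimal, self-contained sketch of the min-based test for a "less than or equal" filter. It is not CarbonData code: the class name, the long-free byte comparison, and the test values are illustrative assumptions, with the comparator mimicking the unsigned lexicographic order that ByteUtil.UnsafeComparer appears to use.

import java.util.BitSet;

/** Minimal sketch of per-page pruning for a "<=" filter, assuming pages expose
 *  byte[] min statistics and filter keys compare in unsigned lexicographic order.
 *  All names here are illustrative, not CarbonData APIs. */
public class LessThanEqualPruneSketch {

  /** Unsigned lexicographic comparison of two byte arrays. */
  static int compareUnsigned(byte[] a, byte[] b) {
    int len = Math.min(a.length, b.length);
    for (int i = 0; i < len; i++) {
      int diff = (a[i] & 0xFF) - (b[i] & 0xFF);
      if (diff != 0) return diff;
    }
    return a.length - b.length;
  }

  /** A page must be scanned if its minimum value is <= at least one filter key. */
  static boolean isScanRequired(byte[] pageMin, byte[][] filterKeys) {
    for (byte[] key : filterKeys) {
      if (compareUnsigned(pageMin, key) <= 0) {
        return true;   // some row in the page may satisfy "<= key"
      }
    }
    return false;      // page min is greater than every key: skip the page
  }

  public static void main(String[] args) {
    byte[][] keys = { { 0x05 } };
    System.out.println(isScanRequired(new byte[] { 0x03 }, keys)); // true: page min 3 <= 5
    System.out.println(isScanRequired(new byte[] { 0x09 }, keys)); // false: page min 9 > 5
  }
}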
Use of org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk in project carbondata by apache.
The class IncludeColGroupFilterExecuterImpl, method applyFilter:
@Override
public BitSetGroup applyFilter(BlocksChunkHolder blockChunkHolder) throws IOException {
  int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
      .get(dimColumnEvaluatorInfo.getColumnIndex());
  if (null == blockChunkHolder.getDimensionRawDataChunk()[blockIndex]) {
    blockChunkHolder.getDimensionRawDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
        .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
  }
  DimensionRawColumnChunk dimensionRawColumnChunk =
      blockChunkHolder.getDimensionRawDataChunk()[blockIndex];
  BitSetGroup bitSetGroup = new BitSetGroup(dimensionRawColumnChunk.getPagesCount());
  for (int i = 0; i < dimensionRawColumnChunk.getPagesCount(); i++) {
    if (dimensionRawColumnChunk.getMaxValues() != null) {
      BitSet bitSet = getFilteredIndexes(dimensionRawColumnChunk.convertToDimColDataChunk(i),
          dimensionRawColumnChunk.getRowCount()[i]);
      bitSetGroup.setBitSet(bitSet, i);
    }
  }
  return bitSetGroup;
}
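All of these executers share the same lazy read-and-cache pattern: the chunk holder keeps an array of raw chunks indexed by block, and the file is touched only the first time a block is requested. The sketch below shows that pattern in isolation; RawChunk, Holder, and the simulated read are illustrative stand-ins, not CarbonData types.

/** Minimal sketch of the lazy read-and-cache pattern used above. */
public class LazyChunkHolderSketch {

  static class RawChunk {
    final int blockIndex;
    RawChunk(int blockIndex) { this.blockIndex = blockIndex; }
  }

  static class Holder {
    private final RawChunk[] chunks;
    Holder(int blockCount) { chunks = new RawChunk[blockCount]; }

    RawChunk getOrRead(int blockIndex) {
      if (chunks[blockIndex] == null) {                // not yet read for this block
        chunks[blockIndex] = new RawChunk(blockIndex); // stands in for the file read
      }
      return chunks[blockIndex];                       // later filters reuse the cached chunk
    }
  }

  public static void main(String[] args) {
    Holder holder = new Holder(4);
    RawChunk first = holder.getOrRead(2);   // first access triggers the "read"
    RawChunk again = holder.getOrRead(2);   // second access hits the cache
    System.out.println(first == again);     // true: the same chunk instance is reused
  }
}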
Use of org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk in project carbondata by apache.
The class RowLevelRangeGrtThanFiterExecuterImpl, method applyFilter:
@Override
public BitSetGroup applyFilter(BlocksChunkHolder blockChunkHolder)
    throws FilterUnsupportedException, IOException {
  // select all rows if the dimension does not exist in the current block
  if (!isDimensionPresentInCurrentBlock[0]) {
    int numberOfRows = blockChunkHolder.getDataBlock().nodeSize();
    return FilterUtil.createBitSetGroupWithDefaultValue(
        blockChunkHolder.getDataBlock().numberOfPages(), numberOfRows, true);
  }
  int blockIndex =
      segmentProperties.getDimensionOrdinalToBlockMapping().get(dimensionBlocksIndex[0]);
  if (null == blockChunkHolder.getDimensionRawDataChunk()[blockIndex]) {
    blockChunkHolder.getDimensionRawDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
        .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
  }
  DimensionRawColumnChunk rawColumnChunk =
      blockChunkHolder.getDimensionRawDataChunk()[blockIndex];
  BitSetGroup bitSetGroup = new BitSetGroup(rawColumnChunk.getPagesCount());
  for (int i = 0; i < rawColumnChunk.getPagesCount(); i++) {
    if (rawColumnChunk.getMaxValues() != null) {
      if (isScanRequired(rawColumnChunk.getMaxValues()[i], this.filterRangeValues)) {
        int compare = ByteUtil.UnsafeComparer.INSTANCE
            .compareTo(filterRangeValues[0], rawColumnChunk.getMinValues()[i]);
        if (compare < 0) {
          // filter value is smaller than the page minimum: every row qualifies,
          // so select the whole page without decoding it
          BitSet bitSet = new BitSet(rawColumnChunk.getRowCount()[i]);
          bitSet.flip(0, rawColumnChunk.getRowCount()[i]);
          bitSetGroup.setBitSet(bitSet, i);
        } else {
          BitSet bitSet = getFilteredIndexes(rawColumnChunk.convertToDimColDataChunk(i),
              rawColumnChunk.getRowCount()[i]);
          bitSetGroup.setBitSet(bitSet, i);
        }
      }
    } else {
      BitSet bitSet = getFilteredIndexes(rawColumnChunk.convertToDimColDataChunk(i),
          rawColumnChunk.getRowCount()[i]);
      bitSetGroup.setBitSet(bitSet, i);
    }
  }
  return bitSetGroup;
}
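The interesting part of this executer is the whole-page shortcut: when the filter value is below the page minimum, every row qualifies, so the page's bitset is filled without decoding the column data. Here is a minimal, self-contained sketch of that shortcut; the class name, the long-valued page, and the row-wise fallback are illustrative assumptions, not CarbonData code.

import java.util.BitSet;

/** Minimal sketch of the "whole page matches" shortcut for a ">" filter. */
public class GreaterThanPageShortcut {

  static BitSet filterPage(long filterValue, long pageMin, long[] pageValues) {
    int rowCount = pageValues.length;
    if (filterValue < pageMin) {
      BitSet all = new BitSet(rowCount);
      all.flip(0, rowCount);        // set the bit for every row: the whole page qualifies
      return all;
    }
    BitSet bits = new BitSet(rowCount);
    for (int row = 0; row < rowCount; row++) {   // fallback: row-by-row comparison
      if (pageValues[row] > filterValue) {
        bits.set(row);
      }
    }
    return bits;
  }

  public static void main(String[] args) {
    long[] page = { 10, 12, 15 };
    System.out.println(filterPage(5, 10, page));  // {0, 1, 2}: shortcut, all rows set
    System.out.println(filterPage(11, 10, page)); // {1, 2}: row-wise scan
  }
}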
Use of org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk in project carbondata by apache.
The class IncludeFilterExecuterImpl, method applyFilter:
@Override
public BitSetGroup applyFilter(BlocksChunkHolder blockChunkHolder) throws IOException {
  int blockIndex = segmentProperties.getDimensionOrdinalToBlockMapping()
      .get(dimColumnEvaluatorInfo.getColumnIndex());
  if (null == blockChunkHolder.getDimensionRawDataChunk()[blockIndex]) {
    blockChunkHolder.getDimensionRawDataChunk()[blockIndex] = blockChunkHolder.getDataBlock()
        .getDimensionChunk(blockChunkHolder.getFileReader(), blockIndex);
  }
  DimensionRawColumnChunk dimensionRawColumnChunk =
      blockChunkHolder.getDimensionRawDataChunk()[blockIndex];
  BitSetGroup bitSetGroup = new BitSetGroup(dimensionRawColumnChunk.getPagesCount());
  for (int i = 0; i < dimensionRawColumnChunk.getPagesCount(); i++) {
    if (dimensionRawColumnChunk.getMaxValues() != null) {
      if (isScanRequired(dimensionRawColumnChunk.getMaxValues()[i],
          dimensionRawColumnChunk.getMinValues()[i], dimColumnExecuterInfo.getFilterKeys())) {
        BitSet bitSet = getFilteredIndexes(dimensionRawColumnChunk.convertToDimColDataChunk(i),
            dimensionRawColumnChunk.getRowCount()[i]);
        bitSetGroup.setBitSet(bitSet, i);
      }
    } else {
      BitSet bitSet = getFilteredIndexes(dimensionRawColumnChunk.convertToDimColDataChunk(i),
          dimensionRawColumnChunk.getRowCount()[i]);
      bitSetGroup.setBitSet(bitSet, i);
    }
  }
  return bitSetGroup;
}
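For the include (IN-list) filter, a page can be skipped when none of the filter keys can fall inside the page's [min, max] range. The following minimal sketch shows that check with long values standing in for the byte[] keys CarbonData actually compares; the class and method names are illustrative assumptions.

/** Minimal sketch of min/max pruning for an include (IN-list) filter. */
public class IncludePruneSketch {

  static boolean isScanRequired(long pageMax, long pageMin, long[] filterKeys) {
    for (long key : filterKeys) {
      if (key >= pageMin && key <= pageMax) {
        return true;   // key may exist in this page, so decode and scan it
      }
    }
    return false;      // no key overlaps the page range: skip the page
  }

  public static void main(String[] args) {
    long[] keys = { 7, 42 };
    System.out.println(isScanRequired(50, 40, keys)); // true: 42 lies in [40, 50]
    System.out.println(isScanRequired(30, 20, keys)); // false: neither key overlaps
  }
}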
Use of org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk in project carbondata by apache.
The class CompressedDimensionChunkFileBasedReaderV1, method readRawDimensionChunk:
/**
 * Below method will be used to read the raw chunk based on blocklet index
 *
 * @param fileReader    file reader to read the blocks from file
 * @param blockletIndex index of the blocklet to be read
 * @return dimension raw column chunk
 */
@Override
public DimensionRawColumnChunk readRawDimensionChunk(FileHolder fileReader, int blockletIndex)
    throws IOException {
  DataChunk dataChunk = dimensionColumnChunk.get(blockletIndex);
  ByteBuffer buffer = null;
  synchronized (fileReader) {
    buffer = fileReader.readByteBuffer(filePath, dataChunk.getDataPageOffset(),
        dataChunk.getDataPageLength());
  }
  // the V1 reader exposes a single data page per blocklet, hence the single-entry counts
  DimensionRawColumnChunk rawColumnChunk = new DimensionRawColumnChunk(blockletIndex, buffer, 0,
      dataChunk.getDataPageLength(), this);
  rawColumnChunk.setFileHolder(fileReader);
  rawColumnChunk.setPagesCount(1);
  rawColumnChunk.setRowCount(new int[] { numberOfRows });
  return rawColumnChunk;
}
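At its core, the synchronized readByteBuffer call is a positioned read of dataPageLength bytes starting at dataPageOffset. The sketch below shows that operation with plain java.nio instead of CarbonData's FileHolder; the class name and the loop structure are assumptions for illustration, not the library's implementation.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

/** Minimal sketch of a positioned read of one column page from a data file. */
public class RawPageReadSketch {

  static ByteBuffer readRange(Path file, long offset, int length) throws IOException {
    ByteBuffer buffer = ByteBuffer.allocate(length);
    try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ)) {
      long pos = offset;
      while (buffer.hasRemaining()) {
        int n = channel.read(buffer, pos);   // positioned read, no shared channel state
        if (n < 0) break;                    // EOF before the requested length
        pos += n;
      }
    }
    buffer.flip();                           // prepare the buffer for the decoder
    return buffer;
  }
}

The synchronized block in the original presumably exists because a single FileHolder instance can be shared across threads; the positioned-read variant above avoids mutating any shared position.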