Use of org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk in project carbondata by apache.
The following example is from the class CompressedDimensionChunkFileBasedReaderV2, method readRawDimensionChunksInGroup.
  /**
   * Below method will be used to read dimension chunk data in a group.
   * This method is useful to avoid multiple IO calls while reading the
   * data from the file.
   *
   * @param fileReader file reader to read the data
   * @param startColumnBlockletIndex first column blocklet index to be read
   * @param endColumnBlockletIndex end column blocklet index to be read
   * @return dimension raw column chunk array
   * @throws IOException if an I/O error occurs while reading the file
   */
  protected DimensionRawColumnChunk[] readRawDimensionChunksInGroup(FileHolder fileReader,
      int startColumnBlockletIndex, int endColumnBlockletIndex) throws IOException {
    long currentDimensionOffset = dimensionChunksOffset.get(startColumnBlockletIndex);
    ByteBuffer buffer = null;
    // Read the whole byte range covering all requested chunks in a single IO call.
    synchronized (fileReader) {
      buffer = fileReader.readByteBuffer(filePath, currentDimensionOffset,
          (int) (dimensionChunksOffset.get(endColumnBlockletIndex + 1) - currentDimensionOffset));
    }
    // Slice the shared buffer into one raw chunk per column, tracking each
    // chunk's offset and length within the buffer via a running length.
    DimensionRawColumnChunk[] dataChunks =
        new DimensionRawColumnChunk[endColumnBlockletIndex - startColumnBlockletIndex + 1];
    int index = 0;
    int runningLength = 0;
    for (int i = startColumnBlockletIndex; i <= endColumnBlockletIndex; i++) {
      int currentLength = (int) (dimensionChunksOffset.get(i + 1) - dimensionChunksOffset.get(i));
      dataChunks[index] = new DimensionRawColumnChunk(i, buffer, runningLength, currentLength, this);
      dataChunks[index].setFileHolder(fileReader);
      // The V2 format stores a single page per blocklet column chunk.
      dataChunks[index].setPagesCount(1);
      dataChunks[index].setRowCount(new int[] { numberOfRows });
      runningLength += currentLength;
      index++;
    }
    return dataChunks;
  }
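To make the offset arithmetic concrete, here is a minimal, self-contained sketch of the same slicing pattern. It is not CarbonData code: the GroupedReadSketch class, the ChunkSlice record, and the sample offsets are hypothetical stand-ins for the reader, DimensionRawColumnChunk, and dimensionChunksOffset, and the file read is simulated with an allocated buffer.

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;

public class GroupedReadSketch {

  // Hypothetical stand-in for DimensionRawColumnChunk: a view into the shared buffer.
  record ChunkSlice(int columnIndex, ByteBuffer sharedBuffer, int offset, int length) { }

  public static void main(String[] args) {
    // Absolute chunk start offsets in the file, as dimensionChunksOffset would hold them;
    // entry i + 1 marks where chunk i ends.
    List<Long> chunkOffsets = Arrays.asList(0L, 100L, 250L, 400L);
    int start = 0;
    int end = 2; // read chunks 0..2 with one IO call

    // One contiguous read covering all requested chunks (simulated here).
    long base = chunkOffsets.get(start);
    int totalLength = (int) (chunkOffsets.get(end + 1) - base);
    ByteBuffer buffer = ByteBuffer.allocate(totalLength);

    // Slice the buffer exactly as readRawDimensionChunksInGroup does.
    ChunkSlice[] slices = new ChunkSlice[end - start + 1];
    int runningLength = 0;
    for (int i = start; i <= end; i++) {
      int currentLength = (int) (chunkOffsets.get(i + 1) - chunkOffsets.get(i));
      slices[i - start] = new ChunkSlice(i, buffer, runningLength, currentLength);
      runningLength += currentLength;
    }

    for (ChunkSlice s : slices) {
      System.out.printf("column %d -> offset %d, length %d%n",
          s.columnIndex(), s.offset(), s.length());
    }
  }
}

Running the sketch prints offset 0/length 100, offset 100/length 150, and offset 250/length 150, showing how each chunk is addressed inside the single shared buffer. The design trade-off is the one the Javadoc hints at: one larger read replaces several seek-and-read cycles, which tends to pay off when adjacent dimension columns are projected together.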