Use of org.apache.carbondata.processing.loading.row.IntermediateSortTempRow in project carbondata by apache.
The class UnsafeIntermediateFileMerger, method getSortedRecordFromFile.
/**
 * This method will be used to get a sorted sort temp row from the sort temp files.
 *
 * @return sorted record
 * @throws CarbonSortKeyAndGroupByException
 */
private IntermediateSortTempRow getSortedRecordFromFile() throws CarbonSortKeyAndGroupByException {
  IntermediateSortTempRow row = null;
  // peek at the top object of the heap.
  // The heap maintains a binary tree ordered by the comparator passed to it.
  // A poll always removes the root of the tree and then performs a trickle-down
  // (sift-down) operation, whose complexity is O(log n).
  SortTempChunkHolder poll = this.recordHolderHeap.peek();
  // get the row from the chunk
  row = poll.getRow();
  // check whether any entry is left in this chunk
  if (!poll.hasNext()) {
    // the chunk is exhausted, so close the stream and remove the holder from the heap
    poll.close();
    this.recordHolderHeap.poll();
    // decrement the file counter
    --this.fileCounter;
    // return the row
    return row;
  }
  // read the next row from the chunk
  poll.readRow();
  // restore the heap property
  this.recordHolderHeap.siftTopDown();
  // return the row
  return row;
}
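The same replace-the-root pattern can be sketched with plain JDK collections. The following is a minimal, self-contained illustration assuming a hypothetical iterator-per-run abstraction; it is not the CarbonData API. Since java.util.PriorityQueue has no in-place siftTopDown, the head is removed and re-offered, costing two O(log n) heap operations where CarbonData's holder heap needs only one.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

// Minimal k-way merge sketch; types and names here are illustrative, not CarbonData classes.
final class KWayMergeSketch {

  // Heap entry: the current head row of one sorted run plus the run it came from.
  private static final class Head<T> {
    T row;
    final Iterator<T> run;
    Head(T row, Iterator<T> run) { this.row = row; this.run = run; }
  }

  // Each run is an iterator over an already-sorted source (e.g. one sort temp file).
  static <T> List<T> merge(List<Iterator<T>> sortedRuns, Comparator<T> cmp) {
    PriorityQueue<Head<T>> heap = new PriorityQueue<>((a, b) -> cmp.compare(a.row, b.row));
    for (Iterator<T> run : sortedRuns) {
      if (run.hasNext()) {
        heap.offer(new Head<>(run.next(), run));
      }
    }
    List<T> merged = new ArrayList<>();
    while (!heap.isEmpty()) {
      // take the globally smallest head; this mirrors getSortedRecordFromFile()
      Head<T> top = heap.poll();
      merged.add(top.row);
      if (top.run.hasNext()) {
        // re-insert with the run's next row; the CarbonData heap instead keeps the
        // holder at the root and calls siftTopDown() to avoid the extra remove/insert
        top.row = top.run.next();
        heap.offer(top);
      }
      // an exhausted run simply drops out of the heap (the analogue of --fileCounter)
    }
    return merged;
  }
}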
Use of org.apache.carbondata.processing.loading.row.IntermediateSortTempRow in project carbondata by apache.
The class IntermediateFileMerger, method getSortedRecordFromFile.
/**
 * This method will be used to get the sorted sort temp row from the sort temp file.
 *
 * @return sorted record
 * @throws CarbonSortKeyAndGroupByException
 */
private IntermediateSortTempRow getSortedRecordFromFile() throws CarbonSortKeyAndGroupByException {
  IntermediateSortTempRow row = null;
  // peek at the top object of the heap.
  // The heap maintains a binary tree ordered by the comparator passed to it.
  // A poll always removes the root of the tree and then performs a trickle-down
  // (sift-down) operation, whose complexity is O(log n).
  SortTempFileChunkHolder poll = this.recordHolderHeap.peek();
  // get the row from the chunk
  row = poll.getRow();
  // check whether any entry is left in this chunk
  if (!poll.hasNext()) {
    // the chunk is exhausted, so close the stream and remove the holder from the heap
    poll.closeStream();
    this.recordHolderHeap.poll();
    // decrement the file counter
    --this.fileCounter;
    // return the row
    return row;
  }
  // read the next row from the chunk
  poll.readRow();
  // restore the heap property
  this.recordHolderHeap.siftTopDown();
  // return the row
  return row;
}
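For reference, siftTopDown() is the standard trickle-down step on an array-backed binary heap after the root element has been replaced in place. A generic sketch under assumed field names follows; it is not CarbonData's heap implementation.

import java.util.Comparator;

// Illustrative array-backed min-heap with an in-place "replace root" operation.
final class SimpleMinHeap<E> {
  private final Object[] items;   // heap storage, items[0] is the root
  private int size;               // number of live elements
  private final Comparator<E> cmp;

  SimpleMinHeap(Object[] items, int size, Comparator<E> cmp) {
    this.items = items;
    this.size = size;
    this.cmp = cmp;
  }

  // Trickle the root down until neither child is smaller: O(log n).
  @SuppressWarnings("unchecked")
  void siftTopDown() {
    int parent = 0;
    while (true) {
      int left = 2 * parent + 1;
      int right = left + 1;
      int smallest = parent;
      if (left < size && cmp.compare((E) items[left], (E) items[smallest]) < 0) {
        smallest = left;
      }
      if (right < size && cmp.compare((E) items[right], (E) items[smallest]) < 0) {
        smallest = right;
      }
      if (smallest == parent) {
        return;           // heap property restored
      }
      Object tmp = items[parent];
      items[parent] = items[smallest];
      items[smallest] = tmp;
      parent = smallest;  // continue trickling down
    }
  }
}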
Use of org.apache.carbondata.processing.loading.row.IntermediateSortTempRow in project carbondata by apache.
The class UnsafeSingleThreadFinalSortFilesMerger, method getSortedRecordFromFile.
/**
 * This method will be used to get the sorted record from file.
 *
 * @return sorted record
 */
private IntermediateSortTempRow getSortedRecordFromFile() throws CarbonDataWriterException {
  IntermediateSortTempRow row = null;
  // peek at the top object of the heap.
  // The heap maintains a binary tree ordered by the comparator passed to it.
  // A poll always removes the root of the tree and then performs a trickle-down
  // (sift-down) operation, whose complexity is O(log n).
  SortTempChunkHolder poll = this.recordHolderHeapLocal.peek();
  // get the row from the chunk
  row = poll.getRow();
  // check whether any entry is left in this chunk
  if (!poll.hasNext()) {
    // the chunk is exhausted, so close the stream and remove the holder from the heap
    poll.close();
    recordHolderHeapLocal.poll();
    // decrement the file counter
    --this.fileCounter;
    // return the row
    return row;
  }
  // read the next row from the chunk
  try {
    poll.readRow();
  } catch (Exception e) {
    throw new CarbonDataWriterException(e);
  }
  // restore the heap property
  this.recordHolderHeapLocal.siftTopDown();
  // return the row
  return row;
}
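A caller typically drains the final merger iterator-style, pulling rows until every sort temp source is exhausted. A hedged sketch of that consumption pattern is shown below; the method bodies and the reliance on fileCounter are assumptions for illustration, not necessarily the actual UnsafeSingleThreadFinalSortFilesMerger API.

// Illustrative only: assumes fileCounter counts sources that still hold unread rows.
public boolean hasNext() {
  return this.fileCounter > 0;
}

// Each call hands out exactly one globally smallest row from the merge heap.
public IntermediateSortTempRow next() throws CarbonDataWriterException {
  return getSortedRecordFromFile();
}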
Use of org.apache.carbondata.processing.loading.row.IntermediateSortTempRow in project carbondata by apache.
The class UnsafeInMemoryIntermediateDataMerger, method writeDataToFile.
private void writeDataToFile(UnsafeCarbonRowForMerge row) throws IOException {
  // materialize the sort temp row from the unsafe row page at the given address
  IntermediateSortTempRow sortTempRow = unsafeCarbonRowPages[row.index].getRow(row.address);
  // serialize the row to the merged output stream
  sortStepRowHandler.writeIntermediateSortTempRowToOutputStream(sortTempRow, outputStream);
}
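Here the row is materialized from the unsafe page and handed to SortStepRowHandler for serialization. A rough sketch of what streaming such a row to a DataOutputStream could look like, with a made-up layout purely for illustration (this is not CarbonData's actual sort temp file format):

import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical layout: dictionary sort dims as ints, no-dict sort dims as
// length-prefixed byte arrays, then the packed no-sort-dims-and-measures blob.
static void writeRow(int[] dictSortDims, byte[][] noDictSortDims,
    byte[] noSortDimsAndMeasures, DataOutputStream out) throws IOException {
  for (int dictDim : dictSortDims) {
    out.writeInt(dictDim);                 // fixed-width dictionary surrogate
  }
  for (byte[] noDictDim : noDictSortDims) {
    out.writeShort(noDictDim.length);      // length prefix
    out.write(noDictDim);                  // raw bytes of the no-dict value
  }
  out.writeInt(noSortDimsAndMeasures.length);
  out.write(noSortDimsAndMeasures);        // remaining columns, already packed
}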
Use of org.apache.carbondata.processing.loading.row.IntermediateSortTempRow in project carbondata by apache.
The class FileMergeSortComparatorTest, method testFileMergeSortComparator.
@Test
public void testFileMergeSortComparator() {
  CarbonTable carbonTable = CarbonTable.buildFromTableInfo(getTableInfo());
  // test get noDictDataTypes
  DataType[] noDictDataTypes = CarbonDataProcessorUtil.getNoDictDataTypes(carbonTable);
  assert (noDictDataTypes.length == 3 && noDictDataTypes[0].equals(DataTypes.INT)
      && noDictDataTypes[1].equals(DataTypes.STRING) && noDictDataTypes[2].equals(DataTypes.LONG));
  // test comparator
  Map<String, int[]> columnIdxMap = CarbonDataProcessorUtil.getColumnIdxBasedOnSchemaInRow(carbonTable);
  int[] columnIdxBasedOnSchemaInRows = columnIdxMap.get("columnIdxBasedOnSchemaInRow");
  int[] noDictSortIdxBasedOnSchemaInRows = columnIdxMap.get("noDictSortIdxBasedOnSchemaInRow");
  int[] dictSortIdxBasedOnSchemaInRows = columnIdxMap.get("dictSortIdxBasedOnSchemaInRow");
  assert (noDictSortIdxBasedOnSchemaInRows.length == 2 && noDictSortIdxBasedOnSchemaInRows[0] == 1
      && noDictSortIdxBasedOnSchemaInRows[1] == 2);
  assert (dictSortIdxBasedOnSchemaInRows.length == 1 && dictSortIdxBasedOnSchemaInRows[0] == 0);
  FileMergeSortComparator comparator = new FileMergeSortComparator(noDictDataTypes,
      columnIdxBasedOnSchemaInRows, noDictSortIdxBasedOnSchemaInRows, dictSortIdxBasedOnSchemaInRows);
  // prepare data for final sort
  int[] dictSortDims1 = { 1 };
  Object[] noDictSortDims1 = { 1, new byte[] { 98, 99, 104 }, 2 };
  byte[] noSortDimsAndMeasures1 = {};
  IntermediateSortTempRow row1 = new IntermediateSortTempRow(dictSortDims1, noDictSortDims1, noSortDimsAndMeasures1);
  int[] dictSortDims = { 1 };
  Object[] noDictSortDims = { 2, new byte[] { 98, 99, 100 }, 1 };
  byte[] noSortDimsAndMeasures = {};
  IntermediateSortTempRow row2 = new IntermediateSortTempRow(dictSortDims, noDictSortDims, noSortDimsAndMeasures);
  assert (comparator.compare(row1, row2) > 0);
}
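Why the final assertion holds: the dictionary sort value is 1 in both rows, so the decision falls to the no-dict sort columns at indices 1 and 2, and row1 is greater on both of them (the byte array { 98, 99, 104 }, i.e. "bch", versus { 98, 99, 100 }, i.e. "bcd", and the long 2 versus 1). Assuming the comparator falls back to an unsigned lexicographic byte comparison for the string column (an assumption for illustration), the third byte already decides it:

// Illustrative unsigned lexicographic byte comparison (assumed comparator behaviour).
static int compareBytes(byte[] a, byte[] b) {
  int len = Math.min(a.length, b.length);
  for (int i = 0; i < len; i++) {
    int diff = (a[i] & 0xFF) - (b[i] & 0xFF);  // compare as unsigned bytes
    if (diff != 0) {
      return diff;
    }
  }
  return a.length - b.length;
}

// compareBytes(new byte[] { 98, 99, 104 }, new byte[] { 98, 99, 100 }) == 4, i.e. > 0,
// consistent with comparator.compare(row1, row2) > 0 asserted above.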