Use of org.apache.carbondata.processing.sortandgroupby.exception.CarbonSortKeyAndGroupByException in project carbondata by apache.
The class SingleThreadFinalSortFilesMerger, method startSorting.
/**
 * Below method will be used to start the sorting process. This method will get
 * all the temp files present in the sort temp folder, then it will create the
 * record holder heap, read the first record from each file and
 * initialize the heap.
 *
 * @throws CarbonSortKeyAndGroupByException
 */
private void startSorting(File[] files) throws CarbonDataWriterException {
  this.fileCounter = files.length;
  if (fileCounter == 0) {
    LOGGER.info("No files to merge sort");
    return;
  }
  this.fileBufferSize = CarbonDataProcessorUtil.getFileBufferSize(this.fileCounter,
      CarbonProperties.getInstance(), CarbonCommonConstants.CONSTANT_SIZE_TEN);
  LOGGER.info("Number of temp file: " + this.fileCounter);
  LOGGER.info("File Buffer Size: " + this.fileBufferSize);
  // create record holder heap
  createRecordHolderQueue(files);
  // iterate over file list and create chunk holder and add to heap
  LOGGER.info("Started adding first record from each file");
  int maxThreadForSorting = 0;
  try {
    maxThreadForSorting = Integer.parseInt(CarbonProperties.getInstance().getProperty(
        CarbonCommonConstants.CARBON_MERGE_SORT_READER_THREAD,
        CarbonCommonConstants.CARBON_MERGE_SORT_READER_THREAD_DEFAULTVALUE));
  } catch (NumberFormatException e) {
    maxThreadForSorting =
        Integer.parseInt(CarbonCommonConstants.CARBON_MERGE_SORT_READER_THREAD_DEFAULTVALUE);
  }
  ExecutorService service = Executors.newFixedThreadPool(maxThreadForSorting);
  for (final File tempFile : files) {
    Callable<Void> runnable = new Callable<Void>() {
      @Override
      public Void call() throws CarbonSortKeyAndGroupByException {
        // create chunk holder
        SortTempFileChunkHolder sortTempFileChunkHolder =
            new SortTempFileChunkHolder(tempFile, dimensionCount, complexDimensionCount,
                measureCount, fileBufferSize, noDictionaryCount, measureDataType,
                isNoDictionaryColumn, isNoDictionarySortColumn);
        // initialize the holder and read its first record
        sortTempFileChunkHolder.initialize();
        sortTempFileChunkHolder.readRow();
        // add to heap
        synchronized (LOCKOBJECT) {
          recordHolderHeapLocal.add(sortTempFileChunkHolder);
        }
        return null;
      }
    };
    service.submit(runnable);
  }
  service.shutdown();
  try {
    service.awaitTermination(2, TimeUnit.HOURS);
  } catch (Exception e) {
    throw new CarbonDataWriterException(e.getMessage(), e);
  }
  LOGGER.info("Heap Size: " + this.recordHolderHeapLocal.size());
}
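The method above is the setup phase of a k-way merge: each sort temp file gets a chunk holder, the holders are initialized in parallel on a bounded thread pool, and the first row of each file seeds a shared heap that later drives the merge. Below is a minimal, self-contained sketch of that pattern, assuming plain line-based temp files; the FileHolder class, the directory name, and the thread count are illustrative assumptions, not part of the CarbonData API.

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.PriorityQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ParallelHeapInitSketch {

  // Illustrative holder: wraps one temp file and exposes its current (first) row.
  static class FileHolder implements Comparable<FileHolder> {
    private final BufferedReader reader;
    private String current;

    FileHolder(File file) throws IOException {
      this.reader = new BufferedReader(new FileReader(file));
    }

    void readRow() throws IOException {
      // assumes every temp file has at least one row, as the merger does
      current = reader.readLine();
    }

    @Override
    public int compareTo(FileHolder other) {
      return current.compareTo(other.current);
    }
  }

  public static void main(String[] args) throws Exception {
    File[] files = new File("sort-temp-dir").listFiles();  // hypothetical temp folder
    if (files == null || files.length == 0) {
      return;
    }
    // heap shared by all initializer tasks; PriorityQueue is not thread-safe,
    // so additions are guarded by a lock object, mirroring LOCKOBJECT above
    final PriorityQueue<FileHolder> heap = new PriorityQueue<>(files.length);
    final Object lock = new Object();
    ExecutorService service = Executors.newFixedThreadPool(Math.min(files.length, 4));
    for (final File file : files) {
      service.submit(() -> {
        FileHolder holder = new FileHolder(file);
        holder.readRow();            // read the first record of this file
        synchronized (lock) {
          heap.add(holder);          // seed the merge heap
        }
        return null;
      });
    }
    service.shutdown();
    service.awaitTermination(1, TimeUnit.HOURS);
    System.out.println("Heap size: " + heap.size());
  }
}

From here, an actual merge would repeatedly poll the smallest holder, emit its current row, advance it, and re-add it until every file is exhausted.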
Use of org.apache.carbondata.processing.sortandgroupby.exception.CarbonSortKeyAndGroupByException in project carbondata by apache.
The class UnsafeIntermediateMerger, method finish.
public void finish() throws CarbonSortKeyAndGroupByException {
  try {
    executorService.shutdown();
    executorService.awaitTermination(2, TimeUnit.DAYS);
  } catch (InterruptedException e) {
    throw new CarbonSortKeyAndGroupByException("Problem while shutdown the server ", e);
  }
}
Use of org.apache.carbondata.processing.sortandgroupby.exception.CarbonSortKeyAndGroupByException in project carbondata by apache.
The class SortIntermediateFileMerger, method finish.
public void finish() throws CarbonSortKeyAndGroupByException {
  try {
    executorService.shutdown();
    executorService.awaitTermination(2, TimeUnit.DAYS);
  } catch (InterruptedException e) {
    throw new CarbonSortKeyAndGroupByException("Problem while shutdown the server ", e);
  }
  procFiles.clear();
  procFiles = null;
}
Use of org.apache.carbondata.processing.sortandgroupby.exception.CarbonSortKeyAndGroupByException in project carbondata by apache.
The class IntermediateFileMerger, method writeDataTofile.
/**
 * Below method will be used to write data to a file.
 *
 * @throws CarbonSortKeyAndGroupByException problem while writing
 */
private void writeDataTofile(Object[] row) throws CarbonSortKeyAndGroupByException {
  if (mergerParameters.isSortFileCompressionEnabled() || mergerParameters.isPrefetch()) {
    if (entryCount == 0) {
      records = new Object[totalSize][];
      records[entryCount++] = row;
      return;
    }
    records[entryCount++] = row;
    if (entryCount == totalSize) {
      this.writer.writeSortTempFile(records);
      entryCount = 0;
      records = new Object[totalSize][];
    }
    return;
  }
  try {
    DataType[] aggType = mergerParameters.getMeasureDataType();
    int[] mdkArray = (int[]) row[0];
    byte[][] nonDictArray = (byte[][]) row[1];
    int mdkIndex = 0;
    int nonDictKeyIndex = 0;
    // write dictionary and non dictionary dimensions here.
    for (boolean nodictinary : noDictionarycolumnMapping) {
      if (nodictinary) {
        byte[] col = nonDictArray[nonDictKeyIndex++];
        stream.writeShort(col.length);
        stream.write(col);
      } else {
        stream.writeInt(mdkArray[mdkIndex++]);
      }
    }
    int fieldIndex = 0;
    for (int counter = 0; counter < mergerParameters.getMeasureColCount(); counter++) {
      if (null != NonDictionaryUtil.getMeasure(fieldIndex, row)) {
        stream.write((byte) 1);
        switch (aggType[counter]) {
          case SHORT:
          case INT:
          case LONG:
            Long val = (Long) NonDictionaryUtil.getMeasure(fieldIndex, row);
            stream.writeLong(val);
            break;
          case DOUBLE:
            stream.writeDouble((Double) NonDictionaryUtil.getMeasure(fieldIndex, row));
            break;
          case DECIMAL:
            byte[] bigDecimalInBytes = (byte[]) NonDictionaryUtil.getMeasure(fieldIndex, row);
            stream.writeInt(bigDecimalInBytes.length);
            stream.write(bigDecimalInBytes);
            break;
        }
      } else {
        stream.write((byte) 0);
      }
      fieldIndex++;
    }
  } catch (IOException e) {
    throw new CarbonSortKeyAndGroupByException("Problem while writing the file", e);
  }
}
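The write path above fixes an on-disk row layout: for each dimension either an int surrogate key (dictionary column) or a short length followed by the raw bytes (no-dictionary column), and for each measure a one-byte null marker followed by a long, a double, or a length-prefixed decimal byte array. The sketch below shows a matching read side for that layout; it is a simplified illustration rather than the project's SortTempFileChunkHolder, and the MeasureType enum is a stand-in for the DataType values handled in the switch above.

import java.io.DataInputStream;
import java.io.IOException;

public class SortTempRowReaderSketch {

  // simplified stand-in for the measure data types handled above
  enum MeasureType { LONG_LIKE, DOUBLE, DECIMAL }

  // Reads one row in the layout produced by writeDataTofile: dimensions first
  // (int surrogate or length-prefixed bytes), then measures (null marker, then value).
  public static Object[] readRow(DataInputStream stream, boolean[] noDictionaryColumnMapping,
      MeasureType[] measureTypes) throws IOException {
    Object[] dims = new Object[noDictionaryColumnMapping.length];
    for (int i = 0; i < noDictionaryColumnMapping.length; i++) {
      if (noDictionaryColumnMapping[i]) {
        // no-dictionary column: short length followed by the raw bytes
        byte[] col = new byte[stream.readShort()];
        stream.readFully(col);
        dims[i] = col;
      } else {
        // dictionary column: int surrogate key
        dims[i] = stream.readInt();
      }
    }
    Object[] measures = new Object[measureTypes.length];
    for (int i = 0; i < measureTypes.length; i++) {
      if (stream.readByte() == (byte) 1) {
        // non-null marker seen, so a value follows
        switch (measureTypes[i]) {
          case LONG_LIKE:
            measures[i] = stream.readLong();
            break;
          case DOUBLE:
            measures[i] = stream.readDouble();
            break;
          case DECIMAL:
            byte[] decimal = new byte[stream.readInt()];
            stream.readFully(decimal);
            measures[i] = decimal;
            break;
        }
      }
    }
    return new Object[] { dims, measures };
  }
}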
Use of org.apache.carbondata.processing.sortandgroupby.exception.CarbonSortKeyAndGroupByException in project carbondata by apache.
The class SortDataRows, method addRowBatch.
/**
 * This method will be used to add a new row batch.
 *
 * @param rowBatch new rowBatch
 * @param size number of rows in the batch
 * @throws CarbonSortKeyAndGroupByException problem while writing
 */
public void addRowBatch(Object[][] rowBatch, int size) throws CarbonSortKeyAndGroupByException {
  // sort the list and then write current list data to file
  synchronized (addRowsLock) {
    int sizeLeft = 0;
    if (entryCount + size >= sortBufferSize) {
      LOGGER.debug("************ Writing to temp file ********** ");
      intermediateFileMerger.startMergingIfPossible();
      Object[][] recordHolderListLocal = recordHolderList;
      sizeLeft = sortBufferSize - entryCount;
      if (sizeLeft > 0) {
        System.arraycopy(rowBatch, 0, recordHolderListLocal, entryCount, sizeLeft);
      }
      try {
        semaphore.acquire();
        dataSorterAndWriterExecutorService.submit(new DataSorterAndWriter(recordHolderListLocal));
      } catch (Exception e) {
        LOGGER.error("exception occurred while trying to acquire a semaphore lock: " + e.getMessage());
        throw new CarbonSortKeyAndGroupByException(e);
      }
      // create the new holder Array
      this.recordHolderList = new Object[this.sortBufferSize][];
      this.entryCount = 0;
      size = size - sizeLeft;
      if (size == 0) {
        return;
      }
    }
    System.arraycopy(rowBatch, sizeLeft, recordHolderList, entryCount, size);
    entryCount += size;
  }
}
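addRowBatch is a swap-and-flush buffer: when an incoming batch would fill the in-memory buffer, only the part that fits is copied in, the full buffer is handed to a background sorter/writer (with a semaphore bounding how many flushes run at once), a fresh buffer is allocated, and the remainder of the batch is copied into it. The sketch below isolates that control flow; the class name, the flusher callback, and the concurrency limits are illustrative assumptions rather than CarbonData types, and like the original it assumes a single batch never exceeds the buffer size.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.function.Consumer;

public class SwapAndFlushBufferSketch {

  private final int bufferSize;
  private final Consumer<Object[][]> flusher;            // sorts and writes a full buffer
  private final Semaphore inFlight = new Semaphore(2);   // bounds concurrent flushes
  private final ExecutorService flushService = Executors.newFixedThreadPool(2);

  private Object[][] buffer;
  private int entryCount;

  public SwapAndFlushBufferSketch(int bufferSize, Consumer<Object[][]> flusher) {
    this.bufferSize = bufferSize;
    this.flusher = flusher;
    this.buffer = new Object[bufferSize][];
  }

  public synchronized void addRowBatch(Object[][] rowBatch, int size) throws InterruptedException {
    int sizeLeft = 0;
    if (entryCount + size >= bufferSize) {
      // fill the current buffer to capacity with the head of the batch
      sizeLeft = bufferSize - entryCount;
      final Object[][] full = buffer;
      if (sizeLeft > 0) {
        System.arraycopy(rowBatch, 0, full, entryCount, sizeLeft);
      }
      // hand the full buffer to a background flush, bounded by the semaphore
      inFlight.acquire();
      flushService.submit(() -> {
        try {
          flusher.accept(full);
        } finally {
          inFlight.release();
        }
      });
      // start a fresh buffer, then fall through to copy the remainder of the batch
      buffer = new Object[bufferSize][];
      entryCount = 0;
      size -= sizeLeft;
      if (size == 0) {
        return;
      }
    }
    System.arraycopy(rowBatch, sizeLeft, buffer, entryCount, size);
    entryCount += size;
  }
}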