Use of org.apache.carbondata.processing.sortandgroupby.exception.CarbonSortKeyAndGroupByException in the Apache CarbonData project. Example from the class SortDataRows, method addRow.
/**
 * Adds a new row to the in-memory record holder. When the holder reaches the
 * configured sort buffer size, the filled buffer is handed off to a background
 * task that sorts it and writes it to a temp file, and a fresh buffer is started.
 *
 * @param row new row
 * @throws CarbonSortKeyAndGroupByException problem while writing, or if the
 *         thread is interrupted while waiting to submit the sort-and-write task
 */
public void addRow(Object[] row) throws CarbonSortKeyAndGroupByException {
  // if record holder list size is equal to sort buffer size then it will
  // sort the list and then write current list data to file
  int currentSize = entryCount;
  if (sortBufferSize == currentSize) {
    LOGGER.debug("************ Writing to temp file ********** ");
    intermediateFileMerger.startMergingIfPossible();
    // capture the filled buffer before it is replaced below
    Object[][] recordHolderListLocal = recordHolderList;
    try {
      // the semaphore bounds the number of in-flight sort-and-write tasks
      semaphore.acquire();
      dataSorterAndWriterExecutorService.submit(new DataSorterAndWriter(recordHolderListLocal));
    } catch (InterruptedException e) {
      // restore the interrupt status so callers up the stack can observe it
      Thread.currentThread().interrupt();
      LOGGER.error("exception occurred while trying to acquire a semaphore lock: " + e.getMessage());
      // preserve the original exception as the cause instead of only its message
      throw new CarbonSortKeyAndGroupByException(e.getMessage(), e);
    }
    // create the new holder Array
    this.recordHolderList = new Object[this.sortBufferSize][];
    this.entryCount = 0;
  }
  recordHolderList[entryCount++] = row;
}
Use of org.apache.carbondata.processing.sortandgroupby.exception.CarbonSortKeyAndGroupByException in the Apache CarbonData project. Example from the class SortDataRows, method writeData.
/**
 * Writes the given rows to a sort temp file. File layout: the entry count,
 * then for each row the dictionary / no-dictionary dimensions (no-dictionary
 * values as length-prefixed byte arrays, dictionary values as ints), the
 * complex dimensions (length-prefixed byte arrays), and finally the measure
 * values, each preceded by a 1-byte null marker (1 = present, 0 = null).
 *
 * @param recordHolderList rows to persist
 * @param entryCountLocal number of valid rows in recordHolderList
 * @param file destination sort temp file
 * @throws CarbonSortKeyAndGroupByException problem while writing the file
 */
private void writeData(Object[][] recordHolderList, int entryCountLocal, File file) throws CarbonSortKeyAndGroupByException {
  DataOutputStream stream = null;
  try {
    // open stream
    stream = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(file), parameters.getFileWriteBufferSize()));
    // write number of entries to the file
    stream.writeInt(entryCountLocal);
    int complexDimColCount = parameters.getComplexDimColCount();
    int dimColCount = parameters.getDimColCount() + complexDimColCount;
    // hoisted: loop-invariant, no need to re-query it for every row/measure
    int measureColCount = parameters.getMeasureColCount();
    DataType[] type = parameters.getMeasureDataType();
    boolean[] noDictionaryDimnesionMapping = parameters.getNoDictionaryDimnesionColumn();
    Object[] row = null;
    for (int i = 0; i < entryCountLocal; i++) {
      // get row from record holder list
      row = recordHolderList[i];
      int dimCount = 0;
      // write dictionary and non dictionary dimensions here.
      for (; dimCount < noDictionaryDimnesionMapping.length; dimCount++) {
        if (noDictionaryDimnesionMapping[dimCount]) {
          // no-dictionary value: length-prefixed raw bytes
          byte[] col = (byte[]) row[dimCount];
          stream.writeShort(col.length);
          stream.write(col);
        } else {
          // dictionary value: surrogate key as int
          stream.writeInt((int) row[dimCount]);
        }
      }
      // write complex dimensions here.
      for (; dimCount < dimColCount; dimCount++) {
        byte[] value = (byte[]) row[dimCount];
        stream.writeShort(value.length);
        stream.write(value);
      }
      // as measures are stored in separate array.
      for (int mesCount = 0; mesCount < measureColCount; mesCount++) {
        Object value = row[mesCount + dimColCount];
        if (null != value) {
          // 1-byte marker: value present
          stream.write((byte) 1);
          switch(type[mesCount]) {
            case SHORT:
              stream.writeShort((Short) value);
              break;
            case INT:
              stream.writeInt((Integer) value);
              break;
            case LONG:
              stream.writeLong((Long) value);
              break;
            case DOUBLE:
              stream.writeDouble((Double) value);
              break;
            case DECIMAL:
              // decimals are serialized as length-prefixed byte arrays
              BigDecimal val = (BigDecimal) value;
              byte[] bigDecimalInBytes = DataTypeUtil.bigDecimalToByte(val);
              stream.writeInt(bigDecimalInBytes.length);
              stream.write(bigDecimalInBytes);
              break;
          }
        } else {
          // 1-byte marker: value is null
          stream.write((byte) 0);
        }
      }
    }
  } catch (IOException e) {
    throw new CarbonSortKeyAndGroupByException("Problem while writing the file", e);
  } finally {
    // close streams
    CarbonUtil.closeStreams(stream);
  }
}
Use of org.apache.carbondata.processing.sortandgroupby.exception.CarbonSortKeyAndGroupByException in the Apache CarbonData project. Example from the class CompressedTempSortFileWriter, method writeSortTempFile.
/**
 * Writes the given rows to the sort temp file in compressed form: the record
 * count, the compressed block length, then the compressed block itself.
 *
 * @param records rows to write
 * @throws CarbonSortKeyAndGroupByException problem while writing the sort temp file
 */
public void writeSortTempFile(Object[][] records) throws CarbonSortKeyAndGroupByException {
  DataOutputStream dataOutputStream = null;
  ByteArrayOutputStream blockDataArray = null;
  int totalSize = 0;
  int recordSize = 0;
  try {
    // pre-size the in-memory buffer from an estimated per-record footprint
    recordSize = (measureCount * CarbonCommonConstants.DOUBLE_SIZE_IN_BYTE) + (dimensionCount * CarbonCommonConstants.INT_SIZE_IN_BYTE);
    totalSize = records.length * recordSize;
    blockDataArray = new ByteArrayOutputStream(totalSize);
    dataOutputStream = new DataOutputStream(blockDataArray);
    // serialize the rows into the in-memory buffer
    UnCompressedTempSortFileWriter.writeDataOutputStream(records, dataOutputStream, measureCount, dimensionCount, noDictionaryCount, complexDimensionCount);
    stream.writeInt(records.length);
    // compress the serialized block and write it length-prefixed
    byte[] byteArray = CompressorFactory.getInstance().getCompressor().compressByte(blockDataArray.toByteArray());
    stream.writeInt(byteArray.length);
    stream.write(byteArray);
  } catch (IOException e) {
    throw new CarbonSortKeyAndGroupByException(e);
  } finally {
    // close the wrapping stream first so any buffered bytes are flushed
    // before the underlying byte-array stream is closed
    CarbonUtil.closeStreams(dataOutputStream);
    CarbonUtil.closeStreams(blockDataArray);
  }
}
Use of org.apache.carbondata.processing.sortandgroupby.exception.CarbonSortKeyAndGroupByException in the Apache CarbonData project. Example from the class UnCompressedTempSortFileWriter, method writeSortTempFile.
/**
 * Writes the given rows to the sort temp file uncompressed: the record count,
 * the serialized block length, then the serialized block itself.
 *
 * @param records rows to write
 * @throws CarbonSortKeyAndGroupByException problem while writing the sort temp file
 */
public void writeSortTempFile(Object[][] records) throws CarbonSortKeyAndGroupByException {
  ByteArrayOutputStream blockDataArray = null;
  DataOutputStream dataOutputStream = null;
  int totalSize = 0;
  int recordSize = 0;
  try {
    // pre-size the in-memory buffer from an estimated per-record footprint
    recordSize = (measureCount * CarbonCommonConstants.DOUBLE_SIZE_IN_BYTE) + (dimensionCount * CarbonCommonConstants.INT_SIZE_IN_BYTE);
    totalSize = records.length * recordSize;
    blockDataArray = new ByteArrayOutputStream(totalSize);
    dataOutputStream = new DataOutputStream(blockDataArray);
    // serialize the rows into the in-memory buffer
    writeDataOutputStream(records, dataOutputStream, measureCount, dimensionCount, noDictionaryCount, complexDimensionCount);
    stream.writeInt(records.length);
    // write the serialized block length-prefixed, without compression
    byte[] byteArray = blockDataArray.toByteArray();
    stream.writeInt(byteArray.length);
    stream.write(byteArray);
  } catch (IOException e) {
    throw new CarbonSortKeyAndGroupByException(e);
  } finally {
    // close the wrapping stream first so any buffered bytes are flushed
    // before the underlying byte-array stream is closed
    CarbonUtil.closeStreams(dataOutputStream);
    CarbonUtil.closeStreams(blockDataArray);
  }
}
Use of org.apache.carbondata.processing.sortandgroupby.exception.CarbonSortKeyAndGroupByException in the Apache CarbonData project. Example from the class UnsafeIntermediateFileMerger, method finish.
/**
 * Releases resources held by this merger: closes every entry remaining in the
 * record holder heap, deletes the merged intermediate files and clears the
 * reusable row buffer.
 *
 * @throws CarbonSortKeyAndGroupByException problem while deleting the intermediate files
 */
private void finish() throws CarbonSortKeyAndGroupByException {
  if (recordHolderHeap != null) {
    // drain the heap and close each holder so underlying files are released
    int size = recordHolderHeap.size();
    for (int i = 0; i < size; i++) {
      recordHolderHeap.poll().close();
    }
  }
  try {
    CarbonUtil.deleteFiles(intermediateFiles);
    rowData.clear();
  } catch (IOException e) {
    // preserve the original IO failure as the cause instead of dropping it
    throw new CarbonSortKeyAndGroupByException("Problem while deleting the intermediate files", e);
  }
}
Aggregations