Use of org.apache.carbondata.core.datastore.exception.CarbonDataWriterException in the Apache CarbonData project.
Class CarbonFactDataHandlerColumnar, method closeHandler:
/**
 * Closes the handler: waits until every in-flight blocklet has been written,
 * shuts down the consumer executor, writes the file footer and closes the
 * underlying writer streams. Safe to call when the writer is already null.
 *
 * @throws CarbonDataWriterException if the wait for pending blocklets is
 *         interrupted, or if the writer fails while writing the footer/closing
 */
public void closeHandler() throws CarbonDataWriterException {
  if (null != this.dataWriter) {
    // wait until all blocklets have been finished writing
    while (blockletProcessingCount.get() > 0) {
      try {
        Thread.sleep(50);
      } catch (InterruptedException e) {
        // restore the interrupt status so callers up the stack can observe it
        Thread.currentThread().interrupt();
        throw new CarbonDataWriterException(e);
      }
    }
    consumerExecutorService.shutdownNow();
    processWriteTaskSubmitList(consumerExecutorServiceTaskList);
    this.dataWriter.writeFooter();
    if (LOGGER.isDebugEnabled()) {
      LOGGER.debug("All blocklets have been finished writing");
    }
    // close all the open stream for both the files
    this.dataWriter.closeWriter();
  }
  this.dataWriter = null;
}
Use of org.apache.carbondata.core.datastore.exception.CarbonDataWriterException in the Apache CarbonData project.
Class CarbonFactDataHandlerColumnar, method addDataToStore:
/**
 * Adds a row to the in-memory store. Once the accumulated rows reach the
 * configured page size (or the row forces a page cut), the current batch is
 * handed off to the producer executor as a new table page and the per-page
 * state (row list, complex-index map, no-dict page sizes) is reset.
 *
 * @param row the row to add
 * @throws CarbonDataWriterException if interrupted while waiting for a
 *         producer slot (semaphore acquisition)
 */
public void addDataToStore(CarbonRow row) throws CarbonDataWriterException {
  int totalComplexColumnDepth = setFlatCarbonRowForComplex(row);
  // total no-dictionary page slots: plain no-dict columns plus flattened complex columns
  if (noDictColumnPageSize == null) {
    // initialization using first row.
    model.setNoDictAllComplexColumnDepth(totalComplexColumnDepth);
    int noDictPageCount =
        model.getNoDictDataTypesList().size() + model.getNoDictAllComplexColumnDepth();
    if (noDictPageCount > 0) {
      noDictColumnPageSize = new int[noDictPageCount];
    }
  }
  dataRows.add(row);
  this.entryCount++;
  // this to leaf node file and update the intermediate files
  if (this.entryCount == this.pageSize || needToCutThePage(row)) {
    try {
      // bound the number of in-flight pages; released by the consumer side
      semaphore.acquire();
      producerExecutorServiceTaskList.add(producerExecutorService.submit(
          new Producer(tablePageList, dataRows, ++writerTaskSequenceCounter, false)));
      blockletProcessingCount.incrementAndGet();
      // set the entry count to zero
      processedDataCount += entryCount;
      if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Total Number Of records added to store: " + processedDataCount);
      }
      dataRows = new ArrayList<>(this.pageSize);
      this.entryCount = 0;
      // re-init the complexIndexMap
      this.complexIndexMapCopy = new HashMap<>();
      for (Map.Entry<Integer, GenericDataType> entry : model.getComplexIndexMap().entrySet()) {
        this.complexIndexMapCopy.put(entry.getKey(), entry.getValue().deepCopy());
      }
      noDictColumnPageSize =
          new int[model.getNoDictDataTypesList().size() + model.getNoDictAllComplexColumnDepth()];
    } catch (InterruptedException e) {
      // restore the interrupt status before surfacing the failure
      Thread.currentThread().interrupt();
      LOGGER.error(e.getMessage(), e);
      throw new CarbonDataWriterException(e);
    }
  }
}
Use of org.apache.carbondata.core.datastore.exception.CarbonDataWriterException in the Apache CarbonData project.
Class CarbonFactDataHandlerColumnar, method closeWriterExecutionService:
/**
 * Shuts down the given writer executor service and waits (up to one day) for
 * all submitted write tasks to finish, so their node holders can be collected
 * by the caller afterwards.
 *
 * @param service the service to shutdown
 * @throws CarbonDataWriterException if interrupted while awaiting termination
 */
private void closeWriterExecutionService(ExecutorService service) throws CarbonDataWriterException {
  try {
    service.shutdown();
    // awaitTermination returns false on timeout rather than throwing; surface that
    if (!service.awaitTermination(1, TimeUnit.DAYS)) {
      LOGGER.warn("Writer executor service did not terminate within the timeout");
    }
  } catch (InterruptedException e) {
    // restore the interrupt status before surfacing the failure
    Thread.currentThread().interrupt();
    LOGGER.error(e.getMessage(), e);
    throw new CarbonDataWriterException(e);
  }
}
Use of org.apache.carbondata.core.datastore.exception.CarbonDataWriterException in the Apache CarbonData project.
Class CarbonFactDataHandlerColumnar, method finish:
/**
 * Finishes the data handler: submits the remaining buffered rows as the final
 * table page, shuts down the producer executor, and processes all submitted
 * write tasks. No-op if the writer is already closed or the producer executor
 * has been shut down.
 *
 * @throws CarbonDataWriterException if interrupted while waiting for a
 *         producer slot or while the executor terminates
 */
public void finish() throws CarbonDataWriterException {
  // nothing to finish once the writer has been released
  if (null == dataWriter) {
    return;
  }
  if (producerExecutorService.isShutdown()) {
    return;
  }
  if (LOGGER.isDebugEnabled()) {
    LOGGER.debug("Started Finish Operation");
  }
  try {
    semaphore.acquire();
    // 'true' marks this Producer as the last page of the load
    producerExecutorServiceTaskList.add(producerExecutorService.submit(
        new Producer(tablePageList, dataRows, ++writerTaskSequenceCounter, true)));
    blockletProcessingCount.incrementAndGet();
    processedDataCount += entryCount;
    if (LOGGER.isDebugEnabled()) {
      LOGGER.debug("Total Number Of records added to store: " + processedDataCount);
    }
    closeWriterExecutionService(producerExecutorService);
    processWriteTaskSubmitList(producerExecutorServiceTaskList);
    processingComplete = true;
  } catch (InterruptedException e) {
    // restore the interrupt status before surfacing the failure
    Thread.currentThread().interrupt();
    LOGGER.error(e.getMessage(), e);
    throw new CarbonDataWriterException(e);
  }
}
Use of org.apache.carbondata.core.datastore.exception.CarbonDataWriterException in the Apache CarbonData project.
Class CarbonFactDataWriterImplV3, method writeBlockletToFile:
/**
 * Flushes the blocklet currently buffered in {@code blockletDataHolder} to the
 * fact data file, rolling over to a new file first if this blocklet would push
 * the current file past the configured block size.
 */
private void writeBlockletToFile() {
  // collect every encoded table page gathered for this blocklet
  EncodedBlocklet blocklet = blockletDataHolder.getEncodedBlocklet();
  int columnCount = blocklet.getNumberOfDimension() + blocklet.getNumberOfMeasure();
  // serialized data-chunk metadata, one slot per column (dimensions then measures)
  byte[][] chunkBytes = new byte[columnCount][];
  long metadataSize = fillDataChunk(blocklet, chunkBytes);
  // roll over to a fresh file when page data plus metadata would exceed the block size
  createNewFileIfReachThreshold(blockletDataHolder.getSize() + metadataSize);
  try {
    if (currentOffsetInFile == 0) {
      // empty file: the header must precede any blocklet data
      writeHeaderToFile();
    }
    writeBlockletToFile(chunkBytes);
    boolean notifyListener = listener != null
        && model.getDatabaseName().equalsIgnoreCase(listener.getTblIdentifier().getDatabaseName())
        && model.getTableName().equalsIgnoreCase(listener.getTblIdentifier().getTableName());
    if (notifyListener) {
      listener.onBlockletEnd(blockletId++);
    }
    pageId = 0;
  } catch (IOException e) {
    LOGGER.error("Problem while writing file", e);
    throw new CarbonDataWriterException("Problem while writing file", e);
  } finally {
    // always release the buffered pages, even when the write fails
    blockletDataHolder.clear();
  }
}
Aggregations