Use of org.apache.carbondata.core.datastore.exception.CarbonDataWriterException in project carbondata by apache.
The class SingleThreadFinalSortFilesMerger, method startSorting.
/**
 * Below method will be used to start the sorting process. This method will get
 * all the temp files present in the sort temp folder, then it will create the
 * record holder heap, read the first record from each file and
 * initialize the heap.
 *
 * @throws CarbonDataWriterException if an error occurs while merging
 */
private void startSorting(List<File> files) throws CarbonDataWriterException {
  if (files.size() == 0) {
    LOGGER.info("No files to merge sort");
    return;
  }
  LOGGER.info("Started Final Merge");
  LOGGER.info("Number of temp files: " + files.size());
  // create record holder heap
  createRecordHolderQueue(files.size());
  // iterate over file list and create chunk holder and add to heap
  LOGGER.info("Started adding first record from each file");
  this.executorService = Executors.newFixedThreadPool(maxThreadForSorting);
  for (final File tempFile : files) {
    Callable<Void> callable = new Callable<Void>() {
      @Override
      public Void call() {
        // create chunk holder
        SortTempFileChunkHolder sortTempFileChunkHolder =
            new SortTempFileChunkHolder(tempFile, sortParameters, tableName, true);
        try {
          // initialize the holder and read its first record
          sortTempFileChunkHolder.initialize();
          sortTempFileChunkHolder.readRow();
        } catch (CarbonSortKeyAndGroupByException ex) {
          sortTempFileChunkHolder.closeStream();
          notifyFailure(ex);
        }
        synchronized (LOCKOBJECT) {
          recordHolderHeapLocal.add(sortTempFileChunkHolder);
        }
        return null;
      }
    };
    mergerTask.add(executorService.submit(callable));
  }
  // wait for all chunk holders to be initialized, then surface any failure
  executorService.shutdown();
  try {
    executorService.awaitTermination(2, TimeUnit.HOURS);
  } catch (Exception e) {
    throw new CarbonDataWriterException(e);
  }
  checkFailure();
  LOGGER.info("Final merger heap size: " + this.recordHolderHeapLocal.size());
}
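The heavy lifting above happens inside the record holder heap: each chunk holder contributes its current row, the smallest is repeatedly polled, and the holder is re-inserted once it has read its next row. Below is a minimal, self-contained sketch of that k-way merge pattern over in-memory lists of integers; all class names here are illustrative and not part of the CarbonData API.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

public final class KWayMergeSketch {

  // Wraps one sorted source and keeps its current head record, mirroring
  // the role of SortTempFileChunkHolder above.
  private static final class ChunkHolder {
    private final Iterator<Integer> iterator;
    private Integer current;

    ChunkHolder(Iterator<Integer> iterator) {
      this.iterator = iterator;
      this.current = iterator.hasNext() ? iterator.next() : null; // first record
    }

    boolean advance() {
      current = iterator.hasNext() ? iterator.next() : null;
      return current != null;
    }
  }

  public static List<Integer> merge(List<List<Integer>> sortedChunks) {
    PriorityQueue<ChunkHolder> heap =
        new PriorityQueue<>(Comparator.comparing((ChunkHolder h) -> h.current));
    for (List<Integer> chunk : sortedChunks) {
      ChunkHolder holder = new ChunkHolder(chunk.iterator());
      if (holder.current != null) {
        heap.add(holder); // initialize the heap with each source's first record
      }
    }
    List<Integer> merged = new ArrayList<>();
    while (!heap.isEmpty()) {
      ChunkHolder smallest = heap.poll();
      merged.add(smallest.current);
      if (smallest.advance()) {
        heap.add(smallest); // re-insert with the next record from that source
      }
    }
    return merged;
  }
}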
Use of org.apache.carbondata.core.datastore.exception.CarbonDataWriterException in project carbondata by apache.
The class SecondaryIndexQueryResultProcessor, method initDataHandler.
/**
 * Initialise the carbon data writer instance.
 */
private void initDataHandler() throws SecondaryIndexException {
  String carbonStoreLocation =
      CarbonDataProcessorUtil.createCarbonStoreLocation(this.indexTable, segmentId);
  CarbonFactDataHandlerModel carbonFactDataHandlerModel =
      CarbonFactDataHandlerModel.getCarbonFactDataHandlerModel(carbonLoadModel, indexTable,
          segmentProperties, indexTable.getTableName(), tempStoreLocation, carbonStoreLocation);
  carbonFactDataHandlerModel.setSchemaUpdatedTimeStamp(indexTable.getTableLastUpdatedTime());
  CarbonDataFileAttributes carbonDataFileAttributes = new CarbonDataFileAttributes(
      Integer.parseInt(carbonLoadModel.getTaskNo()), carbonLoadModel.getFactTimeStamp());
  carbonFactDataHandlerModel.setCarbonDataFileAttributes(carbonDataFileAttributes);
  dataHandler = CarbonFactHandlerFactory.createCarbonFactHandler(carbonFactDataHandlerModel);
  try {
    dataHandler.initialise();
  } catch (CarbonDataWriterException e) {
    // release the upstream sort rows before surfacing the failure
    this.sortDataRows.close();
    LOGGER.error(e);
    throw new SecondaryIndexException(
        "Problem initialising data handler while creating secondary index: " + e.getMessage());
  }
}
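The catch block above follows a common init-or-cleanup shape: if the handler fails to initialise, the upstream resource is released before the failure is translated into the caller-facing exception type. Here is a hedged, generic sketch of that pattern; RowHandler, IndexBuildException and HandlerBootstrap are illustrative stand-ins, not CarbonData APIs.

interface RowHandler {
  void initialise() throws Exception;
}

final class IndexBuildException extends Exception {
  IndexBuildException(String message, Throwable cause) {
    super(message, cause);
  }
}

final class HandlerBootstrap {
  // Initialise the handler; if it fails, release the upstream resource first,
  // then rethrow as the caller-facing exception type.
  static void init(RowHandler handler, AutoCloseable upstream) throws IndexBuildException {
    try {
      handler.initialise();
    } catch (Exception e) {
      try {
        upstream.close();
      } catch (Exception suppressed) {
        e.addSuppressed(suppressed); // keep the original failure primary
      }
      throw new IndexBuildException("Problem initialising data handler", e);
    }
  }
}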
Use of org.apache.carbondata.core.datastore.exception.CarbonDataWriterException in project carbondata by apache.
The class CarbonUtil, method completeRemainingHdfsReplicas.
/**
 * This method will complete the remaining HDFS replicas for the given file.
 *
 * @param fileName hdfs file name
 * @param fileType file type
 * @throws CarbonDataWriterException if an error occurs
 */
public static void completeRemainingHdfsReplicas(String fileName, FileFactory.FileType fileType)
    throws CarbonDataWriterException {
  try {
    long startTime = System.currentTimeMillis();
    short replication = FileFactory.getDefaultReplication(fileName, fileType);
    if (1 == replication) {
      return;
    }
    boolean replicateFlag = FileFactory.setReplication(fileName, fileType, replication);
    if (!replicateFlag) {
      LOGGER.error("Failed to set replication for " + fileName + " with factor " + replication);
    }
    LOGGER.info("Total copy time (ms) to copy file " + fileName + " is "
        + (System.currentTimeMillis() - startTime));
  } catch (IOException e) {
    throw new CarbonDataWriterException("Problem while completing remaining HDFS replicas", e);
  }
}
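Under the hood, FileFactory delegates to the Hadoop FileSystem API. For reference, here is roughly the same logic written directly against plain Hadoop; the configuration and error handling are simplified for illustration and this is not the CarbonData wrapper.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicationSketch {
  public static void ensureDefaultReplication(String fileName) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path(fileName);
    FileSystem fs = path.getFileSystem(conf);
    short replication = fs.getDefaultReplication(path);
    if (replication == 1) {
      return; // nothing to do on a single-replica setup
    }
    // setReplication schedules the extra block copies asynchronously and
    // returns whether the request was accepted.
    if (!fs.setReplication(path, replication)) {
      System.err.println("Failed to set replication for " + fileName);
    }
  }
}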
Use of org.apache.carbondata.core.datastore.exception.CarbonDataWriterException in project carbondata by apache.
The class CompactionResultSortProcessor, method readAndLoadDataFromSortTempFiles.
/**
 * This method will read the sort temp files, perform a merge sort and add the
 * result to the store for data loading.
 */
private void readAndLoadDataFromSortTempFiles() throws Exception {
  try {
    intermediateFileMerger.finish();
    finalMerger.startFinalMerge();
    while (finalMerger.hasNext()) {
      Object[] row = finalMerger.next();
      dataHandler.addDataToStore(new CarbonRow(row));
    }
    dataHandler.finish();
  } catch (Exception e) {
    // CarbonDataWriterException and all other failures are handled identically,
    // so a single catch block suffices
    LOGGER.error(e);
    throw new Exception("Problem loading data during compaction.", e);
  } finally {
    if (null != dataHandler) {
      try {
        dataHandler.closeHandler();
      } catch (CarbonDataWriterException e) {
        LOGGER.error(e, "Error in close data handler");
        throw new Exception("Error in close data handler", e);
      }
    }
  }
}
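Structurally this is a drain-and-finish loop: pull every merged row, push it to the writer, flush, and close the writer no matter what. A minimal generic sketch of that shape follows; RowSource and RowSink are illustrative stand-ins for finalMerger and dataHandler, not CarbonData types.

interface RowSource {
  boolean hasNext();
  Object[] next();
}

interface RowSink extends AutoCloseable {
  void add(Object[] row) throws Exception;
  void finish() throws Exception;
}

final class DrainSketch {
  static void drain(RowSource source, RowSink sink) throws Exception {
    try {
      while (source.hasNext()) {
        sink.add(source.next());
      }
      sink.finish(); // flush the last in-memory page to the store
    } finally {
      sink.close(); // always release writer resources, even on failure
    }
  }
}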
Use of org.apache.carbondata.core.datastore.exception.CarbonDataWriterException in project carbondata by apache.
The class CompactionResultSortProcessor, method initDataHandler.
/**
 * Initialise the carbon data writer instance.
 */
private void initDataHandler() throws Exception {
  String carbonStoreLocation;
  if (partitionSpec != null) {
    carbonStoreLocation = partitionSpec.getLocation().toString()
        + CarbonCommonConstants.FILE_SEPARATOR + carbonLoadModel.getFactTimeStamp() + ".tmp";
  } else {
    carbonStoreLocation = CarbonDataProcessorUtil.createCarbonStoreLocation(
        carbonLoadModel.getCarbonDataLoadSchema().getCarbonTable(),
        carbonLoadModel.getSegmentId());
  }
  CarbonFactDataHandlerModel carbonFactDataHandlerModel =
      CarbonFactDataHandlerModel.getCarbonFactDataHandlerModel(carbonLoadModel, carbonTable,
          segmentProperties, tableName, tempStoreLocation, carbonStoreLocation);
  carbonFactDataHandlerModel.setSegmentId(carbonLoadModel.getSegmentId());
  carbonFactDataHandlerModel.setBucketId(carbonLoadModel.getBucketId());
  setDataFileAttributesInModel(carbonLoadModel, compactionType, carbonFactDataHandlerModel);
  this.noDicAndComplexColumns = carbonFactDataHandlerModel.getNoDictAndComplexColumns();
  dataHandler = CarbonFactHandlerFactory.createCarbonFactHandler(carbonFactDataHandlerModel);
  try {
    dataHandler.initialise();
  } catch (CarbonDataWriterException e) {
    LOGGER.error(e.getMessage(), e);
    throw new Exception("Problem initialising data handler during compaction: "
        + e.getMessage(), e);
  }
}
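The only branching logic here is the store-location decision: partitioned tables write to a timestamped .tmp folder inside the partition directory, while non-partitioned tables use the standard segment location. A small sketch of just that decision, with a hypothetical PartitionSpec stand-in rather than the CarbonData class:

final class StoreLocationSketch {
  interface PartitionSpec {
    String getLocation();
  }

  // For partitioned tables the output goes to a temporary folder inside the
  // partition directory, named after the load timestamp; otherwise the
  // standard segment directory is used. "/" stands in for FILE_SEPARATOR.
  static String chooseStoreLocation(PartitionSpec spec, long factTimeStamp,
                                    String segmentDir) {
    if (spec != null) {
      return spec.getLocation() + "/" + factTimeStamp + ".tmp";
    }
    return segmentDir;
  }
}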