Use of org.apache.carbondata.processing.newflow.row.CarbonRow in the Apache CarbonData project:
class RowConverterImpl, method convert.
@Override
public CarbonRow convert(CarbonRow row) throws CarbonDataLoadingException {
  //TODO: only copy if it is bad record
  // Snapshot the raw values up front; bad-record reporting needs the
  // pre-conversion data, while the converters mutate `row` in place.
  CarbonRow original = row.getCopy();
  logHolder.setLogged(false);
  logHolder.clear();
  for (int idx = 0; idx < fieldConverters.length; idx++) {
    fieldConverters[idx].convert(row, logHolder);
    // Only the first bad field of a row is reported; once logged, skip the rest.
    boolean needsBadRecordHandling = !logHolder.isLogged() && logHolder.isBadRecordNotAdded();
    if (!needsBadRecordHandling) {
      continue;
    }
    if (badRecordLogger.isDataLoadFail()) {
      // Fail-fast mode: abort the load on the first bad record.
      throw new CarbonDataLoadingException(
          "Data load failed due to bad record: " + logHolder.getReason());
    }
    badRecordLogger.addBadRecordsToBuilder(original.getData(), logHolder.getReason());
    logHolder.clear();
    logHolder.setLogged(true);
    if (badRecordLogger.isBadRecordConvertNullDisable()) {
      // Caller must treat a null return as "drop this row".
      return null;
    }
  }
  return row;
}
Use of org.apache.carbondata.processing.newflow.row.CarbonRow in the Apache CarbonData project:
class CarbonRowDataWriterProcessorStepImpl, method processBatch.
/**
 * Converts every row of the batch and hands it to the fact handler for writing,
 * updating the per-iterator read/write counters as it goes.
 */
private void processBatch(CarbonRowBatch batch, CarbonFactHandler dataHandler, int iteratorIndex)
    throws CarbonDataLoadingException {
  try {
    while (batch.hasNext()) {
      dataHandler.addDataToStore(convertRow(batch.next()));
      readCounter[iteratorIndex]++;
    }
    writeCounter[iteratorIndex] += batch.getSize();
  } catch (Exception e) {
    // Wrap any failure (conversion or write) with the original as the cause.
    throw new CarbonDataLoadingException("unable to generate the mdkey", e);
  }
  rowCounter.getAndAdd(batch.getSize());
}
Use of org.apache.carbondata.processing.newflow.row.CarbonRow in the Apache CarbonData project:
class DataConverterProcessorWithBucketingStepImpl, method processRowBatch.
/**
 * Process the batch of rows as per the step logic: compute each row's bucket
 * from its raw (pre-conversion) field values, convert the row, tag it with the
 * bucket number, and collect the survivors into a new batch.
 *
 * @param rowBatch batch of raw rows to convert
 * @param localConverter converter applied to each row
 * @return batch of converted rows, each carrying its bucket number
 */
protected CarbonRowBatch processRowBatch(CarbonRowBatch rowBatch, RowConverter localConverter) {
  CarbonRowBatch newBatch = new CarbonRowBatch(rowBatch.getSize());
  while (rowBatch.hasNext()) {
    CarbonRow next = rowBatch.next();
    // Bucket is decided on the raw data, before conversion mutates the row.
    short bucketNumber = (short) partitioner.getPartition(next.getData());
    CarbonRow convertRow = localConverter.convert(next);
    // convert() returns null for a bad record when bad-record-convert-null is
    // disabled; skip such rows instead of hitting an NPE on the field write below.
    if (convertRow == null) {
      continue;
    }
    convertRow.bucketNumber = bucketNumber;
    newBatch.addRow(convertRow);
  }
  // Count only the rows that actually made it into the output batch.
  rowCounter.getAndAdd(newBatch.getSize());
  return newBatch;
}
Use of org.apache.carbondata.processing.newflow.row.CarbonRow in the Apache CarbonData project:
class CompactionResultSortProcessor, method readAndLoadDataFromSortTempFiles.
/**
 * This method will read sort temp files, perform merge sort and add it to store for data loading.
 *
 * @throws Exception if merging, writing, or closing the handler fails
 */
private void readAndLoadDataFromSortTempFiles() throws Exception {
  try {
    intermediateFileMerger.finish();
    finalMerger.startFinalMerge();
    while (finalMerger.hasNext()) {
      Object[] row = finalMerger.next();
      dataHandler.addDataToStore(new CarbonRow(row));
    }
    dataHandler.finish();
  } catch (Exception e) {
    // CarbonDataWriterException was previously caught in an identical, duplicate
    // block; it is an Exception, so one catch suffices. Chain the cause instead
    // of flattening it to a message string.
    LOGGER.error(e);
    throw new Exception("Problem loading data during compaction: " + e.getMessage(), e);
  } finally {
    if (null != dataHandler) {
      try {
        dataHandler.closeHandler();
      } catch (CarbonDataWriterException e) {
        LOGGER.error(e);
        // NOTE(review): throwing from finally masks any exception already in
        // flight from the try/catch above — kept to preserve existing behavior.
        throw new Exception("Problem loading data during compaction: " + e.getMessage(), e);
      }
    }
  }
}
Aggregations