Use of org.apache.carbondata.processing.loading.parser.impl.RowParserImpl in project carbondata by apache.
Class InputProcessorStepImpl, method initialize.
@Override
public void initialize() throws IOException {
  super.initialize();
  rowParser = new RowParserImpl(getOutput(), configuration);
  executorService = Executors.newCachedThreadPool(new CarbonThreadFactory(
      "InputProcessorPool:" + configuration.getTableIdentifier().getCarbonTableIdentifier()
          .getTableName()));
  // if logger is enabled then raw data will be required
  this.isRawDataRequired = CarbonDataProcessorUtil.isRawDataRequired(configuration);
}
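For context, a minimal hedged sketch of how a parser created in initialize() could be driven over raw input rows. The helper parseAll and its arguments are illustrative and not part of InputProcessorStepImpl; only RowParser.parseRow is taken from the snippet above.
// Hedged sketch (not part of the carbondata source): applying a RowParser
// produced by initialize() to a stream of raw rows.
// Assumes org.apache.carbondata.processing.loading.parser.RowParser and java.util.Iterator.
private static void parseAll(RowParser rowParser, Iterator<Object[]> rawRows) {
  while (rawRows.hasNext()) {
    // parseRow arranges the raw fields into the internal row layout expected downstream
    Object[] parsed = rowParser.parseRow(rawRows.next());
    // converter, sort and writer steps would consume 'parsed' here
  }
}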
Use of org.apache.carbondata.processing.loading.parser.impl.RowParserImpl in project carbondata by apache.
Class CarbonStreamRecordWriter, method initializeAtFirstRow.
private void initializeAtFirstRow() throws IOException, InterruptedException {
  // initialize metadata
  isNoDictionaryDimensionColumn =
      CarbonDataProcessorUtil.getNoDictionaryMapping(configuration.getDataFields());
  dimensionWithComplexCount = configuration.getDimensionCount();
  measureCount = configuration.getMeasureCount();
  dataFields = configuration.getDataFields();
  measureDataTypes = new DataType[measureCount];
  for (int i = 0; i < measureCount; i++) {
    measureDataTypes[i] = dataFields[dimensionWithComplexCount + i].getColumn().getDataType();
  }
  // initialize parser and converter
  rowParser = new RowParserImpl(dataFields, configuration);
  badRecordLogger = BadRecordsLoggerProvider.createBadRecordLogger(configuration);
  converter = new RowConverterImpl(configuration.getDataFields(), configuration, badRecordLogger);
  configuration.setCardinalityFinder(converter);
  converter.initialize();
  // initialize encoder
  nullBitSet = new BitSet(dataFields.length);
  int rowBufferSize = hadoopConf.getInt(CarbonStreamOutputFormat.CARBON_ENCODER_ROW_BUFFER_SIZE,
      CarbonStreamOutputFormat.CARBON_ENCODER_ROW_BUFFER_SIZE_DEFAULT);
  output = new StreamBlockletWriter(maxCacheSize, maxRowNums, rowBufferSize);
  // initialize data writer
  String filePath = segmentDir + File.separator + fileName;
  FileFactory.FileType fileType = FileFactory.getFileType(filePath);
  CarbonFile carbonFile = FileFactory.getCarbonFile(filePath, fileType);
  if (carbonFile.exists()) {
    // if the file already exists, use the append api
    outputStream = FileFactory.getDataOutputStreamUsingAppend(filePath, fileType);
  } else {
    // if the file does not exist, use the create api
    outputStream = FileFactory.getDataOutputStream(filePath, fileType);
    writeFileHeader();
  }
  isFirstRow = false;
}
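For context, a hedged sketch of how the parser and converter initialized above might be combined per incoming record. The helper parseAndConvert is illustrative and is not the writer's actual write path; that converter.convert(CarbonRow) returns a CarbonRow is an assumption about RowConverterImpl's interface.
// Hedged sketch, not the actual CarbonStreamRecordWriter.write() body: once
// initializeAtFirstRow() has run, each record could be parsed by the RowParserImpl
// built above and then converted before being encoded.
// Assumes org.apache.carbondata.core.datastore.row.CarbonRow.
private CarbonRow parseAndConvert(Object[] rawRecord) {
  // split/arrange the raw fields according to the configured data fields
  Object[] parsed = rowParser.parseRow(rawRecord);
  // the converter applies dictionary and data-type conversion, routing invalid
  // values to the badRecordLogger created in initializeAtFirstRow()
  return converter.convert(new CarbonRow(parsed));
}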