Usage of org.apache.carbondata.processing.loading.partition.impl.HashPartitionerImpl in the Apache CarbonData project:
the initializeBucketColumnPartitioner method of the DataConverterProcessorStepImpl class.
/**
 * Sets up the hash partitioner that routes each row to a bucket based on the
 * table's bucket columns.
 *
 * Walks the output data fields and, for every field whose column name matches
 * one of the configured bucket columns, records the field's position and its
 * column schema. Those two parallel lists, together with the configured number
 * of ranges, drive the {@link HashPartitionerImpl}.
 */
private void initializeBucketColumnPartitioner() {
  List<Integer> bucketFieldIndexes = new ArrayList<>();
  List<ColumnSchema> bucketColumnSchemas = new ArrayList<>();
  DataField[] fields = getOutput();
  BucketingInfo bucketingInfo = configuration.getBucketingInfo();
  for (int fieldIdx = 0; fieldIdx < fields.length; fieldIdx++) {
    // Hoist the field's column name once; compare it against every bucket column.
    String fieldColumnName = fields[fieldIdx].getColumn().getColName();
    for (int colIdx = 0; colIdx < bucketingInfo.getListOfColumns().size(); colIdx++) {
      if (fieldColumnName.equals(bucketingInfo.getListOfColumns().get(colIdx).getColumnName())) {
        bucketFieldIndexes.add(fieldIdx);
        bucketColumnSchemas.add(fields[fieldIdx].getColumn().getColumnSchema());
        // A field can match at most one bucket column; stop scanning.
        break;
      }
    }
  }
  // Hash partitioner dispatches rows to buckets using the matched columns.
  this.partitioner =
      new HashPartitionerImpl(bucketFieldIndexes, bucketColumnSchemas, bucketingInfo.getNumOfRanges());
}
Aggregations