Use of org.apache.carbondata.presto.impl.CarbonTableCacheModel in project carbondata by apache.
In class CarbondataRecordSetProvider, method getRecordSet:
@Override
public RecordSet getRecordSet(ConnectorTransactionHandle transactionHandle, ConnectorSession session,
    ConnectorSplit split, List<? extends ColumnHandle> columns) {
  CarbondataSplit carbondataSplit =
      checkType(split, CarbondataSplit.class, "split is not class CarbondataSplit");
  checkArgument(carbondataSplit.getConnectorId().equals(connectorId), "split is not for this connector");
  CarbonProjection carbonProjection = new CarbonProjection();
  // Convert all column handles
  ImmutableList.Builder<CarbondataColumnHandle> handles = ImmutableList.builder();
  for (ColumnHandle handle : columns) {
    handles.add(checkType(handle, CarbondataColumnHandle.class, "handle"));
    carbonProjection.addColumn(((CarbondataColumnHandle) handle).getColumnName());
  }
  CarbonTableCacheModel tableCacheModel =
      carbonTableReader.getCarbonCache(carbondataSplit.getSchemaTableName());
  checkNotNull(tableCacheModel, "tableCacheModel should not be null");
  checkNotNull(tableCacheModel.carbonTable, "tableCacheModel.carbonTable should not be null");
  checkNotNull(tableCacheModel.carbonTable.getTableInfo(), "tableCacheModel.tableInfo should not be null");
  // Build the query model
  CarbonTable targetTable = tableCacheModel.carbonTable;
  QueryModel queryModel;
  TaskAttemptContextImpl hadoopAttemptContext;
  try {
    Configuration conf = new Configuration();
    conf.set(CarbonTableInputFormat.INPUT_SEGMENT_NUMBERS, "");
    String carbonTablePath = targetTable.getAbsoluteTableIdentifier().getTablePath();
    conf.set(CarbonTableInputFormat.INPUT_DIR, carbonTablePath);
    JobConf jobConf = new JobConf(conf);
    CarbonTableInputFormat carbonTableInputFormat = createInputFormat(jobConf, tableCacheModel.carbonTable,
        PrestoFilterUtil.parseFilterExpression(carbondataSplit.getConstraints()), carbonProjection);
    hadoopAttemptContext = new TaskAttemptContextImpl(jobConf, new TaskAttemptID("", 1, TaskType.MAP, 0, 0));
    CarbonInputSplit carbonInputSplit =
        CarbonLocalInputSplit.convertSplit(carbondataSplit.getLocalInputSplit());
    queryModel = carbonTableInputFormat.createQueryModel(carbonInputSplit, hadoopAttemptContext);
    queryModel.setVectorReader(true);
  } catch (IOException e) {
    throw new RuntimeException("Unable to get the Query Model ", e);
  }
  return new CarbondataRecordSet(targetTable, session, carbondataSplit, handles.build(), queryModel,
      hadoopAttemptContext);
}
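
Two helpers called above, checkType and createInputFormat, are not part of this snippet. Below is a minimal sketch of what they plausibly look like: checkType follows the common Presto connector idiom, and createInputFormat is assumed to push the filter and projection into the Hadoop Configuration via CarbonTableInputFormat's static setters. The setter names and signatures here are assumptions drawn from carbondata-hadoop, not confirmed by this snippet; verify against the actual CarbondataRecordSetProvider source.

// Hypothetical sketch of the checkType helper (a common Presto connector idiom).
private static <T> T checkType(Object value, Class<T> target, String message) {
  requireNonNull(value, "value is null");
  checkArgument(target.isInstance(value), message);
  return target.cast(value);
}

// Hypothetical sketch of createInputFormat: configure the table path, filter
// expression, and column projection on the job Configuration, then return the
// input format that createQueryModel is invoked on above.
private CarbonTableInputFormat<Object> createInputFormat(Configuration conf, CarbonTable carbonTable,
    Expression filterExpression, CarbonProjection projection) throws IOException {
  CarbonTableInputFormat<Object> format = new CarbonTableInputFormat<>();
  CarbonTableInputFormat.setTablePath(conf, carbonTable.getAbsoluteTableIdentifier().getTablePath());
  CarbonTableInputFormat.setFilterPredicates(conf, filterExpression);
  CarbonTableInputFormat.setColumnProjection(conf, projection);
  return format;
}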
Use of org.apache.carbondata.presto.impl.CarbonTableCacheModel in project carbondata by apache.
In class CarbondataSplitManager, method getSplits:
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session,
    ConnectorTableLayoutHandle layout) {
  CarbondataTableLayoutHandle layoutHandle = (CarbondataTableLayoutHandle) layout;
  CarbondataTableHandle tableHandle = layoutHandle.getTable();
  SchemaTableName key = tableHandle.getSchemaTableName();
  // Repackage the Presto TupleDomain into CarbondataColumnConstraint to decouple from the presto-spi module
  List<CarbondataColumnConstraint> rebuildConstraints = getColumnConstraints(layoutHandle.getConstraint());
  CarbonTableCacheModel cache = carbonTableReader.getCarbonCache(key);
  Expression filters = PrestoFilterUtil.parseFilterExpression(layoutHandle.getConstraint());
  try {
    List<CarbonLocalInputSplit> splits = carbonTableReader.getInputSplits2(cache, filters);
    ImmutableList.Builder<ConnectorSplit> cSplits = ImmutableList.builder();
    for (CarbonLocalInputSplit split : splits) {
      cSplits.add(new CarbondataSplit(connectorId, tableHandle.getSchemaTableName(),
          layoutHandle.getConstraint(), split, rebuildConstraints));
    }
    return new FixedSplitSource(cSplits.build());
  } catch (Exception ex) {
    throw new RuntimeException(ex.getMessage(), ex);
  }
}
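
The getColumnConstraints helper referenced above is likewise not shown. A minimal sketch of how it could rebuild the constraints, assuming presto-spi's TupleDomain.ColumnDomain accessor API and a CarbondataColumnConstraint constructor taking a column name, an optional Domain, and an inverted-index flag; that constructor shape and isInvertedIndex() are assumptions inferred from the call site, not confirmed by this snippet.

// Hypothetical sketch: rebuild connector-local constraints from the Presto TupleDomain
// so downstream code does not depend on presto-spi types.
public List<CarbondataColumnConstraint> getColumnConstraints(TupleDomain<ColumnHandle> constraint) {
  ImmutableList.Builder<CarbondataColumnConstraint> constraintBuilder = ImmutableList.builder();
  for (TupleDomain.ColumnDomain<ColumnHandle> columnDomain : constraint.getColumnDomains().get()) {
    CarbondataColumnHandle columnHandle =
        checkType(columnDomain.getColumn(), CarbondataColumnHandle.class, "column handle");
    // Constructor arguments below are assumed, not taken from this snippet.
    constraintBuilder.add(new CarbondataColumnConstraint(columnHandle.getColumnName(),
        Optional.of(columnDomain.getDomain()), columnHandle.isInvertedIndex()));
  }
  return constraintBuilder.build();
}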