Use of org.apache.drill.exec.physical.impl.scan.framework.ManagedScanFramework.ScanFrameworkBuilder in the Apache Drill project: class JdbcScanBatchCreator, method createBuilder.
/**
 * Builds the managed-scan framework configuration for a JDBC sub-scan.
 * <p>
 * Configures projection and user name from the sub-scan, registers a single
 * {@link JdbcBatchReader} that executes the pushed-down SQL against the
 * plugin's data source, and sets the type used for projected-but-missing
 * columns.
 *
 * @param options session/system options (currently unused, kept for
 *        signature parity with the other scan-batch creators)
 * @param subScan the JDBC sub-scan carrying SQL, columns, and plugin
 * @return a configured {@link ScanFrameworkBuilder}
 */
private ScanFrameworkBuilder createBuilder(OptionManager options, JdbcSubScan subScan) {
  ScanFrameworkBuilder builder = new ScanFrameworkBuilder();
  builder.projection(subScan.getColumns());
  builder.setUserName(subScan.getUserName());

  // A single reader is enough: the sub-scan carries one pushed-down SQL query.
  JdbcStoragePlugin plugin = subScan.getPlugin();
  List<ManagedReader<SchemaNegotiator>> readers = Collections.singletonList(
      new JdbcBatchReader(plugin.getDataSource(), subScan.getSql(), subScan.getColumns()));
  builder.setReaderFactory(new BasicScanFactory(readers.iterator()));

  // Columns projected but absent from the result set materialize as nullable VARCHAR.
  builder.nullType(Types.optional(MinorType.VARCHAR));
  return builder;
}
Use of org.apache.drill.exec.physical.impl.scan.framework.ManagedScanFramework.ScanFrameworkBuilder in the Apache Drill project: class HttpScanBatchCreator, method createBuilder.
/**
 * Assembles the managed-scan framework configuration for an HTTP sub-scan:
 * projection, user name, a connection-aware error context, the HTTP reader
 * factory, and the null-column type.
 *
 * @param options session/system options (unused here)
 * @param subScan the HTTP sub-scan describing the request
 * @return a configured {@link ScanFrameworkBuilder}
 */
private ScanFrameworkBuilder createBuilder(OptionManager options, HttpSubScan subScan) {
  ScanFrameworkBuilder scanBuilder = new ScanFrameworkBuilder();
  scanBuilder.projection(subScan.columns());
  scanBuilder.setUserName(subScan.getUserName());

  // Enrich any error raised during the scan with the connection and plugin names.
  scanBuilder.errorContext(new ChildErrorContext(scanBuilder.errorContext()) {
    @Override
    public void addContext(UserException.Builder errorBuilder) {
      errorBuilder.addContext("Connection", subScan.tableSpec().connection());
      errorBuilder.addContext("Plugin", subScan.tableSpec().pluginName());
    }
  });

  // Reader creation is deferred to the factory.
  scanBuilder.setReaderFactory(new HttpReaderFactory(subScan));
  scanBuilder.nullType(Types.optional(MinorType.VARCHAR));
  return scanBuilder;
}
Use of org.apache.drill.exec.physical.impl.scan.framework.ManagedScanFramework.ScanFrameworkBuilder in the Apache Drill project: class SplunkScanBatchCreator, method createBuilder.
/**
 * Builds the managed-scan framework configuration for a Splunk sub-scan:
 * projection, user name, reader factory, and null-column type.
 *
 * @param options session/system options (unused here)
 * @param subScan the Splunk sub-scan describing the query
 * @return a configured {@link ScanFrameworkBuilder}
 */
private ScanFrameworkBuilder createBuilder(OptionManager options, SplunkSubScan subScan) {
  SplunkPluginConfig pluginConfig = subScan.getConfig();

  ScanFrameworkBuilder scanBuilder = new ScanFrameworkBuilder();
  scanBuilder.projection(subScan.getColumns());
  scanBuilder.setUserName(subScan.getUserName());

  // The factory lazily creates batch readers for this sub-scan.
  scanBuilder.setReaderFactory(new SplunkReaderFactory(pluginConfig, subScan));
  scanBuilder.nullType(Types.optional(MinorType.VARCHAR));
  return scanBuilder;
}
Use of org.apache.drill.exec.physical.impl.scan.framework.ManagedScanFramework.ScanFrameworkBuilder in the Apache Drill project: class PhoenixScanBatchCreator, method createBuilder.
/**
 * Builds the managed-scan framework configuration for a Phoenix sub-scan.
 * <p>
 * Configures projection, user name, the Phoenix reader factory, the
 * null-column type, and an error context that attaches the SQL, columns,
 * and estimated row count of the scan spec to any failure.
 *
 * @param options session/system options (unused here)
 * @param subScan the Phoenix sub-scan describing the query
 * @return a configured {@link ScanFrameworkBuilder}
 */
private ScanFrameworkBuilder createBuilder(OptionManager options, PhoenixSubScan subScan) {
  ScanFrameworkBuilder builder = new ScanFrameworkBuilder();
  builder.projection(subScan.getColumns());
  builder.setUserName(subScan.getUserName());

  // Phoenix reader
  builder.setReaderFactory(new PhoenixReaderFactory(subScan));
  builder.nullType(Types.optional(MinorType.VARCHAR));

  // Custom error context. The override parameter was renamed from "builder"
  // to avoid shadowing the enclosing ScanFrameworkBuilder local.
  builder.errorContext(new ChildErrorContext(builder.errorContext()) {
    @Override
    public void addContext(UserException.Builder errorBuilder) {
      errorBuilder.addContext("sql : ", subScan.getScanSpec().getSql());
      errorBuilder.addContext("columns : ", subScan.getScanSpec().getColumns().toString());
      errorBuilder.addContext("estimate row count : ", subScan.getScanSpec().getEstimateRows());
    }
  });
  return builder;
}
Use of org.apache.drill.exec.physical.impl.scan.framework.ManagedScanFramework.ScanFrameworkBuilder in the Apache Drill project: class MockScanBatchCreator, method extendedMockScan.
/**
 * Builds a record batch for an "extended" mock scan: one
 * {@link ExtendedMockBatchReader} per scan entry, wildcard projection, and a
 * batch byte limit taken from the first entry (or a 10 MB default).
 * <p>
 * NOTE(review): assumes {@code entries} is non-empty — confirm against callers.
 *
 * @param context fragment execution context
 * @param config  the mock sub-scan operator definition
 * @param entries mock scan entries, one reader created per entry
 * @return the operator record batch wrapping the scan framework
 */
private CloseableRecordBatch extendedMockScan(FragmentContext context, MockSubScanPOP config, List<MockScanEntry> entries) {
  // Wildcard projection: mock tables expose every generated column.
  List<SchemaPath> projection = new LinkedList<>();
  projection.add(SchemaPath.STAR_COLUMN);

  // Create all readers up front; mock scans have few entries, so eager
  // construction is simpler than an on-the-fly iterator.
  List<ManagedReader<SchemaNegotiator>> readers = new LinkedList<>();
  for (MockTableDef.MockScanEntry entry : entries) {
    readers.add(new ExtendedMockBatchReader(entry));
  }

  // Batch cap: 10 MB by default, overridden by the first entry when positive.
  MockTableDef.MockScanEntry firstEntry = entries.get(0);
  int batchLimitBytes = firstEntry.getBatchSize() > 0
      ? firstEntry.getBatchSize()
      : 10 * 1024 * 1024;

  // Allow the maximum row count; each reader may shrink its batches further.
  ScanFrameworkBuilder builder = new ScanFrameworkBuilder();
  builder.batchByteLimit(batchLimitBytes);
  builder.projection(projection);
  builder.setReaderFactory(new BasicScanFactory(readers.iterator()));
  ManagedScanFramework framework = new ManagedScanFramework(builder);
  return new OperatorRecordBatch(context, config, new ScanOperatorExec(framework, false), false);
}
Aggregations