Use of org.apache.drill.exec.physical.impl.scan.framework.SchemaNegotiator in project drill by apache.
In class TestMockRowReader, method testColumnRepeat.
/**
* Test a repeated column.
*/
@Test
public void testColumnRepeat() {
  int rowCount = 10;
  MockTableDef.MockColumn[] cols = new MockTableDef.MockColumn[] {
      // The repeat count of 3 expands column "a" into a1, a2 and a3 in the output schema.
      new MockTableDef.MockColumn("a", MinorType.INT, DataMode.REQUIRED, null, null, null, null, 3, null),
      new MockTableDef.MockColumn("b", MinorType.VARCHAR, DataMode.REQUIRED, 10, null, null, null, null, null)
  };
  MockTableDef.MockScanEntry entry = new MockTableDef.MockScanEntry(rowCount, true, null, null, cols);
  MockSubScanPOP config = new MockSubScanPOP("dummy", true, Collections.singletonList(entry));
  ManagedReader<SchemaNegotiator> reader = new ExtendedMockBatchReader(entry);
  List<ManagedReader<SchemaNegotiator>> readers = Collections.singletonList(reader);

  // Create options and the scan operator.
  ScanFixture mockBatch = buildScan(config, readers);
  ScanOperatorExec scan = mockBatch.scanOp;

  // First batch: build the schema. The reader cooperates by returning an
  // empty first batch that carries only the schema.
  assertTrue(scan.buildSchema());
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a1", MinorType.INT)
      .add("a2", MinorType.INT)
      .add("a3", MinorType.INT)
      .add("b", MinorType.VARCHAR, 10)
      .build();
  BatchSchema expectedBatchSchema = new BatchSchema(SelectionVectorMode.NONE, expectedSchema.toFieldList());
  assertTrue(expectedBatchSchema.isEquivalent(scan.batchAccessor().schema()));
  assertEquals(0, scan.batchAccessor().rowCount());

  // Next call returns the data.
  assertTrue(scan.next());
  assertTrue(expectedBatchSchema.isEquivalent(scan.batchAccessor().schema()));
  assertEquals(rowCount, scan.batchAccessor().rowCount());
  scan.batchAccessor().release();

  // EOF
  assertFalse(scan.next());
  mockBatch.close();
}
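Both tests in this snippet drive ScanOperatorExec through the same lifecycle. The loop below is a distilled sketch, not code taken from the test class, but it uses only the calls already shown above:

ScanFixture fixture = buildScan(config, readers);  // test-class helper; its body is not shown in this snippet
ScanOperatorExec scan = fixture.scanOp;
try {
  // The first call returns an empty batch that carries only the schema.
  if (scan.buildSchema()) {
    // Each later call returns one batch of data until EOF.
    while (scan.next()) {
      // Inspect scan.batchAccessor() here, then free the batch's buffers.
      scan.batchAccessor().release();
    }
  }
} finally {
  fixture.close();  // shuts the scan down, as mockBatch.close() does above
}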
Use of org.apache.drill.exec.physical.impl.scan.framework.SchemaNegotiator in project drill by apache.
In class TestMockRowReader, method testBatchSize.
/**
* Verify the limit on individual batch size (the maximum row count per batch).
*/
@Test
public void testBatchSize() {
  int rowCount = 20;
  int batchSize = 10;
  MockTableDef.MockColumn[] cols = new MockTableDef.MockColumn[] {
      new MockTableDef.MockColumn("a", MinorType.INT, DataMode.REQUIRED, null, null, null, null, null, null),
      new MockTableDef.MockColumn("b", MinorType.VARCHAR, DataMode.REQUIRED, 10, null, null, null, null, null)
  };
  MockTableDef.MockScanEntry entry = new MockTableDef.MockScanEntry(rowCount, true, batchSize, null, cols);
  MockSubScanPOP config = new MockSubScanPOP("dummy", true, Collections.singletonList(entry));
  ManagedReader<SchemaNegotiator> reader = new ExtendedMockBatchReader(entry);
  List<ManagedReader<SchemaNegotiator>> readers = Collections.singletonList(reader);

  // Create options and the scan operator.
  ScanFixture mockBatch = buildScan(config, readers);
  ScanOperatorExec scan = mockBatch.scanOp;

  // First batch: build the schema. The reader cooperates by returning an
  // empty first batch that carries only the schema.
  assertTrue(scan.buildSchema());
  assertEquals(0, scan.batchAccessor().rowCount());

  // Subsequent calls return the data, limited by the batch size:
  // 20 rows at 10 rows per batch yields two full batches.
  assertTrue(scan.next());
  assertEquals(batchSize, scan.batchAccessor().rowCount());
  scan.batchAccessor().release();
  assertTrue(scan.next());
  assertEquals(batchSize, scan.batchAccessor().rowCount());
  scan.batchAccessor().release();

  // EOF
  assertFalse(scan.next());
  mockBatch.close();
}
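The buildScan(config, readers) helper called by both tests is not part of this snippet. A rough sketch of what such a helper might look like follows; the BaseScanFixtureBuilder class and its projectAll, addReader, and build methods are assumptions about Drill's scan test fixture, not verified against the actual test class:

private static ScanFixture buildScan(MockSubScanPOP config, List<ManagedReader<SchemaNegotiator>> readers) {
  // config mirrors the call sites above; how the real helper uses it is not visible here.
  BaseScanFixtureBuilder builder = new BaseScanFixtureBuilder();  // assumed fixture builder
  builder.projectAll();                    // assumed: project every column the readers offer
  readers.forEach(builder::addReader);     // assumed: register the mock readers
  return builder.build();                  // assumed: wires up a ScanOperatorExec and returns the fixture
}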
Use of org.apache.drill.exec.physical.impl.scan.framework.SchemaNegotiator in project drill by apache.
In class AvroMessageReader, method init.
@Override
public void init(SchemaNegotiator negotiator, ReadOptions readOptions, KafkaStoragePlugin plugin) {
  // Pass the Kafka consumer properties through to the Confluent Avro deserializer.
  Properties kafkaConsumerProps = plugin.getConfig().getKafkaConsumerProps();
  Map<String, Object> propertiesMap = kafkaConsumerProps.entrySet().stream()
      .collect(Collectors.toMap(e -> e.getKey().toString(), Map.Entry::getValue));
  deserializer = new KafkaAvroDeserializer(null, propertiesMap);

  // Build the result-set loader and a root converter that maps Avro records
  // onto Drill's column writers, honoring any provided schema.
  TupleMetadata providedSchema = negotiator.providedSchema();
  loader = negotiator.build();
  AvroColumnConverterFactory factory = new AvroColumnConverterFactory(providedSchema);
  converter = factory.getRootConverter(providedSchema, new TupleSchema(), loader.writer());

  // Decode the message key only if it, too, is Avro-encoded.
  String keyDeserializer = kafkaConsumerProps.getProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG);
  deserializeKey = keyDeserializer != null && keyDeserializer.equals(KafkaAvroDeserializer.class.getName());
}
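init() only wires things up: a Confluent KafkaAvroDeserializer, the result-set loader obtained from the SchemaNegotiator, and a root column converter built from the provided schema. A minimal sketch of how those pieces are typically used for each Kafka record follows; the readMessage name, the null topic argument, the byte[] cast, and the convert(Object) call are assumptions based on the fields initialized above, not the actual AvroMessageReader code:

void readMessage(ConsumerRecord<?, ?> record) {
  loader.writer().start();  // begin a new Drill row
  // Decode the Confluent wire format into an Avro record (a GenericRecord at runtime).
  Object value = deserializer.deserialize(null, (byte[]) record.value());
  converter.convert(value);  // map the Avro fields onto the row's column writers
  loader.writer().save();    // commit the row to the in-flight batch
  // When deserializeKey is set, record.key() would be decoded the same way (not shown).
}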