Use of org.apache.drill.exec.proto.UserBitShared.RecordBatchDef in project drill by apache.
The class VectorAccessibleSerializable, method readFromStream.
/**
 * Reads from an InputStream and parses a RecordBatchDef. From this, we
 * construct a SelectionVector2 if it exists, then construct the vectors and
 * add them to a vector container.
 *
 * @param input the InputStream to read from
 * @throws IOException
 */
@Override
public void readFromStream(InputStream input) throws IOException {
  final UserBitShared.RecordBatchDef batchDef = UserBitShared.RecordBatchDef.parseDelimitedFrom(input);
  recordCount = batchDef.getRecordCount();
  if (batchDef.hasCarriesTwoByteSelectionVector() && batchDef.getCarriesTwoByteSelectionVector()) {
    readSv2(input);
  }
  readVectors(input, batchDef);
}
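For context, the write side uses protobuf's length-delimited framing, which is what parseDelimitedFrom expects. Below is a minimal round-trip sketch (not Drill code; the class name and field values are illustrative) that builds a RecordBatchDef, writes it with writeDelimitedTo, and parses it back the same way readFromStream recovers the header. In a real stream, the serialized vector buffers that readVectors(...) consumes would follow the header.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.drill.exec.proto.UserBitShared;
import org.apache.drill.exec.proto.UserBitShared.RecordBatchDef;

public class RecordBatchDefRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build a header for an (empty) batch that carries a two-byte selection vector.
    RecordBatchDef def = RecordBatchDef.newBuilder()
        .setRecordCount(0)
        .setCarriesTwoByteSelectionVector(true)
        .build();

    // Write side: length-delimited framing, the counterpart of parseDelimitedFrom.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    def.writeDelimitedTo(out);

    // Read side: the same call readFromStream(...) uses to recover the header.
    RecordBatchDef parsed =
        UserBitShared.RecordBatchDef.parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));
    System.out.println(parsed.getRecordCount());                   // 0
    System.out.println(parsed.getCarriesTwoByteSelectionVector()); // true
  }
}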
Use of org.apache.drill.exec.proto.UserBitShared.RecordBatchDef in project drill by apache.
The class WritableBatch, method getBatchNoHV.
public static WritableBatch getBatchNoHV(int recordCount, Iterable<ValueVector> vectors, boolean isSV2) {
  List<DrillBuf> buffers = Lists.newArrayList();
  List<SerializedField> metadata = Lists.newArrayList();
  for (ValueVector vv : vectors) {
    metadata.add(vv.getMetadata());
    // Don't try to get the buffers if we don't have any records. It is possible the buffers are dead buffers.
    if (recordCount == 0) {
      vv.clear();
      continue;
    }
    for (DrillBuf b : vv.getBuffers(true)) {
      buffers.add(b);
    }
    // Remove vv access to the buffers.
    vv.clear();
  }
  RecordBatchDef batchDef = RecordBatchDef.newBuilder()
      .addAllField(metadata)
      .setRecordCount(recordCount)
      .setCarriesTwoByteSelectionVector(isSV2)
      .build();
  WritableBatch b = new WritableBatch(batchDef, buffers);
  return b;
}
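A minimal usage sketch, assuming an already-open Drill BufferAllocator (passed in here) and Drill-1.x-style APIs such as MaterializedField.create(String, MajorType); the class and method names are illustrative, not from the Drill sources:

import java.util.Collections;

import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.common.types.Types;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.record.WritableBatch;
import org.apache.drill.exec.vector.IntVector;
import org.apache.drill.exec.vector.ValueVector;

public class WritableBatchSketch {
  static WritableBatch buildBatch(BufferAllocator allocator) {
    MaterializedField field = MaterializedField.create("id", Types.required(MinorType.INT));
    IntVector vector = new IntVector(field, allocator);
    vector.allocateNew(3);
    for (int i = 0; i < 3; i++) {
      vector.getMutator().setSafe(i, i * 10);
    }
    vector.getMutator().setValueCount(3);

    // getBatchNoHV takes ownership of the vector's buffers and clears the vector.
    WritableBatch batch =
        WritableBatch.getBatchNoHV(3, Collections.<ValueVector>singletonList(vector), false);
    // The RecordBatchDef header mirrors what was passed in.
    assert batch.getDef().getRecordCount() == 3;
    assert !batch.getDef().getCarriesTwoByteSelectionVector();
    return batch; // The caller must eventually release the buffers via batch.clear().
  }
}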
Use of org.apache.drill.exec.proto.UserBitShared.RecordBatchDef in project drill by apache.
The class UnorderedReceiverBatch, method next.
@Override
public IterOutcome next() {
  batchLoader.resetRecordCount();
  stats.startProcessing();
  try {
    RawFragmentBatch batch;
    try {
      stats.startWait();
      batch = getNextBatch();
      // Skip over empty batches. We do this since these are basically control messages.
      while (batch != null && batch.getHeader().getDef().getRecordCount() == 0
          && (!first || batch.getHeader().getDef().getFieldCount() == 0)) {
        batch = getNextBatch();
      }
    } finally {
      stats.stopWait();
    }
    first = false;
    if (batch == null) {
      batchLoader.clear();
      if (!context.shouldContinue()) {
        return IterOutcome.STOP;
      }
      return IterOutcome.NONE;
    }
    if (context.isOverMemoryLimit()) {
      return IterOutcome.OUT_OF_MEMORY;
    }
    // logger.debug("Next received batch {}", batch);
    final RecordBatchDef rbd = batch.getHeader().getDef();
    final boolean schemaChanged = batchLoader.load(rbd, batch.getBody());
    // TODO: Clean: DRILL-2933: That load(...) no longer throws
    // SchemaChangeException, so check/clean the catch clause below.
    stats.addLongStat(Metric.BYTES_RECEIVED, batch.getByteCount());
    batch.release();
    if (schemaChanged) {
      this.schema = batchLoader.getSchema();
      stats.batchReceived(0, rbd.getRecordCount(), true);
      return IterOutcome.OK_NEW_SCHEMA;
    } else {
      stats.batchReceived(0, rbd.getRecordCount(), false);
      return IterOutcome.OK;
    }
  } catch (SchemaChangeException | IOException ex) {
    context.fail(ex);
    return IterOutcome.STOP;
  } finally {
    stats.stopProcessing();
  }
}
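A minimal downstream-consumer sketch (not from the Drill sources) showing how a caller typically drives next() and branches on the returned IterOutcome; the drain helper and its handling of the less common outcomes are illustrative:

import org.apache.drill.exec.record.RecordBatch;
import org.apache.drill.exec.record.RecordBatch.IterOutcome;

public class ReceiverDrain {
  static long drain(RecordBatch upstream) {
    long totalRecords = 0;
    while (true) {
      IterOutcome outcome = upstream.next();
      switch (outcome) {
        case OK_NEW_SCHEMA:
          // Rebuild any schema-dependent state here, then fall through to consume.
        case OK:
          totalRecords += upstream.getRecordCount();
          break;
        case NONE: // Upstream is exhausted.
          return totalRecords;
        case STOP: // Upstream failed or the query was cancelled.
          return totalRecords;
        default:   // OUT_OF_MEMORY, NOT_YET, etc. are not handled in this sketch.
          throw new IllegalStateException("Unexpected outcome: " + outcome);
      }
    }
  }
}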
Use of org.apache.drill.exec.proto.UserBitShared.RecordBatchDef in project drill by apache.
The class TestComplexToJson, method test.
@Test
public void test() throws Exception {
  DrillClient parentClient = client;
  List<QueryDataBatch> results;
  RecordBatchLoader loader = new RecordBatchLoader(getAllocator());

  client = new DrillClient(config, serviceSet.getCoordinator());
  client.setSupportComplexTypes(false);
  client.connect();
  results = testSqlWithResults("select * from dfs_test.`[WORKING_PATH]/src/test/resources/store/text/data/regions.csv`");
  loader.load(results.get(0).getHeader().getDef(), results.get(0).getData());
  RecordBatchDef def = results.get(0).getHeader().getDef();
  // The entire row is returned as a single column.
  assertEquals(1, def.getFieldCount());
  // With setSupportComplexTypes == false, the column mode should be REQUIRED.
  assertEquals(DataMode.REQUIRED, def.getField(0).getMajorType().getMode());
  loader.clear();
  for (QueryDataBatch result : results) {
    result.release();
  }
  client.close();

  client = new DrillClient(config, serviceSet.getCoordinator());
  client.setSupportComplexTypes(true);
  client.connect();
  results = testSqlWithResults("select * from dfs_test.`[WORKING_PATH]/src/test/resources/store/text/data/regions.csv`");
  loader.load(results.get(0).getHeader().getDef(), results.get(0).getData());
  def = results.get(0).getHeader().getDef();
  // The entire row is returned as a single column.
  assertEquals(1, def.getFieldCount());
  // With setSupportComplexTypes == true, the column mode should be REPEATED.
  assertEquals(DataMode.REPEATED, def.getField(0).getMajorType().getMode());
  loader.clear();
  for (QueryDataBatch result : results) {
    result.release();
  }
  client.close();
  client = parentClient;
}
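The same header inspection can be factored into a small helper; this sketch (hypothetical, not part of the test) uses only the accessors exercised above:

import java.util.List;

import org.apache.drill.common.types.TypeProtos.DataMode;
import org.apache.drill.exec.proto.UserBitShared.RecordBatchDef;
import org.apache.drill.exec.rpc.user.QueryDataBatch;

public class FirstColumnMode {
  // Pulls the DataMode of the first column straight out of the first batch's header.
  static DataMode firstColumnMode(List<QueryDataBatch> results) {
    RecordBatchDef def = results.get(0).getHeader().getDef();
    return def.getField(0).getMajorType().getMode();
  }
}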