Search in sources :

Example 1 with RecordBatchDef

Use of org.apache.drill.exec.proto.UserBitShared.RecordBatchDef in project drill by apache.

From the class VectorAccessibleSerializable, the method readFromStream:

/**
   * Reads a delimited {@code RecordBatchDef} from the given stream, then
   * reconstructs the batch contents: a SelectionVector2 if the definition
   * says one exists, followed by the value vectors, which are added to the
   * vector container.
   *
   * @param input
   *          the InputStream to read from
   * @throws IOException if the stream cannot be read or parsed
   */
@Override
public void readFromStream(InputStream input) throws IOException {
    final UserBitShared.RecordBatchDef def = UserBitShared.RecordBatchDef.parseDelimitedFrom(input);
    recordCount = def.getRecordCount();
    // The SV2 bytes, if present, precede the vector data in the stream.
    final boolean hasSv2 = def.hasCarriesTwoByteSelectionVector() && def.getCarriesTwoByteSelectionVector();
    if (hasSv2) {
        readSv2(input);
    }
    readVectors(input, def);
}
Also used : RecordBatchDef(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef) UserBitShared(org.apache.drill.exec.proto.UserBitShared)

Example 2 with RecordBatchDef

Use of org.apache.drill.exec.proto.UserBitShared.RecordBatchDef in project drill by apache.

From the class WritableBatch, the method getBatchNoHV:

/**
 * Builds a {@code WritableBatch} (no hyper-vectors) from the given value
 * vectors: collects each vector's metadata, harvests its buffers, and
 * clears the vector so it releases its claim on those buffers.
 *
 * @param recordCount number of records carried by the batch
 * @param vectors the value vectors to serialize into the batch
 * @param isSV2 whether the batch carries a two-byte selection vector
 * @return a WritableBatch wrapping the batch definition and buffers
 */
public static WritableBatch getBatchNoHV(int recordCount, Iterable<ValueVector> vectors, boolean isSV2) {
    final List<DrillBuf> buffers = Lists.newArrayList();
    final List<SerializedField> metadata = Lists.newArrayList();
    for (final ValueVector vector : vectors) {
        metadata.add(vector.getMetadata());
        // Only harvest buffers when records exist; an empty batch may hold
        // dead buffers that must not be handed out.
        if (recordCount != 0) {
            for (final DrillBuf buf : vector.getBuffers(true)) {
                buffers.add(buf);
            }
        }
        // Either way, drop the vector's access to its buffers.
        vector.clear();
    }
    final RecordBatchDef batchDef = RecordBatchDef.newBuilder()
        .addAllField(metadata)
        .setRecordCount(recordCount)
        .setCarriesTwoByteSelectionVector(isSV2)
        .build();
    return new WritableBatch(batchDef, buffers);
}
Also used : ValueVector(org.apache.drill.exec.vector.ValueVector) SerializedField(org.apache.drill.exec.proto.UserBitShared.SerializedField) RecordBatchDef(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef) DrillBuf(io.netty.buffer.DrillBuf)

Example 3 with RecordBatchDef

Use of org.apache.drill.exec.proto.UserBitShared.RecordBatchDef in project drill by apache.

From the class UnorderedReceiverBatch, the method next:

/**
 * Pulls the next raw batch from the upstream receiver, loads it into the
 * batch loader, and maps the result to an {@link IterOutcome}.
 * Empty batches (zero records) are skipped as control messages, except that
 * the very first batch is kept if it carries a schema (non-zero field count).
 */
@Override
public IterOutcome next() {
    // Clear any stale record count from a previous call before fetching.
    batchLoader.resetRecordCount();
    stats.startProcessing();
    try {
        RawFragmentBatch batch;
        try {
            // Time spent blocked on the receiver is tracked as wait time.
            stats.startWait();
            batch = getNextBatch();
            // skip over empty batches. we do this since these are basically control messages.
            while (batch != null && batch.getHeader().getDef().getRecordCount() == 0 && (!first || batch.getHeader().getDef().getFieldCount() == 0)) {
                batch = getNextBatch();
            }
        } finally {
            stats.stopWait();
        }
        first = false;
        // A null batch means the upstream is exhausted (or we were asked to stop).
        if (batch == null) {
            batchLoader.clear();
            if (!context.shouldContinue()) {
                return IterOutcome.STOP;
            }
            return IterOutcome.NONE;
        }
        if (context.isOverMemoryLimit()) {
            return IterOutcome.OUT_OF_MEMORY;
        }
        //      logger.debug("Next received batch {}", batch);
        final RecordBatchDef rbd = batch.getHeader().getDef();
        // load() returns whether the incoming definition changed the schema.
        final boolean schemaChanged = batchLoader.load(rbd, batch.getBody());
        // TODO:  Clean:  DRILL-2933:  That load(...) no longer throws
        // SchemaChangeException, so check/clean catch clause below.
        stats.addLongStat(Metric.BYTES_RECEIVED, batch.getByteCount());
        // Release the raw batch; the loader now owns the vector data.
        batch.release();
        if (schemaChanged) {
            this.schema = batchLoader.getSchema();
            stats.batchReceived(0, rbd.getRecordCount(), true);
            return IterOutcome.OK_NEW_SCHEMA;
        } else {
            stats.batchReceived(0, rbd.getRecordCount(), false);
            return IterOutcome.OK;
        }
    } catch (SchemaChangeException | IOException ex) {
        // Any load/receive failure fails the fragment and stops iteration.
        context.fail(ex);
        return IterOutcome.STOP;
    } finally {
        stats.stopProcessing();
    }
}
Also used : RawFragmentBatch(org.apache.drill.exec.record.RawFragmentBatch) SchemaChangeException(org.apache.drill.exec.exception.SchemaChangeException) RecordBatchDef(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef) IOException(java.io.IOException)

Example 4 with RecordBatchDef

Use of org.apache.drill.exec.proto.UserBitShared.RecordBatchDef in project drill by apache.

From the class TestComplexToJson, the method test:

/**
 * Verifies that the same CSV query returns its single column as REQUIRED
 * when complex-type support is disabled, and as REPEATED when enabled.
 */
@Test
public void test() throws Exception {
    // Preserve the suite-level client so later tests see the original one
    // even if an assertion fails below.
    DrillClient parentClient = client;
    try {
        // Without complex types the whole row is one REQUIRED column.
        runQueryAndCheckMode(false, DataMode.REQUIRED);
        // With complex types the whole row is one REPEATED column.
        runQueryAndCheckMode(true, DataMode.REPEATED);
    } finally {
        client = parentClient;
    }
}

/**
 * Opens a fresh client with the given complex-type setting, runs the CSV
 * query, and verifies that exactly one column comes back with the expected
 * data mode. Releases all batches and closes the client even on failure.
 *
 * @param supportComplexTypes value passed to {@code setSupportComplexTypes}
 * @param expectedMode column mode the server is expected to report
 */
private void runQueryAndCheckMode(boolean supportComplexTypes, DataMode expectedMode) throws Exception {
    RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
    client = new DrillClient(config, serviceSet.getCoordinator());
    try {
        client.setSupportComplexTypes(supportComplexTypes);
        client.connect();
        List<QueryDataBatch> results = testSqlWithResults("select * from dfs_test.`[WORKING_PATH]/src/test/resources/store/text/data/regions.csv`");
        try {
            loader.load(results.get(0).getHeader().getDef(), results.get(0).getData());
            RecordBatchDef def = results.get(0).getHeader().getDef();
            // The entire row is returned as a single column.
            assertEquals(1, def.getFieldCount());
            // assertEquals reports actual vs expected on failure, unlike
            // the former assertTrue(... == ...).
            assertEquals(expectedMode, def.getField(0).getMajorType().getMode());
        } finally {
            loader.clear();
            for (QueryDataBatch result : results) {
                result.release();
            }
        }
    } finally {
        client.close();
    }
}
Also used : QueryDataBatch(org.apache.drill.exec.rpc.user.QueryDataBatch) RecordBatchLoader(org.apache.drill.exec.record.RecordBatchLoader) DrillClient(org.apache.drill.exec.client.DrillClient) RecordBatchDef(org.apache.drill.exec.proto.UserBitShared.RecordBatchDef) Test(org.junit.Test)

Aggregations

RecordBatchDef (org.apache.drill.exec.proto.UserBitShared.RecordBatchDef)4 DrillBuf (io.netty.buffer.DrillBuf)1 IOException (java.io.IOException)1 DrillClient (org.apache.drill.exec.client.DrillClient)1 SchemaChangeException (org.apache.drill.exec.exception.SchemaChangeException)1 UserBitShared (org.apache.drill.exec.proto.UserBitShared)1 SerializedField (org.apache.drill.exec.proto.UserBitShared.SerializedField)1 RawFragmentBatch (org.apache.drill.exec.record.RawFragmentBatch)1 RecordBatchLoader (org.apache.drill.exec.record.RecordBatchLoader)1 QueryDataBatch (org.apache.drill.exec.rpc.user.QueryDataBatch)1 ValueVector (org.apache.drill.exec.vector.ValueVector)1 Test (org.junit.Test)1