Use of org.apache.drill.exec.record.BatchSchema in project drill by apache.
The class TestDrillbitResilience, method assertDrillbitsOk:
  /**
   * Check that all the drillbits are ok.
   *
   * <p>The current implementation does this by counting the number of drillbits using a query.
   */
  private static void assertDrillbitsOk() {
    final SingleRowListener listener = new SingleRowListener() {
      private final BufferAllocator bufferAllocator = RootAllocatorFactory.newRoot(zkHelper.getConfig());
      private final RecordBatchLoader loader = new RecordBatchLoader(bufferAllocator);

      @Override
      public void rowArrived(final QueryDataBatch queryResultBatch) {
        // load the single record
        final QueryData queryData = queryResultBatch.getHeader();
        try {
          loader.load(queryData.getDef(), queryResultBatch.getData());
          // TODO: Clean: DRILL-2933: That load(...) no longer throws
          // SchemaChangeException, so check/clean the catch clause below.
        } catch (final SchemaChangeException e) {
          fail(e.toString());
        }
        assertEquals(1, loader.getRecordCount());

        // there should only be one column
        final BatchSchema batchSchema = loader.getSchema();
        assertEquals(1, batchSchema.getFieldCount());

        // the column should be an integer
        final MaterializedField countField = batchSchema.getColumn(0);
        final MinorType fieldType = countField.getType().getMinorType();
        assertEquals(MinorType.BIGINT, fieldType);

        // get the column value
        final VectorWrapper<?> vw = loader.iterator().next();
        final Object obj = vw.getValueVector().getAccessor().getObject(0);
        assertTrue(obj instanceof Long);
        final Long countValue = (Long) obj;

        // assume this means all the drillbits are still ok
        assertEquals(drillbits.size(), countValue.intValue());
        loader.clear();
      }

      @Override
      public void cleanup() {
        DrillAutoCloseables.closeNoChecked(bufferAllocator);
      }
    };
    try {
      QueryTestUtil.testWithListener(drillClient, QueryType.SQL, "select count(*) from sys.memory", listener);
      listener.waitForCompletion();
      final QueryState state = listener.getQueryState();
      assertTrue(String.format("QueryState should be COMPLETED (and not %s).", state), state == QueryState.COMPLETED);
    } catch (final Exception e) {
      throw new RuntimeException("Couldn't query active drillbits", e);
    }
    final List<DrillPBError> errorList = listener.getErrorList();
    assertTrue("There should not be any errors when checking if Drillbits are OK.", errorList.isEmpty());
  }
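The listener above inspects the loaded batch through BatchSchema's getFieldCount() and getColumn(int) accessors. As a minimal sketch of those accessors in isolation, using the SchemaBuilder test utility that also appears in the validator tests further down (the column name "cnt" is illustrative, not from the Drill sources):

  // A minimal sketch: build a one-column schema and read it back through
  // the same BatchSchema accessors the listener uses above.
  BatchSchema schema = new SchemaBuilder()
      .add("cnt", MinorType.BIGINT)   // a single BIGINT column, as count(*) returns
      .build();

  assertEquals(1, schema.getFieldCount());          // exactly one column
  MaterializedField field = schema.getColumn(0);    // per-column metadata
  assertEquals(MinorType.BIGINT, field.getType().getMinorType());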
Use of org.apache.drill.exec.record.BatchSchema in project drill by apache.
The class SortRecordBatchBuilder, method build:
  public void build(FragmentContext context, VectorContainer outputContainer) throws SchemaChangeException {
    outputContainer.clear();
    if (batches.keySet().size() > 1) {
      throw new SchemaChangeException("Sort currently only supports a single schema.");
    }
    if (batches.size() > Character.MAX_VALUE) {
      throw new SchemaChangeException("Sort cannot work on more than %d batches at a time.", (int) Character.MAX_VALUE);
    }
    if (batches.keys().size() < 1) {
      assert false : "Invalid to have an empty set of batches with no schemas.";
    }
    final DrillBuf svBuffer = reservation.allocateBuffer();
    if (svBuffer == null) {
      throw new OutOfMemoryError("Failed to allocate direct memory for SV4 vector in SortRecordBatchBuilder.");
    }
    sv4 = new SelectionVector4(svBuffer, recordCount, Character.MAX_VALUE);
    BatchSchema schema = batches.keySet().iterator().next();
    List<RecordBatchData> data = batches.get(schema);

    // now we're going to generate the sv4 pointers
    switch (schema.getSelectionVectorMode()) {
      case NONE: {
        int index = 0;
        int recordBatchId = 0;
        for (RecordBatchData d : data) {
          for (int i = 0; i < d.getRecordCount(); i++, index++) {
            sv4.set(index, recordBatchId, i);
          }
          recordBatchId++;
        }
        break;
      }
      case TWO_BYTE: {
        int index = 0;
        int recordBatchId = 0;
        for (RecordBatchData d : data) {
          for (int i = 0; i < d.getRecordCount(); i++, index++) {
            sv4.set(index, recordBatchId, (int) d.getSv2().getIndex(i));
          }
          // might as well drop the selection vector since we'll stop using it now.
          d.getSv2().clear();
          recordBatchId++;
        }
        break;
      }
      default:
        throw new UnsupportedOperationException();
    }

    // next, we'll create lists of each of the vector types.
    ArrayListMultimap<MaterializedField, ValueVector> vectors = ArrayListMultimap.create();
    for (RecordBatchData rbd : batches.values()) {
      for (ValueVector v : rbd.getVectors()) {
        vectors.put(v.getField(), v);
      }
    }
    for (MaterializedField f : schema) {
      List<ValueVector> v = vectors.get(f);
      outputContainer.addHyperList(v, false);
    }
    outputContainer.buildSchema(SelectionVectorMode.FOUR_BYTE);
  }
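The Character.MAX_VALUE limits above follow from the four-byte selection vector's layout: each SV4 entry packs a batch id and a within-batch record index into a single int, so neither component may exceed 16 bits. A minimal sketch of that packing, assuming the upper 16 bits hold the batch id and the lower 16 the record index (the helper names are illustrative):

  // Sketch of the 4-byte selection-vector encoding implied by the
  // Character.MAX_VALUE checks in build() above.
  static int sv4Entry(int recordBatchId, int recordIndex) {
    // both components must fit in 16 bits
    assert recordBatchId <= Character.MAX_VALUE && recordIndex <= Character.MAX_VALUE;
    return recordBatchId << 16 | (recordIndex & 0xFFFF);
  }

  static int batchOf(int entry)  { return entry >>> 16; }    // upper 16 bits
  static int recordOf(int entry) { return entry & 0xFFFF; }  // lower 16 bits

This is also why the NONE and TWO_BYTE cases both walk the batches in order: the running recordBatchId becomes the hyper-vector index that addHyperList() later resolves against.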
Use of org.apache.drill.exec.record.BatchSchema in project drill by apache.
The class ProducerConsumerBatch, method load:
  private boolean load(final RecordBatchData batch) {
    final VectorContainer newContainer = batch.getContainer();
    if (schema != null && newContainer.getSchema().equals(schema)) {
      container.zeroVectors();
      final BatchSchema schema = container.getSchema();
      for (int i = 0; i < container.getNumberOfColumns(); i++) {
        final MaterializedField field = schema.getColumn(i);
        final MajorType type = field.getType();
        final ValueVector vOut = container.getValueAccessorById(
            TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode()),
            container.getValueVectorId(SchemaPath.getSimplePath(field.getPath())).getFieldIds()).getValueVector();
        final ValueVector vIn = newContainer.getValueAccessorById(
            TypeHelper.getValueVectorClass(type.getMinorType(), type.getMode()),
            newContainer.getValueVectorId(SchemaPath.getSimplePath(field.getPath())).getFieldIds()).getValueVector();
        final TransferPair tp = vIn.makeTransferPair(vOut);
        tp.transfer();
      }
      return false;
    } else {
      container.clear();
      for (final VectorWrapper<?> w : newContainer) {
        container.add(w.getValueVector());
      }
      container.buildSchema(SelectionVectorMode.NONE);
      schema = container.getSchema();
      return true;
    }
  }
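When the schemas match, load() moves each column with a TransferPair rather than copying values, which is why it can return false (no schema change) without touching the data itself: only buffer ownership moves. A minimal sketch of that per-column step, assuming vIn and vOut are same-typed vectors obtained from the incoming and output containers as above (the lookup boilerplate is elided):

  // Sketch of the per-column ownership transfer used in the
  // matching-schema branch above.
  static void moveColumn(ValueVector vIn, ValueVector vOut) {
    TransferPair tp = vIn.makeTransferPair(vOut); // pair source with target
    tp.transfer();  // hands the underlying buffers to vOut; no data copy
  }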
Use of org.apache.drill.exec.record.BatchSchema in project drill by apache.
The class TestBatchValidator, method testVariableCorruptLastOutOfRange:
  @Test
  public void testVariableCorruptLastOutOfRange() {
    BatchSchema schema = new SchemaBuilder()
        .add("a", MinorType.VARCHAR)
        .build();
    SingleRowSet batch = fixture.rowSetBuilder(schema)
        .add("xx")
        .add("yy")
        .add("zz")
        .build();
    zapOffset(batch, 3, 100_000);

    // Validator should catch the error.
    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
    validator.validate();
    List<String> errors = validator.errors();
    assertEquals(1, errors.size());
    assertTrue(errors.get(0).contains("Invalid offset"));
    batch.clear();
  }
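zapOffset is a helper defined elsewhere in TestBatchValidator and not shown here; a plausible sketch of it, consistent with how the repeated-vector test below corrupts an offset manually (the exact helper body is an assumption):

  // Assumed sketch of TestBatchValidator.zapOffset(...): overwrite one
  // entry of the VARCHAR column's offset vector with a bogus value, the
  // same way the next test does it by hand.
  static void zapOffset(SingleRowSet batch, int index, int bogusValue) {
    VectorAccessible va = batch.vectorAccessible();
    VarCharVector vc = (VarCharVector) va.iterator().next().getValueVector();
    UInt4Vector ov = vc.getOffsetVector();   // offsets into the data buffer
    ov.getMutator().set(index, bogusValue);  // corrupt entry 'index'
  }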
Use of org.apache.drill.exec.record.BatchSchema in project drill by apache.
The class TestBatchValidator, method testRepeatedBadValueOffset:
  @Test
  public void testRepeatedBadValueOffset() {
    BatchSchema schema = new SchemaBuilder()
        .add("a", MinorType.VARCHAR, DataMode.REPEATED)
        .build();
    SingleRowSet batch = fixture.rowSetBuilder(schema)
        .add((Object) new String[] {})
        .add((Object) new String[] { "fred", "barney", "wilma" })
        .add((Object) new String[] { "dino" })
        .build();
    VectorAccessible va = batch.vectorAccessible();
    @SuppressWarnings("resource")
    ValueVector v = va.iterator().next().getValueVector();
    RepeatedVarCharVector rvc = (RepeatedVarCharVector) v;
    @SuppressWarnings("resource")
    VarCharVector vc = rvc.getDataVector();
    @SuppressWarnings("resource")
    UInt4Vector ov = vc.getOffsetVector();
    ov.getMutator().set(4, 100_000);

    BatchValidator validator = new BatchValidator(batch.vectorAccessible(), true);
    validator.validate();
    List<String> errors = validator.errors();
    assertEquals(1, errors.size());
    assertTrue(errors.get(0).contains("Invalid offset"));
    batch.clear();
  }
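Why index 4? The inner offset vector of the repeated column has one entry per string boundary, and with 0 + 3 + 1 = 4 strings the valid entries are indices 0 through 4; entry 4 is the end offset of the last value ("dino"). A sketch of the layout before corruption (byte offsets computed from the string lengths above):

  // Inner VarChar offset vector for ["fred", "barney", "wilma", "dino"]:
  //   index:   0   1    2    3    4
  //   offset:  0   4   10   15   19
  // ("fred" occupies bytes [0,4), "barney" [4,10), "wilma" [10,15),
  //  "dino" [15,19).)
  // Overwriting index 4 with 100_000 points the end of "dino" far past
  // the data buffer, which BatchValidator reports as "Invalid offset".
  ov.getMutator().set(4, 100_000);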