Use of org.apache.drill.exec.memory.BufferAllocator in project drill by axbaretto.
The class TestLoad, method testLoadValueVector.
@SuppressWarnings("resource")
@Test
public void testLoadValueVector() throws Exception {
final BufferAllocator allocator = RootAllocatorFactory.newRoot(drillConfig);
BatchSchema schema = new SchemaBuilder().add("ints", MinorType.INT).add("chars", MinorType.VARCHAR).addNullable("chars2", MinorType.VARCHAR).build();
// Create vectors
final List<ValueVector> vectors = createVectors(allocator, schema, 100);
// Writeable batch now owns vector buffers
final WritableBatch writableBatch = WritableBatch.getBatchNoHV(100, vectors, false);
// Serialize the vectors
final DrillBuf byteBuf = serializeBatch(allocator, writableBatch);
// Batch loader does NOT take ownership of the serialized buffer
final RecordBatchLoader batchLoader = new RecordBatchLoader(allocator);
batchLoader.load(writableBatch.getDef(), byteBuf);
// Release the serialized buffer.
byteBuf.release();
// TODO: Replace this with actual validation, not just dumping to the console.
boolean firstColumn = true;
int recordCount = 0;
for (final VectorWrapper<?> v : batchLoader) {
if (firstColumn) {
firstColumn = false;
} else {
System.out.print("\t");
}
System.out.print(v.getField().getName());
System.out.print("[");
System.out.print(v.getField().getType().getMinorType());
System.out.print("]");
}
System.out.println();
for (int r = 0; r < batchLoader.getRecordCount(); r++) {
boolean first = true;
recordCount++;
for (final VectorWrapper<?> v : batchLoader) {
if (first) {
first = false;
} else {
System.out.print("\t");
}
final ValueVector.Accessor accessor = v.getValueVector().getAccessor();
if (v.getField().getType().getMinorType() == TypeProtos.MinorType.VARCHAR) {
final Object obj = accessor.getObject(r);
if (obj != null) {
System.out.print(accessor.getObject(r));
} else {
System.out.print("NULL");
}
} else {
System.out.print(accessor.getObject(r));
}
}
if (!first) {
System.out.println();
}
}
assertEquals(100, recordCount);
// Free the original vectors
writableBatch.clear();
// Free the deserialized vectors
batchLoader.clear();
// The allocator will verify that the frees were done correctly.
allocator.close();
}
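The createVectors and serializeBatch helpers are defined elsewhere in the test class. As a rough sketch of what serialization involves, the hypothetical helper below (not the test's actual code) copies every backing buffer of the WritableBatch into one contiguous DrillBuf, roughly what the RPC layer does when a batch goes over the wire:

// Hypothetical sketch; the real serializeBatch lives in the test's base class.
private static DrillBuf serializeBatch(BufferAllocator allocator, WritableBatch batch) {
  int byteCount = 0;
  for (DrillBuf buf : batch.getBuffers()) {
    byteCount += buf.readableBytes();
  }
  DrillBuf serialized = allocator.buffer(byteCount);  // caller must release()
  for (DrillBuf buf : batch.getBuffers()) {
    serialized.writeBytes(buf, buf.readerIndex(), buf.readableBytes());
  }
  return serialized;
}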
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by axbaretto.
The class TestDrillbitResilience, method assertDrillbitsOk.
/**
 * Check that all the Drillbits are OK.
 * <p>
 * The current implementation does this by counting the number of Drillbits using a query.
 */
private static void assertDrillbitsOk() {
  final SingleRowListener listener = new SingleRowListener() {
    private final BufferAllocator bufferAllocator = RootAllocatorFactory.newRoot(zkHelper.getConfig());
    private final RecordBatchLoader loader = new RecordBatchLoader(bufferAllocator);

    @Override
    public void rowArrived(final QueryDataBatch queryResultBatch) {
      // Load the single record
      final QueryData queryData = queryResultBatch.getHeader();
      try {
        loader.load(queryData.getDef(), queryResultBatch.getData());
        // TODO: Clean: DRILL-2933: That load(...) no longer throws
        // SchemaChangeException, so check/clean the catch clause below.
      } catch (final SchemaChangeException e) {
        fail(e.toString());
      }
      assertEquals(1, loader.getRecordCount());

      // There should be only one column
      final BatchSchema batchSchema = loader.getSchema();
      assertEquals(1, batchSchema.getFieldCount());

      // The column should be a BIGINT
      final MaterializedField countField = batchSchema.getColumn(0);
      final MinorType fieldType = countField.getType().getMinorType();
      assertEquals(MinorType.BIGINT, fieldType);

      // Get the column value; if the count matches the number of Drillbits we
      // started, assume they are all still OK.
      final VectorWrapper<?> vw = loader.iterator().next();
      final Object obj = vw.getValueVector().getAccessor().getObject(0);
      assertTrue(obj instanceof Long);
      final Long countValue = (Long) obj;
      assertEquals(drillbits.size(), countValue.intValue());
      loader.clear();
    }

    @Override
    public void cleanup() {
      DrillAutoCloseables.closeNoChecked(bufferAllocator);
    }
  };

  try {
    QueryTestUtil.testWithListener(drillClient, QueryType.SQL, "select count(*) from sys.memory", listener);
    listener.waitForCompletion();
    final QueryState state = listener.getQueryState();
    assertTrue(String.format("QueryState should be COMPLETED (and not %s).", state), state == QueryState.COMPLETED);
  } catch (final Exception e) {
    throw new RuntimeException("Couldn't query active drillbits", e);
  }

  final List<DrillPBError> errorList = listener.getErrorList();
  assertTrue("There should not be any errors when checking if Drillbits are OK.", errorList.isEmpty());
}
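The close in cleanup() doubles as a leak check: a Drill root allocator refuses to close while buffers it allocated are still outstanding. A minimal sketch of that behavior (assuming the same RootAllocatorFactory/DrillConfig setup as above; the exception type is my reading of the allocator's contract, not something this test asserts):

// Sketch: a root allocator verifies all buffers were released before close().
BufferAllocator root = RootAllocatorFactory.newRoot(DrillConfig.create());
DrillBuf buf = root.buffer(256);
try {
  root.close();    // fails: 256 bytes are still allocated
} catch (IllegalStateException e) {
  buf.release();   // free the outstanding buffer...
  root.close();    // ...then the allocator closes cleanly
}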
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
The class ParquetRecordReaderTest, method testPerformance.
@Test
@Ignore
public void testPerformance() throws Exception {
  final DrillbitContext bitContext = mock(DrillbitContext.class);
  final UserClientConnection connection = mock(UserClientConnection.class);

  final DrillConfig c = DrillConfig.create();
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  final FragmentContextImpl context = new FragmentContextImpl(bitContext, BitControl.PlanFragment.getDefaultInstance(), connection, registry);

  final Path fileName = new Path("/tmp/parquet_test_performance.parquet");
  final HashMap<String, FieldInfo> fields = new HashMap<>();
  final ParquetTestProperties props = new ParquetTestProperties(1, 20 * 1000 * 1000, DEFAULT_BYTES_PER_PAGE, fields);
  populateFieldInfoMap(props);

  final Configuration dfsConfig = new Configuration();
  final List<Footer> footers = ParquetFileReader.readFooters(dfsConfig, fileName);
  final Footer f = footers.iterator().next();

  final List<SchemaPath> columns = Lists.newArrayList();
  columns.add(new SchemaPath("_MAP.integer", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.bigInt", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.f", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.d", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.b", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.bin", ExpressionPosition.UNKNOWN));
  columns.add(new SchemaPath("_MAP.bin2", ExpressionPosition.UNKNOWN));

  int totalRowCount = 0;
  final FileSystem fs = new CachedSingleFileSystem(fileName);
  final BufferAllocator allocator = RootAllocatorFactory.newRoot(c);
  for (int i = 0; i < 25; i++) {
    CompressionCodecFactory ccf = DrillCompressionCodecFactory.createDirectCodecFactory(dfsConfig, new ParquetDirectByteBufferAllocator(allocator), 0);
    final ParquetRecordReader rr = new ParquetRecordReader(context, fileName, 0, fs, ccf, f.getParquetMetadata(), columns, ParquetReaderUtility.DateCorruptionStatus.META_SHOWS_CORRUPTION);
    final TestOutputMutator mutator = new TestOutputMutator(allocator);
    rr.setup(null, mutator);

    final Stopwatch watch = Stopwatch.createStarted();
    int rowCount = 0;
    while ((rowCount = rr.next()) > 0) {
      totalRowCount += rowCount;
    }
    rr.close();
  }
  allocator.close();
}
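The per-pass Stopwatch above is started but never read in this excerpt. A hedged sketch of the reporting one might add just before rr.close() (the format and placement are assumptions; requires java.util.concurrent.TimeUnit):

// Hypothetical per-pass report, inserted before rr.close():
long elapsedMs = watch.elapsed(TimeUnit.MILLISECONDS);
System.out.printf("pass %d: %,d total rows read in %,d ms%n", i, totalRowCount, elapsedMs);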
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
The class TestPromotableWriter, method list.
@Test
public void list() throws Exception {
  BufferAllocator allocator = RootAllocatorFactory.newRoot(DrillConfig.create());
  TestOutputMutator output = new TestOutputMutator(allocator);
  ComplexWriter rootWriter = new VectorContainerWriter(output, true);
  MapWriter writer = rootWriter.rootAsMap();

  // Row 0: "map"."a" starts out as a BIGINT
  rootWriter.setPosition(0);
  {
    writer.map("map").bigInt("a").writeBigInt(1);
  }
  // Row 1: writing a FLOAT4 to the same path forces a promotion
  rootWriter.setPosition(1);
  {
    writer.map("map").float4("a").writeFloat4(2.0f);
  }
  // Row 2: an empty list at the same path
  rootWriter.setPosition(2);
  {
    writer.map("map").list("a").startList();
    writer.map("map").list("a").endList();
  }
  // Row 3: a list whose elements are themselves of mixed types
  rootWriter.setPosition(3);
  {
    writer.map("map").list("a").startList();
    writer.map("map").list("a").bigInt().writeBigInt(3);
    writer.map("map").list("a").float4().writeFloat4(4);
    writer.map("map").list("a").endList();
  }
  rootWriter.setValueCount(4);

  BatchPrinter.printBatch(output.getContainer());
}
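Rows 0 through 3 write BIGINT, FLOAT4, and LIST values to the same path map.a, which is what forces the promotable writer to promote the underlying vector to a union. A hedged sketch of an assertion that could follow setValueCount(4) (the field-lookup calls are assumptions for illustration, not part of the original test):

// Hypothetical check: after mixed-type writes, "a" should report a UNION type.
MaterializedField mapField = output.getContainer().iterator().next().getField();
MaterializedField aField = mapField.getChildren().iterator().next();
assertEquals(TypeProtos.MinorType.UNION, aField.getType().getMinorType());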
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
The class IncomingBuffers, method batchArrived.
public boolean batchArrived(final IncomingDataBatch incomingBatch) throws FragmentSetupException, IOException {
  // Take a reference to the incoming batch only while we are open; otherwise we would leak memory.
  try (@SuppressWarnings("unused") AutoCloseables.Closeable lock = sharedIncomingBatchLock.open()) {
    if (closed) {
      return false;
    }
    if (incomingBatch.getHeader().getIsLastBatch()) {
      streamsRemaining.decrementAndGet();
    }

    final int sendMajorFragmentId = incomingBatch.getHeader().getSendingMajorFragmentId();
    DataCollector collector = collectorMap.get(sendMajorFragmentId);
    if (collector == null) {
      throw new FragmentSetupException(String.format("We received a major fragment id that we were not expecting. The id was %d. %s", sendMajorFragmentId, Arrays.toString(collectorMap.values().toArray())));
    }

    // Use the DataCollector's buffer allocator if set, otherwise the fragment's
    BufferAllocator ownerAllocator = collector.getAllocator();
    synchronized (collector) {
      final RawFragmentBatch newRawFragmentBatch = incomingBatch.newRawFragmentBatch(ownerAllocator);
      boolean decrementedToZero = collector.batchArrived(incomingBatch.getHeader().getSendingMinorFragmentId(), newRawFragmentBatch);
      // The collector now holds its own reference, so drop ours.
      newRawFragmentBatch.release();
      // Return true only if the remaining-required count was decremented and is currently zero.
      return decrementedToZero;
    }
  }
}
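The sharedIncomingBatchLock.open() idiom wraps a plain java.util.concurrent lock so that try-with-resources releases it automatically. A minimal sketch of that pattern, assuming a ReentrantReadWriteLock underneath (this is the general shape, not necessarily Drill's exact class):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// General shape of the wrapper; names here are illustrative.
final class AutoCloseableLock implements AutoCloseable {
  private final Lock lock;

  AutoCloseableLock(Lock lock) {
    this.lock = lock;
  }

  AutoCloseableLock open() {
    lock.lock();   // acquire when the try block opens...
    return this;
  }

  @Override
  public void close() {
    lock.unlock(); // ...release when it exits, even on exceptions
  }
}

With a ReentrantReadWriteLock, batchArrived() would hold the read (shared) side so many senders can deliver batches concurrently, while close() would presumably take the write (exclusive) side before setting the closed flag.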