Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
From the class AbstractGenericCopierTest, method testAppendRecords.
@Test
public void testAppendRecords() throws Exception {
  try (OperatorFixture operatorFixture = new OperatorFixture.Builder(baseDirTestWatcher).build()) {
    final BufferAllocator allocator = operatorFixture.allocator();
    final BatchSchema batchSchema = createTestSchema(BatchSchema.SelectionVectorMode.NONE);
    final RowSet srcRowSet = createSrcRowSet(allocator);
    final VectorContainer destContainer = new VectorContainer(allocator, batchSchema);
    AbstractCopier.allocateOutgoing(destContainer, 3);
    destContainer.setRecordCount(0);
    final RowSet expectedRowSet = createExpectedRowset(allocator);
    MockRecordBatch mockRecordBatch = null;
    try {
      mockRecordBatch = new MockRecordBatch.Builder()
        .sendData(srcRowSet)
        .build(operatorFixture.getFragmentContext());
      mockRecordBatch.next();
      final Copier copier = createCopier(mockRecordBatch, destContainer, null);
      copier.appendRecord(0);
      copier.appendRecords(1, 2);
      new RowSetComparison(expectedRowSet).verify(DirectRowSet.fromContainer(destContainer));
    } finally {
      if (mockRecordBatch != null) {
        mockRecordBatch.close();
      }
      srcRowSet.clear();
      destContainer.clear();
      expectedRowSet.clear();
    }
  }
}
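The createSrcRowSet and createExpectedRowset helpers referenced above are supplied by the concrete test subclass and are not part of this snippet. A minimal sketch of what such a helper could look like, assuming Drill's RowSetBuilder test utility; the column names, types, and row values are purely illustrative:

  // Hypothetical helper (not from the snippet above): builds a small source row set.
  // The schema and values are assumptions chosen only to show the RowSetBuilder pattern.
  private RowSet createSrcRowSet(BufferAllocator allocator) {
    TupleMetadata schema = new SchemaBuilder()
      .add("id", MinorType.INT)
      .add("name", MinorType.VARCHAR)
      .buildSchema();
    return new RowSetBuilder(allocator, schema)
      .addRow(1, "aaa")
      .addRow(2, "bbb")
      .addRow(3, "ccc")
      .build();
  }

The expected row set for appendRecord(0) followed by appendRecords(1, 2) would then contain the same three rows, built the same way.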
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
From the class TestLenientAllocation, method testLenientLimit.
@Test
public void testLenientLimit() {
  LogFixtureBuilder logBuilder = LogFixture.builder()
    .logger(Accountant.class, Level.WARN);
  try (LogFixture logFixture = logBuilder.build()) {
    // Test can't run without assertions
    assertTrue(AssertionUtil.isAssertionsEnabled());

    // Create a child allocator and force it into lenient mode
    BufferAllocator allocator = fixture.allocator().newChildAllocator("test", 10 * ONE_MEG, 128 * ONE_MEG);
    ((Accountant) allocator).forceLenient();

    // Allocate most of the available memory
    DrillBuf buf1 = allocator.buffer(64 * ONE_MEG);

    // Oops, we did our math wrong; allocate too large a buffer.
    // In lenient mode this succeeds even though it pushes the allocator past its limit.
    DrillBuf buf2 = allocator.buffer(128 * ONE_MEG);

    // A further allocation exceeds what even lenient mode allows, so it must fail.
    try {
      allocator.buffer(64 * ONE_MEG);
      fail();
    } catch (OutOfMemoryException e) {
      // Expected
    }

    // Clean up
    buf1.close();
    buf2.close();
    allocator.close();
  }
}
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
From the class TestLenientAllocation, method testStrict.
/**
 * Test that the allocator is normally strict in debug mode.
 */
@Test
public void testStrict() {
  LogFixtureBuilder logBuilder = LogFixture.builder()
    .logger(Accountant.class, Level.WARN);
  try (LogFixture logFixture = logBuilder.build()) {
    // Test can't run without assertions
    assertTrue(AssertionUtil.isAssertionsEnabled());

    // Create a child allocator
    BufferAllocator allocator = fixture.allocator().newChildAllocator("test", 10 * 1024, 128 * 1024);

    // Allocate most of the available memory
    DrillBuf buf1 = allocator.buffer(64 * 1024);
    try {
      allocator.buffer(128 * 1024);
      fail();
    } catch (OutOfMemoryException e) {
      // Expected
    }

    // Clean up
    buf1.close();
    allocator.close();
  }
}
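Both tests above exercise the same allocator lifecycle: create an allocator (or a child of one), allocate DrillBufs, release the buffers, and close the allocator so it can verify that nothing leaked. A minimal sketch of that pattern, composed only from calls that already appear in these snippets (variable names are illustrative):

  // Minimal allocator lifecycle; closing the child verifies all buffers were released.
  BufferAllocator root = RootAllocatorFactory.newRoot(drillConfig);
  BufferAllocator child = root.newChildAllocator("example", 10 * 1024, 128 * 1024);
  DrillBuf buf = child.buffer(64 * 1024);  // allocate within the limit
  buf.close();                             // release the buffer
  child.close();                           // fails if any allocation is still outstanding
  root.close();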
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
From the class TestLoad, method testMapSchemaChange.
@Test
public void testMapSchemaChange() throws SchemaChangeException {
  final BufferAllocator allocator = RootAllocatorFactory.newRoot(drillConfig);
  final RecordBatchLoader batchLoader = new RecordBatchLoader(allocator);

  // Initial schema: a: INT, m: MAP{}
  SchemaBuilder schemaBuilder1 = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addMap("m")
      .resumeSchema();
  BatchSchema schema1 = new BatchSchemaBuilder()
    .withSchemaBuilder(schemaBuilder1)
    .build();
  {
    assertTrue(loadBatch(allocator, batchLoader, schema1));
    assertTrue(schema1.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }

  // Same schema
  // Schema change: No
  {
    assertFalse(loadBatch(allocator, batchLoader, schema1));
    assertTrue(schema1.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }

  // Add column to map: a: INT, m: MAP{b: VARCHAR}
  // Schema change: Yes
  SchemaBuilder schemaBuilder2 = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addMap("m")
      .add("b", MinorType.VARCHAR)
      .resumeSchema();
  BatchSchema schema2 = new BatchSchemaBuilder()
    .withSchemaBuilder(schemaBuilder2)
    .build();
  {
    assertTrue(loadBatch(allocator, batchLoader, schema2));
    assertTrue(schema2.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }

  // Same schema
  // Schema change: No
  {
    assertFalse(loadBatch(allocator, batchLoader, schema2));
    assertTrue(schema2.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }

  // Add column: a: INT, m: MAP{b: VARCHAR, c: INT}
  // Schema change: Yes
  {
    SchemaBuilder schemaBuilder = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("b", MinorType.VARCHAR)
        .add("c", MinorType.INT)
        .resumeSchema();
    BatchSchema schema = new BatchSchemaBuilder()
      .withSchemaBuilder(schemaBuilder)
      .build();
    assertTrue(loadBatch(allocator, batchLoader, schema));
    assertTrue(schema.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }

  // Drop a column: a: INT, m: MAP{b: VARCHAR}
  // Schema change: Yes
  {
    SchemaBuilder schemaBuilder = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("b", MinorType.VARCHAR)
        .resumeSchema();
    BatchSchema schema = new BatchSchemaBuilder()
      .withSchemaBuilder(schemaBuilder)
      .build();
    assertTrue(loadBatch(allocator, batchLoader, schema));
    assertTrue(schema.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }

  // Change type: a: INT, m: MAP{b: INT}
  // Schema change: Yes
  {
    SchemaBuilder schemaBuilder = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("b", MinorType.INT)
        .resumeSchema();
    BatchSchema schema = new BatchSchemaBuilder()
      .withSchemaBuilder(schemaBuilder)
      .build();
    assertTrue(loadBatch(allocator, batchLoader, schema));
    assertTrue(schema.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }

  // Empty map: a: INT, m: MAP{}
  {
    assertTrue(loadBatch(allocator, batchLoader, schema1));
    assertTrue(schema1.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }

  // Drop map: a: INT
  {
    SchemaBuilder schemaBuilder = new SchemaBuilder()
      .add("a", MinorType.INT);
    BatchSchema schema = new BatchSchemaBuilder()
      .withSchemaBuilder(schemaBuilder)
      .build();
    assertTrue(loadBatch(allocator, batchLoader, schema));
    assertTrue(schema.isEquivalent(batchLoader.getSchema()));
    batchLoader.getContainer().zeroVectors();
  }

  batchLoader.clear();
  allocator.close();
}
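The loadBatch helper used throughout this test is not shown in the snippet. Judging from the calls in testLoadValueVector below, it plausibly builds an empty batch for the given schema, serializes it, and returns whatever RecordBatchLoader.load reports about a schema change; the sketch below is an assumption, not the actual helper from TestLoad:

  // Hypothetical sketch of loadBatch: create vectors for the schema, wrap them in a
  // WritableBatch, serialize, and let the loader report whether the schema changed.
  private boolean loadBatch(BufferAllocator allocator,
                            RecordBatchLoader batchLoader,
                            BatchSchema schema) throws SchemaChangeException {
    List<ValueVector> vectors = createVectors(allocator, schema, 0);
    WritableBatch writableBatch = WritableBatch.getBatchNoHV(0, vectors, false);
    DrillBuf byteBuf = serializeBatch(allocator, writableBatch);
    boolean schemaChanged = batchLoader.load(writableBatch.getDef(), byteBuf);
    byteBuf.release();
    writableBatch.clear();
    return schemaChanged;
  }

This reading matches the assertions above: assertTrue where a schema change is expected, assertFalse where the same schema is loaded again.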
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
From the class TestLoad, method testLoadValueVector.
@Test
public void testLoadValueVector() throws Exception {
  final BufferAllocator allocator = RootAllocatorFactory.newRoot(drillConfig);
  SchemaBuilder schemaBuilder = new SchemaBuilder()
    .add("ints", MinorType.INT)
    .add("chars", MinorType.VARCHAR)
    .addNullable("chars2", MinorType.VARCHAR);
  BatchSchema schema = new BatchSchemaBuilder()
    .withSchemaBuilder(schemaBuilder)
    .build();

  // Create vectors
  final List<ValueVector> vectors = createVectors(allocator, schema, 100);

  // Writable batch now owns the vector buffers
  final WritableBatch writableBatch = WritableBatch.getBatchNoHV(100, vectors, false);

  // Serialize the vectors
  final DrillBuf byteBuf = serializeBatch(allocator, writableBatch);

  // Batch loader does NOT take ownership of the serialized buffer
  final RecordBatchLoader batchLoader = new RecordBatchLoader(allocator);
  batchLoader.load(writableBatch.getDef(), byteBuf);

  // Release the serialized buffer
  byteBuf.release();

  // TODO: Do actual validation
  assertEquals(100, batchLoader.getRecordCount());

  // Free the original vectors
  writableBatch.clear();

  // Free the deserialized vectors
  batchLoader.clear();

  // The allocator will verify that the frees were done correctly
  allocator.close();
}
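The createVectors and serializeBatch helpers are likewise outside the snippet. For createVectors, one plausible sketch, assuming Drill's TypeHelper and AllocationHelper utilities (hypothetical here; the real helper presumably also writes test values into the vectors):

  // Hypothetical sketch: one value vector per schema field, allocated and sized.
  private static List<ValueVector> createVectors(BufferAllocator allocator,
                                                 BatchSchema schema, int count) {
    List<ValueVector> vectors = new ArrayList<>();
    for (MaterializedField field : schema) {
      ValueVector vector = TypeHelper.getNewVector(field, allocator);
      AllocationHelper.allocate(vector, count, 50);  // 50 bytes per value is an arbitrary guess
      vector.getMutator().setValueCount(count);
      vectors.add(vector);
    }
    return vectors;
  }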