Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
The class TestSplitAndTransfer, method test().
@Test
public void test() throws Exception {
  final DrillConfig drillConfig = DrillConfig.create();
  final BufferAllocator allocator = RootAllocatorFactory.newRoot(drillConfig);
  final MaterializedField field = MaterializedField.create("field", Types.optional(MinorType.VARCHAR));
  final NullableVarCharVector varCharVector = new NullableVarCharVector(field, allocator);
  varCharVector.allocateNew(10000, 1000);
  final int valueCount = 500;
  final String[] compareArray = new String[valueCount];
  final NullableVarCharVector.Mutator mutator = varCharVector.getMutator();
  for (int i = 0; i < valueCount; i += 3) {
    final String s = String.format("%010d", i);
    mutator.set(i, s.getBytes());
    compareArray[i] = s;
  }
  mutator.setValueCount(valueCount);
  final TransferPair tp = varCharVector.getTransferPair(allocator);
  final NullableVarCharVector newVarCharVector = (NullableVarCharVector) tp.getTo();
  final Accessor accessor = newVarCharVector.getAccessor();
  final int[][] startLengths = { { 0, 201 }, { 201, 200 }, { 401, 99 } };
  for (final int[] startLength : startLengths) {
    final int start = startLength[0];
    final int length = startLength[1];
    tp.splitAndTransfer(start, length);
    newVarCharVector.getMutator().setValueCount(length);
    for (int i = 0; i < length; i++) {
      final boolean expectedSet = ((start + i) % 3) == 0;
      if (expectedSet) {
        final byte[] expectedValue = compareArray[start + i].getBytes();
        assertFalse(accessor.isNull(i));
        assertArrayEquals(expectedValue, accessor.get(i));
      } else {
        assertTrue(accessor.isNull(i));
      }
    }
    newVarCharVector.clear();
  }
  varCharVector.close();
  allocator.close();
}
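The test above boils down to a short allocator lifecycle: create a root allocator from the Drill configuration, hand buffers (directly or through vectors) back before closing, then close the allocator. The following is a minimal sketch, not part of the Drill test suite; the child-allocator name and sizes are illustrative, and only calls that already appear on this page are used.

// Minimal sketch of the allocator lifecycle used throughout this page (illustrative only).
final DrillConfig config = DrillConfig.create();
final BufferAllocator root = RootAllocatorFactory.newRoot(config);
// Child allocators carry their own reservation and limit (see testLenient below).
final BufferAllocator child = root.newChildAllocator("example-child", 0, 1024 * 1024);
final DrillBuf buf = child.buffer(4096); // accounted against the child's limit
// ... write to and read from the buffer ...
buf.close(); // release the buffer back to its allocator
child.close(); // an allocator with outstanding buffers cannot close cleanly
root.close();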
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
The class TestSplitAndTransfer, method testBitVectorImpl().
public void testBitVectorImpl(int valueCount, final int[][] startLengths, TestBitPattern pattern) throws Exception {
  final DrillConfig drillConfig = DrillConfig.create();
  final BufferAllocator allocator = RootAllocatorFactory.newRoot(drillConfig);
  final MaterializedField field = MaterializedField.create("field", Types.optional(MinorType.BIT));
  final BitVector bitVector = new BitVector(field, allocator);
  // allocate an extra byte at the end that gets filled with junk
  bitVector.allocateNew(valueCount + 8);
  final int[] compareArray = new int[valueCount];
  int testBitValue = 0;
  final BitVector.Mutator mutator = bitVector.getMutator();
  for (int i = 0; i < valueCount; i++) {
    testBitValue = getBit(pattern, i);
    mutator.set(i, testBitValue);
    compareArray[i] = testBitValue;
  }
  // fill the trailing slots with junk to catch off-by-one out-of-bound reads
  for (int j = valueCount; j < valueCount + 8; j++) {
    // fill with the complement of the last test bit
    mutator.set(j, ~testBitValue);
  }
  mutator.setValueCount(valueCount);
  final TransferPair tp = bitVector.getTransferPair(allocator);
  final BitVector newBitVector = (BitVector) tp.getTo();
  final BitVector.Accessor accessor = newBitVector.getAccessor();
  for (final int[] startLength : startLengths) {
    final int start = startLength[0];
    final int length = startLength[1];
    tp.splitAndTransfer(start, length);
    assertEquals(newBitVector.getAccessor().getValueCount(), length);
    for (int i = 0; i < length; i++) {
      final int expectedValue = compareArray[start + i];
      assertEquals(expectedValue, accessor.get(i));
    }
    newBitVector.clear();
  }
  bitVector.close();
  allocator.close();
}
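testBitVectorImpl depends on a TestBitPattern enum and a getBit() helper that are defined elsewhere in the test class and are not shown here. Purely as an illustration of what such a helper could look like (the enum constants and the random fallback below are assumptions, not Drill's actual definitions):

// Hypothetical getBit() helper, for illustration only -- not the actual Drill implementation.
// It maps the requested pattern to the 0/1 value expected at a given index.
// (Assumes java.util.concurrent.ThreadLocalRandom is imported.)
private static int getBit(TestBitPattern pattern, int index) {
  switch (pattern) {
    case ZERO:        return 0;          // every bit clear
    case ONE:         return 1;          // every bit set
    case ALTERNATING: return index % 2;  // 0, 1, 0, 1, ...
    default:          return ThreadLocalRandom.current().nextInt(2); // e.g. a random pattern
  }
}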
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
The class TestLenientAllocation, method testLenient().
/**
 * Use a test-time hack to force the allocator to be lenient,
 * regardless of whether we are in debug mode or not.
 */
@Test
public void testLenient() {
  LogFixtureBuilder logBuilder = LogFixture.builder().logger(Accountant.class, Level.WARN);
  try (LogFixture logFixture = logBuilder.build()) {
    // Test can't run without assertions
    assertTrue(AssertionUtil.isAssertionsEnabled());
    // Create a child allocator
    BufferAllocator allocator = fixture.allocator().newChildAllocator("test", 10 * 1024, 128 * 1024);
    ((Accountant) allocator).forceLenient();
    // Allocate most of the available memory
    DrillBuf buf1 = allocator.buffer(64 * 1024);
    // Oops, we did our math wrong; allocate too large a buffer.
    DrillBuf buf2 = allocator.buffer(128 * 1024);
    assertEquals(192 * 1024, allocator.getAllocatedMemory());
    // We keep making mistakes.
    DrillBuf buf3 = allocator.buffer(32 * 1024);
    // Right up to the hard limit
    DrillBuf buf4 = allocator.buffer(32 * 1024);
    assertEquals(256 * 1024, allocator.getAllocatedMemory());
    try {
      allocator.buffer(8);
      fail();
    } catch (OutOfMemoryException e) {
      // Expected
    }
    // Recover from our excesses
    buf2.close();
    buf3.close();
    buf4.close();
    assertEquals(64 * 1024, allocator.getAllocatedMemory());
    // We're back in the good graces of the allocator,
    // and can allocate more.
    DrillBuf buf5 = allocator.buffer(8);
    // Clean up
    buf1.close();
    buf5.close();
    allocator.close();
  }
}
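For contrast with the lenient behaviour exercised above, a child allocator that has not been forced lenient is expected to enforce its maximum right away. The sketch below is illustrative only (same fixture and sizes as testLenient), not an existing test:

// Illustrative sketch of the strict (non-lenient) case, for contrast with testLenient.
BufferAllocator strict = fixture.allocator().newChildAllocator("strict", 10 * 1024, 128 * 1024);
DrillBuf ok = strict.buffer(64 * 1024); // well within the 128K limit
try {
  strict.buffer(128 * 1024); // would total 192K: over the limit, and no leniency to absorb it
  fail();
} catch (OutOfMemoryException e) {
  // Expected: the limit is enforced immediately
}
ok.close();
strict.close();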
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
The class TestHashJoinOutcome, method prepareUninitContainers().
private void prepareUninitContainers(List<VectorContainer> emptyInputContainers, BatchSchema batchSchema) {
  BufferAllocator allocator = operatorFixture.getFragmentContext().getAllocator();
  VectorContainer vc1 = new VectorContainer(allocator, batchSchema);
  // set the record count for the first container (returned with OK_NEW_SCHEMA), because it is checked in AbstractRecordBatch.next
  vc1.setRecordCount(0);
  VectorContainer vc2 = new VectorContainer(allocator, batchSchema);
  // Note: vc2 is deliberately left uninitialized -- its record count is NOT set!
  emptyInputContainers.add(vc1);
  emptyInputContainers.add(vc2);
}
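A hypothetical call site for this helper (the batchSchema variable stands in for whatever schema the test builds elsewhere):

// Hypothetical usage of prepareUninitContainers; 'batchSchema' is built elsewhere in the test.
List<VectorContainer> emptyInputContainers = new ArrayList<>();
prepareUninitContainers(emptyInputContainers, batchSchema);
// emptyInputContainers.get(0) reports a record count of 0 (safe to return with OK_NEW_SCHEMA);
// emptyInputContainers.get(1) is deliberately left with its record count unset.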
Use of org.apache.drill.exec.memory.BufferAllocator in project drill by apache.
The class TestDrillbitResilience, method assertDrillbitsOk().
/**
 * Check that all the drillbits are ok.
 * <p>
 * The current implementation does this by counting the number of drillbits using a query.
 */
private static void assertDrillbitsOk() {
  SingleRowListener listener = new SingleRowListener() {
    private final BufferAllocator bufferAllocator = RootAllocatorFactory.newRoot(cluster.config());
    private final RecordBatchLoader loader = new RecordBatchLoader(bufferAllocator);

    @Override
    public void rowArrived(QueryDataBatch queryResultBatch) {
      // load the single record
      final QueryData queryData = queryResultBatch.getHeader();
      loader.load(queryData.getDef(), queryResultBatch.getData());
      assertEquals(1, loader.getRecordCount());
      // there should only be one column
      final BatchSchema batchSchema = loader.getSchema();
      assertEquals(1, batchSchema.getFieldCount());
      // the column should be a BIGINT count
      final MaterializedField countField = batchSchema.getColumn(0);
      final MinorType fieldType = countField.getType().getMinorType();
      assertEquals(MinorType.BIGINT, fieldType);
      // get the column value
      final VectorWrapper<?> vw = loader.iterator().next();
      final Object obj = vw.getValueVector().getAccessor().getObject(0);
      assertTrue(obj instanceof Long);
      final Long countValue = (Long) obj;
      // assume this means all the drillbits are still ok
      assertEquals(cluster.drillbits().size(), countValue.intValue());
      loader.clear();
    }

    @Override
    public void cleanup() {
      loader.clear();
      DrillAutoCloseables.closeNoChecked(bufferAllocator);
    }
  };
  try {
    QueryTestUtil.testWithListener(client.client(), QueryType.SQL, "select count(*) from sys.memory", listener);
    listener.waitForCompletion();
    QueryState state = listener.getQueryState();
    assertSame(QueryState.COMPLETED, state, () -> String.format("QueryState should be COMPLETED (and not %s).", state));
    assertTrue(listener.getErrorList().isEmpty(), "There should not be any errors when checking if Drillbits are OK");
  } catch (final Exception e) {
    throw new RuntimeException("Couldn't query active drillbits", e);
  } finally {
    logger.debug("Cleanup listener");
    listener.cleanup();
  }
  logger.debug("Drillbits are ok.");
}
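A hypothetical example of how such a check is meant to be used in a resilience test: run it after a fault-injection scenario to confirm the cluster is still healthy. The test name and comments below are illustrative, not part of TestDrillbitResilience:

// Hypothetical usage of assertDrillbitsOk() in a resilience scenario (illustrative only).
@Test
public void exampleResilienceCheck() throws Exception {
  // ... inject a controlled failure and run a query that trips it ...
  // Afterwards, every drillbit should still answer the sys.memory count query.
  assertDrillbitsOk();
}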