Use of org.apache.drill.exec.memory.RootAllocator in project drill by apache: class VariableLengthVectorTest, method testTrunicateVectorSetValueCount.
/**
 * Test truncating data. If you have 10000 records, reduce the vector to 1000 records.
 */
@Test
public void testTrunicateVectorSetValueCount() {
  try (RootAllocator allocator = new RootAllocator(10_000_000)) {
    MaterializedField field = MaterializedField.create("stringCol", Types.required(TypeProtos.MinorType.VARCHAR));
    @SuppressWarnings("resource")
    VarCharVector vector = new VarCharVector(field, allocator);
    vector.allocateNew();

    try {
      int size = 1000;
      int fluffSize = 10000;
      VarCharVector.Mutator mutator = vector.getMutator();
      VarCharVector.Accessor accessor = vector.getAccessor();

      // Fill the first 1000 slots, then pad the vector out to 10000 records.
      setSafeIndexStrings("", 0, size, mutator);
      setSafeIndexStrings("first cut ", size, fluffSize, mutator);

      mutator.setValueCount(fluffSize);
      Assert.assertEquals(fluffSize, accessor.getValueCount());

      // Truncate back to 1000 records, as described in the javadoc, and verify the survivors.
      mutator.setValueCount(size);
      Assert.assertEquals(size, accessor.getValueCount());
      checkIndexStrings("", 0, size, accessor);
    } finally {
      vector.clear();
    }
  }
}
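The setSafeIndexStrings and checkIndexStrings helpers used above are not shown in this snippet. A minimal sketch of what they might look like, assuming each slot holds a string built from the prefix and the index (the exact string format is an assumption for illustration, not taken from this page):

  // Hypothetical helpers, sketched for illustration; the real test class defines its own versions.
  public static void setSafeIndexStrings(String prefix, int offset, int size, VarCharVector.Mutator mutator) {
    for (int index = offset; index < size; index++) {
      // Write one value per slot; setSafe() grows the underlying buffers as needed.
      String indexString = prefix + "String num " + index;
      mutator.setSafe(index, indexString.getBytes());
    }
  }

  public static void checkIndexStrings(String prefix, int offset, int size, VarCharVector.Accessor accessor) {
    for (int index = offset; index < size; index++) {
      // Read each value back and compare it with what was written for that index.
      String indexString = prefix + "String num " + index;
      Assert.assertEquals(indexString, new String(accessor.get(index)));
    }
  }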
Use of org.apache.drill.exec.memory.RootAllocator in project drill by apache: class VariableLengthVectorTest, method testSetBackTracking.
/**
 * Set 10000 values. Then go back and set new values starting at the 1001st record.
 */
@Test
public void testSetBackTracking() {
  try (RootAllocator allocator = new RootAllocator(10_000_000)) {
    MaterializedField field = MaterializedField.create("stringCol", Types.required(TypeProtos.MinorType.VARCHAR));
    @SuppressWarnings("resource")
    VarCharVector vector = new VarCharVector(field, allocator);
    vector.allocateNew();

    try {
      int size = 1000;
      int fluffSize = 10000;
      VarCharVector.Mutator mutator = vector.getMutator();
      VarCharVector.Accessor accessor = vector.getAccessor();

      // Fill all 10000 slots, then rewrite slots 1000..9999 with new values.
      setSafeIndexStrings("", 0, size, mutator);
      setSafeIndexStrings("first cut ", size, fluffSize, mutator);
      setSafeIndexStrings("redone cut ", size, fluffSize, mutator);

      mutator.setValueCount(fluffSize);
      Assert.assertEquals(fluffSize, accessor.getValueCount());

      checkIndexStrings("", 0, size, accessor);
      checkIndexStrings("redone cut ", size, fluffSize, accessor);
    } finally {
      vector.clear();
    }
  }
}
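The rewrite works because the second pass starts at index 1000 and proceeds forward, so the offsets of every later slot are rebuilt as it goes. A standalone illustration of the same overwrite behaviour (hypothetical code, not part of the Drill test; it assumes a freshly allocated VarCharVector named vector as above):

  // Overwriting an index that was already written simply replaces the value,
  // as long as setValueCount() is called only after all writes are finished.
  VarCharVector.Mutator m = vector.getMutator();
  m.setSafe(0, "first".getBytes());
  m.setSafe(0, "second".getBytes());   // replaces the earlier value at index 0
  m.setValueCount(1);
  Assert.assertEquals("second", new String(vector.getAccessor().get(0)));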
Use of org.apache.drill.exec.memory.RootAllocator in project drill by apache: class VariableLengthVectorTest, method testDRILL7341.
/**
 * Test for DRILL-7341: growing the value count (which forces reallocation) after
 * exchanging buffers between two vectors should not fail.
 */
@Test
public void testDRILL7341() {
  try (RootAllocator allocator = new RootAllocator(10_000_000)) {
    MaterializedField field = MaterializedField.create("stringCol", Types.optional(TypeProtos.MinorType.VARCHAR));
    NullableVarCharVector sourceVector = new NullableVarCharVector(field, allocator);
    @SuppressWarnings("resource")
    NullableVarCharVector targetVector = new NullableVarCharVector(field, allocator);
    sourceVector.allocateNew();
    targetVector.allocateNew();

    try {
      NullableVarCharVector.Mutator sourceMutator = sourceVector.getMutator();
      // Force the source to grow well past its initial capacity.
      sourceMutator.setValueCount(sourceVector.getValueCapacity() * 4);

      // Swap the underlying state into the target, then grow the target as well.
      targetVector.exchange(sourceVector);
      NullableVarCharVector.Mutator targetMutator = targetVector.getMutator();
      targetMutator.setValueCount(targetVector.getValueCapacity() * 2);
    } finally {
      sourceVector.clear();
      targetVector.clear();
    }
  }
}
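exchange() trades state between the two vectors rather than copying data. A rough sketch of how the swap could be observed, under the assumption that the buffers (and therefore the reported capacities) trade places; this is an illustrative assumption, not an excerpt from the original test:

  // Assumed behaviour: after exchange(), each vector reports the other's old capacity.
  int sourceCapacityBefore = sourceVector.getValueCapacity();
  int targetCapacityBefore = targetVector.getValueCapacity();

  targetVector.exchange(sourceVector);

  Assert.assertEquals(sourceCapacityBefore, targetVector.getValueCapacity());
  Assert.assertEquals(targetCapacityBefore, sourceVector.getValueCapacity());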
Use of org.apache.drill.exec.memory.RootAllocator in project drill by apache: class TopNBatchTest, method priorityQueueOrderingTest.
/**
 * Priority queue unit test.
 * @throws Exception
 */
@Test
public void priorityQueueOrderingTest() throws Exception {
  Properties properties = new Properties();
  DrillConfig drillConfig = DrillConfig.create(properties);
  DrillbitContext drillbitContext = mockDrillbitContext();
  when(drillbitContext.getFunctionImplementationRegistry())
    .thenReturn(new FunctionImplementationRegistry(drillConfig));

  FieldReference expr = FieldReference.getWithQuotedRef("colA");
  Order.Ordering ordering = new Order.Ordering(Order.Ordering.ORDER_DESC, expr, Order.Ordering.NULLS_FIRST);
  List<Order.Ordering> orderings = Lists.newArrayList(ordering);

  MaterializedField colA = MaterializedField.create("colA", Types.required(TypeProtos.MinorType.INT));
  MaterializedField colB = MaterializedField.create("colB", Types.required(TypeProtos.MinorType.INT));
  List<MaterializedField> cols = Lists.newArrayList(colA, colB);
  BatchSchema batchSchema = new BatchSchema(BatchSchema.SelectionVectorMode.NONE, cols);

  FragmentContextImpl context = new FragmentContextImpl(drillbitContext,
    BitControl.PlanFragment.getDefaultInstance(), null,
    drillbitContext.getFunctionImplementationRegistry());

  RowSet expectedRowSet;

  try (RootAllocator allocator = new RootAllocator(100_000_000)) {
    // Expected result: the 10 largest colA values, in descending order.
    expectedRowSet = new RowSetBuilder(allocator, batchSchema)
      .addRow(110, 10)
      .addRow(109, 9)
      .addRow(108, 8)
      .addRow(107, 7)
      .addRow(106, 6)
      .addRow(105, 5)
      .addRow(104, 4)
      .addRow(103, 3)
      .addRow(102, 2)
      .addRow(101, 1)
      .build();

    PriorityQueue queue;
    ExpandableHyperContainer hyperContainer;

    {
      VectorContainer container = new RowSetBuilder(allocator, batchSchema).build().container();
      hyperContainer = new ExpandableHyperContainer(container);
      queue = TopNBatch.createNewPriorityQueue(
        TopNBatch.createMainMappingSet(), TopNBatch.createLeftMappingSet(), TopNBatch.createRightMappingSet(),
        orderings, hyperContainer, false, true, 10, allocator,
        batchSchema.getSelectionVectorMode(), context);
    }

    List<RecordBatchData> testBatches = Lists.newArrayList();

    try {
      final Random random = new Random();
      final int bound = 100;
      final int numBatches = 11;
      final int numRecordsPerBatch = 100;

      for (int batchCounter = 0; batchCounter < numBatches; batchCounter++) {
        RowSetBuilder rowSetBuilder = new RowSetBuilder(allocator, batchSchema);
        // One seeded row per batch carries a large colA value (100 + batchCounter);
        // the remaining rows are random values below 100, so only the seeded rows can reach the top 10.
        rowSetBuilder.addRow((batchCounter + bound), batchCounter);

        for (int recordCounter = 0; recordCounter < numRecordsPerBatch; recordCounter++) {
          rowSetBuilder.addRow(random.nextInt(bound), random.nextInt(bound));
        }

        VectorContainer vectorContainer = rowSetBuilder.build().container();
        queue.add(new RecordBatchData(vectorContainer, allocator));
      }

      // Build the top-10 selection and compare it against the expected row set.
      queue.generate();

      VectorContainer resultContainer = queue.getHyperBatch();
      resultContainer.buildSchema(BatchSchema.SelectionVectorMode.NONE);

      RowSet.HyperRowSet actualHyperSet = HyperRowSetImpl.fromContainer(resultContainer, queue.getFinalSv4());
      new RowSetComparison(expectedRowSet).verify(actualHyperSet);
    } finally {
      if (expectedRowSet != null) {
        expectedRowSet.clear();
      }

      queue.cleanup();
      hyperContainer.clear();

      for (RecordBatchData testBatch : testBatches) {
        testBatch.clear();
      }
    }
  }
}
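mockDrillbitContext() comes from Drill's test-support code and is not shown here. A rough Mockito-based sketch of the kind of stub it returns (hypothetical; the real helper also stubs other collaborators such as the metrics registry and the code compiler):

  // Hypothetical stand-in for the test-support helper; only the collaborators the
  // test above needs (the function registry is stubbed again inside the test) matter here.
  private DrillbitContext mockDrillbitContext() {
    DrillbitContext context = Mockito.mock(DrillbitContext.class);
    Mockito.when(context.getConfig()).thenReturn(DrillConfig.create());
    return context;
  }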
Use of org.apache.drill.exec.memory.RootAllocator in project drill by apache: class TestVectorContainer, method testPrettyPrintRecord.
@Test
public void testPrettyPrintRecord() {
  final MaterializedField colA = MaterializedField.create("colA", Types.required(TypeProtos.MinorType.INT));
  final MaterializedField colB = MaterializedField.create("colB", Types.required(TypeProtos.MinorType.VARCHAR));
  final MaterializedField colC = MaterializedField.create("colC", Types.repeated(TypeProtos.MinorType.FLOAT4));
  final MaterializedField colD = MaterializedField.create("colD", Types.repeated(TypeProtos.MinorType.VARCHAR));
  final List<MaterializedField> cols = Lists.newArrayList(colA, colB, colC, colD);
  final BatchSchema batchSchema = new BatchSchema(BatchSchema.SelectionVectorMode.NONE, cols);

  try (RootAllocator allocator = new RootAllocator(10_000_000)) {
    final RowSet rowSet = new RowSetBuilder(allocator, batchSchema)
      .addRow(110, "green", new float[] { 5.5f, 2.3f }, new String[] { "1a", "1b" })
      .addRow(1440, "yellow", new float[] { 1.0f }, new String[] { "dog" })
      .build();

    final String expected = "[\"colA\" = 110, \"colB\" = green, \"colC\" = [5.5,2.3], \"colD\" = [\"1a\",\"1b\"]]";
    final String actual = rowSet.container().prettyPrintRecord(0);

    try {
      Assert.assertEquals(expected, actual);
    } finally {
      rowSet.clear();
    }
  }
}