Use of org.apache.drill.exec.memory.RootAllocator in project drill by axbaretto.
From class TopNBatchTest, method priorityQueueOrderingTest:
/**
* Priority queue unit test.
* @throws Exception
*/
@Test
public void priorityQueueOrderingTest() throws Exception {
  Properties properties = new Properties();
  DrillConfig drillConfig = DrillConfig.create(properties);

  // Sort on colA descending, nulls first, and keep the top 10 rows.
  FieldReference expr = FieldReference.getWithQuotedRef("colA");
  Order.Ordering ordering = new Order.Ordering(Order.Ordering.ORDER_DESC, expr, Order.Ordering.NULLS_FIRST);
  List<Order.Ordering> orderings = Lists.newArrayList(ordering);

  MaterializedField colA = MaterializedField.create("colA", Types.required(TypeProtos.MinorType.INT));
  MaterializedField colB = MaterializedField.create("colB", Types.required(TypeProtos.MinorType.INT));
  List<MaterializedField> cols = Lists.newArrayList(colA, colB);
  BatchSchema batchSchema = new BatchSchema(BatchSchema.SelectionVectorMode.NONE, cols);

  RowSet expectedRowSet;

  try (RootAllocator allocator = new RootAllocator(100_000_000)) {
    // Each batch below seeds one row with colA in [100, 110]; all other rows stay below 100,
    // so the seeded rows are the expected top 10 in descending order.
    expectedRowSet = new RowSetBuilder(allocator, batchSchema)
      .addRow(110, 10)
      .addRow(109, 9)
      .addRow(108, 8)
      .addRow(107, 7)
      .addRow(106, 6)
      .addRow(105, 5)
      .addRow(104, 4)
      .addRow(103, 3)
      .addRow(102, 2)
      .addRow(101, 1)
      .build();

    PriorityQueue queue;
    ExpandableHyperContainer hyperContainer;

    {
      VectorContainer container = new RowSetBuilder(allocator, batchSchema).build().container();
      hyperContainer = new ExpandableHyperContainer(container);
      queue = TopNBatch.createNewPriorityQueue(
        TopNBatch.createMainMappingSet(),
        TopNBatch.createLeftMappingSet(),
        TopNBatch.createRightMappingSet(),
        optionManager,
        new FunctionImplementationRegistry(drillConfig),
        new CodeCompiler(drillConfig, optionManager),
        orderings,
        hyperContainer,
        false,
        true,
        10,
        allocator,
        batchSchema.getSelectionVectorMode());
    }

    List<RecordBatchData> testBatches = Lists.newArrayList();

    try {
      final Random random = new Random();
      final int bound = 100;
      final int numBatches = 11;
      final int numRecordsPerBatch = 100;

      for (int batchCounter = 0; batchCounter < numBatches; batchCounter++) {
        RowSetBuilder rowSetBuilder = new RowSetBuilder(allocator, batchSchema);
        // Seed one known "top" row per batch, then pad with random values below the bound.
        rowSetBuilder.addRow((batchCounter + bound), batchCounter);

        for (int recordCounter = 0; recordCounter < numRecordsPerBatch; recordCounter++) {
          rowSetBuilder.addRow(random.nextInt(bound), random.nextInt(bound));
        }

        VectorContainer vectorContainer = rowSetBuilder.build().container();
        queue.add(new RecordBatchData(vectorContainer, allocator));
      }

      queue.generate();
      VectorContainer resultContainer = queue.getHyperBatch();
      resultContainer.buildSchema(BatchSchema.SelectionVectorMode.NONE);

      RowSet.HyperRowSet actualHyperSet = HyperRowSetImpl.fromContainer(resultContainer, queue.getFinalSv4());
      new RowSetComparison(expectedRowSet).verify(actualHyperSet);
    } finally {
      if (expectedRowSet != null) {
        expectedRowSet.clear();
      }

      queue.cleanup();
      hyperContainer.clear();

      for (RecordBatchData testBatch : testBatches) {
        testBatch.clear();
      }
    }
  }
}
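A note on the pattern all of these examples share: RootAllocator is the root of Drill's direct-memory allocator tree and is AutoCloseable, and closing it while buffers are still outstanding fails, which is why each test clears its vectors and containers before the try-with-resources block ends. Below is a minimal, hedged sketch of that lifecycle (the limit and buffer size are arbitrary, and DrillBuf is written fully qualified because the excerpts above omit imports):

try (RootAllocator allocator = new RootAllocator(1_000_000)) {
  // Allocate a small buffer of direct memory from the root allocator.
  io.netty.buffer.DrillBuf buf = allocator.buffer(256);
  try {
    buf.setInt(0, 42); // use the buffer
  } finally {
    buf.release(); // must be released before the allocator is closed
  }
}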
Use of org.apache.drill.exec.memory.RootAllocator in project drill by axbaretto.
From class AbstractGenericCopierTest, method testCopyRecords:
@Test
public void testCopyRecords() throws SchemaChangeException {
  try (RootAllocator allocator = new RootAllocator(10_000_000)) {
    final BatchSchema batchSchema = createTestSchema(BatchSchema.SelectionVectorMode.NONE);
    final RowSet srcRowSet = createSrcRowSet(allocator);
    final RowSet destRowSet = new RowSetBuilder(allocator, batchSchema).build();
    final VectorContainer destContainer = destRowSet.container();
    final Copier copier = createCopier();
    final RowSet expectedRowSet = createExpectedRowset(allocator);

    copier.setup(new RowSetBatch(srcRowSet), destContainer);
    copier.copyRecords(0, 3);

    try {
      new RowSetComparison(expectedRowSet).verify(destRowSet);
    } finally {
      srcRowSet.clear();

      if (srcRowSet instanceof RowSet.HyperRowSet) {
        ((RowSet.HyperRowSet) srcRowSet).getSv4().clear();
      }

      destRowSet.clear();
      expectedRowSet.clear();
    }
  }
}
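The source, destination, and expected row sets above come from abstract hooks (createTestSchema, createSrcRowSet, createExpectedRowset, createCopier) that concrete copier tests implement, so the snippet is not self-contained. A minimal sketch of the RowSetBuilder / RowSetComparison pairing it relies on, assuming a single required INT column and illustrative values:

// Sketch only: the schema and row values are assumptions, not taken from the project.
BatchSchema schema = new SchemaBuilder()
  .add("a", Types.required(TypeProtos.MinorType.INT))
  .build();

try (RootAllocator allocator = new RootAllocator(1_000_000)) {
  RowSet actual = new RowSetBuilder(allocator, schema).addRow(1).addRow(2).build();
  RowSet expected = new RowSetBuilder(allocator, schema).addRow(1).addRow(2).build();
  try {
    new RowSetComparison(expected).verify(actual); // passes: same schema and values
  } finally {
    actual.clear();
    expected.clear();
  }
}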
Use of org.apache.drill.exec.memory.RootAllocator in project drill by axbaretto.
From class ExampleTest, method secondTest:
/**
* <p>
* Example that uses the fixture builder to build a cluster fixture. Lets
* you set configuration (boot-time) options, session options, system options
* and more.
* </p>
* <p>
* You can write test files to the {@link BaseDirTestWatcher#getRootDir()} and query them in the test.
* </p>
* <p>
* Also shows how to display the plan JSON and just run a query silently,
* getting just the row count, batch count and run time.
* </p>
* @throws Exception if anything goes wrong
*/
@Test
public void secondTest() throws Exception {
  try (RootAllocator allocator = new RootAllocator(100_000_000)) {
    final File tableFile = dirTestWatcher.getRootDir().toPath().resolve("employee.json").toFile();
    final BatchSchema schema = new SchemaBuilder()
      .add("id", Types.required(TypeProtos.MinorType.VARCHAR))
      .add("name", Types.required(TypeProtos.MinorType.VARCHAR))
      .build();
    final RowSet rowSet = new RowSetBuilder(allocator, schema)
      .addRow("1", "kiwi")
      .addRow("2", "watermelon")
      .build();

    new JsonFileBuilder(rowSet).build(tableFile);
    rowSet.clear();

    ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher)
      .configProperty(ExecConstants.SLICE_TARGET, 10);

    try (ClusterFixture cluster = builder.build();
         ClientFixture client = cluster.clientFixture()) {
      String sql = "SELECT * FROM `dfs`.`test/employee.json`";
      System.out.println(client.queryBuilder().sql(sql).explainJson());

      QuerySummary results = client.queryBuilder().sql(sql).run();
      System.out.println(String.format("Read %d rows", results.recordCount()));

      // Usually we want to test something. Here, just test that we got
      // the 2 records.
      assertEquals(2, results.recordCount());
    }
  }
}
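The Javadoc above also mentions the batch count and run time, which the snippet does not print. Assuming QuerySummary exposes them as batchCount() and runTimeMs() (accessor names not shown in this excerpt), the printout could be extended along these lines:

// Hedged sketch: batchCount() and runTimeMs() are assumed accessor names on QuerySummary.
System.out.println(String.format("Read %d records in %d batches after %d ms.",
  results.recordCount(), results.batchCount(), results.runTimeMs()));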
Use of org.apache.drill.exec.memory.RootAllocator in project drill by axbaretto.
From class VariableLengthVectorTest, method testSetBackTracking:
/**
* Set 10,000 values, then go back and overwrite the values starting at the 1001st record.
*/
@Test
public void testSetBackTracking() {
  try (RootAllocator allocator = new RootAllocator(10_000_000)) {
    final MaterializedField field = MaterializedField.create("stringCol", Types.required(TypeProtos.MinorType.VARCHAR));
    final VarCharVector vector = new VarCharVector(field, allocator);
    vector.allocateNew();

    try {
      final int size = 1000;
      final int fluffSize = 10000;
      final VarCharVector.Mutator mutator = vector.getMutator();
      final VarCharVector.Accessor accessor = vector.getAccessor();

      setSafeIndexStrings("", 0, size, mutator);
      setSafeIndexStrings("first cut ", size, fluffSize, mutator);
      setSafeIndexStrings("redone cut ", size, fluffSize, mutator);
      mutator.setValueCount(fluffSize);

      Assert.assertEquals(fluffSize, accessor.getValueCount());
      checkIndexStrings("", 0, size, accessor);
      checkIndexStrings("redone cut ", size, fluffSize, accessor);
    } finally {
      vector.clear();
    }
  }
}
Use of org.apache.drill.exec.memory.RootAllocator in project drill by axbaretto.
From class VariableLengthVectorTest, method testSettingSameValueCount:
/**
* If the vector contains 1000 records, setting a value count of 1000 should work.
*/
@Test
public void testSettingSameValueCount() {
  try (RootAllocator allocator = new RootAllocator(10_000_000)) {
    final MaterializedField field = MaterializedField.create("stringCol", Types.required(TypeProtos.MinorType.VARCHAR));
    final VarCharVector vector = new VarCharVector(field, allocator);
    vector.allocateNew();

    try {
      final int size = 1000;
      final VarCharVector.Mutator mutator = vector.getMutator();
      final VarCharVector.Accessor accessor = vector.getAccessor();

      setSafeIndexStrings("", 0, size, mutator);
      mutator.setValueCount(size);

      Assert.assertEquals(size, accessor.getValueCount());
      checkIndexStrings("", 0, size, accessor);
    } finally {
      vector.clear();
    }
  }
}
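Both VariableLengthVectorTest cases call setSafeIndexStrings and checkIndexStrings, which are not part of this excerpt. A hypothetical sketch of what they presumably do, using the standard VarCharVector mutator and accessor byte-array methods: write the string formed by the prefix and the index into each slot, then read it back and compare.

// Hypothetical helpers (not from the excerpt); the project's implementations may differ.
private void setSafeIndexStrings(String prefix, int start, int end, VarCharVector.Mutator mutator) {
  for (int index = start; index < end; index++) {
    byte[] bytes = (prefix + index).getBytes(java.nio.charset.StandardCharsets.UTF_8);
    mutator.setSafe(index, bytes, 0, bytes.length); // setSafe grows the buffers as needed
  }
}

private void checkIndexStrings(String prefix, int start, int end, VarCharVector.Accessor accessor) {
  for (int index = start; index < end; index++) {
    Assert.assertEquals(prefix + index,
      new String(accessor.get(index), java.nio.charset.StandardCharsets.UTF_8));
  }
}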