Example usage of org.apache.drill.test.ClientFixture in the Apache Drill project:
class TestSimpleExternalSort, method mergeSortWithSv2.
/**
 * Runs the descending one-key sort plan with an SV2 (selection vector)
 * input and checks the row count and ordering of the results. Relies on
 * the default memory settings being large enough to sort entirely in
 * memory; there is, unfortunately, no automated way to confirm that no
 * spilling occurred — verify manually with a breakpoint in the in-memory
 * sort routine if needed.
 *
 * @param testLegacy whether to exercise the legacy sort implementation
 * @throws Exception if the test query fails
 */
private void mergeSortWithSv2(boolean testLegacy) throws Exception {
  try (ClusterFixture cluster = ClusterFixture.standardCluster();
       ClientFixture client = cluster.clientFixture()) {
    chooseImpl(client, testLegacy);
    final List<QueryDataBatch> batches = client.queryBuilder()
        .physicalResource("xsort/one_key_sort_descending_sv2.json")
        .results();
    assertEquals(500000, client.countResults(batches));
    validateResults(client.allocator(), batches);
  }
}
Example usage of org.apache.drill.test.ClientFixture in the Apache Drill project:
class TestSimpleExternalSort, method outOfMemoryExternalSort.
/**
 * Runs a sort under deliberately constrained memory so the external sort
 * must spill, then verifies that all 10M result rows arrive in descending
 * order on the "blue" column.
 * <p>
 * Fixes over the previous version: batch buffers are now released in a
 * {@code finally} block so a failing assertion no longer leaks allocator
 * memory into subsequent tests, and the loop-invariant value count is
 * fetched once instead of on every iteration.
 *
 * @param testLegacy whether to exercise the legacy sort implementation
 * @throws Throwable if the query fails or the ordering check trips
 */
private void outOfMemoryExternalSort(boolean testLegacy) throws Throwable {
  // Tight memory limits force the external sort to spill to disk.
  FixtureBuilder builder = ClusterFixture.builder()
      .configProperty("drill.memory.fragment.max", 50000000)
      .configProperty("drill.memory.fragment.initial", 2000000)
      .configProperty("drill.memory.operator.max", 30000000)
      .configProperty("drill.memory.operator.initial", 2000000);
  try (ClusterFixture cluster = builder.build();
       ClientFixture client = cluster.clientFixture()) {
    chooseImpl(client, testLegacy);
    List<QueryDataBatch> results = client.queryBuilder()
        .physicalResource("/xsort/oom_sort_test.json")
        .results();
    assertEquals(10000000, client.countResults(results));
    long previousBigInt = Long.MAX_VALUE;
    int recordCount = 0;
    int batchCount = 0;
    for (QueryDataBatch b : results) {
      RecordBatchLoader loader = new RecordBatchLoader(client.allocator());
      try {
        if (b.getHeader().getRowCount() > 0) {
          batchCount++;
          loader.load(b.getHeader().getDef(), b.getData());
          @SuppressWarnings("resource")
          BigIntVector c1 = (BigIntVector) loader
              .getValueAccessorById(BigIntVector.class,
                  loader.getValueVectorId(
                      new SchemaPath("blue", ExpressionPosition.UNKNOWN)).getFieldIds())
              .getValueVector();
          BigIntVector.Accessor a1 = c1.getAccessor();
          // Loop-invariant: fetch once instead of per-iteration.
          final int valueCount = a1.getValueCount();
          for (int i = 0; i < valueCount; i++) {
            recordCount++;
            // Each value must be <= its predecessor (descending order).
            assertTrue(String.format("%d < %d", previousBigInt, a1.get(i)),
                previousBigInt >= a1.get(i));
            previousBigInt = a1.get(i);
          }
          // Sanity check that the batch is not a run of identical values.
          assertTrue(String.format("%d == %d", a1.get(0), a1.get(valueCount - 1)),
              a1.get(0) != a1.get(valueCount - 1));
        }
      } finally {
        // Release buffers even on assertion failure to avoid leaking
        // allocator memory into subsequent tests.
        loader.clear();
        b.release();
      }
    }
    System.out.println(String.format("Sorted %,d records in %d batches.", recordCount, batchCount));
  }
}
Example usage of org.apache.drill.test.ClientFixture in the Apache Drill project:
class TestSimpleExternalSort, method sortOneKeyDescendingMergeSort.
/**
 * Runs the one-key descending sort plan over 1M rows through the
 * merge-sort path and validates both the row count and the ordering.
 *
 * @param testLegacy whether to exercise the legacy sort implementation
 * @throws Throwable if the query fails or validation trips
 */
private void sortOneKeyDescendingMergeSort(boolean testLegacy) throws Throwable {
  try (ClusterFixture cluster = ClusterFixture.standardCluster();
       ClientFixture client = cluster.clientFixture()) {
    chooseImpl(client, testLegacy);
    final List<QueryDataBatch> batches = client.queryBuilder()
        .physicalResource("xsort/one_key_sort_descending.json")
        .results();
    assertEquals(1000000, client.countResults(batches));
    validateResults(client.allocator(), batches);
  }
}
Example usage of org.apache.drill.test.ClientFixture in the drill project by axbaretto:
class TopNBatchTest, method sortOneKeyAscending.
/**
 * End-to-end test of the TopN operator: runs the one-key sort plan and
 * compares the ordered output against the "blue" baseline column.
 *
 * @throws Throwable if the query or the baseline comparison fails
 */
@Test
public void sortOneKeyAscending() throws Throwable {
  ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher);
  try (ClusterFixture cluster = builder.build();
       ClientFixture client = cluster.clientFixture()) {
    new TestBuilder(new ClusterFixture.FixtureTestServices(client))
        .ordered()
        .physicalPlanFromFile("topN/one_key_sort.json")
        .baselineColumns("blue")
        .go();
  }
}
Example usage of org.apache.drill.test.ClientFixture in the drill project by axbaretto:
class TestHashAggrSpill, method testSpill.
/**
 * A template for Hash Aggregate spilling tests: builds a cluster with the
 * given memory/partition session options, runs the supplied SQL (or a
 * default aggregation over the mock employee table when {@code sql} is
 * null), and checks the result via {@code runAndDump}.
 *
 * @param maxMem        hash-agg memory limit session option
 * @param numPartitions number of hash-agg partitions
 * @param minBatches    minimum batches per partition
 * @param maxParallel   maximum query parallelization
 * @param fallback      whether pre-spill fallback is enabled
 * @param predict       whether memory prediction is enabled
 * @param sql           query to run, or null for the default query
 * @param expectedRows  expected result row count
 * @param cycle         expected spill cycle
 * @param fromPart      first partition index to check
 * @param toPart        last partition index to check
 * @throws Exception if the query fails or verification trips
 */
private void testSpill(long maxMem, long numPartitions, long minBatches, int maxParallel, boolean fallback, boolean predict, String sql, long expectedRows, int cycle, int fromPart, int toPart) throws Exception {
  LogFixture.LogFixtureBuilder logBuilder = LogFixture.builder()
      .toConsole()
      .logger("org.apache.drill", Level.WARN);
  ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher)
      .sessionOption(ExecConstants.HASHAGG_MAX_MEMORY_KEY, maxMem)
      .sessionOption(ExecConstants.HASHAGG_NUM_PARTITIONS_KEY, numPartitions)
      .sessionOption(ExecConstants.HASHAGG_MIN_BATCHES_PER_PARTITION_KEY, minBatches)
      .configProperty(ExecConstants.SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE, false)
      .sessionOption(PlannerSettings.FORCE_2PHASE_AGGR_KEY, true)
      .sessionOption(ExecConstants.HASHAGG_FALLBACK_ENABLED_KEY, fallback)
      .sessionOption(ExecConstants.HASHAGG_USE_MEMORY_PREDICTION_KEY, predict)
      .maxParallelization(maxParallel)
      .saveProfiles();
  // Fall back to a default aggregation query when no SQL was supplied.
  final String sqlStr;
  if (sql == null) {
    sqlStr = "SELECT empid_s17, dept_i, branch_i, AVG(salary_i) FROM `mock`.`employee_1200K` GROUP BY empid_s17, dept_i, branch_i";
  } else {
    sqlStr = sql;
  }
  try (LogFixture logs = logBuilder.build();
       ClusterFixture cluster = builder.build();
       ClientFixture client = cluster.clientFixture()) {
    runAndDump(client, sqlStr, expectedRows, cycle, fromPart, toPart);
  }
}
Aggregations