Usage example of org.apache.drill.test.TestBuilder from the drill project (axbaretto fork):
class TopNBatchTest, method sortOneKeyAscending.
/**
 * End-to-end test of the TopN operator: runs a physical plan that sorts a
 * single key in ascending order and verifies the ordered output.
 *
 * @throws Throwable if the cluster fails to start or result verification fails
 */
@Test
public void sortOneKeyAscending() throws Throwable {
  ClusterFixtureBuilder fixtureBuilder = ClusterFixture.builder(dirTestWatcher);
  try (ClusterFixture cluster = fixtureBuilder.build();
       ClientFixture client = cluster.clientFixture()) {
    TestBuilder verifier = new TestBuilder(new ClusterFixture.FixtureTestServices(client));
    verifier.ordered()
        .physicalPlanFromFile("topN/one_key_sort.json")
        .baselineColumns("blue")
        .go();
  }
}
Usage example of org.apache.drill.test.TestBuilder from the drill project (axbaretto fork):
class TestLocalExchange, method testHelper.
/**
 * Runs {@code query} with the given mux/demux planner options, verifies that the
 * generated plan contains the expected number of mux exchanges, demux exchanges
 * and hash expressions, then executes the query and checks its results against
 * {@code baselineValues}.
 *
 * @param isMuxOn whether the mux exchange planner option is enabled
 * @param isDeMuxOn whether the demux exchange planner option is enabled
 * @param query the SQL query to plan and execute
 * @param expectedNumMuxes expected number of mux exchanges in the plan
 * @param expectedNumDeMuxes expected number of demux exchanges in the plan
 * @param baselineColumns column names of the expected result set
 * @param baselineValues expected result rows, one Object[] per row
 * @throws Exception if planning, execution or verification fails
 */
private static void testHelper(boolean isMuxOn, boolean isDeMuxOn, String query, int expectedNumMuxes, int expectedNumDeMuxes, String[] baselineColumns, List<Object[]> baselineValues) throws Exception {
  setupHelper(isMuxOn, isDeMuxOn);
  String plan = getPlanInString("EXPLAIN PLAN FOR " + query, JSON_FORMAT);
  System.out.println("Plan: " + plan);
  if (isMuxOn) {
    // The plan is expected to contain two HashExpr occurrences per mux exchange
    // plus one per demux exchange. (The original assertion reused the "should
    // not happen" message from the else-branch, which contradicted the nonzero
    // expected count; the message is corrected here.)
    assertEquals("Wrong number of HashExpr occurrences in the plan", 2 * expectedNumMuxes + expectedNumDeMuxes, StringUtils.countMatches(plan, HASH_EXPR_NAME));
    jsonExchangeOrderChecker(plan, isDeMuxOn, expectedNumMuxes, "hash32asdouble\\(.*\\) ");
  } else {
    // With mux off, no hash expression on the hash column should be planned.
    assertEquals("HashExpr on the hash column should not happen", 0, StringUtils.countMatches(plan, HASH_EXPR_NAME));
  }
  // Make sure the plan has mux and demux exchanges (TODO: currently testing is rudimentary,
  // need to move it to sophisticated testing once we have better planning test tools are available)
  assertEquals("Wrong number of MuxExchanges are present in the plan", expectedNumMuxes, StringUtils.countMatches(plan, MUX_EXCHANGE));
  assertEquals("Wrong number of DeMuxExchanges are present in the plan", expectedNumDeMuxes, StringUtils.countMatches(plan, DEMUX_EXCHANGE));
  // Run the query and verify the output
  TestBuilder testBuilder = testBuilder().sqlQuery(query).unOrdered().baselineColumns(baselineColumns);
  for (Object[] baselineRecord : baselineValues) {
    testBuilder.baselineValues(baselineRecord);
  }
  testBuilder.go();
  testHelperVerifyPartitionSenderParallelization(plan, isMuxOn, isDeMuxOn);
}
Usage example of org.apache.drill.test.TestBuilder from the drill project (axbaretto fork):
class TestLocalExchange, method testGroupByMultiFields.
@Test
public void testGroupByMultiFields() throws Exception {
  // Test multifield hash generation: group by three columns with mux exchange
  // enabled and demux exchange disabled.
  test("ALTER SESSION SET `planner.slice_target`=1");
  test("ALTER SESSION SET `planner.enable_mux_exchange`=true");
  test("ALTER SESSION SET `planner.enable_demux_exchange`=false");

  final String query = String.format("SELECT dept_id, mng_id, some_id, count(*) as numEmployees FROM dfs.`%s` e GROUP BY dept_id, mng_id, some_id", EMPT_TABLE);
  final String[] columns = { "dept_id", "mng_id", "some_id", "numEmployees" };
  final int employeesPerDept = NUM_EMPLOYEES / NUM_DEPTS;

  final String plan = getPlanInString("EXPLAIN PLAN FOR " + query, JSON_FORMAT);
  System.out.println("Plan: " + plan);
  // The hash expression must nest one hash32asdouble call per grouping column.
  jsonExchangeOrderChecker(plan, false, 1, "hash32asdouble\\(.*, hash32asdouble\\(.*, hash32asdouble\\(.*\\) \\) \\) ");

  // Run the query and verify the output: one row per department.
  final TestBuilder verifier = testBuilder().sqlQuery(query).unOrdered().baselineColumns(columns);
  for (int dept = 0; dept < NUM_DEPTS; dept++) {
    verifier.baselineValues((long) dept, 0L, 0L, (long) employeesPerDept);
  }
  verifier.go();
}
Usage example of org.apache.drill.test.TestBuilder from the drill project (axbaretto fork):
class TestCorruptParquetDateCorrection, method testReadPartitionedOnCorrectDates.
/**
 * Test reading a directory full of partitioned parquet files with dates. These
 * files carry a drill version of "1.9.0-SNAPSHOT" and a parquet-writer version
 * of "2" in their footers, so we can be certain they are not corrupt. Although
 * the option disabling date correction is passed, it must not change the result
 * when correction is provably unnecessary. For more info see DRILL-4203.
 */
@Test
public void testReadPartitionedOnCorrectDates() throws Exception {
  try {
    for (String selection : new String[] { "*", "date_col" }) {
      // Sanity check: read every partition with no filter applied.
      TestBuilder unfiltered = testBuilder()
          .sqlQuery("select %s from table(dfs.`%s` (type => 'parquet', autoCorrectCorruptDates => false))", selection, CORRECT_PARTITIONED_DATES_1_9_PATH)
          .unOrdered()
          .baselineColumns("date_col");
      addDateBaselineValues(unfiltered);
      unfiltered.go();

      String filteredQuery = String.format("select %s from table(dfs.`%s` (type => 'parquet', autoCorrectCorruptDates => false))" + " where date_col = date '1970-01-01'", selection, CORRECT_PARTITIONED_DATES_1_9_PATH);
      // Verify that partition pruning actually takes place: one file scanned.
      testPlanMatchingPatterns(filteredQuery, new String[] { "numFiles=1" }, null);
      // Read again with a filter on the partition column.
      testBuilder()
          .sqlQuery(filteredQuery)
          .unOrdered()
          .baselineColumns("date_col")
          .baselineValues(new DateTime(1970, 1, 1, 0, 0))
          .go();
    }
  } finally {
    test("alter session reset all");
  }
}
Usage example of org.apache.drill.test.TestBuilder from the drill project (axbaretto fork):
class TestCorruptParquetDateCorrection, method testQueryWithCorruptedDates.
// According to SQL spec section 4.4.3.5 'Datetime types', the year must not
// exceed 9999, so filtering on a far-out-of-range date must raise an error.
@Test(expected = UserRemoteException.class)
public void testQueryWithCorruptedDates() throws Exception {
  try {
    TestBuilder verifier = testBuilder()
        .sqlQuery("select * from table(dfs.`%s` (type => 'parquet', autoCorrectCorruptDates => false))", CORRUPTED_PARTITIONED_DATES_1_2_PATH)
        .unOrdered()
        .baselineColumns("date_col");
    addCorruptedDateBaselineValues(verifier);
    verifier.go();

    String query = "select * from table(dfs.`%s` (type => 'parquet', autoCorrectCorruptDates => false)) where date_col = cast('15334-03-17' as date)";
    test(query, CORRUPTED_PARTITIONED_DATES_1_2_PATH);
  } catch (UserRemoteException e) {
    // Confirm the failure is the expected datetime-range violation, then
    // rethrow so the @Test(expected=...) annotation is satisfied.
    Assert.assertTrue(e.getMessage().contains("Year out of range"));
    throw e;
  } finally {
    test("alter session reset all");
  }
}
Aggregations