Example usage of org.apache.drill.test.TestBuilder in the Apache Drill project: class TestDynamicUDFSupport, method testConcurrentRemoteRegistryUpdateWithDuplicates.
@Test
public void testConcurrentRemoteRegistryUpdateWithDuplicates() throws Exception {
  // Verifies that when two clients concurrently register jars containing the same
  // function (custom_lower), only the first registration succeeds and the second
  // is rejected as a duplicate. The latches below force a deterministic interleaving
  // of the two updateRegistry() calls on the spied remote registry.
  RemoteFunctionRegistry remoteFunctionRegistry = spyRemoteFunctionRegistry();
  // latch1: released by the SECOND caller on entry; the first caller waits on it,
  //         so the first real update runs only after the second caller has arrived.
  final CountDownLatch latch1 = new CountDownLatch(1);
  // latch2: released by the first caller after its real update completes; the
  //         second caller waits on it before performing its own real update.
  final CountDownLatch latch2 = new CountDownLatch(1);
  // latch3: released by the first caller on entry; the main thread waits on it
  //         before starting thread2, guaranteeing thread1 enters updateRegistry first.
  final CountDownLatch latch3 = new CountDownLatch(1);
  // First invocation of updateRegistry -> first answer; second invocation -> second answer.
  doAnswer(invocation -> {
    latch3.countDown();
    latch1.await();
    invocation.callRealMethod();
    latch2.countDown();
    return null;
  }).doAnswer(invocation -> {
    latch1.countDown();
    latch2.await();
    invocation.callRealMethod();
    return null;
  }).when(remoteFunctionRegistry).updateRegistry(any(Registry.class), any(DataChangeVersion.class));
  // jar1: the default binary jar; jar2: a copy built from the same source, so it
  // declares the same custom_lower(VARCHAR-REQUIRED) function.
  final String jar1 = defaultBinaryJar;
  copyDefaultJarsToStagingArea();
  final String copyJarName = "drill-custom-lower-copy";
  final String jar2 = buildAndCopyJarsToStagingArea(copyJarName, "**/CustomLowerFunction.java", null);
  final String query = "create function using jar '%s'";
  // thread1 expects a successful registration; thread2 expects a duplicate-function failure.
  Thread thread1 = new Thread(new TestBuilderRunner(testBuilder().sqlQuery(query, jar1).unOrdered().baselineColumns("ok", "summary").baselineValues(true, String.format("The following UDFs in jar %s have been registered:\n" + "[custom_lower(VARCHAR-REQUIRED)]", jar1))));
  Thread thread2 = new Thread(new TestBuilderRunner(testBuilder().sqlQuery(query, jar2).unOrdered().baselineColumns("ok", "summary").baselineValues(false, String.format("Found duplicated function in %s: custom_lower(VARCHAR-REQUIRED)", jar1))));
  thread1.start();
  // Do not start thread2 until thread1 is inside updateRegistry (see latch3 above).
  latch3.await();
  thread2.start();
  thread1.join();
  thread2.join();
  // After both threads finish, only jar1 must be present in the remote registry.
  DataChangeVersion version = new DataChangeVersion();
  Registry registry = remoteFunctionRegistry.getRegistry(version);
  // NOTE(review): version 2 presumably reflects the initial registry version plus
  // one successful update — confirm against RemoteFunctionRegistry versioning.
  assertEquals("Remote registry version should match", 2, version.getVersion());
  List<Jar> jarList = registry.getJarList();
  assertEquals("Only one jar should be registered", 1, jarList.size());
  assertEquals("Jar name should match", jar1, jarList.get(0).getName());
  // Both threads attempted an update, even though only the first succeeded.
  verify(remoteFunctionRegistry, times(2)).updateRegistry(any(Registry.class), any(DataChangeVersion.class));
}
Example usage of org.apache.drill.test.TestBuilder in the Apache Drill project: class TestComplexTypeReader, method test_array.
// DRILL-4410: stresses ListVector allocation by joining two large JSON files of arrays.
@Test
public void test_array() throws Exception {
  long numRecords = 100000;
  final String file1 = "arrays1.json";
  final String file2 = "arrays2.json";
  final Path path1 = dirTestWatcher.getRootDir().toPath().resolve(file1);
  final Path path2 = dirTestWatcher.getRootDir().toPath().resolve(file2);
  Files.createFile(path1);
  Files.createFile(path2);
  final String arrayString = "[ \"abcdef\", \"ghijkl\", \"mnopqr\", \"stuvwx\", \"yz1234\", \"567890\" ] ";
  // Write identical data to both files so every id joins exactly once.
  writeArrayRecords(path1, numRecords, arrayString);
  writeArrayRecords(path2, numRecords, arrayString);
  // Every joined row must carry both ids plus both (identical) arrays.
  TestBuilder testBuilder = testBuilder()
      .sqlQuery("select * from dfs.`%s` `arrays1` INNER JOIN dfs.`%s` `arrays2` ON "
          + "(`arrays1`.id = `arrays2`.id)", file1, file2)
      .unOrdered()
      .baselineColumns("id", "id0", "array", "array0");
  for (long i = 0; i < numRecords; i++) {
    testBuilder.baselineValues(i, i,
        listOf("abcdef", "ghijkl", "mnopqr", "stuvwx", "yz1234", "567890"),
        listOf("abcdef", "ghijkl", "mnopqr", "stuvwx", "yz1234", "567890"));
  }
  testBuilder.go();
}

/**
 * Appends {@code numRecords} JSON lines of the form
 * <code>{ "id" : &lt;i&gt;, "array" : &lt;arrayString&gt;}</code> to {@code path}.
 *
 * @param path        file to append to (must already exist)
 * @param numRecords  number of records to write
 * @param arrayString JSON array literal embedded verbatim in each record
 * @throws IOException if the file cannot be opened or written
 */
private void writeArrayRecords(Path path, long numRecords, String arrayString) throws IOException {
  try (PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(path.toFile(), true)))) {
    for (long i = 0; i < numRecords; i++) {
      out.println("{ \"id\" : " + i + ", \"array\" : " + arrayString + "}");
    }
  }
}
Example usage of org.apache.drill.test.TestBuilder in the Apache Drill project: class TestCorruptParquetDateCorrection, method testReadNewMetadataCacheFileOverOldAndNewFiles.
@Test
public void testReadNewMetadataCacheFileOverOldAndNewFiles() throws Exception {
  // Stage the mixed-version metadata cache file under the old metadata filename,
  // then rewrite its placeholder contents to point at the test root directory.
  File metadataFile = dirTestWatcher.copyResourceToRoot(
      PARQUET_4203.resolve("mixed_version_partitioned_metadata.requires_replace.txt"),
      MIXED_CORRUPTED_AND_CORRECT_PARTITIONED_FOLDER.resolve(Metadata.OLD_METADATA_FILENAME));
  dirTestWatcher.replaceMetaDataContents(metadataFile, dirTestWatcher.getRootDir(), null);

  // Sanity check: an unfiltered scan of all partitions yields the date baseline three times over.
  TestBuilder fullScan = testBuilder()
      .sqlQuery("select date_col from dfs.`%s`", MIXED_CORRUPTED_AND_CORRECT_PARTITIONED_FOLDER)
      .unOrdered()
      .baselineColumns("date_col");
  for (int pass = 0; pass < 3; pass++) {
    addDateBaselineValues(fullScan);
  }
  fullScan.go();

  String filteredQuery = format(
      "select date_col from dfs.`%s` where date_col = date '1970-01-01'",
      MIXED_CORRUPTED_AND_CORRECT_PARTITIONED_FOLDER);
  // Confirm partition pruning took place and the metadata cache file was used by the planner.
  testPlanMatchingPatterns(filteredQuery, new String[] { "numFiles=3", "usedMetadataFile=true" }, null);

  // Filtered read on the partition column: each of the three files contributes one matching date.
  TestBuilder filteredScan = testBuilder()
      .sqlQuery(filteredQuery)
      .unOrdered()
      .baselineColumns("date_col");
  for (int pass = 0; pass < 3; pass++) {
    filteredScan.baselineValues(LocalDate.of(1970, 1, 1));
  }
  filteredScan.go();
}
Example usage of org.apache.drill.test.TestBuilder in the Apache Drill project: class TestCorruptParquetDateCorrection, method readFilesWithUserDisabledAutoCorrection.
/**
 * Reads the mixed corrupted/correct dates table with automatic date correction
 * explicitly disabled, so corrupt values are expected to surface unmodified.
 * Exercises both a wildcard and an explicit column projection.
 */
private void readFilesWithUserDisabledAutoCorrection() throws Exception {
  String[] projections = { "*", "date_col" };
  for (String projection : projections) {
    TestBuilder tb = testBuilder()
        .sqlQuery(
            "select %s from table(dfs.`%s` (type => 'parquet', autoCorrectCorruptDates => false))",
            projection, MIXED_CORRUPTED_AND_CORRECT_DATES_PATH)
        .unOrdered()
        .baselineColumns("date_col");
    // Baseline: one set of correct dates followed by three sets of corrupted ones.
    addDateBaselineValues(tb);
    addCorruptedDateBaselineValues(tb);
    addCorruptedDateBaselineValues(tb);
    addCorruptedDateBaselineValues(tb);
    tb.go();
  }
}
Example usage of org.apache.drill.test.TestBuilder in the Apache Drill project: class TestCorruptParquetDateCorrection, method testCorruptValueDetectionDuringPruning.
@Test
public void testCorruptValueDetectionDuringPruning() throws Exception {
  try {
    // Two tables with corrupted partition dates written by different Drill versions;
    // each is checked with both a wildcard and an explicit column projection.
    Path[] corruptedTables = { CORRUPTED_PARTITIONED_DATES_1_2_PATH, CORRUPTED_PARTITIONED_DATES_1_4_0_PATH };
    for (String projection : new String[] { "*", "date_col" }) {
      for (Path tablePath : corruptedTables) {
        // Sanity check: an unfiltered scan returns the full date baseline.
        TestBuilder unfiltered = testBuilder()
            .sqlQuery("select %s from dfs.`%s`", projection, tablePath)
            .unOrdered()
            .baselineColumns("date_col");
        addDateBaselineValues(unfiltered);
        unfiltered.go();

        String prunedQuery = format(
            "select %s from dfs.`%s` where date_col = date '1970-01-01'", projection, tablePath);
        // Confirm the planner actually pruned down to a single file.
        testPlanMatchingPatterns(prunedQuery, new String[] { "numFiles=1" }, null);

        // Filtered read on the partition column returns exactly the matching date.
        testBuilder()
            .sqlQuery(prunedQuery)
            .unOrdered()
            .baselineColumns("date_col")
            .baselineValues(LocalDate.of(1970, 1, 1))
            .go();
      }
    }
  } finally {
    // Restore any session options mutated while running the queries.
    resetAllSessionOptions();
  }
}
End of aggregated TestBuilder usage examples.