Use of io.trino.plugin.deltalake.util.TestingHadoop in project trino by trinodb: the testDropSchemaExternalFiles method of the TestDeltaLakeAdlsConnectorSmokeTest class.
import static java.lang.String.format;
import static org.assertj.core.api.Assertions.assertThat;

@Test
public void testDropSchemaExternalFiles()
{
    // TODO move this test to base class, so it's exercised for S3 too
    String schemaName = "externalFileSchema";
    String schemaDir = fullAdlsUrl() + "drop-schema-with-external-files/";
    String subDir = schemaDir + "subdir/";
    String externalFile = subDir + "external-file";

    TestingHadoop hadoopContainer = dockerizedDataLake.getTestingHadoop();

    // Create a file in a subdirectory of the schema directory before creating the schema
    hadoopContainer.runCommandInContainer("hdfs", "dfs", "-mkdir", "-p", subDir);
    hadoopContainer.runCommandInContainer("hdfs", "dfs", "-touchz", externalFile);

    query(format("CREATE SCHEMA %s WITH (location = '%s')", schemaName, schemaDir));
    assertThat(hadoopContainer.executeInContainer("hdfs", "dfs", "-test", "-e", externalFile).getExitCode())
            .as("external file exists after creating schema")
            .isEqualTo(0);

    query("DROP SCHEMA " + schemaName);
    assertThat(hadoopContainer.executeInContainer("hdfs", "dfs", "-test", "-e", externalFile).getExitCode())
            .as("external file exists after dropping schema")
            .isEqualTo(0);

    // Test behavior without the external file
    hadoopContainer.runCommandInContainer("hdfs", "dfs", "-rm", "-r", subDir);

    query(format("CREATE SCHEMA %s WITH (location = '%s')", schemaName, schemaDir));
    assertThat(hadoopContainer.executeInContainer("hdfs", "dfs", "-test", "-d", schemaDir).getExitCode())
            .as("schema directory exists after creating schema")
            .isEqualTo(0);

    query("DROP SCHEMA " + schemaName);
    assertThat(hadoopContainer.executeInContainer("hdfs", "dfs", "-test", "-e", schemaDir).getExitCode())
            .as("schema directory deleted after dropping schema without external file")
            .isEqualTo(1);
}