Use of org.apache.drill.exec.record.metadata.schema.SchemaProvider in the Apache Drill project.
Class TestSchemaCommands, method testAlterRemoveForTable.
@Test
public void testAlterRemoveForTable() throws Exception {
// Verifies ALTER SCHEMA ... REMOVE drops the named columns and properties
// from a schema previously created FOR TABLE.
// Fix: the table name was "table_for_alter_add" (copy-paste from the ADD test);
// renamed to match this test's intent and avoid clashing with the ADD test's table.
String tableName = "table_for_alter_remove";
String table = String.format("dfs.tmp.%s", tableName);
try {
run("create table %s as select 'a' as c from (values(1))", table);
// Schema file is stored alongside the table under the dfs.tmp workspace.
File schemaPath = Paths.get(dirTestWatcher.getDfsTestTmpDir().getPath(), tableName, SchemaProvider.DEFAULT_SCHEMA_NAME).toFile();
run("create schema (col1 int, col2 varchar, col3 boolean, col4 int) for table %s " + "properties ('prop1' = 'a', 'prop2' = 'b', 'prop3' = 'c', 'prop4' = 'd')", table);
testBuilder().sqlQuery("alter schema for table %s remove " + "columns (col2, col4) " + "properties ('prop2', 'prop4')", table).unOrdered().baselineColumns("ok", "summary").baselineValues(true, String.format("Schema for [%s] was updated", table)).go();
SchemaProvider schemaProvider = new PathSchemaProvider(new Path(schemaPath.getPath()));
assertTrue(schemaProvider.exists());
// 4 columns - 2 removed = 2; 4 properties - 2 removed = 2.
TupleMetadata schema = schemaProvider.read().getSchema();
assertEquals(2, schema.size());
assertEquals(2, schema.properties().size());
} finally {
run("drop table if exists %s", table);
}
}
Use of org.apache.drill.exec.record.metadata.schema.SchemaProvider in the Apache Drill project.
Class TestSchemaCommands, method testCreateUsingLoadEmptyFile.
@Test
public void testCreateUsingLoadEmptyFile() throws Exception {
File tmpDir = dirTestWatcher.getTmpDir();
File rawSchema = new File(tmpDir, "raw_empty.schema");
File schemaFile = new File(tmpDir, "schema_for_create_using_load_empty_file.schema");
try {
assertTrue(rawSchema.createNewFile());
testBuilder().sqlQuery("create schema load '%s' path '%s' properties ('k1'='v1', 'k2' = 'v2')", rawSchema.getPath(), schemaFile.getPath()).unOrdered().baselineColumns("ok", "summary").baselineValues(true, String.format("Created schema for [%s]", schemaFile.getPath())).go();
SchemaProvider schemaProvider = new PathSchemaProvider(new Path(schemaFile.getPath()));
assertTrue(schemaFile.exists());
SchemaContainer schemaContainer = schemaProvider.read();
assertNull(schemaContainer.getTable());
TupleMetadata schema = schemaContainer.getSchema();
assertNotNull(schema);
assertEquals(0, schema.size());
assertEquals(2, schema.properties().size());
} finally {
FileUtils.deleteQuietly(rawSchema);
FileUtils.deleteQuietly(schemaFile);
}
}
Use of org.apache.drill.exec.record.metadata.schema.SchemaProvider in the Apache Drill project.
Class TestSchemaCommands, method testAlterAddColumns.
@Test
public void testAlterAddColumns() throws Exception {
File tmpDir = dirTestWatcher.getTmpDir();
File schemaFile = new File(tmpDir, "alter_schema_add_columns.schema");
assertFalse(schemaFile.exists());
try {
run("create schema (col1 int) path '%s' properties ('prop1' = 'a')", schemaFile.getPath());
testBuilder().sqlQuery("alter schema path '%s' add " + "columns (col2 varchar) ", schemaFile.getPath()).unOrdered().baselineColumns("ok", "summary").baselineValues(true, String.format("Schema for [%s] was updated", schemaFile.getPath())).go();
SchemaProvider schemaProvider = new PathSchemaProvider(new Path(schemaFile.getPath()));
TupleMetadata schema = schemaProvider.read().getSchema();
assertEquals(2, schema.size());
assertEquals("col1", schema.fullName(0));
assertEquals("col2", schema.fullName(1));
assertEquals(1, schema.properties().size());
} finally {
FileUtils.deleteQuietly(schemaFile);
}
}
Use of org.apache.drill.exec.record.metadata.schema.SchemaProvider in the Apache Drill project.
Class TestSchemaCommands, method testAlterAddProperties.
@Test
public void testAlterAddProperties() throws Exception {
File tmpDir = dirTestWatcher.getTmpDir();
File schemaFile = new File(tmpDir, "alter_schema_add_properties.schema");
assertFalse(schemaFile.exists());
try {
run("create schema (col1 int) path '%s' properties ('prop1' = 'a')", schemaFile.getPath());
testBuilder().sqlQuery("alter schema path '%s' add " + "properties ('prop2' = 'b', 'prop3' = 'c')", schemaFile.getPath()).unOrdered().baselineColumns("ok", "summary").baselineValues(true, String.format("Schema for [%s] was updated", schemaFile.getPath())).go();
SchemaProvider schemaProvider = new PathSchemaProvider(new Path(schemaFile.getPath()));
TupleMetadata schema = schemaProvider.read().getSchema();
assertEquals(1, schema.size());
Map<String, String> expectedProperties = new HashMap<>();
expectedProperties.put("prop1", "a");
expectedProperties.put("prop2", "b");
expectedProperties.put("prop3", "c");
assertEquals(expectedProperties, schema.properties());
} finally {
FileUtils.deleteQuietly(schemaFile);
}
}
Use of org.apache.drill.exec.record.metadata.schema.SchemaProvider in the Apache Drill project.
Class IcebergFormatPlugin, method getGroupScan.
@Override
public AbstractGroupScan getGroupScan(String userName, FileSelection selection, List<SchemaPath> columns, MetadataProviderManager metadataProviderManager) throws IOException {
// Resolve a provided schema when one is registered; otherwise scan without one.
TupleMetadata providedSchema = null;
SchemaProvider provider = metadataProviderManager.getSchemaProvider();
if (provider != null) {
providedSchema = provider.read().getSchema();
}
// maxRecords(-1) means no row limit is pushed into the scan here.
return IcebergGroupScan.builder()
.userName(userName)
.formatPlugin(this)
.schema(providedSchema)
.path(getPath(selection))
.columns(columns)
.maxRecords(-1)
.build();
}
Aggregations