Search in sources :

Example 11 with SchemaProvider

Use of org.apache.drill.exec.record.metadata.schema.SchemaProvider in the Apache Drill project.

The following example is from the class TestSchemaCommands, method testAlterAddForTable.

/**
 * Verifies that {@code ALTER SCHEMA ... ADD OR REPLACE} appends new columns
 * and properties to the schema file previously created for a table.
 */
@Test
public void testAlterAddForTable() throws Exception {
    String tableName = "table_for_alter_add";
    String fullTableName = String.format("dfs.tmp.%s", tableName);
    try {
        run("create table %s as select 'a' as c from (values(1))", fullTableName);
        // Location of the schema file produced by CREATE SCHEMA ... FOR TABLE.
        File schemaFile = Paths
            .get(dirTestWatcher.getDfsTestTmpDir().getPath(), tableName, SchemaProvider.DEFAULT_SCHEMA_NAME)
            .toFile();
        run("create schema (col int) for table %s properties ('prop1' = 'a')", fullTableName);
        String alterQuery = "alter schema for table %s add or replace "
            + "columns (col2 varchar, col3 boolean) "
            + "properties ('prop2' = 'd', 'prop3' = 'c')";
        testBuilder()
            .sqlQuery(alterQuery, fullTableName)
            .unOrdered()
            .baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Schema for [%s] was updated", fullTableName))
            .go();
        SchemaProvider provider = new PathSchemaProvider(new Path(schemaFile.getPath()));
        assertTrue(provider.exists());
        TupleMetadata updatedSchema = provider.read().getSchema();
        // One original column plus the two added; one original property plus the two added.
        assertEquals(3, updatedSchema.size());
        assertEquals(3, updatedSchema.properties().size());
    } finally {
        run("drop table if exists %s", fullTableName);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaProvider(org.apache.drill.exec.record.metadata.schema.SchemaProvider) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) File(java.io.File) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) ClusterTest(org.apache.drill.test.ClusterTest) Test(org.junit.Test) UnlikelyTest(org.apache.drill.categories.UnlikelyTest) SqlTest(org.apache.drill.categories.SqlTest)

Example 12 with SchemaProvider

Use of org.apache.drill.exec.record.metadata.schema.SchemaProvider in the Apache Drill project.

The following example is from the class TestSchemaCommands, method testSuccessfulCreateForPath.

/**
 * Verifies {@code CREATE SCHEMA} with an explicit {@code PATH}: the schema
 * file is created, is not bound to any table, and both column definitions
 * round-trip with the expected nullability and types.
 */
@Test
public void testSuccessfulCreateForPath() throws Exception {
    File schemaFile = new File(dirTestWatcher.getTmpDir(), "schema_for_successful_create_for_path.schema");
    assertFalse(schemaFile.exists());
    try {
        testBuilder()
            .sqlQuery("create schema (i int not null, v varchar) path '%s'", schemaFile.getPath())
            .unOrdered()
            .baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Created schema for [%s]", schemaFile.getPath()))
            .go();
        SchemaProvider provider = new PathSchemaProvider(new Path(schemaFile.getPath()));
        assertTrue(provider.exists());
        SchemaContainer container = provider.read();
        // A path-based schema carries no table reference.
        assertNull(container.getTable());
        TupleMetadata schema = container.getSchema();
        assertNotNull(schema);
        ColumnMetadata intColumn = schema.metadata("i");
        assertFalse(intColumn.isNullable());
        assertEquals(TypeProtos.MinorType.INT, intColumn.type());
        ColumnMetadata varcharColumn = schema.metadata("v");
        assertTrue(varcharColumn.isNullable());
        assertEquals(TypeProtos.MinorType.VARCHAR, varcharColumn.type());
    } finally {
        FileUtils.deleteQuietly(schemaFile);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaContainer(org.apache.drill.exec.record.metadata.schema.SchemaContainer) ColumnMetadata(org.apache.drill.exec.record.metadata.ColumnMetadata) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaProvider(org.apache.drill.exec.record.metadata.schema.SchemaProvider) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) File(java.io.File) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) ClusterTest(org.apache.drill.test.ClusterTest) Test(org.junit.Test) UnlikelyTest(org.apache.drill.categories.UnlikelyTest) SqlTest(org.apache.drill.categories.SqlTest)

Example 13 with SchemaProvider

Use of org.apache.drill.exec.record.metadata.schema.SchemaProvider in the Apache Drill project.

The following example is from the class TestSchemaCommands, method testCreateWithoutSchemaProperties.

/**
 * Verifies that a schema created without a PROPERTIES clause still exposes
 * a non-null, empty properties map after being read back.
 */
@Test
public void testCreateWithoutSchemaProperties() throws Exception {
    File schemaFile = new File(dirTestWatcher.getTmpDir(), "schema_for_create_without_properties.schema");
    assertFalse(schemaFile.exists());
    try {
        testBuilder()
            .sqlQuery("create schema (i int not null) path '%s'", schemaFile.getPath())
            .unOrdered()
            .baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Created schema for [%s]", schemaFile.getPath()))
            .go();
        SchemaProvider provider = new PathSchemaProvider(new Path(schemaFile.getPath()));
        assertTrue(provider.exists());
        SchemaContainer container = provider.read();
        assertNull(container.getTable());
        TupleMetadata schema = container.getSchema();
        assertNotNull(schema);
        // Omitting PROPERTIES yields an empty map, not null.
        assertNotNull(schema.properties());
        assertEquals(0, schema.properties().size());
    } finally {
        FileUtils.deleteQuietly(schemaFile);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaContainer(org.apache.drill.exec.record.metadata.schema.SchemaContainer) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaProvider(org.apache.drill.exec.record.metadata.schema.SchemaProvider) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) File(java.io.File) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) ClusterTest(org.apache.drill.test.ClusterTest) Test(org.junit.Test) UnlikelyTest(org.apache.drill.categories.UnlikelyTest) SqlTest(org.apache.drill.categories.SqlTest)

Example 14 with SchemaProvider

Use of org.apache.drill.exec.record.metadata.schema.SchemaProvider in the Apache Drill project.

The following example is from the class TestSchemaCommands, method testAlterRemoveSuccess.

/**
 * Verifies that {@code ALTER SCHEMA ... REMOVE} drops the named columns and
 * the named properties, leaving the remaining ones intact and in order.
 */
@Test
public void testAlterRemoveSuccess() throws Exception {
    File schemaFile = new File(dirTestWatcher.getTmpDir(), "alter_schema_remove_success.schema");
    assertFalse(schemaFile.exists());
    try {
        run("create schema (col1 int, col2 varchar, col3 boolean, col4 int) path '%s' "
            + "properties ('prop1' = 'a', 'prop2' = 'b', 'prop3' = 'c', 'prop4' = 'd')", schemaFile.getPath());
        String removeQuery = "alter schema path '%s' remove "
            + "columns (col2, col4) "
            + "properties ('prop2', 'prop4')";
        testBuilder()
            .sqlQuery(removeQuery, schemaFile.getPath())
            .unOrdered()
            .baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Schema for [%s] was updated", schemaFile.getPath()))
            .go();
        SchemaProvider provider = new PathSchemaProvider(new Path(schemaFile.getPath()));
        TupleMetadata updatedSchema = provider.read().getSchema();
        // Removed columns disappear; surviving ones keep their relative order.
        assertEquals(2, updatedSchema.size());
        assertEquals("col1", updatedSchema.fullName(0));
        assertEquals("col3", updatedSchema.fullName(1));
        Map<String, String> survivingProperties = new HashMap<>();
        survivingProperties.put("prop1", "a");
        survivingProperties.put("prop3", "c");
        assertEquals(survivingProperties, updatedSchema.properties());
    } finally {
        FileUtils.deleteQuietly(schemaFile);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaProvider(org.apache.drill.exec.record.metadata.schema.SchemaProvider) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) File(java.io.File) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) ClusterTest(org.apache.drill.test.ClusterTest) Test(org.junit.Test) UnlikelyTest(org.apache.drill.categories.UnlikelyTest) SqlTest(org.apache.drill.categories.SqlTest)

Example 15 with SchemaProvider

Use of org.apache.drill.exec.record.metadata.schema.SchemaProvider in the Apache Drill project.

The following example is from the class TestSchemaCommands, method testAlterRemoveColumns.

/**
 * Verifies that {@code ALTER SCHEMA ... REMOVE COLUMNS} without a
 * PROPERTIES clause drops only the listed columns and leaves every
 * property untouched.
 */
@Test
public void testAlterRemoveColumns() throws Exception {
    File schemaFile = new File(dirTestWatcher.getTmpDir(), "alter_schema_remove_columns.schema");
    assertFalse(schemaFile.exists());
    try {
        run("create schema (col1 int, col2 varchar, col3 boolean, col4 int) path '%s' "
            + "properties ('prop1' = 'a', 'prop2' = 'b', 'prop3' = 'c', 'prop4' = 'd')", schemaFile.getPath());
        testBuilder()
            .sqlQuery("alter schema path '%s' remove " + "columns (col2, col4) ", schemaFile.getPath())
            .unOrdered()
            .baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Schema for [%s] was updated", schemaFile.getPath()))
            .go();
        SchemaProvider provider = new PathSchemaProvider(new Path(schemaFile.getPath()));
        TupleMetadata updatedSchema = provider.read().getSchema();
        assertEquals(2, updatedSchema.size());
        assertEquals("col1", updatedSchema.fullName(0));
        assertEquals("col3", updatedSchema.fullName(1));
        // No PROPERTIES clause in the ALTER, so all four properties survive.
        assertEquals(4, updatedSchema.properties().size());
    } finally {
        FileUtils.deleteQuietly(schemaFile);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaProvider(org.apache.drill.exec.record.metadata.schema.SchemaProvider) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) File(java.io.File) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) ClusterTest(org.apache.drill.test.ClusterTest) Test(org.junit.Test) UnlikelyTest(org.apache.drill.categories.UnlikelyTest) SqlTest(org.apache.drill.categories.SqlTest)

Aggregations

SchemaProvider (org.apache.drill.exec.record.metadata.schema.SchemaProvider)20 File (java.io.File)19 SqlTest (org.apache.drill.categories.SqlTest)19 UnlikelyTest (org.apache.drill.categories.UnlikelyTest)19 PathSchemaProvider (org.apache.drill.exec.record.metadata.schema.PathSchemaProvider)19 ClusterTest (org.apache.drill.test.ClusterTest)19 Path (org.apache.hadoop.fs.Path)19 Test (org.junit.Test)19 TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata)17 SchemaContainer (org.apache.drill.exec.record.metadata.schema.SchemaContainer)10 LinkedHashMap (java.util.LinkedHashMap)7 HashMap (java.util.HashMap)5 ColumnMetadata (org.apache.drill.exec.record.metadata.ColumnMetadata)3 LocalDate (java.time.LocalDate)1