Search in sources:

Example 1 with SchemaContainer

Use of org.apache.drill.exec.record.metadata.schema.SchemaContainer in the Apache Drill project.

From the class TestSchemaCommands, method testCreateWithoutColumns.

@Test
public void testCreateWithoutColumns() throws Exception {
    // The schema file must not exist before CREATE SCHEMA runs.
    File file = new File(dirTestWatcher.getTmpDir(), "schema_for_create_without_columns.schema");
    assertFalse(file.exists());
    String path = file.getPath();
    try {
        // CREATE SCHEMA with an empty column list but one schema-level property.
        testBuilder()
            .sqlQuery("create schema () path '%s' properties ('prop' = 'val')", path)
            .unOrdered()
            .baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Created schema for [%s]", path))
            .go();
        SchemaProvider provider = new PathSchemaProvider(new Path(path));
        assertTrue(provider.exists());
        SchemaContainer container = provider.read();
        // No table name is stored when the schema is created with an explicit path.
        assertNull(container.getTable());
        TupleMetadata metadata = container.getSchema();
        assertNotNull(metadata);
        // Column list was empty, yet the property must still be persisted.
        assertTrue(metadata.isEmpty());
        assertEquals("val", metadata.property("prop"));
    } finally {
        FileUtils.deleteQuietly(file);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaContainer(org.apache.drill.exec.record.metadata.schema.SchemaContainer) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaProvider(org.apache.drill.exec.record.metadata.schema.SchemaProvider) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) File(java.io.File) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) ClusterTest(org.apache.drill.test.ClusterTest) Test(org.junit.Test) UnlikelyTest(org.apache.drill.categories.UnlikelyTest) SqlTest(org.apache.drill.categories.SqlTest)

Example 2 with SchemaContainer

use of org.apache.drill.exec.record.metadata.schema.SchemaContainer in project drill by apache.

the class TestSchemaCommands method testCreateWithSchemaProperties.

@Test
public void testCreateWithSchemaProperties() throws Exception {
    // Target schema file inside the per-test temporary directory.
    File file = new File(dirTestWatcher.getTmpDir(), "schema_for_create_with_properties.schema");
    assertFalse(file.exists());
    String path = file.getPath();
    try {
        // CREATE SCHEMA with one column and three schema-level properties.
        testBuilder()
            .sqlQuery("create schema (i int not null) path '%s' "
                + "properties ('k1' = 'v1', 'k2' = 'v2', 'k3' = 'v3')", path)
            .unOrdered()
            .baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Created schema for [%s]", path))
            .go();
        SchemaProvider provider = new PathSchemaProvider(new Path(path));
        assertTrue(provider.exists());
        SchemaContainer container = provider.read();
        assertNull(container.getTable());
        TupleMetadata metadata = container.getSchema();
        assertNotNull(metadata);
        // Expected properties, in declaration order.
        Map<String, String> expected = new LinkedHashMap<>();
        expected.put("k1", "v1");
        expected.put("k2", "v2");
        expected.put("k3", "v3");
        assertEquals(expected.size(), metadata.properties().size());
        assertEquals(expected, metadata.properties());
    } finally {
        FileUtils.deleteQuietly(file);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaContainer(org.apache.drill.exec.record.metadata.schema.SchemaContainer) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaProvider(org.apache.drill.exec.record.metadata.schema.SchemaProvider) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) File(java.io.File) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) LinkedHashMap(java.util.LinkedHashMap) ClusterTest(org.apache.drill.test.ClusterTest) Test(org.junit.Test) UnlikelyTest(org.apache.drill.categories.UnlikelyTest) SqlTest(org.apache.drill.categories.SqlTest)

Example 3 with SchemaContainer

use of org.apache.drill.exec.record.metadata.schema.SchemaContainer in project drill by apache.

the class TestSchemaCommands method testCreateWithVariousColumnProperties.

@Test
public void testCreateWithVariousColumnProperties() throws Exception {
    File file = new File(dirTestWatcher.getTmpDir(), "schema_for_create_with_various_column_properties.schema");
    assertFalse(file.exists());
    String path = file.getPath();
    try {
        // Three columns: a default value, a formatted date default, and per-column properties.
        testBuilder()
            .sqlQuery("create schema ( a int not null default '10', "
                + "b date format 'yyyy-MM-dd' default '2017-01-31', "
                + "c varchar properties {'k1' = 'v1', 'k2' = 'v2'}) path '%s'", path)
            .unOrdered()
            .baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Created schema for [%s]", path))
            .go();
        SchemaProvider provider = new PathSchemaProvider(new Path(path));
        assertTrue(provider.exists());
        SchemaContainer container = provider.read();
        assertNull(container.getTable());
        TupleMetadata metadata = container.getSchema();
        assertNotNull(metadata);
        assertEquals(3, metadata.size());
        // Column "a": default decoded as Integer, raw default kept as the original string.
        ColumnMetadata columnA = metadata.metadata("a");
        assertTrue(columnA.decodeDefaultValue() instanceof Integer);
        assertEquals(10, columnA.decodeDefaultValue());
        assertEquals("10", columnA.defaultValue());
        // Column "b": format string preserved, default decoded as LocalDate.
        ColumnMetadata columnB = metadata.metadata("b");
        assertTrue(columnB.decodeDefaultValue() instanceof LocalDate);
        assertEquals("yyyy-MM-dd", columnB.format());
        assertEquals(LocalDate.parse("2017-01-31"), columnB.decodeDefaultValue());
        assertEquals("2017-01-31", columnB.defaultValue());
        // Column "c": column-level properties round-trip intact.
        ColumnMetadata columnC = metadata.metadata("c");
        Map<String, String> expected = new LinkedHashMap<>();
        expected.put("k1", "v1");
        expected.put("k2", "v2");
        assertEquals(expected, columnC.properties());
        // Nothing was declared at schema level, so schema-level properties stay empty.
        assertEquals(0, metadata.properties().size());
    } finally {
        FileUtils.deleteQuietly(file);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaContainer(org.apache.drill.exec.record.metadata.schema.SchemaContainer) ColumnMetadata(org.apache.drill.exec.record.metadata.ColumnMetadata) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaProvider(org.apache.drill.exec.record.metadata.schema.SchemaProvider) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) File(java.io.File) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) LocalDate(java.time.LocalDate) LinkedHashMap(java.util.LinkedHashMap) ClusterTest(org.apache.drill.test.ClusterTest) Test(org.junit.Test) UnlikelyTest(org.apache.drill.categories.UnlikelyTest) SqlTest(org.apache.drill.categories.SqlTest)

Example 4 with SchemaContainer

use of org.apache.drill.exec.record.metadata.schema.SchemaContainer in project drill by apache.

the class TestSchemaCommands method testDescribeDefault.

@Test
public void testDescribeDefault() throws Exception {
    String tableName = "table_describe_default";
    String table = String.format("dfs.tmp.%s", tableName);
    try {
        run("create table %s as select 'a' as c from (values(1))", table);
        run("create schema (col int) for table %s", table);
        // Locate the schema file Drill wrote next to the table data.
        File schemaFile = Paths
            .get(dirTestWatcher.getDfsTestTmpDir().getPath(), tableName, SchemaProvider.DEFAULT_SCHEMA_NAME)
            .toFile();
        SchemaProvider provider = new PathSchemaProvider(new Path(schemaFile.getPath()));
        // Expected DESCRIBE output is the JSON-serialized schema container.
        String expected = PathSchemaProvider.WRITER.writeValueAsString(provider.read());
        testBuilder()
            .sqlQuery("describe schema for table %s", table)
            .unOrdered()
            .baselineColumns("schema")
            .baselineValues(expected)
            .go();
        // "desc" must behave exactly like "describe".
        testBuilder()
            .sqlQuery("describe schema for table %s", table)
            .unOrdered()
            .sqlBaselineQuery("desc schema for table %s", table)
            .go();
    } finally {
        run("drop table if exists %s", table);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaContainer(org.apache.drill.exec.record.metadata.schema.SchemaContainer) SchemaProvider(org.apache.drill.exec.record.metadata.schema.SchemaProvider) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) File(java.io.File) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) ClusterTest(org.apache.drill.test.ClusterTest) Test(org.junit.Test) UnlikelyTest(org.apache.drill.categories.UnlikelyTest) SqlTest(org.apache.drill.categories.SqlTest)

Example 5 with SchemaContainer

use of org.apache.drill.exec.record.metadata.schema.SchemaContainer in project drill by apache.

the class TestSchemaCommands method testSuccessfulCreateOrReplaceForTable.

@Test
public void testSuccessfulCreateOrReplaceForTable() throws Exception {
    String tableName = "table_for_successful_create_or_replace_for_table";
    String table = String.format("dfs.tmp.%s", tableName);
    try {
        run("create table %s as select 'a' as c from (values(1))", table);
        File schemaPath = Paths
            .get(dirTestWatcher.getDfsTestTmpDir().getPath(), tableName, SchemaProvider.DEFAULT_SCHEMA_NAME)
            .toFile();
        assertFalse(schemaPath.exists());
        // First pass: plain CREATE SCHEMA declaring a non-nullable column.
        testBuilder()
            .sqlQuery("create schema (c varchar not null) for table %s", table)
            .unOrdered()
            .baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Created schema for [%s]", table))
            .go();
        SchemaProvider provider = new PathSchemaProvider(new Path(schemaPath.getPath()));
        assertTrue(provider.exists());
        SchemaContainer original = provider.read();
        // Schema created FOR TABLE records the backticked table name.
        assertNotNull(original.getTable());
        assertEquals(String.format("dfs.tmp.`%s`", tableName), original.getTable());
        assertNotNull(original.getSchema());
        ColumnMetadata before = original.getSchema().metadata("c");
        assertFalse(before.isNullable());
        assertEquals(TypeProtos.MinorType.VARCHAR, before.type());
        // Second pass: CREATE OR REPLACE flips the column to nullable.
        testBuilder()
            .sqlQuery("create or replace schema (c varchar) for table %s", table)
            .unOrdered()
            .baselineColumns("ok", "summary")
            .baselineValues(true, String.format("Created schema for [%s]", table))
            .go();
        assertTrue(provider.exists());
        SchemaContainer replaced = provider.read();
        assertNotNull(replaced.getTable());
        assertEquals(String.format("dfs.tmp.`%s`", tableName), replaced.getTable());
        assertNotNull(replaced.getSchema());
        ColumnMetadata after = replaced.getSchema().metadata("c");
        assertTrue(after.isNullable());
        assertEquals(TypeProtos.MinorType.VARCHAR, after.type());
    } finally {
        run("drop table if exists %s", table);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaContainer(org.apache.drill.exec.record.metadata.schema.SchemaContainer) ColumnMetadata(org.apache.drill.exec.record.metadata.ColumnMetadata) SchemaProvider(org.apache.drill.exec.record.metadata.schema.SchemaProvider) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) File(java.io.File) PathSchemaProvider(org.apache.drill.exec.record.metadata.schema.PathSchemaProvider) ClusterTest(org.apache.drill.test.ClusterTest) Test(org.junit.Test) UnlikelyTest(org.apache.drill.categories.UnlikelyTest) SqlTest(org.apache.drill.categories.SqlTest)

Aggregations

File (java.io.File)10 SqlTest (org.apache.drill.categories.SqlTest)10 UnlikelyTest (org.apache.drill.categories.UnlikelyTest)10 PathSchemaProvider (org.apache.drill.exec.record.metadata.schema.PathSchemaProvider)10 SchemaContainer (org.apache.drill.exec.record.metadata.schema.SchemaContainer)10 SchemaProvider (org.apache.drill.exec.record.metadata.schema.SchemaProvider)10 ClusterTest (org.apache.drill.test.ClusterTest)10 Path (org.apache.hadoop.fs.Path)10 Test (org.junit.Test)10 TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata)7 ColumnMetadata (org.apache.drill.exec.record.metadata.ColumnMetadata)3 LinkedHashMap (java.util.LinkedHashMap)2 LocalDate (java.time.LocalDate)1