Example 81 with TableBuilder

Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.

From class TestMetaStoreMultipleEncryptionZones, method truncateTableWithSameEncryptionZones:

@Test
public void truncateTableWithSameEncryptionZones() throws Throwable {
    String dbName = "encrdb9";
    String tblName1 = "encrtbl1";
    String tblName2 = "encrtbl2";
    String typeName = "Person";
    client.dropTable(dbName, tblName1);
    client.dropTable(dbName, tblName2);
    silentDropDatabase(dbName);
    new DatabaseBuilder().setName(dbName).addParam("repl.source.for", "1, 2, 3").create(client, hiveConf);
    client.dropType(typeName);
    Type typ1 = new Type();
    typ1.setName(typeName);
    typ1.setFields(new ArrayList<>(2));
    typ1.getFields().add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
    typ1.getFields().add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
    client.createType(typ1);
    new TableBuilder().setDbName(dbName).setTableName(tblName1).setCols(typ1.getFields()).setNumBuckets(1).addBucketCol("name").addStorageDescriptorParam("test_param_1", "Use this for comments etc").create(client, hiveConf);
    Table tbl = client.getTable(dbName, tblName1);
    Assert.assertNotNull(tbl);
    new TableBuilder().setDbName(dbName).setTableName(tblName2).setCols(typ1.getFields()).setNumBuckets(1).addBucketCol("name").addStorageDescriptorParam("test_param_1", "Use this for comments etc").create(client, hiveConf);
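    // Rebuild the database's warehouse directory and make the whole directory
    // a single encryption zone shared by both tables.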
    Path dirDb = new Path(warehouse.getWhRoot(), dbName + ".db");
    warehouseFs.delete(dirDb, true);
    warehouseFs.mkdirs(dirDb);
    EncryptionZoneUtils.createEncryptionZone(dirDb, "test_key_db", conf);
    Path dirTbl1 = new Path(dirDb, tblName1);
    warehouseFs.mkdirs(dirTbl1);
    Path part11 = new Path(dirTbl1, "part1");
    createFile(part11, "testClearer11");
    Path dirTbl2 = new Path(dirDb, tblName2);
    warehouseFs.mkdirs(dirTbl2);
    Path part12 = new Path(dirTbl2, "part1");
    createFile(part12, "testClearer12");
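    // Truncate is expected to succeed even though the data sits in an encryption
    // zone: no "can't be moved from encryption zone" MetaException, the data file
    // is gone, and the table itself still exists.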
    boolean exceptionThrown = false;
    try {
        client.truncateTable(dbName, tblName1, null);
    } catch (MetaException e) {
        exceptionThrown = true;
        assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
    }
    assertFalse(exceptionThrown);
    assertFalse(warehouseFs.exists(part11));
    try {
        client.getTable(dbName, tblName1);
    } catch (NoSuchObjectException e) {
        exceptionThrown = true;
    }
    assertFalse(exceptionThrown);
    try {
        client.truncateTable(dbName, tblName2, null);
    } catch (MetaException e) {
        exceptionThrown = true;
        assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
    }
    assertFalse(exceptionThrown);
    assertFalse(warehouseFs.exists(part12));
    try {
        client.getTable(dbName, tblName2);
    } catch (NoSuchObjectException e) {
        exceptionThrown = true;
    }
    assertFalse(exceptionThrown);
}
Also used: Path(org.apache.hadoop.fs.Path) DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) Type(org.apache.hadoop.hive.metastore.api.Type) RecycleType(org.apache.hadoop.hive.metastore.ReplChangeManager.RecycleType) Table(org.apache.hadoop.hive.metastore.api.Table) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) Test(org.junit.Test)
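Distilled from the example above, a minimal sketch of the recurring builder idiom (the names encrdemo and demo_tbl are hypothetical; only builder calls that appear in these examples are used):

// Create a database, then a bucketed table in it, through the metastore client.
new DatabaseBuilder().setName("encrdemo").create(client, hiveConf);
new TableBuilder()
    .setDbName("encrdemo")
    .setTableName("demo_tbl")
    .addCol("name", ColumnType.STRING_TYPE_NAME)
    .addCol("income", ColumnType.INT_TYPE_NAME)
    .setNumBuckets(1)
    .addBucketCol("name")
    // builds the Table and creates it through the client in one call
    .create(client, hiveConf);
Table tbl = client.getTable("encrdemo", "demo_tbl");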

Example 82 with TableBuilder

Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.

From class TestTenantBasedStorageHierarchy, method createTableWithCapabilities:

private Table createTableWithCapabilities(Map<String, Object> props) throws Exception {
    String catalog = (String) props.getOrDefault("CATALOG", MetaStoreUtils.getDefaultCatalog(conf));
    String dbName = (String) props.getOrDefault("DBNAME", "simpdb");
    String tblName = (String) props.getOrDefault("TBLNAME", "test_table");
    TableType type = (TableType) props.getOrDefault("TBLTYPE", TableType.MANAGED_TABLE);
    int buckets = ((Integer) props.getOrDefault("BUCKETS", -1)).intValue();
    String properties = (String) props.getOrDefault("PROPERTIES", "");
    String location = (String) (props.get("LOCATION"));
    boolean dropDb = ((Boolean) props.getOrDefault("DROPDB", Boolean.TRUE)).booleanValue();
    int partitionCount = ((Integer) props.getOrDefault("PARTITIONS", 0)).intValue();
    final String typeName = "Person";
    if (type == TableType.EXTERNAL_TABLE) {
        if (!properties.contains("EXTERNAL=TRUE")) {
            // String.concat returns a new String, so the result must be assigned back
            properties = properties.isEmpty() ? "EXTERNAL=TRUE" : properties + ";EXTERNAL=TRUE";
        }
    }
    Map<String, String> table_params = new HashMap<>();
    if (properties.length() > 0) {
        String[] propArray = properties.split(";");
        for (String prop : propArray) {
            String[] keyValue = prop.split("=");
            // Skip empty or malformed entries instead of throwing ArrayIndexOutOfBoundsException
            if (keyValue.length == 2) {
                table_params.put(keyValue[0], keyValue[1]);
            }
        }
    }
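    // Fetch the catalog if it exists; otherwise create a non-Hive catalog
    // rooted under the external warehouse directory.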
    Catalog cat = null;
    try {
        cat = client.getCatalog(catalog);
    } catch (NoSuchObjectException e) {
        LOG.debug("Catalog does not exist, creating a new one");
        try {
            if (cat == null) {
                cat = new Catalog();
                cat.setName(catalog.toLowerCase());
                Warehouse wh = new Warehouse(conf);
                cat.setLocationUri(wh.getWhRootExternal().toString() + File.separator + catalog);
                cat.setDescription("Non-hive catalog");
                client.createCatalog(cat);
                LOG.debug("Catalog " + catalog + " created");
            }
        } catch (Exception ce) {
            LOG.warn("Catalog " + catalog + " could not be created");
        }
    } catch (Exception e) {
        LOG.error("Creation of a new catalog failed, aborting test");
        throw e;
    }
    try {
        client.dropTable(dbName, tblName);
    } catch (Exception e) {
        LOG.info("Drop table failed for " + dbName + "." + tblName);
    }
    try {
        if (dropDb)
            silentDropDatabase(dbName);
    } catch (Exception e) {
        LOG.info("Drop database failed for " + dbName);
    }
    if (dropDb)
        new DatabaseBuilder().setName(dbName).setCatalogName(catalog).create(client, conf);
    try {
        client.dropType(typeName);
    } catch (Exception e) {
        LOG.info("Drop type failed for " + typeName);
    }
    Type typ1 = new Type();
    typ1.setName(typeName);
    typ1.setFields(new ArrayList<>(2));
    typ1.getFields().add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
    typ1.getFields().add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
    client.createType(typ1);
    // Apply location and bucketing only when they are actually supplied.
    TableBuilder builder = new TableBuilder().setCatName(catalog).setDbName(dbName).setTableName(tblName).setCols(typ1.getFields()).setType(type.name()).setTableParams(table_params).addStorageDescriptorParam("test_param_1", "Use this for comments etc");
    if (location != null)
        builder.setLocation(location);
    if (buckets > 0)
        builder.setNumBuckets(buckets).addBucketCol("name");
    if (partitionCount > 0) {
        builder.addPartCol("partcol", "string");
    }
    if (type == TableType.MANAGED_TABLE) {
        if (properties.contains("transactional=true") && !properties.contains("transactional_properties=insert_only")) {
            builder.setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat");
            builder.setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
            builder.setSerdeLib("org.apache.hadoop.hive.ql.io.orc.OrcSerde");
            builder.addStorageDescriptorParam("inputFormat", "org.apache.hadoop.hive.ql.io.orc.OrcInputFormat");
            builder.addStorageDescriptorParam("outputFormat", "org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat");
        }
    }
    Table tbl = builder.create(client, conf);
    LOG.info("Table " + tbl.getTableName() + " created:type=" + tbl.getTableType());
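    // Create one partition per value 1..partitionCount on the single 'partcol' column.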
    if (partitionCount > 0) {
        List<Partition> partitions = new ArrayList<>();
        List<List<String>> partValues = new ArrayList<>();
        for (int i = 1; i <= partitionCount; i++) {
            partValues.add(Lists.newArrayList("" + i));
        }
        for (List<String> vals : partValues) {
            addPartition(client, tbl, vals);
        }
    }
    if (isThriftClient) {
        // the createTable() above does not update the location in the 'tbl'
        // object when the client is a thrift client and the code below relies
        // on the location being present in the 'tbl' object - so get the table
        // from the metastore
        tbl = client.getTable(catalog, dbName, tblName);
        LOG.info("Fetched Table " + tbl.getTableName() + " created:type=" + tbl.getTableType());
    }
    return tbl;
}
Also used: HashMap(java.util.HashMap) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) List(java.util.List) Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) Catalog(org.apache.hadoop.hive.metastore.api.Catalog) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) TException(org.apache.thrift.TException) IOException(java.io.IOException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) Type(org.apache.hadoop.hive.metastore.api.Type)

Example 83 with TableBuilder

Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.

From class TestAcidTableSetup, method testTransactionalValidation:

@Test
public void testTransactionalValidation() throws Throwable {
    String dbName = "acidDb";
    silentDropDatabase(dbName);
    Database db = new Database();
    db.setName(dbName);
    client.createDatabase(db);
    String tblName = "acidTable";
    Map<String, String> fields = new HashMap<>();
    fields.put("name", ColumnType.STRING_TYPE_NAME);
    fields.put("income", ColumnType.INT_TYPE_NAME);
    Type type = createType("Person1", fields);
    Map<String, String> params = new HashMap<>();
    params.put("transactional", "");
    // Fail - No "transactional" property is specified
    try {
        Table t = new TableBuilder().setDbName(dbName).setTableName(tblName).setTableParams(params).setCols(type.getFields()).build(conf);
        client.createTable(t);
        fail("Expected exception");
    } catch (MetaException e) {
        assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true': acidDb.acidTable", e.getMessage());
    }
    // Fail - "transactional" property is set to an invalid value
    try {
        params.clear();
        params.put("transactional", "foobar");
        Table t = new TableBuilder().setDbName(dbName).setTableName(tblName).setTableParams(params).setCols(type.getFields()).build(conf);
        client.createTable(t);
        fail("Expected exception");
    } catch (MetaException e) {
        assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true': acidDb.acidTable", e.getMessage());
    }
    // Fail - "transactional" is set to true, but the table is not bucketed
    try {
        params.clear();
        params.put("transactional", "true");
        Table t = new TableBuilder().setDbName(dbName).setTableName(tblName).setTableParams(params).setCols(type.getFields()).build(conf);
        client.createTable(t);
        fail("Expected exception");
    } catch (MetaException e) {
        assertEquals("The table must be stored using an ACID compliant format (such as ORC): acidDb.acidTable", e.getMessage());
    }
    List<String> bucketCols = new ArrayList<>();
    bucketCols.add("income");
    // Fail - "transactional" is set to true, and the table is bucketed, but doesn't use ORC
    try {
        params.clear();
        params.put("transactional", "true");
        Table t = new TableBuilder().setDbName(dbName).setTableName(tblName).setTableParams(params).setCols(type.getFields()).setBucketCols(bucketCols).build(conf);
        client.createTable(t);
        fail("Expected exception");
    } catch (MetaException e) {
        assertEquals("The table must be stored using an ACID compliant format (such as ORC): acidDb.acidTable", e.getMessage());
    }
    // Succeed - "transactional" is set to true, and the table is bucketed, and uses ORC
    params.clear();
    params.put("transactional", "true");
    Table t = new TableBuilder().setDbName(dbName).setTableName(tblName).setTableParams(params).setCols(type.getFields()).setBucketCols(bucketCols).setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").build(conf);
    client.createTable(t);
    assertTrue("CREATE TABLE should succeed", "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
    // Fail - trying to set "transactional" to "false" is not allowed
    try {
        params.clear();
        params.put("transactional", "false");
        t = new Table();
        t.setParameters(params);
        t.setDbName(dbName);
        t.setTableName(tblName);
        client.alter_table(dbName, tblName, t);
        fail("Expected exception");
    } catch (MetaException e) {
        assertEquals("TBLPROPERTIES with 'transactional'='true' cannot be unset: acidDb.acidTable", e.getMessage());
    }
    // Fail - trying to set "transactional" to "true" but doesn't satisfy bucketing and Input/OutputFormat requirement
    try {
        tblName += "1";
        params.clear();
        t = new TableBuilder().setDbName(dbName).setTableName(tblName).setCols(type.getFields()).setInputFormat("org.apache.hadoop.mapred.FileInputFormat").build(conf);
        client.createTable(t);
        params.put("transactional", "true");
        t.setParameters(params);
        client.alter_table(dbName, tblName, t);
        fail("Expected exception");
    } catch (MetaException e) {
        assertEquals("The table must be stored using an ACID compliant format (such as ORC): acidDb.acidTable1", e.getMessage());
    }
    // Succeed - trying to set "transactional" to "true", and satisfies bucketing and Input/OutputFormat requirement
    tblName += "2";
    params.clear();
    t = new TableBuilder().setDbName(dbName).setTableName(tblName).setCols(type.getFields()).setNumBuckets(1).setBucketCols(bucketCols).setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat").setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat").build(conf);
    client.createTable(t);
    params.put("transactional", "true");
    t.setParameters(params);
    client.alter_table(dbName, tblName, t);
    assertTrue("ALTER TABLE should succeed", "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
}
Also used: Type(org.apache.hadoop.hive.metastore.api.Type) Table(org.apache.hadoop.hive.metastore.api.Table) HashMap(java.util.HashMap) Database(org.apache.hadoop.hive.metastore.api.Database) ArrayList(java.util.ArrayList) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) Test(org.junit.Test)
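For quick reference, the successful cases above reduce to the following shape (a sketch; the table name acidOk and the Collections helpers are illustrative, not from the test):

// A table passes transactional validation when it has bucket columns and an
// ACID-capable (ORC) input/output format, with transactional=true in its params.
Table ok = new TableBuilder()
    .setDbName("acidDb")
    .setTableName("acidOk")
    .setCols(type.getFields())
    .setBucketCols(Collections.singletonList("income"))
    .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
    .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
    .setTableParams(Collections.singletonMap("transactional", "true"))
    .build(conf);
client.createTable(ok);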

Example 84 with TableBuilder

Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.

From class TestMetaStoreMultipleEncryptionZones, method truncateTableWithDifferentEncryptionZonesDifferentKey:

@Test
public void truncateTableWithDifferentEncryptionZonesDifferentKey() throws Throwable {
    String dbName1 = "encrdb1";
    String dbName2 = "encrdb2";
    String tblName1 = "encrtbl1";
    String tblName2 = "encrtbl2";
    String typeName = "Person";
    silentDropDatabase(dbName1);
    silentDropDatabase(dbName2);
    new DatabaseBuilder().setName(dbName1).addParam("repl.source.for", "1, 2, 3").create(client, hiveConf);
    new DatabaseBuilder().setName(dbName2).addParam("repl.source.for", "1, 2, 3").create(client, hiveConf);
    client.dropType(typeName);
    Type typ1 = new Type();
    typ1.setName(typeName);
    typ1.setFields(new ArrayList<>(2));
    typ1.getFields().add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
    typ1.getFields().add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
    client.createType(typ1);
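    // Give each database its own warehouse directory, each wrapped in its own
    // encryption zone.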
    Path dirDb1 = new Path(warehouse.getWhRoot(), dbName1 + ".db");
    warehouseFs.mkdirs(dirDb1);
    EncryptionZoneUtils.createEncryptionZone(dirDb1, "test_key_db", conf);
    Path dirTbl1 = new Path(dirDb1, tblName1);
    warehouseFs.mkdirs(dirTbl1);
    Path part11 = new Path(dirTbl1, "part1");
    createFile(part11, "testClearer11");
    Path dirDb2 = new Path(warehouse.getWhRoot(), dbName2 + ".db");
    warehouseFs.mkdirs(dirDb2);
    EncryptionZoneUtils.createEncryptionZone(dirDb2, "test_key_db", conf);
    Path dirTbl2 = new Path(dirDb2, tblName2);
    warehouseFs.mkdirs(dirTbl2);
    Path part12 = new Path(dirTbl2, "part1");
    createFile(part12, "testClearer12");
    new TableBuilder().setDbName(dbName1).setTableName(tblName1).setCols(typ1.getFields()).setNumBuckets(1).addBucketCol("name").addStorageDescriptorParam("test_param_1", "Use this for comments etc").create(client, hiveConf);
    Table tbl = client.getTable(dbName1, tblName1);
    Assert.assertNotNull(tbl);
    new TableBuilder().setDbName(dbName2).setTableName(tblName2).setCols(typ1.getFields()).setNumBuckets(1).addBucketCol("name").addStorageDescriptorParam("test_param_1", "Use this for comments etc").create(client, hiveConf);
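    // As in the same-zone case, both truncates are expected to succeed without
    // a MetaException, removing the data files while keeping the tables.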
    boolean exceptionThrown = false;
    try {
        client.truncateTable(dbName1, tblName1, null);
    } catch (MetaException e) {
        exceptionThrown = true;
        assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
    }
    assertFalse(exceptionThrown);
    assertFalse(warehouseFs.exists(part11));
    assertNotNull(client.getTable(dbName1, tblName1));
    exceptionThrown = false;
    try {
        client.truncateTable(dbName2, tblName2, null);
    } catch (MetaException e) {
        exceptionThrown = true;
        assertTrue(e.getMessage().contains("can't be moved from encryption zone"));
    }
    assertFalse(exceptionThrown);
    assertFalse(warehouseFs.exists(part12));
    assertNotNull(client.getTable(dbName2, tblName2));
}
Also used: Path(org.apache.hadoop.fs.Path) DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) Type(org.apache.hadoop.hive.metastore.api.Type) RecycleType(org.apache.hadoop.hive.metastore.ReplChangeManager.RecycleType) Table(org.apache.hadoop.hive.metastore.api.Table) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) Test(org.junit.Test)

Example 85 with TableBuilder

Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.

From class TestAppendPartitions, method otherCatalog:

@Test
@ConditionalIgnoreOnSessionHiveMetastoreClient
public void otherCatalog() throws TException {
    String catName = "append_partition_catalog";
    Catalog cat = new CatalogBuilder().setName(catName).setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)).build();
    client.createCatalog(cat);
    String dbName = "append_partition_database_in_other_catalog";
    Database db = new DatabaseBuilder().setName(dbName).setCatalogName(catName).create(client, metaStore.getConf());
    String tableName = "table_in_other_catalog";
    new TableBuilder().inDb(db).setTableName(tableName).addCol("id", "int").addCol("name", "string").addPartCol("partcol", "string").create(client, metaStore.getConf());
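    // appendPartition accepts either a list of partition values or a
    // 'partcol=value' partition name; both forms are exercised below.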
    Partition created = client.appendPartition(catName, dbName, tableName, Collections.singletonList("a1"));
    Assert.assertEquals(1, created.getValuesSize());
    Assert.assertEquals("a1", created.getValues().get(0));
    Partition fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
    created.setWriteId(fetched.getWriteId());
    Assert.assertEquals(created, fetched);
    created = client.appendPartition(catName, dbName, tableName, "partcol=a2");
    Assert.assertEquals(1, created.getValuesSize());
    Assert.assertEquals("a2", created.getValues().get(0));
    fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
    created.setWriteId(fetched.getWriteId());
    Assert.assertEquals(created, fetched);
}
Also used: DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) Partition(org.apache.hadoop.hive.metastore.api.Partition) CatalogBuilder(org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) Catalog(org.apache.hadoop.hive.metastore.api.Catalog) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)

Aggregations

TableBuilder (org.apache.hadoop.hive.metastore.client.builder.TableBuilder): 136
Table (org.apache.hadoop.hive.metastore.api.Table): 111
Test (org.junit.Test): 92
DatabaseBuilder (org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder): 81
Database (org.apache.hadoop.hive.metastore.api.Database): 40
Partition (org.apache.hadoop.hive.metastore.api.Partition): 36
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 35
PartitionBuilder (org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder): 33
MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest): 31
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 30
ArrayList (java.util.ArrayList): 28
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 27
SourceTable (org.apache.hadoop.hive.metastore.api.SourceTable): 25
CatalogBuilder (org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder): 23
Path (org.apache.hadoop.fs.Path): 19
Catalog (org.apache.hadoop.hive.metastore.api.Catalog): 19
Type (org.apache.hadoop.hive.metastore.api.Type): 19
InvalidOperationException (org.apache.hadoop.hive.metastore.api.InvalidOperationException): 17
TException (org.apache.thrift.TException): 16
IOException (java.io.IOException): 15