Search in sources :

Example 16 with SourceTable

Use of org.apache.hadoop.hive.metastore.api.SourceTable in project hive by apache.

The class TestTablesCreateDropAlterTruncate defines the method tablesInOtherCatalogs.

/**
 * End-to-end test of table create/get/alter/truncate/drop against a non-default catalog.
 *
 * <p>Creates a dedicated catalog and database, then four tables covering the interesting
 * variants (custom location, partitioned, materialized view backed by a source table) and
 * verifies that every catalog-aware client API operates on them, while the same calls routed
 * to the default catalog either fail or see nothing.
 *
 * @throws TException        on any metastore RPC failure
 * @throws URISyntaxException if a storage-descriptor location is not a valid URI
 */
@Test
public void tablesInOtherCatalogs() throws TException, URISyntaxException {
    String catName = "create_etc_tables_in_other_catalogs";
    Catalog cat = new CatalogBuilder().setName(catName).setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)).build();
    client.createCatalog(cat);
    String dbName = "db_in_other_catalog";
    // For this one don't specify a location to make sure it gets put in the catalog directory
    Database db = new DatabaseBuilder().setName(dbName).setCatalogName(catName).create(client, metaStore.getConf());
    // Source table referenced by the materialized view created below.
    Table table = new TableBuilder().inDb(db).setTableName("mvSource").addCol("col1_1", ColumnType.STRING_TYPE_NAME).addCol("col2_2", ColumnType.INT_TYPE_NAME).build(metaStore.getConf());
    client.createTable(table);
    SourceTable sourceTable = createSourceTable(table);
    String[] tableNames = new String[4];
    for (int i = 0; i < tableNames.length; i++) {
        tableNames[i] = "table_in_other_catalog_" + i;
        TableBuilder builder = new TableBuilder().inDb(db).setTableName(tableNames[i]).addCol("col1_" + i, ColumnType.STRING_TYPE_NAME).addCol("col2_" + i, ColumnType.INT_TYPE_NAME);
        // Make one have a non-standard location
        if (i == 0) {
            builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i]));
        }
        // Make one partitioned
        if (i == 2) {
            builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME);
        }
        // Make one a materialized view
        if (i == 3) {
            builder.setType(TableType.MATERIALIZED_VIEW.name()).setRewriteEnabled(true).addMaterializedViewReferencedTable(sourceTable);
        }
        client.createTable(builder.build(metaStore.getConf()));
    }
    // Add partitions for the partitioned table
    String[] partVals = new String[3];
    Table partitionedTable = client.getTable(catName, dbName, tableNames[2]);
    for (int i = 0; i < partVals.length; i++) {
        partVals[i] = "part" + i;
        new PartitionBuilder().inTable(partitionedTable).addValue(partVals[i]).addToTable(client, metaStore.getConf());
    }
    // Get tables, make sure the locations are correct
    for (int i = 0; i < tableNames.length; i++) {
        Table t = client.getTable(catName, dbName, tableNames[i]);
        Assert.assertEquals(catName, t.getCatName());
        // Table 0 was given an explicit location; the rest default into <catalog>/<db>.db/<table>.
        String expectedLocation = (i < 1) ? new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString() : new File(cat.getLocationUri() + File.separatorChar + dbName + ".db", tableNames[i]).toURI().toString();
        // File.toURI() appends a trailing slash for directories, so normalize the SD location.
        Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/");
        File dir = new File(new URI(t.getSd().getLocation()).getPath());
        Assert.assertTrue(dir.exists() && dir.isDirectory());
    }
    // Make sure getting table in the wrong catalog does not work
    try {
        // Return value deliberately discarded; only the thrown exception matters here.
        client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]);
        Assert.fail();
    } catch (NoSuchObjectException e) {
    // NOP
    }
    // test getAllTables
    Set<String> fetchedNames = new HashSet<>(client.getAllTables(catName, dbName));
    // +1 accounts for the mvSource table created before the loop.
    Assert.assertEquals(tableNames.length + 1, fetchedNames.size());
    for (String tableName : tableNames) {
        Assert.assertTrue(fetchedNames.contains(tableName));
    }
    fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME));
    for (String tableName : tableNames) {
        Assert.assertFalse(fetchedNames.contains(tableName));
    }
    // test getMaterializedViewsForRewriting
    List<String> materializedViews = client.getMaterializedViewsForRewriting(catName, dbName);
    Assert.assertEquals(1, materializedViews.size());
    Assert.assertEquals(tableNames[3], materializedViews.get(0));
    fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME));
    Assert.assertFalse(fetchedNames.contains(tableNames[3]));
    // test getTableObjectsByName
    List<Table> fetchedTables = client.getTableObjectsByName(catName, dbName, Arrays.asList(tableNames[0], tableNames[1]));
    Assert.assertEquals(2, fetchedTables.size());
    Collections.sort(fetchedTables);
    Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName());
    Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName());
    fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME, Arrays.asList(tableNames[0], tableNames[1]));
    Assert.assertEquals(0, fetchedTables.size());
    // Test altering the table
    Table t = client.getTable(catName, dbName, tableNames[0]).deepCopy();
    t.getParameters().put("test", "test");
    client.alter_table(catName, dbName, tableNames[0], t);
    t = client.getTable(catName, dbName, tableNames[0]).deepCopy();
    Assert.assertEquals("test", t.getParameters().get("test"));
    // Alter a table in the wrong catalog
    try {
        client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t);
        Assert.fail();
    } catch (InvalidOperationException e) {
    // NOP
    }
    // Update the metadata for the materialized view: register a second source table.
    CreationMetadata cm = client.getTable(catName, dbName, tableNames[3]).getCreationMetadata();
    Table table1 = new TableBuilder().inDb(db).setTableName("mvSource2").addCol("col1_1", ColumnType.STRING_TYPE_NAME).addCol("col2_2", ColumnType.INT_TYPE_NAME).build(metaStore.getConf());
    client.createTable(table1);
    sourceTable = createSourceTable(table1);
    cm.addToTablesUsed(TableName.getDbTable(sourceTable.getTable().getDbName(), sourceTable.getTable().getTableName()));
    cm.addToSourceTables(sourceTable);
    cm.unsetMaterializationTime();
    client.updateCreationMetadata(catName, dbName, tableNames[3], cm);
    List<String> partNames = new ArrayList<>();
    for (String partVal : partVals) {
        partNames.add("pcol1=" + partVal);
    }
    // Truncate a table
    client.truncateTable(catName, dbName, tableNames[0], partNames);
    // Truncate a table in the wrong catalog
    try {
        client.truncateTable(DEFAULT_DATABASE_NAME, tableNames[0], partNames);
        Assert.fail();
    } catch (NoSuchObjectException | TApplicationException e) {
    // NOP
    }
    // Drop a table from the wrong catalog
    try {
        client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], true, false);
        Assert.fail();
    } catch (NoSuchObjectException | TApplicationException e) {
    // NOP
    }
    // Should ignore the failure (ignoreUnknownTab = true)
    client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], false, true);
    // Have to do this in reverse order so that we drop the materialized view first.
    for (int i = tableNames.length - 1; i >= 0; i--) {
        t = client.getTable(catName, dbName, tableNames[i]);
        File tableDir = new File(new URI(t.getSd().getLocation()).getPath());
        Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
        if (tableNames[i].equalsIgnoreCase(tableNames[0])) {
            // deleteData = false: the directory must survive the drop.
            client.dropTable(catName, dbName, tableNames[i], false, false);
            Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
        } else {
            client.dropTable(catName, dbName, tableNames[i]);
            Assert.assertFalse(tableDir.exists());
        }
    }
    client.dropTable(table.getCatName(), table.getDbName(), table.getTableName());
    client.dropTable(table1.getCatName(), table1.getDbName(), table1.getTableName());
    Assert.assertEquals(0, client.getAllTables(catName, dbName).size());
}
Also used : SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) Table(org.apache.hadoop.hive.metastore.api.Table) TestHiveMetaStore.createSourceTable(org.apache.hadoop.hive.metastore.TestHiveMetaStore.createSourceTable) ArrayList(java.util.ArrayList) SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) TestHiveMetaStore.createSourceTable(org.apache.hadoop.hive.metastore.TestHiveMetaStore.createSourceTable) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) URI(java.net.URI) Catalog(org.apache.hadoop.hive.metastore.api.Catalog) TApplicationException(org.apache.thrift.TApplicationException) DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) CreationMetadata(org.apache.hadoop.hive.metastore.api.CreationMetadata) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) CatalogBuilder(org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test) MetastoreCheckinTest(org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)

Example 17 with SourceTable

Use of org.apache.hadoop.hive.metastore.api.SourceTable in project hive by apache.

The class ObjectStore defines the method convertToCreationMetadata.

/**
 * Converts the model-layer {@code MCreationMetadata} of a materialized view into its
 * Thrift API counterpart, carrying over the qualified names of the tables used, the
 * per-table {@code SourceTable} entries, the materialization time, and (when present)
 * the valid transaction list.
 *
 * @param s the model object to convert; may be {@code null}
 * @return the API-level creation metadata, or {@code null} when {@code s} is {@code null}
 * @throws MetaException on conversion failure
 */
private CreationMetadata convertToCreationMetadata(MCreationMetadata s) throws MetaException {
    if (s == null) {
        return null;
    }
    Set<String> qualifiedNames = new HashSet<>();
    List<SourceTable> converted = new ArrayList<>(s.getTables().size());
    for (MMVSource source : s.getTables()) {
        String dbName = source.getTable().getDatabase().getName();
        String tblName = source.getTable().getTableName();
        qualifiedNames.add(Warehouse.getQualifiedName(dbName, tblName));
        converted.add(convertToSourceTable(source, s.getCatalogName()));
    }
    CreationMetadata result =
        new CreationMetadata(s.getCatalogName(), s.getDbName(), s.getTblName(), qualifiedNames);
    result.setMaterializationTime(s.getMaterializationTime());
    // The transaction list is optional on the model side; only set it when available.
    String txnList = s.getTxnList();
    if (txnList != null) {
        result.setValidTxnList(txnList);
    }
    result.setSourceTables(converted);
    return result;
}
Also used : MCreationMetadata(org.apache.hadoop.hive.metastore.model.MCreationMetadata) CreationMetadata(org.apache.hadoop.hive.metastore.api.CreationMetadata) ArrayList(java.util.ArrayList) SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable) MMVSource(org.apache.hadoop.hive.metastore.model.MMVSource) HashSet(java.util.HashSet)

Example 18 with SourceTable

Use of org.apache.hadoop.hive.metastore.api.SourceTable in project hive by apache.

The class TestHiveMetaStore defines the method createSourceTable.

/**
 * Builds a {@link SourceTable} wrapping the given table, with all change counters
 * (inserted/updated/deleted) initialized to zero.
 *
 * @param table the table to wrap as a materialized-view source
 * @return a freshly constructed {@code SourceTable} for {@code table}
 */
public static SourceTable createSourceTable(Table table) {
    SourceTable result = new SourceTable();
    result.setTable(table);
    // A brand-new source table has seen no row changes yet.
    result.setInsertedCount(0L);
    result.setUpdatedCount(0L);
    result.setDeletedCount(0L);
    return result;
}
Also used : SourceTable(org.apache.hadoop.hive.metastore.api.SourceTable)

Aggregations

SourceTable (org.apache.hadoop.hive.metastore.api.SourceTable)18 Table (org.apache.hadoop.hive.metastore.api.Table)8 HashSet (java.util.HashSet)7 ArrayList (java.util.ArrayList)6 TableName (org.apache.hadoop.hive.common.TableName)4 CreationMetadata (org.apache.hadoop.hive.metastore.api.CreationMetadata)4 Table (org.apache.hadoop.hive.ql.metadata.Table)4 HashMap (java.util.HashMap)3 HiveException (org.apache.hadoop.hive.ql.metadata.HiveException)3 IOException (java.io.IOException)2 List (java.util.List)2 Map (java.util.Map)2 Set (java.util.Set)2 Collectors (java.util.stream.Collectors)2 ValidTxnWriteIdList (org.apache.hadoop.hive.common.ValidTxnWriteIdList)2 TableType (org.apache.hadoop.hive.metastore.TableType)2 Warehouse (org.apache.hadoop.hive.metastore.Warehouse)2 Materialization (org.apache.hadoop.hive.metastore.api.Materialization)2 MTable (org.apache.hadoop.hive.metastore.model.MTable)2 MVersionTable (org.apache.hadoop.hive.metastore.model.MVersionTable)2