Search in sources:

Example 56 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

From the class NonCatCallsWithCatalog, method alterPartitions.

/**
 * Verifies alter_partition / alter_partitions against a database whose catalog
 * name has been unset (i.e. non-catalog-aware client calls).  Covers three
 * paths: a single-partition parameter update, a bulk alter that changes
 * lastAccessTime on one partition and the SD location on another, and a
 * single-partition alter carrying an explicit EnvironmentContext.
 */
@Test
public void alterPartitions() throws TException {
    String dbName = "alter_partition_database_in_other_catalog";
    Database db = new DatabaseBuilder().setName(dbName).build(conf);
    db.unsetCatalogName();
    client.createDatabase(db);
    String tableName = "table_in_other_catalog";
    Table table = new TableBuilder().inDb(db).setTableName(tableName).addCol("id", "int").addCol("name", "string").addPartCol("partcol", "string").build(conf);
    table.unsetCatName();
    client.createTable(table);
    Partition[] parts = new Partition[5];
    // Iterate on parts.length (not a hard-coded 5) for consistency with the
    // sibling addPartitions/dropPartitions tests; the loop stays correct if
    // the array size ever changes.
    for (int i = 0; i < parts.length; i++) {
        parts[i] = new PartitionBuilder().inTable(table).addValue("a" + i).setLocation(MetaStoreTestUtils.getTestWarehouseDir("b" + i)).build(conf);
        parts[i].unsetCatName();
    }
    client.add_partitions(Arrays.asList(parts));
    // Single-partition alter: add a parameter and verify it round-trips.
    Partition newPart = client.getPartition(dbName, tableName, Collections.singletonList("a0"));
    newPart.getParameters().put("test_key", "test_value");
    client.alter_partition(dbName, tableName, newPart);
    Partition fetched = client.getPartition(dbName, tableName, Collections.singletonList("a0"));
    Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
    // Bulk alter: lastAccessTime on "a1", storage-descriptor location on "a2".
    newPart = client.getPartition(dbName, tableName, Collections.singletonList("a1"));
    newPart.setLastAccessTime(3);
    Partition newPart1 = client.getPartition(dbName, tableName, Collections.singletonList("a2"));
    newPart1.getSd().setLocation(MetaStoreTestUtils.getTestWarehouseDir("somewhere"));
    client.alter_partitions(dbName, tableName, Arrays.asList(newPart, newPart1));
    fetched = client.getPartition(dbName, tableName, Collections.singletonList("a1"));
    Assert.assertEquals(3L, fetched.getLastAccessTime());
    fetched = client.getPartition(dbName, tableName, Collections.singletonList("a2"));
    Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere"));
    // Alter with an explicit EnvironmentContext attached to the call.
    newPart = client.getPartition(dbName, tableName, Collections.singletonList("a4"));
    newPart.getParameters().put("test_key", "test_value");
    EnvironmentContext ec = new EnvironmentContext();
    ec.setProperties(Collections.singletonMap("a", "b"));
    client.alter_partition(dbName, tableName, newPart, ec);
    fetched = client.getPartition(dbName, tableName, Collections.singletonList("a4"));
    Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
    client.dropDatabase(dbName, true, true, true);
}
Also used : EnvironmentContext(org.apache.hadoop.hive.metastore.api.EnvironmentContext) DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) Test(org.junit.Test)

Example 57 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

From the class NonCatCallsWithCatalog, method addPartitions.

/**
 * Exercises the three non-catalog add-partition entry points — single
 * add_partition, add_partitions(list), and add_partitions(list, ifNotExists,
 * needResults) — against a database with its catalog name unset, then fetches
 * every partition back and checks db name, table name, and catalog name.
 */
@Test
public void addPartitions() throws TException {
    final String dbName = "add_partition_database_in_other_catalog";
    final Database database = new DatabaseBuilder().setName(dbName).build(conf);
    database.unsetCatalogName();
    client.createDatabase(database);
    final String tableName = "table_in_other_catalog";
    final Table tbl = new TableBuilder()
            .inDb(database)
            .setTableName(tableName)
            .addCol("id", "int")
            .addCol("name", "string")
            .addPartCol("partcol", "string")
            .build(conf);
    tbl.unsetCatName();
    client.createTable(tbl);
    final Partition[] partitions = new Partition[5];
    int idx = 0;
    while (idx < partitions.length) {
        Partition p = new PartitionBuilder().inTable(tbl).addValue("a" + idx).build(conf);
        p.unsetCatName();
        partitions[idx] = p;
        idx++;
    }
    client.add_partition(partitions[0]);
    Assert.assertEquals(2, client.add_partitions(Arrays.asList(partitions[1], partitions[2])));
    client.add_partitions(Arrays.asList(partitions[3], partitions[4]), true, false);
    for (int n = 0; n < partitions.length; n++) {
        Partition fetched = client.getPartition(dbName, tableName, Collections.singletonList("a" + n));
        Assert.assertEquals(dbName, fetched.getDbName());
        Assert.assertEquals(tableName, fetched.getTableName());
        Assert.assertEquals(expectedCatalog(), fetched.getCatName());
    }
    client.dropDatabase(dbName, true, true, true);
}
Also used : DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) Test(org.junit.Test)

Example 58 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

From the class NonCatCallsWithCatalog, method tablesCreateDropAlterTruncate.

/**
 * End-to-end table lifecycle test via non-catalog-aware client calls against
 * a database whose catalog name is unset: create four tables (one with an
 * explicit location, one partitioned), verify locations on disk, list and
 * fetch them, alter one, truncate one, then drop them all.  Also checks that
 * the same lookups/alters against DEFAULT_DATABASE_NAME (the wrong catalog)
 * fail or return nothing.
 */
@Test
public void tablesCreateDropAlterTruncate() throws TException, URISyntaxException {
    String dbName = "db_in_other_catalog";
    // For this one don't specify a location to make sure it gets put in the catalog directory
    Database db = new DatabaseBuilder().setName(dbName).build(conf);
    db.unsetCatalogName();
    client.createDatabase(db);
    String[] tableNames = new String[4];
    for (int i = 0; i < tableNames.length; i++) {
        tableNames[i] = "table_in_other_catalog_" + i;
        TableBuilder builder = new TableBuilder().inDb(db).setTableName(tableNames[i]).addCol("col1_" + i, ColumnType.STRING_TYPE_NAME).addCol("col2_" + i, ColumnType.INT_TYPE_NAME);
        // Make one have a non-standard location
        if (i == 0) {
            builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i]));
        }
        // Make one partitioned
        if (i == 2) {
            builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME);
        }
        // Make one a materialized view
        /*
      // TODO HIVE-18991
      if (i == 3) {
        builder.setType(TableType.MATERIALIZED_VIEW.name())
            .setRewriteEnabled(true)
            .addMaterializedViewReferencedTable(dbName + "." + tableNames[0]);
      }
      */
        Table t = builder.build(conf);
        t.unsetCatName();
        client.createTable(t);
    }
    // Add partitions for the partitioned table
    String[] partVals = new String[3];
    Table partitionedTable = client.getTable(dbName, tableNames[2]);
    for (int i = 0; i < partVals.length; i++) {
        partVals[i] = "part" + i;
        Partition p = new PartitionBuilder().inTable(partitionedTable).addValue(partVals[i]).build(conf);
        p.unsetCatName();
        client.add_partition(p);
    }
    // Get tables, make sure the locations are correct
    for (int i = 0; i < tableNames.length; i++) {
        Table t = client.getTable(dbName, tableNames[i]);
        Assert.assertEquals(expectedCatalog(), t.getCatName());
        // Table 0 got an explicit warehouse location above; the others default
        // to <base>/<dbName>.db/<tableName>.
        String expectedLocation = (i < 1) ? new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString() : new File(expectedBaseDir() + File.separatorChar + dbName + ".db", tableNames[i]).toURI().toString();
        // File.toURI() for a directory ends in "/", the stored SD location
        // does not — hence the appended "/" on the right-hand side.
        Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/");
        File dir = new File(new URI(t.getSd().getLocation()).getPath());
        Assert.assertTrue(dir.exists() && dir.isDirectory());
    }
    // Make sure getting table in the wrong catalog does not work
    try {
        Table t = client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]);
        Assert.fail();
    } catch (NoSuchObjectException e) {
    // Expected: the table does not exist in the default database.
    }
    // test getAllTables
    Set<String> fetchedNames = new HashSet<>(client.getAllTables(dbName));
    Assert.assertEquals(tableNames.length, fetchedNames.size());
    for (String tableName : tableNames) {
        Assert.assertTrue(fetchedNames.contains(tableName));
    }
    fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME));
    for (String tableName : tableNames) {
        Assert.assertFalse(fetchedNames.contains(tableName));
    }
    // test getMaterializedViewsForRewriting
    /* TODO HIVE-18991
    List<String> materializedViews = client.getMaterializedViewsForRewriting(dbName);
    Assert.assertEquals(1, materializedViews.size());
    Assert.assertEquals(tableNames[3], materializedViews.get(0));
    */
    fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME));
    Assert.assertFalse(fetchedNames.contains(tableNames[3]));
    // test getTableObjectsByName
    List<Table> fetchedTables = client.getTableObjectsByName(dbName, Arrays.asList(tableNames[0], tableNames[1]));
    Assert.assertEquals(2, fetchedTables.size());
    Collections.sort(fetchedTables);
    Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName());
    Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName());
    fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME, Arrays.asList(tableNames[0], tableNames[1]));
    Assert.assertEquals(0, fetchedTables.size());
    // Test altering the table
    Table t = client.getTable(dbName, tableNames[0]).deepCopy();
    t.getParameters().put("test", "test");
    client.alter_table(dbName, tableNames[0], t);
    t = client.getTable(dbName, tableNames[0]).deepCopy();
    Assert.assertEquals("test", t.getParameters().get("test"));
    // Alter a table in the wrong catalog
    try {
        client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t);
        Assert.fail();
    } catch (InvalidOperationException e) {
    // Expected: the target table is not visible in the default database.
    }
    // Update the metadata for the materialized view
    /* TODO HIVE-18991
    CreationMetadata cm = client.getTable(dbName, tableNames[3]).getCreationMetadata();
    cm.addToTablesUsed(dbName + "." + tableNames[1]);
    client.updateCreationMetadata(dbName, tableNames[3], cm);
    */
    List<String> partNames = new ArrayList<>();
    for (String partVal : partVals) {
        partNames.add("pcol1=" + partVal);
    }
    // Truncate a table
    client.truncateTable(dbName, tableNames[0], partNames);
    // Have to do this in reverse order so that we drop the materialized view first.
    for (int i = tableNames.length - 1; i >= 0; i--) {
        t = client.getTable(dbName, tableNames[i]);
        File tableDir = new File(new URI(t.getSd().getLocation()).getPath());
        Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
        if (tableNames[i].equalsIgnoreCase(tableNames[0])) {
            // Dropped with deleteData=false, so the directory must survive.
            client.dropTable(dbName, tableNames[i], false, false);
            Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
        } else {
            client.dropTable(dbName, tableNames[i]);
            Assert.assertFalse(tableDir.exists());
        }
    }
    Assert.assertEquals(0, client.getAllTables(dbName).size());
}
Also used : Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) ArrayList(java.util.ArrayList) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) URI(java.net.URI) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) File(java.io.File) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 59 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

From the class NonCatCallsWithCatalog, method dropPartitions.

/**
 * Verifies both non-catalog dropPartition overloads — by value list with
 * PartitionDropOptions, and by partition-name string — against a database
 * with its catalog name unset, checking each dropped partition can no longer
 * be fetched.
 */
@Test
public void dropPartitions() throws TException {
    final String dbName = "drop_partition_database_in_other_catalog";
    final Database database = new DatabaseBuilder().setName(dbName).build(conf);
    database.unsetCatalogName();
    client.createDatabase(database);
    final String tableName = "table_in_other_catalog";
    final Table tbl = new TableBuilder()
            .inDb(database)
            .setTableName(tableName)
            .addCol("id", "int")
            .addCol("name", "string")
            .addPartCol("partcol", "string")
            .build(conf);
    tbl.unsetCatName();
    client.createTable(tbl);
    final Partition[] partitions = new Partition[2];
    int idx = 0;
    while (idx < partitions.length) {
        Partition p = new PartitionBuilder().inTable(tbl).addValue("a" + idx).build(conf);
        p.unsetCatName();
        partitions[idx] = p;
        idx++;
    }
    client.add_partitions(Arrays.asList(partitions));
    final List<Partition> listed = client.listPartitions(dbName, tableName, (short) -1);
    Assert.assertEquals(partitions.length, listed.size());
    // Drop by value list with explicit options.
    Assert.assertTrue(client.dropPartition(dbName, tableName, Collections.singletonList("a0"), PartitionDropOptions.instance().ifExists(false)));
    try {
        client.getPartition(dbName, tableName, Collections.singletonList("a0"));
        Assert.fail();
    } catch (NoSuchObjectException e) {
    // Expected: partition "a0" was just dropped.
    }
    // Drop by partition-name string.
    Assert.assertTrue(client.dropPartition(dbName, tableName, "partcol=a1", true));
    try {
        client.getPartition(dbName, tableName, Collections.singletonList("a1"));
        Assert.fail();
    } catch (NoSuchObjectException e) {
    // Expected: partition "a1" was just dropped.
    }
}
Also used : DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) Partition(org.apache.hadoop.hive.metastore.api.Partition) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) Test(org.junit.Test)

Example 60 with PartitionBuilder

use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.

From the class TestFilterHooks, method creatEnv.

/**
 * Builds the shared metastore fixtures for a test case: two databases in the
 * default catalog, an unpartitioned table and a partitioned table (with two
 * partitions) in the first database, a clean transaction database, and two
 * scheduled compactions (one MAJOR, one MINOR).  Called after each test case
 * has set its configuration.
 *
 * @param conf the metastore configuration the fixtures are built against
 * @throws Exception if client creation or any metastore/txn-db call fails
 */
protected void creatEnv(Configuration conf) throws Exception {
    client = createClient(conf);
    // Start from a clean slate; cascade/ignore-unknown flags make reruns safe.
    client.dropDatabase(DBNAME1, true, true, true);
    client.dropDatabase(DBNAME2, true, true, true);
    // The create(...) return values were previously bound to unused locals
    // (db1/db2); the builders are invoked purely for their side effects.
    new DatabaseBuilder().setName(DBNAME1).setCatalogName(Warehouse.DEFAULT_CATALOG_NAME).create(client, conf);
    new DatabaseBuilder().setName(DBNAME2).setCatalogName(Warehouse.DEFAULT_CATALOG_NAME).create(client, conf);
    new TableBuilder().setDbName(DBNAME1).setTableName(TAB1).addCol("id", "int").addCol("name", "string").create(client, conf);
    Table tab2 = new TableBuilder().setDbName(DBNAME1).setTableName(TAB2).addCol("id", "int").addPartCol("name", "string").create(client, conf);
    new PartitionBuilder().inTable(tab2).addValue("value1").addToTable(client, conf);
    new PartitionBuilder().inTable(tab2).addValue("value2").addToTable(client, conf);
    TestTxnDbUtil.cleanDb(conf);
    TestTxnDbUtil.prepDb(conf);
    client.compact2(DBNAME1, TAB1, null, CompactionType.MAJOR, new HashMap<>());
    client.compact2(DBNAME1, TAB2, "name=value1", CompactionType.MINOR, new HashMap<>());
}
Also used : DatabaseBuilder(org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder) Table(org.apache.hadoop.hive.metastore.api.Table) PartitionBuilder(org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder) Database(org.apache.hadoop.hive.metastore.api.Database) TableBuilder(org.apache.hadoop.hive.metastore.client.builder.TableBuilder)

Aggregations

PartitionBuilder (org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder)75 Partition (org.apache.hadoop.hive.metastore.api.Partition)63 Test (org.junit.Test)47 Table (org.apache.hadoop.hive.metastore.api.Table)44 TableBuilder (org.apache.hadoop.hive.metastore.client.builder.TableBuilder)33 MetastoreCheckinTest (org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest)28 DatabaseBuilder (org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder)27 Database (org.apache.hadoop.hive.metastore.api.Database)22 ArrayList (java.util.ArrayList)14 MetastoreUnitTest (org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest)10 CatalogBuilder (org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder)10 EnvironmentContext (org.apache.hadoop.hive.metastore.api.EnvironmentContext)9 SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint)7 SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint)7 SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint)7 SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint)7 Catalog (org.apache.hadoop.hive.metastore.api.Catalog)6 HashMap (java.util.HashMap)5 FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema)5 HashSet (java.util.HashSet)4