Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
From the class TestTablesCreateDropAlterTruncate, method tablesInOtherCatalogs.
@Test
public void tablesInOtherCatalogs() throws TException, URISyntaxException {
  String catName = "create_etc_tables_in_other_catalogs";
  Catalog cat = new CatalogBuilder()
      .setName(catName)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
      .build();
  client.createCatalog(cat);
  String dbName = "db_in_other_catalog";
  // For this one don't specify a location to make sure it gets put in the catalog directory
  Database db = new DatabaseBuilder()
      .setName(dbName)
      .setCatalogName(catName)
      .create(client, metaStore.getConf());
  Table table = new TableBuilder()
      .inDb(db)
      .setTableName("mvSource")
      .addCol("col1_1", ColumnType.STRING_TYPE_NAME)
      .addCol("col2_2", ColumnType.INT_TYPE_NAME)
      .build(metaStore.getConf());
  client.createTable(table);
  SourceTable sourceTable = createSourceTable(table);
  String[] tableNames = new String[4];
  for (int i = 0; i < tableNames.length; i++) {
    tableNames[i] = "table_in_other_catalog_" + i;
    TableBuilder builder = new TableBuilder()
        .inDb(db)
        .setTableName(tableNames[i])
        .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME)
        .addCol("col2_" + i, ColumnType.INT_TYPE_NAME);
    // Make one have a non-standard location
    if (i == 0) {
      builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i]));
    }
    // Make one partitioned
    if (i == 2) {
      builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME);
    }
    // Make one a materialized view
    if (i == 3) {
      builder.setType(TableType.MATERIALIZED_VIEW.name())
          .setRewriteEnabled(true)
          .addMaterializedViewReferencedTable(sourceTable);
    }
    client.createTable(builder.build(metaStore.getConf()));
  }
  // Add partitions for the partitioned table
  String[] partVals = new String[3];
  Table partitionedTable = client.getTable(catName, dbName, tableNames[2]);
  for (int i = 0; i < partVals.length; i++) {
    partVals[i] = "part" + i;
    new PartitionBuilder()
        .inTable(partitionedTable)
        .addValue(partVals[i])
        .addToTable(client, metaStore.getConf());
  }
  // Get the tables and make sure the locations are correct
  for (int i = 0; i < tableNames.length; i++) {
    Table t = client.getTable(catName, dbName, tableNames[i]);
    Assert.assertEquals(catName, t.getCatName());
    String expectedLocation = (i < 1) ?
        new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString() :
        new File(cat.getLocationUri() + File.separatorChar + dbName + ".db", tableNames[i]).toURI().toString();
    Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/");
    File dir = new File(new URI(t.getSd().getLocation()).getPath());
    Assert.assertTrue(dir.exists() && dir.isDirectory());
  }
  // Make sure getting a table from the wrong catalog does not work
  try {
    Table t = client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]);
    Assert.fail();
  } catch (NoSuchObjectException e) {
    // NOP
  }
  // test getAllTables
  Set<String> fetchedNames = new HashSet<>(client.getAllTables(catName, dbName));
  Assert.assertEquals(tableNames.length + 1, fetchedNames.size());
  for (String tableName : tableNames) {
    Assert.assertTrue(fetchedNames.contains(tableName));
  }
  fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME));
  for (String tableName : tableNames) {
    Assert.assertFalse(fetchedNames.contains(tableName));
  }
  // test getMaterializedViewsForRewriting
  List<String> materializedViews = client.getMaterializedViewsForRewriting(catName, dbName);
  Assert.assertEquals(1, materializedViews.size());
  Assert.assertEquals(tableNames[3], materializedViews.get(0));
  fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME));
  Assert.assertFalse(fetchedNames.contains(tableNames[3]));
  // test getTableObjectsByName
  List<Table> fetchedTables = client.getTableObjectsByName(catName, dbName,
      Arrays.asList(tableNames[0], tableNames[1]));
  Assert.assertEquals(2, fetchedTables.size());
  Collections.sort(fetchedTables);
  Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName());
  Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName());
  fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME,
      Arrays.asList(tableNames[0], tableNames[1]));
  Assert.assertEquals(0, fetchedTables.size());
  // Test altering the table
  Table t = client.getTable(catName, dbName, tableNames[0]).deepCopy();
  t.getParameters().put("test", "test");
  client.alter_table(catName, dbName, tableNames[0], t);
  t = client.getTable(catName, dbName, tableNames[0]).deepCopy();
  Assert.assertEquals("test", t.getParameters().get("test"));
  // Alter a table in the wrong catalog
  try {
    client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t);
    Assert.fail();
  } catch (InvalidOperationException e) {
    // NOP
  }
  // Update the metadata for the materialized view
  CreationMetadata cm = client.getTable(catName, dbName, tableNames[3]).getCreationMetadata();
  Table table1 = new TableBuilder()
      .inDb(db)
      .setTableName("mvSource2")
      .addCol("col1_1", ColumnType.STRING_TYPE_NAME)
      .addCol("col2_2", ColumnType.INT_TYPE_NAME)
      .build(metaStore.getConf());
  client.createTable(table1);
  sourceTable = createSourceTable(table1);
  cm.addToTablesUsed(TableName.getDbTable(
      sourceTable.getTable().getDbName(),
      sourceTable.getTable().getTableName()));
  cm.addToSourceTables(sourceTable);
  cm.unsetMaterializationTime();
  client.updateCreationMetadata(catName, dbName, tableNames[3], cm);
  List<String> partNames = new ArrayList<>();
  for (String partVal : partVals) {
    partNames.add("pcol1=" + partVal);
  }
  // Truncate a table
  client.truncateTable(catName, dbName, tableNames[0], partNames);
  // Truncate a table in the wrong catalog
  try {
    client.truncateTable(DEFAULT_DATABASE_NAME, tableNames[0], partNames);
    Assert.fail();
  } catch (NoSuchObjectException | TApplicationException e) {
    // NOP
  }
  // Drop a table from the wrong catalog
  try {
    client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], true, false);
    Assert.fail();
  } catch (NoSuchObjectException | TApplicationException e) {
    // NOP
  }
  // Should ignore the failure
  client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], false, true);
  // Have to do this in reverse order so that we drop the materialized view first.
  for (int i = tableNames.length - 1; i >= 0; i--) {
    t = client.getTable(catName, dbName, tableNames[i]);
    File tableDir = new File(new URI(t.getSd().getLocation()).getPath());
    Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
    if (tableNames[i].equalsIgnoreCase(tableNames[0])) {
      client.dropTable(catName, dbName, tableNames[i], false, false);
      Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
    } else {
      client.dropTable(catName, dbName, tableNames[i]);
      Assert.assertFalse(tableDir.exists());
    }
  }
  client.dropTable(table.getCatName(), table.getDbName(), table.getTableName());
  client.dropTable(table1.getCatName(), table1.getDbName(), table1.getTableName());
  Assert.assertEquals(0, client.getAllTables(catName, dbName).size());
}
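The PartitionBuilder usage above follows a compact pattern: point the builder at a partitioned table with inTable, add one value per partition column, and let addToTable build and register the partition in a single call. A minimal sketch of just that pattern, assuming an existing IMetaStoreClient client and Configuration conf; the catalog, database, and table names are placeholders:

// Minimal sketch; "somecat", "somedb", and "sometable" are hypothetical names.
Table partitioned = client.getTable("somecat", "somedb", "sometable");
new PartitionBuilder()
    .inTable(partitioned)       // copies the table's db/table name and storage descriptor
    .addValue("part0")          // one addValue per partition column, in column order
    .addToTable(client, conf);  // builds the Partition and registers it with the metastore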
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
From the class TestDropPartitions, method createPartition.
protected Partition createPartition(String tableName, String location, List<String> values,
    List<FieldSchema> partCols, Map<String, String> partParams) throws Exception {
  new PartitionBuilder()
      .setDbName(DB_NAME)
      .setTableName(tableName)
      .setValues(values)
      .setCols(partCols)
      .setLocation(location)
      .setPartParams(partParams)
      .addToTable(client, metaStore.getConf());
  Partition partition = client.getPartition(DB_NAME, tableName, values);
  return partition;
}
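A hypothetical invocation of this helper for a table partitioned by a single string column; the table, column, and value names are made up, and passing null as the location should let the metastore derive the default partition directory under the table:

// Hypothetical usage: one "year" partition column, metastore-chosen location.
List<FieldSchema> partCols =
    Collections.singletonList(new FieldSchema("year", "string", null));
Partition p = createPartition("sales", null, Collections.singletonList("2020"),
    partCols, Collections.emptyMap());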
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
From the class TestGetPartitions, method otherCatalog.
@Test
@ConditionalIgnoreOnSessionHiveMetastoreClient
public void otherCatalog() throws TException {
  String catName = "get_partition_catalog";
  Catalog cat = new CatalogBuilder()
      .setName(catName)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
      .build();
  client.createCatalog(cat);
  String dbName = "get_partition_database_in_other_catalog";
  Database db = new DatabaseBuilder()
      .setName(dbName)
      .setCatalogName(catName)
      .create(client, metaStore.getConf());
  String tableName = "table_in_other_catalog";
  Table table = new TableBuilder()
      .inDb(db)
      .setTableName(tableName)
      .addCol("id", "int")
      .addCol("name", "string")
      .addPartCol("partcol", "string")
      .addTableParam("PARTITION_LEVEL_PRIVILEGE", "true")
      .create(client, metaStore.getConf());
  Partition[] parts = new Partition[5];
  for (int i = 0; i < parts.length; i++) {
    parts[i] = new PartitionBuilder().inTable(table).addValue("a" + i).build(metaStore.getConf());
  }
  client.add_partitions(Arrays.asList(parts));
  Partition fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a0"));
  Assert.assertEquals(catName, fetched.getCatName());
  Assert.assertEquals("a0", fetched.getValues().get(0));
  fetched = client.getPartition(catName, dbName, tableName, "partcol=a0");
  Assert.assertEquals(catName, fetched.getCatName());
  Assert.assertEquals("a0", fetched.getValues().get(0));
  List<Partition> fetchedParts = client.getPartitionsByNames(catName, dbName, tableName,
      Arrays.asList("partcol=a0", "partcol=a1"));
  Assert.assertEquals(2, fetchedParts.size());
  Set<String> vals = new HashSet<>(fetchedParts.size());
  for (Partition part : fetchedParts) {
    vals.add(part.getValues().get(0));
  }
  Assert.assertTrue(vals.contains("a0"));
  Assert.assertTrue(vals.contains("a1"));
}
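Note that this test uses the two-step form rather than addToTable: build(metaStore.getConf()) only constructs the Partition object, and the separate add_partitions call registers the whole batch in one metastore round trip. A minimal sketch of the same batching idea, reusing the table fixture above:

List<Partition> batch = new ArrayList<>();
for (String val : Arrays.asList("a0", "a1", "a2")) {
  batch.add(new PartitionBuilder().inTable(table).addValue(val).build(metaStore.getConf()));
}
client.add_partitions(batch); // one call for the whole batch instead of one per partition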
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
From the class TestGetPartitions, method addPartition.
protected void addPartition(IMetaStoreClient client, Table table, List<String> values) throws TException {
  PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table);
  values.forEach(partitionBuilder::addValue);
  client.add_partition(partitionBuilder.build(metaStore.getConf()));
}
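A hypothetical call to this helper, assuming table was built with two partition columns; the values must be listed in partition-column order:

// e.g. for a table partitioned by (year string, country string)
addPartition(client, table, Arrays.asList("2021", "us")); // i.e. year=2021/country=us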
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in project hive by apache.
From the class TestListPartitions, method addPartition (identical to the TestGetPartitions helper above).
protected void addPartition(IMetaStoreClient client, Table table, List<String> values) throws TException {
  PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table);
  values.forEach(partitionBuilder::addValue);
  client.add_partition(partitionBuilder.build(metaStore.getConf()));
}