Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.
The class TestSessionHiveMetastoreClientListPartitionsTempTable, method createTestTable:
@Override
protected Table createTestTable(IMetaStoreClient client, String dbName, String tableName,
    List<String> partCols, boolean setPartitionLevelPrivileges) throws TException {
  TableBuilder builder = new TableBuilder()
      .setDbName(dbName)
      .setTableName(tableName)
      .addCol("id", "int")
      .addCol("name", "string")
      .setTemporary(true);
  partCols.forEach(col -> builder.addPartCol(col, "string"));
  Table table = builder.build(conf);
  if (setPartitionLevelPrivileges) {
    table.putToParameters(PART_PRIV, "true");
  }
  client.createTable(table);
  return table;
}
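The same build-then-register pattern works outside the test fixture. Below is a minimal sketch, not taken from the Hive sources, assuming an already connected IMetaStoreClient, a metastore Configuration, and an existing database named demo_db; the table and partition-column names are illustrative only.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
import org.apache.thrift.TException;

public class TempTableSketch {
  // Builds and registers a temporary, partitioned table.
  // Database, table, and partition-column names are illustrative, not from the Hive tests.
  static Table createTempPartitionedTable(IMetaStoreClient client, Configuration conf) throws TException {
    TableBuilder builder = new TableBuilder()
        .setDbName("demo_db")
        .setTableName("demo_temp_table")
        .addCol("id", "int")
        .addCol("name", "string")
        .setTemporary(true);
    // Add the partition columns the same way the test above does.
    for (String partCol : Arrays.asList("year", "month")) {
      builder.addPartCol(partCol, "string");
    }
    Table table = builder.build(conf); // build() only constructs the Thrift Table object
    client.createTable(table);         // the table is registered with the metastore here
    return table;
  }
}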
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.
The class TestSessionHiveMetastoreClientAppendPartitionTempTable, method createTable:
@Override
protected Table createTable(String tableName, List<FieldSchema> partCols,
    Map<String, String> tableParams, String tableType, String location) throws Exception {
  TableBuilder builder = new TableBuilder()
      .setDbName(DB_NAME)
      .setTableName(tableName)
      .addCol("test_id", "int", "test col id")
      .addCol("test_value", "string", "test col value")
      .setPartCols(partCols)
      .setType(tableType)
      .setLocation(location)
      .setTemporary(true);
  if (tableParams != null) {
    builder.setTableParams(tableParams);
  }
  builder.create(getClient(), conf);
  return getClient().getTable(DB_NAME, tableName);
}
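These snippets use two TableBuilder entry points: build(conf) returns the Thrift Table without touching the metastore, while create(client, conf) also issues the createTable call. A minimal sketch of the second form follows, not taken from the Hive sources, again assuming a connected client, a Configuration, and an existing demo_db database; all names are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
import org.apache.thrift.TException;

public class CreateTableSketch {
  // Creates a managed table in one step via create(), then re-reads it, as the test above does,
  // so the returned Table carries any server-populated fields. Names are illustrative.
  static Table createManagedTable(IMetaStoreClient client, Configuration conf) throws TException {
    new TableBuilder()
        .setDbName("demo_db")
        .setTableName("demo_table")
        .addCol("test_id", "int", "test col id")
        .addCol("test_value", "string", "test col value")
        .create(client, conf);
    return client.getTable("demo_db", "demo_table");
  }
}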
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.
The class TestSessionHiveMetastoreClientExchangePartitionsTempTable, method createNonTempTable:
private Table createNonTempTable(String dbName, String tableName, List<FieldSchema> partCols,
    String location) throws Exception {
  List<FieldSchema> cols = new ArrayList<>();
  cols.add(new FieldSchema("test_id", INT_COL_TYPE, "test col id"));
  cols.add(new FieldSchema("test_value", "string", "test col value"));
  new TableBuilder()
      .setDbName(dbName)
      .setTableName(tableName)
      .setCols(cols)
      .setPartCols(partCols)
      .setLocation(location)
      .setTemporary(false)
      .create(getClient(), getMetaStore().getConf());
  return getClient().getTable(dbName, tableName);
}
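The same pattern works when both the regular columns and the partition columns are assembled up front as FieldSchema lists and handed to setCols()/setPartCols(). A minimal sketch, not from the Hive sources, assuming a connected client, a Configuration, and an existing demo_db database; names and column comments are illustrative.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
import org.apache.thrift.TException;

public class NonTempTableSketch {
  // Creates a non-temporary partitioned table from pre-built FieldSchema lists.
  // setTemporary(false) is spelled out to contrast with the session-level temp tables above.
  static Table createPartitionedTable(IMetaStoreClient client, Configuration conf) throws TException {
    List<FieldSchema> cols = new ArrayList<>();
    cols.add(new FieldSchema("test_id", "int", "test col id"));
    cols.add(new FieldSchema("test_value", "string", "test col value"));
    List<FieldSchema> partCols = new ArrayList<>();
    partCols.add(new FieldSchema("year", "string", "partition col"));
    new TableBuilder()
        .setDbName("demo_db")
        .setTableName("demo_part_table")
        .setCols(cols)
        .setPartCols(partCols)
        .setTemporary(false)
        .create(client, conf);
    return client.getTable("demo_db", "demo_part_table");
  }
}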
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.
The class TestTablesCreateDropAlterTruncate, method moveTablesBetweenCatalogsOnAlter:
@Test(expected = InvalidOperationException.class)
public void moveTablesBetweenCatalogsOnAlter() throws TException {
  String catName = "move_table_between_catalogs_on_alter";
  Catalog cat = new CatalogBuilder()
      .setName(catName)
      .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
      .build();
  client.createCatalog(cat);
  String dbName = "a_db";
  // For this one don't specify a location to make sure it gets put in the catalog directory
  Database db = new DatabaseBuilder()
      .setName(dbName)
      .setCatalogName(catName)
      .create(client, metaStore.getConf());
  String tableName = "non_movable_table";
  Table before = new TableBuilder()
      .inDb(db)
      .setTableName(tableName)
      .addCol("col1", ColumnType.STRING_TYPE_NAME)
      .addCol("col2", ColumnType.INT_TYPE_NAME)
      .create(client, metaStore.getConf());
  Table after = before.deepCopy();
  after.setCatName(DEFAULT_CATALOG_NAME);
  client.alter_table(catName, dbName, tableName, after);
}
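Outside the @Test(expected = ...) idiom, the same negative check can be written with an explicit try/catch around alter_table. The sketch below is not from the Hive sources; it assumes a connected client and a Table that was already created in its original catalog, and introduces a hypothetical helper name.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;

public class MoveBetweenCatalogsSketch {
  // Hypothetical helper: tries to move an existing table into another catalog via alter_table
  // and verifies that the metastore rejects the change, mirroring the test above.
  static void assertCatalogMoveRejected(IMetaStoreClient client, Table before, String targetCatalog)
      throws TException {
    Table after = before.deepCopy();
    after.setCatName(targetCatalog);
    try {
      client.alter_table(before.getCatName(), before.getDbName(), before.getTableName(), after);
      throw new AssertionError("alter_table should not be able to move a table between catalogs");
    } catch (InvalidOperationException expected) {
      // Expected: moving tables between catalogs on alter is not supported.
    }
  }
}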
Use of org.apache.hadoop.hive.metastore.client.builder.TableBuilder in project hive by apache.
The class TestTablesCreateDropAlterTruncate, method setUp:
@Before
public void setUp() throws Exception {
  // Get new client
  client = metaStore.getClient();
  // Clean up the database
  client.dropDatabase(OTHER_DATABASE, true, true, true);
  // Drop every table in the default database
  for (String tableName : client.getAllTables(DEFAULT_DATABASE)) {
    client.dropTable(DEFAULT_DATABASE, tableName, true, true, true);
  }
  // Clean up trash
  metaStore.cleanWarehouseDirs();
  testTables[0] = new TableBuilder()
      .setTableName("test_table")
      .addCol("test_col", "int")
      .create(client, metaStore.getConf());
  testTables[1] = new TableBuilder()
      .setTableName("test_view")
      .addCol("test_col", "int")
      .setType("VIRTUAL_VIEW")
      .create(client, metaStore.getConf());
  testTables[2] = new TableBuilder()
      .setTableName("test_table_to_find_1")
      .addCol("test_col", "int")
      .create(client, metaStore.getConf());
  testTables[3] = new TableBuilder()
      .setTableName("test_partitioned_table")
      .addCol("test_col1", "int")
      .addCol("test_col2", "int")
      .addPartCol("test_part_col", "int")
      .create(client, metaStore.getConf());
  testTables[4] = new TableBuilder()
      .setTableName("external_table_for_test")
      .addCol("test_col", "int")
      .setLocation(metaStore.getExternalWarehouseRoot() + "/external/table_dir")
      .addTableParam("EXTERNAL", "TRUE")
      .setType("EXTERNAL_TABLE")
      .create(client, metaStore.getConf());
  new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf());
  testTables[5] = new TableBuilder()
      .setDbName(OTHER_DATABASE)
      .setTableName("test_table")
      .addCol("test_col", "int")
      .create(client, metaStore.getConf());
  // Create partitions for the partitioned table
  for (int i = 0; i < 2; i++) {
    new PartitionBuilder().inTable(testTables[3]).addValue("a" + i).addToTable(client, metaStore.getConf());
  }
  // Add an external partition too
  new PartitionBuilder()
      .inTable(testTables[3])
      .addValue("a2")
      .setLocation(metaStore.getWarehouseRoot() + "/external/a2")
      .addToTable(client, metaStore.getConf());
  // Add data files to the partitioned table
  List<Partition> partitions =
      client.listPartitions(testTables[3].getDbName(), testTables[3].getTableName(), (short) -1);
  for (Partition partition : partitions) {
    Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile");
    metaStore.createFile(dataFile, "100");
  }
  // Reload tables from the MetaStore, and create data files
  for (int i = 0; i < testTables.length; i++) {
    testTables[i] = client.getTable(testTables[i].getDbName(), testTables[i].getTableName());
    if (testTables[i].getPartitionKeys().isEmpty()) {
      if (testTables[i].getSd().getLocation() != null) {
        Path dataFile = new Path(testTables[i].getSd().getLocation() + "/dataFile");
        metaStore.createFile(dataFile, "100");
      }
    }
  }
  partitionedTable = testTables[3];
  externalTable = testTables[4];
}
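The setUp method also shows the companion PartitionBuilder pattern: inTable() copies the db/table name and partition keys from an existing Table, addValue() supplies the partition-column value, and addToTable() registers the partition. A minimal sketch of that flow in isolation follows; it is not from the Hive sources and assumes a connected client, a Configuration, and a table with a single string partition column; the partition value is illustrative.

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
import org.apache.thrift.TException;

public class PartitionSketch {
  // Adds one partition to an existing partitioned table and lists all of its partitions.
  static List<Partition> addAndListPartitions(IMetaStoreClient client, Configuration conf, Table table)
      throws TException {
    new PartitionBuilder()
        .inTable(table)       // copies db name, table name, and partition keys from the Table
        .addValue("a3")       // illustrative value for the single partition column
        .addToTable(client, conf);
    // (short) -1 means "no limit", as in the setUp method above.
    return client.listPartitions(table.getDbName(), table.getTableName(), (short) -1);
  }
}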