Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in the Apache Hive project.
From class TestFilterHooks, method setUp:
/**
 * One-time fixture setup: starts a standalone metastore with the dummy
 * filter hook installed (initially pass-through) and creates two databases,
 * one unpartitioned table, one partitioned table and two partitions for the
 * filter-hook tests to operate on.
 */
@BeforeClass
public static void setUp() throws Exception {
  // Let everything through the hook while the fixtures are being created.
  DummyMetaStoreFilterHookImpl.blockResults = false;

  conf = MetastoreConf.newMetastoreConf();
  MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
  MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
  MetastoreConf.setClass(conf, ConfVars.FILTER_HOOK, DummyMetaStoreFilterHookImpl.class, MetaStoreFilterHook.class);
  MetaStoreTestUtils.setConfForStandloneMode(conf);
  MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);

  msc = new HiveMetaStoreClient(conf);

  // Best-effort cleanup of any databases left over from previous runs.
  msc.dropDatabase(DBNAME1, true, true, true);
  msc.dropDatabase(DBNAME2, true, true, true);

  Database firstDatabase = new DatabaseBuilder().setName(DBNAME1).build();
  msc.createDatabase(firstDatabase);
  Database secondDatabase = new DatabaseBuilder().setName(DBNAME2).build();
  msc.createDatabase(secondDatabase);

  Table plainTable = new TableBuilder()
      .setDbName(DBNAME1)
      .setTableName(TAB1)
      .addCol("id", "int")
      .addCol("name", "string")
      .build();
  msc.createTable(plainTable);

  Table partitionedTable = new TableBuilder()
      .setDbName(DBNAME1)
      .setTableName(TAB2)
      .addCol("id", "int")
      .addPartCol("name", "string")
      .build();
  msc.createTable(partitionedTable);

  Partition firstPartition = new PartitionBuilder().fromTable(partitionedTable).addValue("value1").build();
  msc.add_partition(firstPartition);
  Partition secondPartition = new PartitionBuilder().fromTable(partitionedTable).addValue("value2").build();
  msc.add_partition(secondPartition);
}
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in the Apache Hive project.
From class TestSessionHiveMetastoreClientAlterPartitionsTempTable, method testAlterPartitionsCheckRollbackUnknownPartition:
/**
 * Checks that a batch alter-partitions request containing a partition the
 * metastore does not know about is rolled back, leaving the pre-existing
 * partitions unchanged.
 */
@Test
public void testAlterPartitionsCheckRollbackUnknownPartition() throws Exception {
  createTable4PartColsParts(getClient());
  Table table = getClient().getTable(DB_NAME, TABLE_NAME);
  // A partition whose value tuple was never added to the table.
  Partition unknownPartition = new PartitionBuilder()
      .inTable(table)
      .addValue("1111")
      .addValue("1111")
      .addValue("11")
      .build(conf);
  List<Partition> originalPartitions = getClient().listPartitions(DB_NAME, TABLE_NAME, (short) -1);
  // A legitimate modification of an existing partition, batched together
  // with the unknown one.
  Partition modifiedPartition = new Partition(originalPartitions.get(0));
  makeTestChangesOnPartition(modifiedPartition);
  assertPartitionRollback(originalPartitions, Lists.newArrayList(modifiedPartition, unknownPartition));
}
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in the Apache Hive project.
From class TestSessionHiveMetastoreClientAlterPartitionsTempTable, method addPartition:
/**
 * Builds a partition for the given table from the supplied values and adds
 * it to the metastore.
 *
 * NOTE(review): the {@code client} parameter is ignored in favor of
 * getClient() — presumably intentional so the temp-table session client is
 * used; confirm against the base-class contract.
 */
@Override
protected void addPartition(IMetaStoreClient client, Table table, List<String> values) throws TException {
  PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table);
  for (String value : values) {
    partitionBuilder.addValue(value);
  }
  getClient().add_partition(partitionBuilder.build(conf));
}
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in the Apache Hive project.
From class TestSessionHiveMetastoreClientDropPartitionsTempTable, method createPartition (with location and parameters):
/**
 * Creates a partition with an explicit location, columns and parameters,
 * then re-reads it so the returned object reflects what the metastore
 * actually stored.
 */
@Override
protected Partition createPartition(String tableName, String location, List<String> values, List<FieldSchema> partCols, Map<String, String> partParams) throws Exception {
  new PartitionBuilder()
      .setDbName(DB_NAME)
      .setTableName(tableName)
      .setValues(values)
      .setCols(partCols)
      .setLocation(location)
      .setPartParams(partParams)
      .addToTable(getClient(), conf);
  return getClient().getPartition(DB_NAME, tableName, values);
}
Use of org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder in the Apache Hive project.
From class TestSessionHiveMetastoreClientDropPartitionsTempTable, method createPartition (values and columns only):
/**
 * Creates a partition on the fixture table from the given values and
 * partition columns, then re-reads it from the metastore so the returned
 * object reflects the stored state.
 */
@Override
protected Partition createPartition(List<String> values, List<FieldSchema> partCols) throws Exception {
  new PartitionBuilder()
      .setDbName(DB_NAME)
      .setTableName(TABLE_NAME)
      .setValues(values)
      .setCols(partCols)
      .addToTable(getClient(), conf);
  return getClient().getPartition(DB_NAME, TABLE_NAME, values);
}
Aggregations