Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache: class TestMutations, method testTransactionBatchEmptyCommitPartitioned.
@Test
public void testTransactionBatchEmptyCommitPartitioned() throws Exception {
// Build a partitioned table that already contains the ASIA_INDIA partition.
Table sinkTable = partitionedTableBuilder.addPartition(ASIA_INDIA).create(metaStoreClient);
MutatorClient mutatorClient = new MutatorClientBuilder()
    .addSinkTable(sinkTable.getDbName(), sinkTable.getTableName(), true)
    .metaStoreUri(metaStoreUri)
    .build();
mutatorClient.connect();
// An empty transaction (begin immediately followed by commit, no mutations)
// must still transition cleanly to the COMMITTED state.
Transaction txn = mutatorClient.newTransaction();
txn.begin();
txn.commit();
assertThat(txn.getState(), is(COMMITTED));
mutatorClient.close();
}
Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache: class TestMutations, method testTransactionBatchEmptyAbortPartitioned.
@Test
public void testTransactionBatchEmptyAbortPartitioned() throws Exception {
// Build a partitioned table that already contains the ASIA_INDIA partition.
Table sinkTable = partitionedTableBuilder.addPartition(ASIA_INDIA).create(metaStoreClient);
MutatorClient mutatorClient = new MutatorClientBuilder()
    .addSinkTable(sinkTable.getDbName(), sinkTable.getTableName(), true)
    .metaStoreUri(metaStoreUri)
    .build();
mutatorClient.connect();
Transaction txn = mutatorClient.newTransaction();
List<AcidTable> destinations = mutatorClient.getTables();
txn.begin();
// Open a coordinator against the sink table but write nothing before closing it.
MutatorFactory mutatorFactory =
    new ReflectiveMutatorFactory(conf, MutableRecord.class, RECORD_ID_COLUMN, BUCKET_COLUMN_INDEXES);
MutatorCoordinator coordinator = new MutatorCoordinatorBuilder()
    .metaStoreUri(metaStoreUri)
    .table(destinations.get(0))
    .mutatorFactory(mutatorFactory)
    .build();
coordinator.close();
// Aborting an empty transaction must transition it to the ABORTED state.
txn.abort();
assertThat(txn.getState(), is(ABORTED));
mutatorClient.close();
}
Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache: class TestAcidTableSerializer, method testSerializeDeserialize.
@Test
public void testSerializeDeserialize() throws Exception {
// Round-trip an AcidTable through encode/decode and verify every field survives.
Database database = StreamingTestUtils.databaseBuilder(new File("/tmp")).name("db_1").build();
Table table = StreamingTestUtils.tableBuilder(database)
    .name("table_1")
    .addColumn("one", "string")
    .addColumn("two", "integer")
    .partitionKeys("partition")
    .addPartition("p1")
    .buckets(10)
    .build();
AcidTable acidTable = new AcidTable("db_1", "table_1", true, TableType.SINK);
acidTable.setTable(table);
acidTable.setTransactionId(42L);
String encoded = AcidTableSerializer.encode(acidTable);
// NOTE: removed a leftover System.out.println(encoded) debug statement;
// tests should not write to stdout.
AcidTable decoded = AcidTableSerializer.decode(encoded);
assertThat(decoded.getDatabaseName(), is("db_1"));
assertThat(decoded.getTableName(), is("table_1"));
assertThat(decoded.createPartitions(), is(true));
assertThat(decoded.getOutputFormatName(), is("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat"));
assertThat(decoded.getTotalBuckets(), is(10));
assertThat(decoded.getQualifiedName(), is("DB_1.TABLE_1"));
assertThat(decoded.getTransactionId(), is(42L));
assertThat(decoded.getTableType(), is(TableType.SINK));
assertThat(decoded.getTable(), is(table));
}
Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache: class HCatClientHMSImpl, method dropPartitions.
/**
 * Drops the partitions of the named table that match the given partition specification.
 * Metastore exceptions are translated into the HCat exception hierarchy.
 */
@Override
public void dropPartitions(String dbName, String tableName, Map<String, String> partitionSpec, boolean ifExists, boolean deleteData) throws HCatException {
LOG.info("HCatClient dropPartitions(db=" + dbName + ",table=" + tableName + ", partitionSpec: [" + partitionSpec + "]).");
try {
    dbName = checkDB(dbName);
    // Fetching the table also validates its existence (NoSuchObjectException otherwise).
    Table table = hmsClient.getTable(dbName, tableName);
    boolean useExpressions = hiveConfig.getBoolVar(HiveConf.ConfVars.METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS);
    if (!useExpressions) {
        // Not using expressions.
        dropPartitionsIteratively(dbName, tableName, partitionSpec, ifExists, deleteData);
    } else {
        try {
            dropPartitionsUsingExpressions(table, partitionSpec, ifExists, deleteData);
        } catch (SemanticException expressionFailure) {
            // Expression push-down could not be built; fall back to per-partition drops.
            LOG.warn("Could not push down partition-specification to back-end, for dropPartitions(). Resorting to iteration.", expressionFailure);
            dropPartitionsIteratively(dbName, tableName, partitionSpec, ifExists, deleteData);
        }
    }
} catch (NoSuchObjectException e) {
    throw new ObjectNotFoundException("NoSuchObjectException while dropping partition. " + "Either db(" + dbName + ") or table(" + tableName + ") missing.", e);
} catch (MetaException e) {
    throw new HCatException("MetaException while dropping partition.", e);
} catch (TException e) {
    throw new ConnectionFailureException("TException while dropping partition.", e);
}
}
Use of org.apache.hadoop.hive.metastore.api.Table in project hive by apache: class TestHeartbeatTimerTask, method createTable.
/** Builds a single-element table list ("DB"."TABLE") used as a test fixture. */
private static List<Table> createTable() {
Table fixture = new Table();
fixture.setDbName("DB");
fixture.setTableName("TABLE");
return Arrays.asList(fixture);
}
Aggregations