Use of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in project hive by apache.
The class TestListPartitions, method testListPartitionSpecsByFilterEmptyFilter.
@Test
public void testListPartitionSpecsByFilterEmptyFilter() throws Exception {
  List<List<String>> values = createTable4PartColsParts(client).testValues;
  // An empty filter string with max parts -1 should return every partition of the table.
  PartitionSpecProxy pproxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, "", -1);
  assertPartitionsSpecProxy(pproxy, values);
}
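The helper assertPartitionsSpecProxy is not shown on this page. A minimal sketch of such a helper, assuming only the public PartitionSpecProxy iterator API (the helper name and body are illustrative, not the project's actual implementation):
// Hypothetical helper: drains the proxy's iterator into a list of partition
// value tuples and compares it against the expected values.
private static void assertPartitionsSpecProxy(PartitionSpecProxy proxy,
    List<List<String>> expectedValues) throws Exception {
  List<List<String>> actualValues = new ArrayList<>();
  PartitionSpecProxy.PartitionIterator it = proxy.getPartitionIterator();
  while (it.hasNext()) {
    actualValues.add(it.next().getValues());
  }
  Assert.assertEquals(expectedValues, actualValues);
}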
Use of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in project hive by apache.
The class HiveMetaStoreClient, method alter_table.
@Override
public void alter_table(String catName, String dbName, String tbl_name, Table new_tbl,
    EnvironmentContext envContext, String validWriteIds)
    throws InvalidOperationException, MetaException, TException {
  // Give the table's metahook a chance to veto or prepare for the change.
  HiveMetaHook hook = getHook(new_tbl);
  if (hook != null) {
    hook.preAlterTable(new_tbl, envContext);
  }
  AlterTableRequest req = new AlterTableRequest(dbName, tbl_name, new_tbl);
  req.setCatName(catName);
  req.setValidWriteIdList(validWriteIds);
  req.setEnvironmentContext(envContext);
  if (processorCapabilities != null) {
    req.setProcessorCapabilities(new ArrayList<String>(Arrays.asList(processorCapabilities)));
    req.setProcessorIdentifier(processorIdentifier);
  }
  boolean success = false;
  try {
    client.alter_table_req(req);
    if (hook != null) {
      // Hand the hook a view over all partitions of the altered table.
      PartitionSpecProxy partitionSpecProxy =
          listPartitionSpecs(catName, dbName, tbl_name, Integer.MAX_VALUE);
      hook.commitAlterTable(new_tbl, envContext, partitionSpecProxy);
    }
    success = true;
  } finally {
    if (!success && (hook != null)) {
      hook.rollbackAlterTable(new_tbl, envContext);
    }
  }
}
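On the hook side, the proxy lets an implementation iterate all partitions of the altered table without materializing them as one list. A minimal sketch of a consumer, assuming the commitAlterTable signature invoked above (the method name and logging are illustrative):
// Illustrative consumer of the PartitionSpecProxy handed to commitAlterTable:
// streams over every partition of the altered table via the iterator.
void inspectAlteredPartitions(PartitionSpecProxy partitionSpecProxy) throws MetaException {
  PartitionSpecProxy.PartitionIterator it = partitionSpecProxy.getPartitionIterator();
  while (it.hasNext()) {
    Partition part = it.next();
    System.out.println("values=" + part.getValues()
        + " location=" + part.getSd().getLocation());
  }
}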
Use of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in project hive by apache.
The class ObjectStore, method addPartitions.
@Override
public boolean addPartitions(String catName, String dbName, String tblName,
    PartitionSpecProxy partitionSpec, boolean ifNotExists)
    throws InvalidObjectException, MetaException {
  boolean success = false;
  openTransaction();
  try {
    List<MTablePrivilege> tabGrants = null;
    List<MTableColumnPrivilege> tabColumnGrants = null;
    MTable table = this.getMTable(catName, dbName, tblName);
    if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
      // Table-level grants are propagated to every new partition below.
      tabGrants = this.listAllTableGrants(catName, dbName, tblName);
      tabColumnGrants = this.listTableAllColumnGrants(catName, dbName, tblName);
    }
    if (!partitionSpec.getTableName().equals(tblName) || !partitionSpec.getDbName().equals(dbName)) {
      throw new MetaException("Partition does not belong to target table "
          + dbName + "." + tblName + ": " + partitionSpec);
    }
    PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
    int now = (int) (System.currentTimeMillis() / 1000);
    List<FieldSchema> partitionKeys = convertToFieldSchemas(table.getPartitionKeys());
    while (iterator.hasNext()) {
      Partition part = iterator.next();
      if (isValidPartition(part, partitionKeys, ifNotExists)) {
        MPartition mpart = convertToMPart(part, table, true);
        pm.makePersistent(mpart);
        if (tabGrants != null) {
          for (MTablePrivilege tab : tabGrants) {
            pm.makePersistent(new MPartitionPrivilege(tab.getPrincipalName(),
                tab.getPrincipalType(), mpart, tab.getPrivilege(), now, tab.getGrantor(),
                tab.getGrantorType(), tab.getGrantOption(), tab.getAuthorizer()));
          }
        }
        if (tabColumnGrants != null) {
          for (MTableColumnPrivilege col : tabColumnGrants) {
            pm.makePersistent(new MPartitionColumnPrivilege(col.getPrincipalName(),
                col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(), now,
                col.getGrantor(), col.getGrantorType(), col.getGrantOption(), col.getAuthorizer()));
          }
        }
      }
    }
    success = commitTransaction();
  } finally {
    if (!success) {
      rollbackTransaction();
    }
  }
  return success;
}
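To call this method, a client first wraps its partitions in a PartitionSpecProxy. A minimal sketch of building one from plain Partition objects, assuming the thrift PartitionSpec and PartitionListComposingSpec structs and the PartitionSpecProxy.Factory from the metastore API behave as sketched (the helper itself is illustrative):
// Builds a PartitionSpecProxy over fully-specified Partition objects; the
// db/table names set on the spec must match the target of addPartitions,
// or the method above throws MetaException.
static PartitionSpecProxy toProxy(String dbName, String tableName,
    List<Partition> parts) throws MetaException {
  PartitionSpec spec = new PartitionSpec();
  spec.setDbName(dbName);
  spec.setTableName(tableName);
  spec.setPartitionList(new PartitionListComposingSpec(parts));
  return PartitionSpecProxy.Factory.get(spec);
}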
Use of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in project hive by apache.
The class TestAddPartitionsFromPartSpec, method testAddPartitionSpecWithSharedSDChangeRootPath.
@Test
public void testAddPartitionSpecWithSharedSDChangeRootPath() throws Exception {
  Table table = createTable();
  String rootPath = table.getSd().getLocation() + "/addPartSpecRootPath/";
  String rootPath1 = table.getSd().getLocation() + "/someotherpath/";
  PartitionWithoutSD partition = buildPartitionWithoutSD(Lists.newArrayList("2014"), 0);
  PartitionSpecProxy partitionSpecProxy =
      buildPartitionSpecWithSharedSD(Lists.newArrayList(partition), buildSD(rootPath));
  partitionSpecProxy.setRootLocation(rootPath1);
  client.add_partitions_pspec(partitionSpecProxy);
  Partition resultPart = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2014"));
  Assert.assertEquals(rootPath1 + "partwithoutsd0", resultPart.getSd().getLocation());
}
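The buildPartitionSpecWithSharedSD helper is not shown here either. A sketch of the shared-SD variant it presumably constructs, assuming the PartitionSpecWithSharedSD thrift struct (the helper shape is illustrative): each PartitionWithoutSD carries only its values and a path relative to the shared StorageDescriptor's location, which is why resetting the root location above relocates the resulting partition under rootPath1.
// Sketch: partitions that share a single StorageDescriptor. Each
// PartitionWithoutSD holds its values plus a relative path; the shared SD's
// location (here later overridden by setRootLocation) supplies the common root.
static PartitionSpecProxy buildSharedSDSpec(String dbName, String tableName,
    List<PartitionWithoutSD> parts, StorageDescriptor sharedSD) throws MetaException {
  PartitionSpec spec = new PartitionSpec();
  spec.setDbName(dbName);
  spec.setTableName(tableName);
  spec.setSharedSDPartitionSpec(new PartitionSpecWithSharedSD(parts, sharedSD));
  return PartitionSpecProxy.Factory.get(spec);
}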
Use of org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy in project hive by apache.
The class TestAddPartitionsFromPartSpec, method testAddPartitionSpecPartAlreadyExists.
@Test
public void testAddPartitionSpecPartAlreadyExists() throws Exception {
  createTable();
  String tableLocation = metaStore.getWarehouseRoot() + "/" + TABLE_NAME;
  Partition partition = buildPartition(DB_NAME, TABLE_NAME, "2016", tableLocation + "/year=2016a");
  client.add_partition(partition);
  List<Partition> partitions = buildPartitions(DB_NAME, TABLE_NAME,
      Lists.newArrayList("2014", "2015", "2016", "2017", "2018"));
  PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, partitions);
  try {
    client.add_partitions_pspec(partitionSpecProxy);
    Assert.fail("AlreadyExistsException should have happened.");
  } catch (AlreadyExistsException e) {
    // Expected exception
  }
  // The call must fail atomically: only the pre-existing partition remains,
  // and no directories were created for any partition in the rejected spec.
  List<Partition> parts = client.listPartitions(DB_NAME, TABLE_NAME, MAX);
  Assert.assertNotNull(parts);
  Assert.assertEquals(1, parts.size());
  Assert.assertEquals(partition.getValues(), parts.get(0).getValues());
  for (Partition part : partitions) {
    Assert.assertFalse(metaStore.isPathExists(new Path(part.getSd().getLocation())));
  }
}