Example usage of org.apache.hadoop.hive.metastore.api.MetaException in the Apache Hive project, taken from the class TestExchangePartitions, method testExchangePartitionsOneFail.
@Test
public void testExchangePartitionsOneFail() throws Exception {
  // Pre-create one partition in the destination table so that the exchange
  // collides with it and is expected to fail part-way through.
  Partition existingDestPart = buildPartition(destTable, Lists.newArrayList("2017", "march", "22"), null);
  client.add_partition(existingDestPart);
  Map<String, String> specs = getPartitionSpec(Lists.newArrayList("2017", "", ""));
  try {
    client.exchange_partitions(specs, DB_NAME, sourceTable.getTableName(), DB_NAME, destTable.getTableName());
    Assert.fail("Exception should have been thrown as one of the partitions already exists in the dest table.");
  } catch (MetaException e) {
    // Expected exception
  }
  // The partitions that did not conflict must remain in the source table.
  checkRemainingPartitions(sourceTable, destTable, Lists.newArrayList(partitions[0], partitions[2], partitions[3], partitions[4]));
  // The destination table must still contain exactly the pre-created partition.
  List<Partition> destParts = client.listPartitions(destTable.getDbName(), destTable.getTableName(), MAX);
  Assert.assertEquals(1, destParts.size());
  Assert.assertEquals(partitions[1].getValues(), destParts.get(0).getValues());
  Assert.assertTrue(metaStore.isPathExists(new Path(destParts.get(0).getSd().getLocation())));
  // The conflicting partition must still be present (and intact) in the source table.
  Partition survivingSourcePart = client.getPartition(sourceTable.getDbName(), sourceTable.getTableName(), partitions[1].getValues());
  Assert.assertNotNull(survivingSourcePart);
  Assert.assertTrue(metaStore.isPathExists(new Path(partitions[1].getSd().getLocation())));
}
Example usage of org.apache.hadoop.hive.metastore.api.MetaException in the Apache Hive project, taken from the class FileOutputFormatContainer, method checkOutputSpecs.
/**
 * Validates the job's output specification by checking for duplicate publishes
 * against the Hive metastore, then delegates to the wrapped output format
 * (unless dynamic partitioning is in use).
 *
 * @param context the job context whose configuration is consulted
 * @throws IOException if metastore interaction fails or the base format rejects the spec
 * @throws InterruptedException propagated from the base output format check
 */
@Override
public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
  OutputJobInfo jobInfo = HCatOutputFormat.getJobInfo(context.getConfiguration());
  IMetaStoreClient client = null;
  try {
    HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
    client = HCatUtil.getHiveMetastoreClient(hiveConf);
    handleDuplicatePublish(context, jobInfo, client, new Table(jobInfo.getTableInfo().getTable()));
  } catch (TException e) {
    // MetaException is a Thrift-generated subclass of TException, so this single
    // catch covers both of the original (identical) catch clauses.
    throw new IOException(e);
  } finally {
    // Always release the metastore client, even on failure.
    HCatUtil.closeHiveClientQuietly(client);
  }
  if (!jobInfo.isDynamicPartitioningUsed()) {
    JobConf jobConf = new JobConf(context.getConfiguration());
    getBaseOutputFormat().checkOutputSpecs(null, jobConf);
    // checkOutputSpecs might've set some properties; we need to have context reflect that.
    HCatUtil.copyConf(jobConf, context.getConfiguration());
  }
}
Example usage of org.apache.hadoop.hive.metastore.api.MetaException in the Apache Hive project, taken from the class ObjectStore, method grantRole.
/**
 * Grants the given role to a principal (user, group, or another role) inside a
 * single metastore transaction.
 *
 * @param role the role being granted
 * @param userName name of the grantee principal
 * @param principalType type of the grantee (USER/GROUP/ROLE)
 * @param grantor name of the granting principal
 * @param grantorType type of the grantor
 * @param grantOption whether the grantee may re-grant the role
 * @return true if the grant was recorded
 * @throws InvalidObjectException if the principal already holds the role
 * @throws MetaException on metastore errors
 * @throws NoSuchObjectException if a referenced object does not exist
 */
@Override
public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor, PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException, InvalidObjectException {
  boolean success = false;
  boolean committed = false;
  try {
    openTransaction();
    MRoleMap roleMap = null;
    try {
      roleMap = this.getMSecurityUserRoleMap(userName, principalType, role.getRoleName());
    } catch (Exception ignored) {
      // Best-effort lookup: a failure here is treated as "no existing mapping",
      // which is exactly the state that allows the grant to proceed.
    }
    if (roleMap != null) {
      throw new InvalidObjectException("Principal " + userName + " already has the role " + role.getRoleName());
    }
    if (principalType == PrincipalType.ROLE) {
      // Granting a role to a role: make sure the grantee role actually exists.
      validateRole(userName);
    }
    MRole mRole = getMRole(role.getRoleName());
    // Grant time is stored as epoch seconds.
    long now = System.currentTimeMillis() / 1000;
    MRoleMap roleMember = new MRoleMap(userName, principalType.toString(), mRole, (int) now, grantor, grantorType.toString(), grantOption);
    pm.makePersistent(roleMember);
    committed = commitTransaction();
    // NOTE(review): success is set true even if commitTransaction() returned false
    // (the finally block rolls back, yet the method still returns true) — confirm
    // whether callers rely on this, or whether it should be `success = committed`.
    success = true;
  } finally {
    if (!committed) {
      rollbackTransaction();
    }
  }
  return success;
}
Example usage of org.apache.hadoop.hive.metastore.api.MetaException in the Apache Hive project, taken from the class ObjectStore, method generateConstraintName.
/**
 * Generates a (probably) unique constraint name derived from the given
 * parameters, retrying a bounded number of times on collision.
 *
 * @param parameters the pieces identifying the constraint (last one seeds the prefix)
 * @return a constraint name not already present in the metastore
 * @throws MetaException if no unused name is found within the retry budget
 */
private String generateConstraintName(String... parameters) throws MetaException {
  // Mask to a non-negative value so the name never contains a minus sign.
  final int hash = ArrayUtils.toString(parameters).hashCode() & 0xfffffff;
  final int maxRetries = 10;
  // Prefix is invariant across attempts, so compute it once.
  final String prefix = parameters.length == 0 ? "constraint_" : parameters[parameters.length - 1];
  for (int attempt = 0; attempt < maxRetries; attempt++) {
    String candidate = prefix + "_" + hash + "_" + System.currentTimeMillis() + "_" + attempt;
    if (!constraintNameAlreadyExists(candidate)) {
      return candidate;
    }
  }
  throw new MetaException("Error while trying to generate the constraint name for " + ArrayUtils.toString(parameters));
}
Example usage of org.apache.hadoop.hive.metastore.api.MetaException in the Apache Hive project, taken from the class ObjectStore, method addPartitions.
/**
 * Persists a batch of partitions for the given table in one transaction,
 * propagating table-level privileges to each new partition when the table
 * has partition-level privileges enabled.
 *
 * @param dbName database of the target table
 * @param tblName name of the target table
 * @param parts partitions to add; each must reference dbName/tblName
 * @return true if the transaction committed
 * @throws InvalidObjectException on conversion of an invalid partition
 * @throws MetaException if a partition references a different table
 */
@Override
public boolean addPartitions(String dbName, String tblName, List<Partition> parts) throws InvalidObjectException, MetaException {
boolean success = false;
openTransaction();
try {
List<MTablePrivilege> tabGrants = null;
List<MTableColumnPrivilege> tabColumnGrants = null;
// NOTE(review): getMTable may return null for an unknown table, which would
// NPE on the getParameters() call below — confirm callers pre-validate the table.
MTable table = this.getMTable(dbName, tblName);
// Only replicate grants onto partitions when the table opts in via this parameter.
if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
tabGrants = this.listAllTableGrants(dbName, tblName);
tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName);
}
// Accumulate partitions plus derived privilege objects, then persist in one shot.
List<Object> toPersist = new ArrayList<>();
for (Partition part : parts) {
// Reject partitions that point at a different table than the one requested.
if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
throw new MetaException("Partition does not belong to target table " + dbName + "." + tblName + ": " + part);
}
MPartition mpart = convertToMPart(part, true);
toPersist.add(mpart);
// Grant time in epoch seconds, taken per-partition.
int now = (int) (System.currentTimeMillis() / 1000);
if (tabGrants != null) {
// Copy each table-level privilege down to this partition.
for (MTablePrivilege tab : tabGrants) {
toPersist.add(new MPartitionPrivilege(tab.getPrincipalName(), tab.getPrincipalType(), mpart, tab.getPrivilege(), now, tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption()));
}
}
if (tabColumnGrants != null) {
// Likewise for column-level privileges.
for (MTableColumnPrivilege col : tabColumnGrants) {
toPersist.add(new MPartitionColumnPrivilege(col.getPrincipalName(), col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(), now, col.getGrantor(), col.getGrantorType(), col.getGrantOption()));
}
}
}
if (CollectionUtils.isNotEmpty(toPersist)) {
pm.makePersistentAll(toPersist);
pm.flush();
}
success = commitTransaction();
} finally {
// Roll back if we did not reach a successful commit (including on exceptions).
if (!success) {
rollbackTransaction();
}
}
return success;
}
Aggregations