Use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project hive by apache.
In class ObjectStore, the method createResourcePlan:
@Override
public void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName, int defaultPoolSize)
    throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException {
  boolean commited = false;
  String rpName = normalizeIdentifier(resourcePlan.getName());
  if (rpName.isEmpty()) {
    throw new InvalidObjectException("Resource name cannot be empty.");
  }
  MWMResourcePlan rp = null;
  if (copyFromName == null) {
    Integer queryParallelism = null;
    if (resourcePlan.isSetQueryParallelism()) {
      queryParallelism = resourcePlan.getQueryParallelism();
      if (queryParallelism <= 0) {
        throw new InvalidObjectException("Query parallelism should be positive.");
      }
    }
    rp = new MWMResourcePlan(rpName, queryParallelism, Status.DISABLED);
  } else {
    rp = new MWMResourcePlan(rpName, null, Status.DISABLED);
  }
  try {
    openTransaction();
    pm.makePersistent(rp);
    if (copyFromName != null) {
      MWMResourcePlan copyFrom = getMWMResourcePlan(copyFromName, false);
      if (copyFrom == null) {
        throw new NoSuchObjectException(copyFromName);
      }
      copyRpContents(rp, copyFrom);
    } else {
      // TODO: ideally, this should be moved outside to HiveMetaStore to be shared between
      //       all the RawStore-s. Right now there's no method to create a pool.
      if (defaultPoolSize > 0) {
        MWMPool defaultPool = new MWMPool(rp, "default", 1.0, defaultPoolSize, null);
        pm.makePersistent(defaultPool);
        rp.setPools(Sets.newHashSet(defaultPool));
        rp.setDefaultPool(defaultPool);
      }
    }
    commited = commitTransaction();
  } catch (InvalidOperationException e) {
    throw new RuntimeException(e);
  } catch (Exception e) {
    checkForConstraintException(e, "Resource plan already exists: ");
    throw e;
  } finally {
    if (!commited) {
      rollbackTransaction();
    }
  }
}
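
A minimal caller sketch of the validation above, assuming an already-initialized RawStore implementation such as ObjectStore; the helper name, plan name, and parallelism value are illustrative, not from the Hive source:

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
import org.apache.hadoop.hive.metastore.api.WMResourcePlan;

// Hypothetical helper; "store" must already be configured against a metastore backend.
static void createSamplePlan(RawStore store) throws Exception {
  WMResourcePlan plan = new WMResourcePlan();
  plan.setName("sample_plan");     // an empty name is rejected with InvalidObjectException
  plan.setQueryParallelism(4);     // a non-positive value is rejected with InvalidObjectException
  try {
    store.createResourcePlan(plan, null, 4);
  } catch (InvalidObjectException e) {
    System.err.println("Invalid resource plan: " + e.getMessage());
  }
}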
Use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project metacat by Netflix.
In class CatalogThriftHiveMetastore, the method requestWrapper:
private <R> R requestWrapper(final String methodName, final Object[] args, final ThriftSupplier<R> supplier)
        throws TException {
    final long start = registry.clock().wallTime();
    registry.counter(registry.createId(Metrics.CounterThrift.getMetricName() + "." + methodName)).increment();
    try {
        log.info("+++ Thrift({}): Calling {}({})", catalogName, methodName, args);
        return supplier.get();
    } catch (MetacatAlreadyExistsException e) {
        log.error(e.getMessage(), e);
        throw new AlreadyExistsException(e.getMessage());
    } catch (MetacatNotFoundException e) {
        log.error(e.getMessage(), e);
        throw new NoSuchObjectException(e.getMessage());
    } catch (MetacatPreconditionFailedException e) {
        log.error(e.getMessage(), e);
        throw new InvalidObjectException(e.getMessage());
    } catch (TException e) {
        log.error(e.getMessage(), e);
        throw e;
    } catch (Exception e) {
        registry.counter(registry.createId(Metrics.CounterThrift.getMetricName() + "." + methodName)
                .withTags(Metrics.tagStatusFailureMap)).increment();
        final String message = String.format("%s -- %s failed", e.getMessage(), methodName);
        log.error(message, e);
        final MetaException me = new MetaException(message);
        me.initCause(e);
        throw me;
    } finally {
        final long duration = registry.clock().wallTime() - start;
        this.registry.timer(Metrics.TimerThriftRequest.getMetricName() + "." + methodName)
                .record(duration, TimeUnit.MILLISECONDS);
        log.info("+++ Thrift({}): Time taken to complete {} is {} ms", catalogName, methodName, duration);
    }
}
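
The Thrift endpoints in this class delegate to requestWrapper, passing the method body as a lambda, so the catch chain above is what turns Metacat's domain exceptions into the Thrift exceptions a Hive client expects. The endpoint below is a hypothetical sketch (the body and the loadDatabase helper are illustrative, not from the Metacat source), assuming ThriftSupplier<R> is a functional interface whose get() may throw TException:

@Override
public Database get_database(final String name) throws TException {
    return requestWrapper("get_database", new Object[]{name}, () -> {
        // A MetacatNotFoundException thrown here reaches the client as NoSuchObjectException;
        // a MetacatPreconditionFailedException reaches it as InvalidObjectException.
        return loadDatabase(name); // hypothetical helper
    });
}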
Use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project metacat by Netflix.
In class HiveConnectorPartitionService, the method getPartitions:
private List<Partition> getPartitions(final QualifiedName tableName, @Nullable final String filter,
        @Nullable final List<String> partitionIds, @Nullable final Sort sort, @Nullable final Pageable pageable) {
    final String databasename = tableName.getDatabaseName();
    final String tablename = tableName.getTableName();
    try {
        final Table table = metacatHiveClient.getTableByName(databasename, tablename);
        List<Partition> partitionList = null;
        if (!Strings.isNullOrEmpty(filter)) {
            partitionList = metacatHiveClient.listPartitionsByFilter(databasename, tablename, filter);
        } else {
            if (partitionIds != null) {
                partitionList = metacatHiveClient.getPartitions(databasename, tablename, partitionIds);
            }
            if (partitionList == null || partitionList.isEmpty()) {
                partitionList = metacatHiveClient.getPartitions(databasename, tablename, null);
            }
        }
        final List<Partition> filteredPartitionList = Lists.newArrayList();
        partitionList.forEach(partition -> {
            final String partitionName = getNameOfPartition(table, partition);
            if (partitionIds == null || partitionIds.contains(partitionName)) {
                filteredPartitionList.add(partition);
            }
        });
        if (sort != null) {
            if (sort.getOrder() == SortOrder.DESC) {
                filteredPartitionList.sort(Collections.reverseOrder());
            } else {
                Collections.sort(filteredPartitionList);
            }
        }
        return ConnectorUtils.paginate(filteredPartitionList, pageable);
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed get partitions for hive table %s", tableName), e);
    }
}
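
The partitionIds used for filtering are full Hive partition names of the form key1=val1/key2=val2, which is what getNameOfPartition produces for each partition. A hedged illustration of the inputs for a table partitioned by (dateint, hour); the keys, values, and filter expression are made up, and the quoting in the filter depends on the partition column type:

// Illustrative inputs only.
final List<String> partitionIds = Lists.newArrayList(
    "dateint=20240101/hour=00",
    "dateint=20240101/hour=01");
// Or, instead of explicit ids, a partition-filter expression:
final String filter = "dateint = \"20240101\"";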
Use of org.apache.hadoop.hive.metastore.api.InvalidObjectException in project metacat by Netflix.
In class HiveConnectorPartitionService, the method addUpdateDropPartitions:
protected void addUpdateDropPartitions(final QualifiedName tableQName, final Table table,
        final List<String> partitionNames, final List<PartitionInfo> addedPartitionInfos,
        final List<PartitionHolder> existingPartitionInfos, final Set<String> deletePartitionNames) {
    final String databaseName = table.getDbName();
    final String tableName = table.getTableName();
    final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableQName, table);
    try {
        final List<Partition> existingPartitions = existingPartitionInfos.stream()
            .map(p -> hiveMetacatConverters.fromPartitionInfo(tableInfo, p.getPartitionInfo()))
            .collect(Collectors.toList());
        final List<Partition> addedPartitions = addedPartitionInfos.stream()
            .map(p -> hiveMetacatConverters.fromPartitionInfo(tableInfo, p))
            .collect(Collectors.toList());
        // If alterIfExists=true, then alter partitions if they already exist.
        if (!existingPartitionInfos.isEmpty()) {
            copyTableSdToPartitionSd(existingPartitions, table);
            metacatHiveClient.alterPartitions(databaseName, tableName, existingPartitions);
        }
        // Copy the storage details from the table if the partition does not contain them.
        copyTableSdToPartitionSd(addedPartitions, table);
        // Drop partitions with ids in 'deletePartitionNames' and add the 'addedPartitionInfos' partitions.
        metacatHiveClient.addDropPartitions(databaseName, tableName, addedPartitions,
            Lists.newArrayList(deletePartitionNames));
    } catch (NoSuchObjectException exception) {
        if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
            throw new PartitionNotFoundException(tableQName, "", exception);
        } else {
            throw new TableNotFoundException(tableQName, exception);
        }
    } catch (MetaException | InvalidObjectException exception) {
        throw new InvalidMetaException("One or more partitions are invalid.", exception);
    } catch (AlreadyExistsException e) {
        throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed savePartitions hive table %s", tableName), exception);
    }
}
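
The copyTableSdToPartitionSd calls exist because an incoming partition may omit its storage details; in that case the partition inherits the table's StorageDescriptor while keeping its own location. A minimal sketch of that idea for a single partition, assuming hypothetical helper and condition names; it is not the Metacat implementation:

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

// Hypothetical sketch: give a partition the table's storage descriptor when it has none,
// preserving whatever location the partition already carries.
static void inheritTableSd(final Partition partition, final Table table) {
    if (partition.getSd() == null || partition.getSd().getSerdeInfo() == null) {
        final String location = partition.getSd() == null ? null : partition.getSd().getLocation();
        final StorageDescriptor sd = new StorageDescriptor(table.getSd()); // Thrift deep-copy constructor
        sd.setLocation(location);
        partition.setSd(sd);
    }
}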