Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.
The class RequestWrapper, method processRequest.
/**
 * Request wrapper to process a request.
 *
 * @param name qualified name of the resource
 * @param resourceRequestName request name
 * @param requestTags tags that need to be added to the registry
 * @param supplier supplier that executes the request
 * @param <R> response type
 * @return response of the supplier
 */
public <R> R processRequest(final QualifiedName name, final String resourceRequestName, final Map<String, String> requestTags, final Supplier<R> supplier) {
    final long start = registry.clock().wallTime();
    final Map<String, String> tags = new HashMap<>(name.parts());
    if (requestTags != null) {
        tags.putAll(requestTags);
    }
    tags.put("request", resourceRequestName);
    tags.put("scheme", MetacatContextManager.getContext().getScheme());
    registry.counter(requestCounterId.withTags(tags)).increment();
    try {
        // Check the rate limit inside the try block so that any exception the rate limiter
        // throws is tracked correctly by the existing finally block that logs metrics.
        checkRequestRateLimit(name, resourceRequestName, tags);
        log.info("### Calling method: {} for {}", resourceRequestName, name);
        return supplier.get();
    } catch (UnsupportedOperationException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        log.error(e.getMessage(), e);
        throw new MetacatNotSupportedException("Catalog does not support the operation. " + e.getMessage());
    } catch (DatabaseAlreadyExistsException | TableAlreadyExistsException | PartitionAlreadyExistsException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        log.error(e.getMessage(), e);
        throw new MetacatAlreadyExistsException(e.getMessage());
    } catch (NotFoundException | MetacatNotFoundException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        log.error(e.getMessage(), e);
        throw new MetacatNotFoundException(String.format("Unable to locate for %s. Details: %s", name, e.getMessage()));
    } catch (InvalidMetaException | IllegalArgumentException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        log.error(e.getMessage(), e);
        throw new MetacatBadRequestException(String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
    } catch (TablePreconditionFailedException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        log.error(e.getMessage(), e);
        throw new MetacatPreconditionFailedException(String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
    } catch (ConnectorException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        final String message = String.format("%s.%s -- %s failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
        log.error(message, e);
        // Translate connection-pool exhaustion anywhere in the causal chain into a too-many-requests error.
        for (Throwable ex : Throwables.getCausalChain(e)) {
            final String exMessage = ex.getMessage();
            // Guard against causes with null messages to avoid a NullPointerException here.
            if (exMessage != null && (exMessage.contains("too many connections") || exMessage.contains("Timeout: Pool empty"))) {
                throw new MetacatTooManyRequestsException(exMessage);
            }
        }
        throw new MetacatException(message, e);
    } catch (UserMetadataServiceException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        final String message = String.format("%s.%s -- %s usermetadata operation failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
        throw new MetacatUserMetadataException(message);
    } catch (Exception e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        final String message = String.format("%s.%s -- %s failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
        log.error(message, e);
        if (e instanceof MetacatException) {
            throw e;
        } else {
            throw new MetacatException(message, e);
        }
    } finally {
        final long duration = registry.clock().wallTime() - start;
        log.info("### Time taken to complete {} for {} is {} ms", resourceRequestName, name, duration);
        tryAddTableTypeTag(tags, name);
        this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    }
}
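For context, resource handlers call processRequest so that every request gets the same metric tags and exception translation. Below is a minimal sketch of such a caller; QualifiedName.ofTable and the TableService facade are assumptions for illustration, not code shown above.

import com.google.common.collect.ImmutableMap;
import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.dto.TableDto;

public class TableResource {
    private final RequestWrapper requestWrapper; // the wrapper shown above
    private final TableService tableService;     // hypothetical service facade

    public TableResource(final RequestWrapper requestWrapper, final TableService tableService) {
        this.requestWrapper = requestWrapper;
        this.tableService = tableService;
    }

    public TableDto getTable(final String catalog, final String database, final String table) {
        // Assumed factory method; builds the name whose parts() become metric tags.
        final QualifiedName name = QualifiedName.ofTable(catalog, database, table);
        // The supplier body runs inside the wrapper's try/catch/finally, so a
        // ConnectorException thrown by the connector surfaces as a MetacatException
        // (or MetacatTooManyRequestsException for connection-pool exhaustion).
        return requestWrapper.processRequest(name, "getTable",
                ImmutableMap.of("operation", "read"),
                () -> tableService.get(name));
    }
}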
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.
The class HiveConnectorPartitionService, method getPartitions.
private List<Partition> getPartitions(final QualifiedName tableName, @Nullable final String filter, @Nullable final List<String> partitionIds, @Nullable final Sort sort, @Nullable final Pageable pageable) {
    final String databasename = tableName.getDatabaseName();
    final String tablename = tableName.getTableName();
    try {
        final Table table = metacatHiveClient.getTableByName(databasename, tablename);
        List<Partition> partitionList = null;
        if (!Strings.isNullOrEmpty(filter)) {
            partitionList = metacatHiveClient.listPartitionsByFilter(databasename, tablename, filter);
        } else {
            if (partitionIds != null) {
                partitionList = metacatHiveClient.getPartitions(databasename, tablename, partitionIds);
            }
            if (partitionList == null || partitionList.isEmpty()) {
                partitionList = metacatHiveClient.getPartitions(databasename, tablename, null);
            }
        }
        // Keep only partitions whose names are in the requested id list, if one was given.
        final List<Partition> filteredPartitionList = Lists.newArrayList();
        partitionList.forEach(partition -> {
            final String partitionName = getNameOfPartition(table, partition);
            if (partitionIds == null || partitionIds.contains(partitionName)) {
                filteredPartitionList.add(partition);
            }
        });
        if (sort != null) {
            if (sort.getOrder() == SortOrder.DESC) {
                filteredPartitionList.sort(Collections.reverseOrder());
            } else {
                Collections.sort(filteredPartitionList);
            }
        }
        return ConnectorUtils.paginate(filteredPartitionList, pageable);
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed get partitions for hive table %s", tableName), e);
    }
}
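ConnectorUtils.paginate itself is not shown above. As a rough guide to what the final step does, here is a hypothetical re-implementation of offset/limit pagination over an in-memory list; the real utility takes a Pageable rather than raw offset and limit values, so treat this purely as a sketch of the behavior.

import java.util.List;

public final class PaginateSketch {
    private PaginateSketch() { }

    // Hypothetical stand-in for ConnectorUtils.paginate: return the sublist
    // [offset, offset + limit), clamped to the bounds of the input list.
    public static <T> List<T> paginate(final List<T> items, final Integer offset, final Integer limit) {
        final int from = Math.min(offset == null ? 0 : Math.max(offset, 0), items.size());
        final int to = limit == null ? items.size() : Math.min(from + limit, items.size());
        return items.subList(from, to);
    }
}

With null offset and limit the sketch returns the list unchanged, which presumably mirrors how the real method handles a null pageable.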
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.
The class HiveConnectorPartitionService, method addUpdateDropPartitions.
protected void addUpdateDropPartitions(final QualifiedName tableQName, final Table table, final List<String> partitionNames, final List<PartitionInfo> addedPartitionInfos, final List<PartitionHolder> existingPartitionInfos, final Set<String> deletePartitionNames) {
    final String databaseName = table.getDbName();
    final String tableName = table.getTableName();
    final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableQName, table);
    try {
        final List<Partition> existingPartitions = existingPartitionInfos.stream().map(p -> hiveMetacatConverters.fromPartitionInfo(tableInfo, p.getPartitionInfo())).collect(Collectors.toList());
        final List<Partition> addedPartitions = addedPartitionInfos.stream().map(p -> hiveMetacatConverters.fromPartitionInfo(tableInfo, p)).collect(Collectors.toList());
        // If alterIfExists=true, alter the partitions that already exist.
        if (!existingPartitionInfos.isEmpty()) {
            copyTableSdToPartitionSd(existingPartitions, table);
            metacatHiveClient.alterPartitions(databaseName, tableName, existingPartitions);
        }
        // Copy the storage details from the table if the partition does not contain the details.
        copyTableSdToPartitionSd(addedPartitions, table);
        // Drop partitions with ids in 'deletePartitionNames' and add the 'addedPartitionInfos' partitions.
        metacatHiveClient.addDropPartitions(databaseName, tableName, addedPartitions, Lists.newArrayList(deletePartitionNames));
    } catch (NoSuchObjectException exception) {
        if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
            throw new PartitionNotFoundException(tableQName, "", exception);
        } else {
            throw new TableNotFoundException(tableQName, exception);
        }
    } catch (MetaException | InvalidObjectException exception) {
        throw new InvalidMetaException("One or more partitions are invalid.", exception);
    } catch (AlreadyExistsException e) {
        throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed savePartitions hive table %s", tableName), exception);
    }
}
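The helper copyTableSdToPartitionSd is called twice above but not shown. A plausible sketch follows, assuming its job is to let partitions inherit the table's storage descriptor where they lack their own; this is an illustration, not the actual metacat implementation.

import java.util.List;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;

// Hypothetical sketch: fill in storage details from the table for
// partitions that do not carry their own.
private void copyTableSdToPartitionSd(final List<Partition> partitions, final Table table) {
    for (final Partition partition : partitions) {
        if (partition.getSd() == null) {
            // No storage descriptor at all: inherit a copy of the table's.
            partition.setSd(new StorageDescriptor(table.getSd()));
        } else {
            final StorageDescriptor sd = partition.getSd();
            if (sd.getSerdeInfo() == null) {
                sd.setSerdeInfo(table.getSd().getSerdeInfo());
            }
            if (sd.getCols() == null || sd.getCols().isEmpty()) {
                sd.setCols(table.getSd().getCols());
            }
        }
    }
}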
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.
The class HiveConnectorPartitionService, method savePartitions.
/**
 * By default (checkIfExists=true and alterIfExists=false), this method adds the provided list of partitions.
 * If a partition already exists, it is dropped first before being added.
 * If checkIfExists=false, the method adds the partitions to the table; if a partition already exists,
 * an AlreadyExistsException is thrown.
 * If alterIfExists=true, the method updates existing partitions and adds non-existent partitions.
 * If a partition in the provided partition list has all the details, then it is used. If the details are missing,
 * then the table details are inherited. This is mostly for the storage information.
 */
@Override
public PartitionsSaveResponse savePartitions(final ConnectorRequestContext requestContext, final QualifiedName tableQName, final PartitionsSaveRequest partitionsSaveRequest) {
    final String databaseName = tableQName.getDatabaseName();
    final String tableName = tableQName.getTableName();
    final Table table;
    try {
        table = metacatHiveClient.getTableByName(databaseName, tableName);
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableQName, exception);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed getting hive table %s", tableQName), e);
    }
    // New partitions
    final List<PartitionInfo> addedPartitionInfos = Lists.newArrayList();
    final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
    final List<String> partitionNames = partitionInfos.stream().map(part -> {
        final String partitionName = part.getName().getPartitionName();
        PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
        return partitionName;
    }).collect(Collectors.toList());
    // New partition names
    final List<String> addedPartitionNames = Lists.newArrayList();
    // Updated partition names
    final List<String> existingPartitionNames = Lists.newArrayList();
    // Existing partitions
    final List<PartitionHolder> existingPartitionHolders = Lists.newArrayList();
    // Existing partition map
    Map<String, PartitionHolder> existingPartitionMap = Collections.emptyMap();
    if (partitionsSaveRequest.getCheckIfExists() || partitionsSaveRequest.getAlterIfExists()) {
        existingPartitionMap = getPartitionsByNames(table, partitionNames);
    }
    for (PartitionInfo partitionInfo : partitionInfos) {
        final String partitionName = partitionInfo.getName().getPartitionName();
        final PartitionHolder existingPartitionHolder = existingPartitionMap.get(partitionName);
        if (existingPartitionHolder == null) {
            addedPartitionNames.add(partitionName);
            addedPartitionInfos.add(partitionInfo);
        } else {
            final String partitionUri = partitionInfo.getSerde() != null ? partitionInfo.getSerde().getUri() : null;
            final String existingPartitionUri = getPartitionUri(existingPartitionHolder);
            if (partitionUri == null || !partitionUri.equals(existingPartitionUri)) {
                existingPartitionNames.add(partitionName);
                // Copy details over from the existing partition where the new partition info is missing them.
                if (partitionInfo.getSerde() == null) {
                    partitionInfo.setSerde(new StorageInfo());
                }
                if (partitionInfo.getAudit() == null) {
                    partitionInfo.setAudit(new AuditInfo());
                }
                if (StringUtils.isBlank(partitionUri)) {
                    partitionInfo.getSerde().setUri(existingPartitionUri);
                }
                // If alterIfExists is set, merge the existing partition's parameters and audit
                // dates into the new info and alter the partition in place ...
                if (partitionsSaveRequest.getAlterIfExists()) {
                    if (existingPartitionHolder.getPartition() != null) {
                        final Partition existingPartition = existingPartitionHolder.getPartition();
                        partitionInfo.getSerde().setParameters(existingPartition.getParameters());
                        partitionInfo.getAudit().setCreatedDate(HiveConnectorInfoConverter.epochSecondsToDate(existingPartition.getCreateTime()));
                        partitionInfo.getAudit().setLastModifiedDate(HiveConnectorInfoConverter.epochSecondsToDate(existingPartition.getLastAccessTime()));
                    } else {
                        final PartitionInfo existingPartitionInfo = existingPartitionHolder.getPartitionInfo();
                        if (existingPartitionInfo.getSerde() != null) {
                            partitionInfo.getSerde().setParameters(existingPartitionInfo.getSerde().getParameters());
                        }
                        if (existingPartitionInfo.getAudit() != null) {
                            partitionInfo.getAudit().setCreatedDate(existingPartitionInfo.getAudit().getCreatedDate());
                            partitionInfo.getAudit().setLastModifiedDate(existingPartitionInfo.getAudit().getLastModifiedDate());
                        }
                    }
                    existingPartitionHolder.setPartitionInfo(partitionInfo);
                    existingPartitionHolders.add(existingPartitionHolder);
                } else {
                    // ... otherwise treat the changed partition as an add (the old one is dropped below).
                    addedPartitionInfos.add(partitionInfo);
                }
            }
        }
    }
    final Set<String> deletePartitionNames = Sets.newHashSet();
    if (!partitionsSaveRequest.getAlterIfExists()) {
        deletePartitionNames.addAll(existingPartitionNames);
    }
    if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
        deletePartitionNames.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
    }
    addUpdateDropPartitions(tableQName, table, partitionNames, addedPartitionInfos, existingPartitionHolders, deletePartitionNames);
    final PartitionsSaveResponse result = new PartitionsSaveResponse();
    result.setAdded(addedPartitionNames);
    result.setUpdated(existingPartitionNames);
    return result;
}
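A caller exercising the alterIfExists path might look like the following sketch. The PartitionsSaveRequest setters are inferred from the getters used above, the response getters are presumed to mirror setAdded/setUpdated, and the surrounding variables (partitionService, requestContext, tableQName, partitionInfos) are assumed to be in scope.

final PartitionsSaveRequest request = new PartitionsSaveRequest();
request.setPartitions(partitionInfos);   // PartitionInfo list to add or update
request.setCheckIfExists(true);          // look up existing partitions by name first
request.setAlterIfExists(true);          // update matches in place instead of drop-and-add
final PartitionsSaveResponse response = partitionService.savePartitions(requestContext, tableQName, request);
// response.getAdded() / response.getUpdated() mirror addedPartitionNames / existingPartitionNames above.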
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.
The class HiveConnectorTableService, method update.
protected void update(final ConnectorRequestContext requestContext, final Table existingTable, final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    try {
        updateTable(requestContext, existingTable, tableInfo);
        metacatHiveClient.alterTable(tableName.getDatabaseName(), tableName.getTableName(), existingTable);
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException exception) {
        throw new InvalidMetaException(tableName, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed update hive table %s", tableName), exception);
    }
}
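Because update translates every Thrift failure into a metacat connector exception, a caller only needs to handle those translated types rather than TException. A minimal illustrative sketch, where the tableService handle and the in-scope arguments are hypothetical:

try {
    tableService.update(requestContext, existingTable, tableInfo);
} catch (TableNotFoundException e) {
    // The table was dropped between read and update; surface a 404 to the client.
} catch (InvalidMetaException e) {
    // The metastore rejected the metadata; surface a 400 to the client.
} catch (ConnectorException e) {
    // Unexpected metastore/Thrift failure; log and surface a 500, or retry.
}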