use of com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException in project metacat by Netflix.
the class HiveConnectorPartitionService method savePartitions.
/**
 * {@inheritDoc}.
 */
@Override
public PartitionsSaveResponse savePartitions(
    @Nonnull @NonNull final ConnectorContext requestContext,
    @Nonnull @NonNull final QualifiedName tableName,
    @Nonnull @NonNull final PartitionsSaveRequest partitionsSaveRequest
) {
    final String databasename = tableName.getDatabaseName();
    final String tablename = tableName.getTableName();
    // New partitions
    final List<Partition> hivePartitions = Lists.newArrayList();
    try {
        final Table table = metacatHiveClient.getTableByName(databasename, tablename);
        final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
        // New partition ids
        final List<String> addedPartitionIds = Lists.newArrayList();
        // Updated partition ids
        final List<String> existingPartitionIds = Lists.newArrayList();
        // Existing partitions
        final List<Partition> existingHivePartitions = Lists.newArrayList();
        // Existing partition map
        Map<String, Partition> existingPartitionMap = Collections.emptyMap();
        if (partitionsSaveRequest.getCheckIfExists()) {
            final List<String> partitionNames = partitionInfos.stream().map(partition -> {
                final String partitionName = partition.getName().getPartitionName();
                PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
                return partitionName;
            }).collect(Collectors.toList());
            existingPartitionMap = getPartitionsByNames(table, partitionNames);
        }
        final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableName, table);
        for (PartitionInfo partitionInfo : partitionInfos) {
            final String partitionName = partitionInfo.getName().getPartitionName();
            final Partition hivePartition = existingPartitionMap.get(partitionName);
            if (hivePartition == null) {
                addedPartitionIds.add(partitionName);
                hivePartitions.add(hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo));
            } else {
                // Update the existing partition in place only when alterIfExists is set.
                if (partitionsSaveRequest.getAlterIfExists()) {
                    final Partition existingPartition = hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo);
                    existingPartitionIds.add(partitionName);
                    existingPartition.setParameters(hivePartition.getParameters());
                    existingPartition.setCreateTime(hivePartition.getCreateTime());
                    existingPartition.setLastAccessTime(hivePartition.getLastAccessTime());
                    existingHivePartitions.add(existingPartition);
                }
            }
        }
        final Set<String> deletePartitionIds = Sets.newHashSet();
        if (!partitionsSaveRequest.getAlterIfExists()) {
            deletePartitionIds.addAll(existingPartitionIds);
        }
        if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
            deletePartitionIds.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
        }
        if (partitionsSaveRequest.getAlterIfExists() && !existingHivePartitions.isEmpty()) {
            copyTableSdToPartitionSd(existingHivePartitions, table);
            metacatHiveClient.alterPartitions(databasename, tablename, existingHivePartitions);
        }
        copyTableSdToPartitionSd(hivePartitions, table);
        metacatHiveClient.addDropPartitions(databasename, tablename, hivePartitions, Lists.newArrayList(deletePartitionIds));
        final PartitionsSaveResponse result = new PartitionsSaveResponse();
        result.setAdded(addedPartitionIds);
        result.setUpdated(existingPartitionIds);
        return result;
    } catch (NoSuchObjectException exception) {
        if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
            throw new PartitionNotFoundException(tableName, "", exception);
        } else {
            throw new TableNotFoundException(tableName, exception);
        }
    } catch (MetaException | InvalidObjectException exception) {
        throw new InvalidMetaException("One or more partitions are invalid.", exception);
    } catch (AlreadyExistsException e) {
        final List<String> ids = getFakePartitionName(hivePartitions);
        throw new PartitionAlreadyExistsException(tableName, ids, e);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed savePartitions hive table %s", tableName), exception);
    }
}
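Seen from the caller's side, the method above turns Hive's AlreadyExistsException into Metacat's PartitionAlreadyExistsException. Below is a minimal, hypothetical caller sketch, not part of the metacat codebase, that retries a failed save with alterIfExists enabled. It assumes a setAlterIfExists setter on PartitionsSaveRequest (only the getter appears in the snippet above) and that the import paths match the metacat version shown here.

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse;
import com.netflix.metacat.connector.hive.HiveConnectorPartitionService;

public final class SavePartitionsRetryExample {
    private SavePartitionsRetryExample() {
    }

    /**
     * Hypothetical helper: attempt a plain save and, on a duplicate-partition
     * failure, retry once with alterIfExists so the existing partitions are
     * altered instead of re-added.
     */
    public static PartitionsSaveResponse saveOrAlter(
        final HiveConnectorPartitionService service,
        final ConnectorContext context,
        final QualifiedName table,
        final PartitionsSaveRequest request
    ) {
        try {
            return service.savePartitions(context, table, request);
        } catch (PartitionAlreadyExistsException e) {
            // The exception message lists the ids produced by getFakePartitionName.
            request.setAlterIfExists(true); // assumed setter, matching the getter used above
            return service.savePartitions(context, table, request);
        }
    }
}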
use of com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException in project metacat by Netflix.
the class RequestWrapper method processRequest.
/**
 * Request wrapper to process a request.
 *
 * @param name                qualified name of the resource
 * @param resourceRequestName request name
 * @param supplier            supplier
 * @param <R>                 response type
 * @return response of the supplier
 */
public <R> R processRequest(final QualifiedName name, final String resourceRequestName, final Supplier<R> supplier) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<String, String>(name.parts());
    tags.put("request", resourceRequestName);
    registry.counter(requestCounterId.withTags(tags)).increment();
    try {
        log.info("### Calling method: {} for {}", resourceRequestName, name);
        return supplier.get();
    } catch (UnsupportedOperationException e) {
        log.error(e.getMessage(), e);
        throw new MetacatNotSupportedException("Catalog does not support the operation");
    } catch (DatabaseAlreadyExistsException | TableAlreadyExistsException | PartitionAlreadyExistsException e) {
        log.error(e.getMessage(), e);
        throw new MetacatAlreadyExistsException(e.getMessage());
    } catch (NotFoundException | MetacatNotFoundException e) {
        log.error(e.getMessage(), e);
        throw new MetacatNotFoundException(String.format("Unable to locate for %s. Details: %s", name, e.getMessage()));
    } catch (InvalidMetaException | IllegalArgumentException e) {
        log.error(e.getMessage(), e);
        throw new MetacatBadRequestException(String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
    } catch (ConnectorException e) {
        final String message = String.format("%s.%s -- %s failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
        log.error(message, e);
        registry.counter(requestFaillureCounterId.withTags(tags)).increment();
        throw new MetacatException(message, Response.Status.INTERNAL_SERVER_ERROR, e);
    } catch (UserMetadataServiceException e) {
        final String message = String.format("%s.%s -- %s usermetadata operation failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
        throw new MetacatUserMetadataException(message);
    } catch (Exception e) {
        registry.counter(requestFaillureCounterId.withTags(tags)).increment();
        final String message = String.format("%s.%s -- %s failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
        log.error(message, e);
        throw new MetacatException(message, Response.Status.INTERNAL_SERVER_ERROR, e);
    } finally {
        final long duration = registry.clock().monotonicTime() - start;
        log.info("### Time taken to complete {} is {} ms", resourceRequestName, duration);
        this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    }
}
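The catch ordering above is what maps connector failures to API-level responses: PartitionAlreadyExistsException, for example, is logged and rethrown as MetacatAlreadyExistsException, while the finally block still records the request timer. Below is a short, hypothetical call site showing the wrapper in use; the collaborator types are taken from the other snippets on this page, and the catalog, database, and table names are illustrative only.

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.ConnectorContext;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveRequest;
import com.netflix.metacat.common.server.connectors.model.PartitionsSaveResponse;
import com.netflix.metacat.connector.hive.HiveConnectorPartitionService;

public final class ProcessRequestExample {
    private ProcessRequestExample() {
    }

    public static PartitionsSaveResponse saveThroughWrapper(
        final RequestWrapper requestWrapper,
        final HiveConnectorPartitionService partitionService,
        final ConnectorContext context,
        final PartitionsSaveRequest request
    ) {
        final QualifiedName name = QualifiedName.ofTable("prodhive", "mydb", "mytable");
        // The supplier runs the connector call; a PartitionAlreadyExistsException
        // thrown inside it surfaces to the API client as MetacatAlreadyExistsException.
        return requestWrapper.processRequest(
            name,
            "savePartitions",
            () -> partitionService.savePartitions(context, name, request));
    }
}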
use of com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException in project metacat by Netflix.
the class S3ConnectorPartitionService method create.
@Override
public void create(@Nonnull final ConnectorContext context, @Nonnull final PartitionInfo partitionInfo) {
    final QualifiedName name = partitionInfo.getName();
    log.debug("Start: Create partition {}", name);
    final QualifiedName tableName = QualifiedName.ofTable(catalogName, name.getDatabaseName(), name.getTableName());
    // Look up the parent table
    final Table table = getTable(tableName);
    final List<Partition> partitions = partitionDao.getPartitions(table.getId(), Lists.newArrayList(name.getPartitionName()), null, null, null, null);
    if (!partitions.isEmpty()) {
        throw new PartitionAlreadyExistsException(tableName, name.getPartitionName());
    }
    partitionDao.save(infoConverter.toPartition(table, partitionInfo));
    log.debug("End: Create partition {}", name);
}
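The S3 connector enforces uniqueness in application code rather than leaving it to the store: it queries for the partition name first and throws PartitionAlreadyExistsException before saving. Below is a minimal, self-contained sketch of that check-then-save guard, with an in-memory set standing in for partitionDao; every name here is hypothetical except the exception and the QualifiedName type.

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException;

import java.util.HashSet;
import java.util.Set;

final class InMemoryPartitionStore {
    private final Set<String> partitionNames = new HashSet<>();

    void create(final QualifiedName tableName, final String partitionName) {
        // Mirror the S3 connector's guard: fail fast if the partition is
        // already registered, using the same exception the connector layer throws.
        if (!partitionNames.add(partitionName)) {
            throw new PartitionAlreadyExistsException(tableName, partitionName);
        }
        // A real connector would persist here (partitionDao.save above).
    }
}

Note that the check and the save are not atomic: under concurrent creates a duplicate can still slip past the lookup, which is why the Hive path shown earlier also translates the metastore's own AlreadyExistsException into PartitionAlreadyExistsException.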