use of com.netflix.metacat.common.server.connectors.exception.InvalidMetaException in project metacat by Netflix.
the class HiveConnectorDatabaseService method list.
/**
* {@inheritDoc}.
*/
@Override
public List<DatabaseInfo> list(@Nonnull @NonNull final ConnectorContext requestContext,
                               @Nonnull @NonNull final QualifiedName name,
                               @Nullable final QualifiedName prefix,
                               @Nullable final Sort sort,
                               @Nullable final Pageable pageable) {
    try {
        final List<DatabaseInfo> databaseInfos = Lists.newArrayList();
        for (String databaseName : metacatHiveClient.getAllDatabases()) {
            final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), databaseName);
            // prefix is nullable: guard before matching to avoid a NullPointerException
            if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
                continue;
            }
            databaseInfos.add(DatabaseInfo.builder().name(qualifiedName).build());
        }
        // sorting is supported by database name only
        if (sort != null) {
            ConnectorUtils.sort(databaseInfos, sort, Comparator.comparing(p -> p.getName().getDatabaseName()));
        }
        return ConnectorUtils.paginate(databaseInfos, pageable);
    } catch (MetaException exception) {
        throw new InvalidMetaException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed list hive database %s", name), exception);
    }
}
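A hedged caller sketch for the list method above: the databaseService field, the context argument, and the QualifiedName.ofCatalog factory are assumptions drawn from the surrounding connector wiring; only ofDatabase, DatabaseInfo.getName and the exception types are taken from the listing itself.

// Hypothetical caller: list databases whose qualified name starts with a prefix and
// treat metastore metadata problems (InvalidMetaException) as a client error.
final QualifiedName catalog = QualifiedName.ofCatalog("prodhive");        // assumed factory
final QualifiedName prefix = QualifiedName.ofDatabase("prodhive", "tmp"); // databases starting with "tmp"
try {
    final List<DatabaseInfo> databases = databaseService.list(context, catalog, prefix, null, null);
    databases.forEach(databaseInfo -> System.out.println(databaseInfo.getName()));
} catch (InvalidMetaException e) {
    // the metastore reported a MetaException, i.e. bad or corrupt metadata
    throw new MetacatBadRequestException(e.getMessage());
}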
use of com.netflix.metacat.common.server.connectors.exception.InvalidMetaException in project metacat by Netflix.
the class HiveConnectorPartitionService method getPartitionKeys.
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionKeys(@Nonnull @NonNull final ConnectorContext requestContext,
                                     @Nonnull @NonNull final QualifiedName tableName,
                                     @Nonnull @NonNull final PartitionListRequest partitionsRequest) {
    final String filterExpression = partitionsRequest.getFilter();
    final List<String> partitionIds = partitionsRequest.getPartitionNames();
    List<String> names = Lists.newArrayList();
    final Pageable pageable = partitionsRequest.getPageable();
    try {
        if (filterExpression != null || (partitionIds != null && !partitionIds.isEmpty())) {
            final Table table = metacatHiveClient.getTableByName(tableName.getDatabaseName(), tableName.getTableName());
            for (Partition partition : getPartitions(tableName, filterExpression, partitionIds, partitionsRequest.getSort(), pageable)) {
                names.add(getNameOfPartition(table, partition));
            }
        } else {
            names = metacatHiveClient.getPartitionNames(tableName.getDatabaseName(), tableName.getTableName());
            return ConnectorUtils.paginate(names, pageable);
        }
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed get partitions keys for hive table %s", tableName), e);
    }
    return names;
}
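The names returned above are Hive partition names in key=value form, joined by '/'. A minimal, self-contained sketch of that format using Hive's Warehouse helper; the partition columns are illustrative.

import com.google.common.collect.Lists;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

public final class PartitionNameFormatExample {
    public static void main(final String[] args) throws MetaException {
        // Two illustrative partition columns; a real table defines its own keys.
        final FieldSchema dateint = new FieldSchema("dateint", "int", null);
        final FieldSchema hour = new FieldSchema("hour", "int", null);
        // Prints "dateint=20170101/hour=10", the same shape getPartitionKeys returns.
        System.out.println(Warehouse.makePartName(
            Lists.newArrayList(dateint, hour), Lists.newArrayList("20170101", "10")));
    }
}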
use of com.netflix.metacat.common.server.connectors.exception.InvalidMetaException in project metacat by Netflix.
the class HiveConnectorTableService method update.
/**
* Update a resource with the given metadata.
*
* @param requestContext The request context
* @param tableInfo The resource metadata
*/
@Override
public void update(@Nonnull @NonNull final ConnectorContext requestContext,
                   @Nonnull @NonNull final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    try {
        final Table existingTable = hiveMetacatConverters.fromTableInfo(get(requestContext, tableInfo.getName()));
        if (existingTable.getTableType().equals(TableType.VIRTUAL_VIEW.name())) {
            throw new TableNotFoundException(tableName);
        }
        updateTable(requestContext, existingTable, tableInfo);
        metacatHiveClient.alterTable(tableName.getDatabaseName(), tableName.getTableName(), existingTable);
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException exception) {
        throw new InvalidMetaException(tableName, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed update hive table %s", tableName), exception);
    }
}
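A hedged sketch of calling update: TableInfo.builder() and its metadata map are assumed to mirror the DatabaseInfo builder used earlier, the QualifiedName.ofTable factory is assumed, and tableService/connectorContext come from the surrounding wiring.

// Hypothetical caller: set a table property and push the change to the metastore.
final QualifiedName tableName = QualifiedName.ofTable("prodhive", "logs", "access_log"); // assumed factory
final TableInfo updated = TableInfo.builder()                    // assumed Lombok-style builder
    .name(tableName)
    .metadata(ImmutableMap.of("comment", "hourly access logs"))  // assumed metadata map field
    .build();
tableService.update(connectorContext, updated);
// A VIRTUAL_VIEW target is rejected with TableNotFoundException; a metastore
// MetaException surfaces as InvalidMetaException, i.e. a bad request upstream.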
use of com.netflix.metacat.common.server.connectors.exception.InvalidMetaException in project metacat by Netflix.
the class RequestWrapper method processRequest.
/**
* Request wrapper to process a request.
*
* @param name name
* @param resourceRequestName request name
* @param supplier supplier
* @param <R> response
* @return response of supplier
*/
public <R> R processRequest(final QualifiedName name, final String resourceRequestName, final Supplier<R> supplier) {
    final long start = registry.clock().wallTime();
    final Map<String, String> tags = new HashMap<>(name.parts());
    tags.put("request", resourceRequestName);
    registry.counter(requestCounterId.withTags(tags)).increment();
    try {
        log.info("### Calling method: {} for {}", resourceRequestName, name);
        return supplier.get();
    } catch (UnsupportedOperationException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        log.error(e.getMessage(), e);
        throw new MetacatNotSupportedException("Catalog does not support the operation");
    } catch (DatabaseAlreadyExistsException | TableAlreadyExistsException | PartitionAlreadyExistsException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        log.error(e.getMessage(), e);
        throw new MetacatAlreadyExistsException(e.getMessage());
    } catch (NotFoundException | MetacatNotFoundException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        log.error(e.getMessage(), e);
        throw new MetacatNotFoundException(String.format("Unable to locate for %s. Details: %s", name, e.getMessage()));
    } catch (InvalidMetaException | IllegalArgumentException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        log.error(e.getMessage(), e);
        throw new MetacatBadRequestException(String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
    } catch (TablePreconditionFailedException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        log.error(e.getMessage(), e);
        throw new MetacatPreconditionFailedException(String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
    } catch (ConnectorException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        final String message = String.format("%s.%s -- %s failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
        log.error(message, e);
        throw new MetacatException(message, e);
    } catch (UserMetadataServiceException e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        final String message = String.format("%s.%s -- %s usermetadata operation failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
        throw new MetacatUserMetadataException(message);
    } catch (Exception e) {
        collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
        final String message = String.format("%s.%s -- %s failed for %s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage(), resourceRequestName, name);
        log.error(message, e);
        throw new MetacatException(message, e);
    } finally {
        final long duration = registry.clock().wallTime() - start;
        log.info("### Time taken to complete {} for {} is {} ms", resourceRequestName, name, duration);
        this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    }
}
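A hedged usage sketch for processRequest: a resource method hands its work to the wrapper as a Supplier so that the exception mapping and counter/timer metrics above are applied uniformly. The requestWrapper and tableService fields, the tableService.get signature, and the QualifiedName.ofTable factory are assumptions.

// Hypothetical resource method delegating through the wrapper.
public TableDto getTable(final String catalogName, final String databaseName, final String tableName) {
    final QualifiedName name = QualifiedName.ofTable(catalogName, databaseName, tableName); // assumed factory
    // An InvalidMetaException thrown by the connector is remapped inside processRequest
    // to a MetacatBadRequestException before it reaches the API client.
    return requestWrapper.processRequest(name, "getTable", () -> tableService.get(name));   // assumed service call
}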
use of com.netflix.metacat.common.server.connectors.exception.InvalidMetaException in project metacat by Netflix.
the class PartitionUtil method getPartValuesFromPartName.
/**
* Retrieves the partition values from the partition name. This method also validates the partition keys
* against those of the table.
*
* @param tableQName table name
* @param table table
* @param partName partition name
* @return list of partition values
*/
public static List<String> getPartValuesFromPartName(final QualifiedName tableQName, final Table table,
                                                     final String partName) {
    if (Strings.isNullOrEmpty(partName)) {
        throw new InvalidMetaException(tableQName, partName, null);
    }
    final LinkedHashMap<String, String> partSpec = new LinkedHashMap<>();
    Warehouse.makeSpecFromName(partSpec, new Path(partName));
    final List<String> values = new ArrayList<>();
    for (FieldSchema field : table.getPartitionKeys()) {
        final String key = field.getName();
        final String val = partSpec.get(key);
        if (val == null) {
            throw new InvalidMetaException(tableQName, partName, null);
        }
        values.add(val);
    }
    return values;
}
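A usage sketch for the helper above, built from the Hive metastore Thrift classes it already relies on; the QualifiedName.ofTable factory is assumed and the catalog, database, and table names are illustrative.

// Parse "dateint=20170101/hour=10" against a table partitioned by (dateint, hour).
final Table table = new Table();
table.setPartitionKeys(Lists.newArrayList(
    new FieldSchema("dateint", "int", null),
    new FieldSchema("hour", "int", null)));
final QualifiedName tableQName = QualifiedName.ofTable("prodhive", "logs", "access_log"); // assumed factory
final List<String> values = PartitionUtil.getPartValuesFromPartName(tableQName, table, "dateint=20170101/hour=10");
// values -> ["20170101", "10"]; an empty name or a missing key throws InvalidMetaException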