Example usage of org.apache.hadoop.hive.metastore.api.MetaException in the Netflix metacat project: class HiveConnectorPartitionService, method getPartitionKeys.
/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext, final QualifiedName tableName, final PartitionListRequest partitionsRequest) {
    final String filter = partitionsRequest.getFilter();
    final List<String> requestedIds = partitionsRequest.getPartitionNames();
    final Pageable pageable = partitionsRequest.getPageable();
    try {
        // Without a filter expression or an explicit id selection, list every
        // partition name straight from the metastore and paginate locally.
        if (filter == null && (requestedIds == null || requestedIds.isEmpty())) {
            final List<String> allNames = metacatHiveClient.getPartitionNames(tableName.getDatabaseName(), tableName.getTableName());
            return ConnectorUtils.paginate(allNames, pageable);
        }
        // A selection was given: resolve the matching partitions and render
        // each one back into its partition-name string.
        final Table table = metacatHiveClient.getTableByName(tableName.getDatabaseName(), tableName.getTableName());
        final List<String> selectedNames = Lists.newArrayList();
        for (final Partition partition : getPartitions(tableName, filter, requestedIds, partitionsRequest.getSort(), pageable)) {
            selectedNames.add(getNameOfPartition(table, partition));
        }
        return selectedNames;
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed get partitions keys for hive table %s", tableName), e);
    }
}
Example usage of org.apache.hadoop.hive.metastore.api.MetaException in the Netflix metacat project: class HiveConnectorPartitionService, method getPartitions.
/**
 * {@inheritDoc}.
 */
@Override
public List<PartitionInfo> getPartitions(final ConnectorRequestContext requestContext, final QualifiedName tableName, final PartitionListRequest partitionsRequest) {
    try {
        // Resolve the raw metastore partitions first, then convert each one
        // against the owning table's metadata.
        final List<Partition> rawPartitions = getPartitions(tableName, partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(), partitionsRequest.getSort(), partitionsRequest.getPageable());
        final Table hiveTable = metacatHiveClient.getTableByName(tableName.getDatabaseName(), tableName.getTableName());
        final TableInfo owningTableInfo = hiveMetacatConverters.toTableInfo(tableName, hiveTable);
        final List<PartitionInfo> converted = new ArrayList<>(rawPartitions.size());
        for (final Partition rawPartition : rawPartitions) {
            converted.add(hiveMetacatConverters.toPartitionInfo(owningTableInfo, rawPartition));
        }
        return converted;
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed get partitions for hive table %s", tableName), e);
    }
}
Example usage of org.apache.hadoop.hive.metastore.api.MetaException in the Netflix metacat project: class HiveConnectorTableService, method create.
/**
 * Create a table.
 *
 * @param requestContext The request context
 * @param tableInfo      The resource metadata
 */
@Override
public void create(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    try {
        final Table table = hiveMetacatConverters.fromTableInfo(tableInfo);
        updateTable(requestContext, table, tableInfo);
        metacatHiveClient.createTable(table);
    } catch (AlreadyExistsException exception) {
        throw new TableAlreadyExistsException(tableName, exception);
    } catch (MetaException | InvalidObjectException exception) {
        // The hive client converts NoSuchObjectException into InvalidObjectException; when
        // the missing object is the database, the message starts with the database name.
        // Throwable.getMessage() may return null, so guard against an NPE that would
        // otherwise mask the original metastore error.
        final String message = exception.getMessage();
        if (message != null && message.startsWith(tableName.getDatabaseName())) {
            throw new DatabaseNotFoundException(QualifiedName.ofDatabase(tableName.getCatalogName(), tableName.getDatabaseName()), exception);
        } else {
            // Invalid table name or column definition.
            throw new InvalidMetaException(tableName, exception);
        }
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed create hive table %s", tableName), exception);
    }
}
Example usage of org.apache.hadoop.hive.metastore.api.MetaException in the Netflix metacat project: class HiveConnectorTableService, method list.
/**
 * {@inheritDoc}.
 */
@Override
public List<TableInfo> list(final ConnectorRequestContext requestContext, final QualifiedName name, @Nullable final QualifiedName prefix, @Nullable final Sort sort, @Nullable final Pageable pageable) {
    try {
        final List<TableInfo> results = Lists.newArrayList();
        for (final String candidate : metacatHiveClient.getAllTables(name.getDatabaseName())) {
            // NOTE(review): the prefix comparison builds a database-qualified name from the
            // table name — confirm ofDatabase (rather than a table-level factory) is intended.
            final QualifiedName candidateName = QualifiedName.ofDatabase(name.getCatalogName(), candidate);
            final boolean matchesPrefix = prefix == null || candidateName.toString().startsWith(prefix.toString());
            if (matchesPrefix) {
                final Table hiveTable = metacatHiveClient.getTableByName(name.getDatabaseName(), candidate);
                results.add(hiveMetacatConverters.toTableInfo(name, hiveTable));
            }
        }
        // Only sorting by table name is supported.
        if (sort != null) {
            ConnectorUtils.sort(results, sort, Comparator.comparing(p -> p.getName().getTableName()));
        }
        return ConnectorUtils.paginate(results, pageable);
    } catch (MetaException exception) {
        throw new DatabaseNotFoundException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed list hive table %s", name), exception);
    }
}
Example usage of org.apache.hadoop.hive.metastore.api.MetaException in the Netflix metacat project: class MetacatHiveClient, method addDropPartitions.
/**
 * {@inheritDoc}.
 */
@Override
public void addDropPartitions(final String dbName, final String tableName, final List<Partition> partitions, final List<String> delPartitionNames) throws TException {
    // Drops then adds within one metastore client session; the client is
    // closed by try-with-resources even when the inner operations fail.
    try (HiveMetastoreClient metastoreClient = createMetastoreClient()) {
        try {
            // Remove the requested partitions first, then register the new ones.
            dropHivePartitions(metastoreClient, dbName, tableName, delPartitionNames);
            metastoreClient.add_partitions(partitions);
        } catch (MetaException | InvalidObjectException invalidPartitionError) {
            throw new InvalidMetaException("One or more partitions are invalid.", invalidPartitionError);
        } catch (TException thriftError) {
            throw new TException(String.format("Internal server error adding/dropping partitions for table %s.%s", dbName, tableName), thriftError);
        }
    }
}
Aggregations