Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project metacat by Netflix.
The class HiveConnectorTableService, method listNames.
/**
* {@inheritDoc}.
*/
@Override
public List<QualifiedName> listNames(@Nonnull @NonNull final ConnectorContext requestContext,
                                     @Nonnull @NonNull final QualifiedName name,
                                     @Nullable final QualifiedName prefix,
                                     @Nullable final Sort sort,
                                     @Nullable final Pageable pageable) {
    try {
        final List<QualifiedName> qualifiedNames = Lists.newArrayList();
        final String tableFilter = (prefix != null && prefix.isTableDefinition()) ? prefix.getTableName() : null;
        for (String tableName : metacatHiveClient.getAllTables(name.getDatabaseName())) {
            if (tableFilter == null || tableName.startsWith(tableFilter)) {
                final QualifiedName qualifiedName =
                    QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableName);
                if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
                    continue;
                }
                qualifiedNames.add(qualifiedName);
            }
        }
        // supporting sort by qualified name only
        if (sort != null) {
            ConnectorUtils.sort(qualifiedNames, sort, Comparator.comparing(QualifiedName::toString));
        }
        return ConnectorUtils.paginate(qualifiedNames, pageable);
    } catch (MetaException exception) {
        throw new InvalidMetaException(name, exception);
    } catch (NoSuchObjectException exception) {
        throw new DatabaseNotFoundException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed listNames hive table %s", name), exception);
    }
}
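As a point of comparison, the filter/sort/paginate flow above can be reproduced with plain JDK collections. QualifiedName, ConnectorUtils, Sort, and Pageable are metacat-internal types, so this minimal sketch substitutes an offset/limit pair for Pageable and a boolean for Sort; all names here are illustrative, not from metacat:

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public final class ListNamesSketch {

    // Filters table names by an optional prefix, sorts them, and applies
    // offset/limit pagination, mirroring the listNames flow above.
    static List<String> filterSortPaginate(List<String> tableNames, String prefix,
                                           boolean ascending, int offset, int limit) {
        Comparator<String> order = ascending
            ? Comparator.naturalOrder()
            : Comparator.<String>naturalOrder().reversed();
        return tableNames.stream()
            .filter(t -> prefix == null || t.startsWith(prefix))
            .sorted(order)
            .skip(offset)
            .limit(limit)
            .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> tables = List.of("orders", "orders_audit", "payments", "order_items");
        // Keep only tables starting with "order", sorted ascending, first page of two.
        System.out.println(filterSortPaginate(tables, "order", true, 0, 2));
        // prints [order_items, orders]
    }
}

Note that sorting happens before pagination, as in the original: paginating first would hand back a page of an arbitrary ordering.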
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project metacat by Netflix.
The class HiveConnectorPartitionService, method getPartitionKeys.
/**
* {@inheritDoc}.
*/
@Override
public List<String> getPartitionKeys(@Nonnull @NonNull final ConnectorContext requestContext,
                                     @Nonnull @NonNull final QualifiedName tableName,
                                     @Nonnull @NonNull final PartitionListRequest partitionsRequest) {
    final String filterExpression = partitionsRequest.getFilter();
    final List<String> partitionIds = partitionsRequest.getPartitionNames();
    List<String> names = Lists.newArrayList();
    final Pageable pageable = partitionsRequest.getPageable();
    try {
        if (filterExpression != null || (partitionIds != null && !partitionIds.isEmpty())) {
            final Table table = metacatHiveClient.getTableByName(tableName.getDatabaseName(), tableName.getTableName());
            for (Partition partition
                : getPartitions(tableName, filterExpression, partitionIds, partitionsRequest.getSort(), pageable)) {
                names.add(getNameOfPartition(table, partition));
            }
        } else {
            names = metacatHiveClient.getPartitionNames(tableName.getDatabaseName(), tableName.getTableName());
            return ConnectorUtils.paginate(names, pageable);
        }
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed get partitions keys for hive table %s", tableName), e);
    }
    return names;
}
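When no filter or explicit partition ids are given, the method falls back to getPartitionNames, which corresponds to the metastore's listPartitionNames call. A minimal sketch against the plain Hive metastore API, assuming a hive-site.xml on the classpath pointing at a reachable metastore; the "default"/"web_logs" names and the IllegalArgumentException standing in for metacat's TableNotFoundException are illustrative:

import java.util.List;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.thrift.TException;

public final class PartitionNamesSketch {

    // Fetches all partition names for a table and translates the metastore's
    // NoSuchObjectException into a caller-facing exception, echoing the
    // exception mapping in getPartitionKeys above.
    static List<String> partitionNames(IMetaStoreClient client, String db, String table)
        throws TException {
        try {
            // (short) -1 asks the metastore for all partitions, as in the snippets above.
            return client.listPartitionNames(db, table, (short) -1);
        } catch (NoSuchObjectException e) {
            throw new IllegalArgumentException("Table not found: " + db + "." + table, e);
        }
    }

    public static void main(String[] args) throws Exception {
        IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        try {
            partitionNames(client, "default", "web_logs").forEach(System.out::println);
        } finally {
            client.close();
        }
    }
}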
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project metacat by Netflix.
The class HiveConnectorTableService, method update.
/**
* Update a resource with the given metadata.
*
* @param requestContext The request context
* @param tableInfo The resource metadata
*/
@Override
public void update(@Nonnull @NonNull final ConnectorContext requestContext,
                   @Nonnull @NonNull final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    try {
        final Table existingTable = hiveMetacatConverters.fromTableInfo(get(requestContext, tableInfo.getName()));
        if (existingTable.getTableType().equals(TableType.VIRTUAL_VIEW.name())) {
            throw new TableNotFoundException(tableName);
        }
        updateTable(requestContext, existingTable, tableInfo);
        metacatHiveClient.alterTable(tableName.getDatabaseName(), tableName.getTableName(), existingTable);
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException exception) {
        throw new InvalidMetaException(tableName, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed update hive table %s", tableName), exception);
    }
}
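The method above follows a fetch-modify-alter shape with exception translation ordered from most to least specific, ending in a TException catch-all. A sketch of the same pattern against the plain IMetaStoreClient API; the nested domain exceptions and the setComment helper are hypothetical stand-ins for metacat's TableNotFoundException, InvalidMetaException, and ConnectorException:

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;

public final class AlterTableSketch {

    // Hypothetical domain exceptions standing in for metacat's hierarchy.
    static class TableNotFound extends RuntimeException {
        TableNotFound(String name, Throwable cause) { super(name, cause); }
    }
    static class InvalidMeta extends RuntimeException {
        InvalidMeta(String name, Throwable cause) { super(name, cause); }
    }
    static class ConnectorFailure extends RuntimeException {
        ConnectorFailure(String message, Throwable cause) { super(message, cause); }
    }

    // Fetch-modify-alter with the same catch ordering as update() above:
    // most specific Thrift exceptions first, the TException catch-all last.
    static void setComment(IMetaStoreClient client, String db, String name, String comment) {
        try {
            Table table = client.getTable(db, name);
            // putToParameters initializes the parameters map if it is unset.
            table.putToParameters("comment", comment);
            client.alter_table(db, name, table);
        } catch (NoSuchObjectException e) {
            throw new TableNotFound(db + "." + name, e);
        } catch (MetaException e) {
            throw new InvalidMeta(db + "." + name, e);
        } catch (TException e) {
            throw new ConnectorFailure(String.format("Failed to alter hive table %s.%s", db, name), e);
        }
    }
}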
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project drill by apache.
The class DrillHiveMetaStoreClient, method getHiveReadEntryHelper.
/** Helper method which gets table metadata. Retries once if the first call to fetch the metadata fails */
protected static HiveReadEntry getHiveReadEntryHelper(final IMetaStoreClient mClient, final String dbName,
                                                      final String tableName) throws TException {
    Table table = null;
    try {
        table = mClient.getTable(dbName, tableName);
    } catch (MetaException | NoSuchObjectException e) {
        throw e;
    } catch (TException e) {
        logger.warn("Failure while attempting to get hive table. Retries once. ", e);
        try {
            mClient.close();
        } catch (Exception ex) {
            logger.warn("Failure while attempting to close existing hive metastore connection. May leak connection.", ex);
        }
        mClient.reconnect();
        table = mClient.getTable(dbName, tableName);
    }
    if (table == null) {
        throw new UnknownTableException(String.format("Unable to find table '%s'.", tableName));
    }
    List<Partition> partitions;
    try {
        partitions = mClient.listPartitions(dbName, tableName, (short) -1);
    } catch (NoSuchObjectException | MetaException e) {
        throw e;
    } catch (TException e) {
        logger.warn("Failure while attempting to get hive partitions. Retries once. ", e);
        try {
            mClient.close();
        } catch (Exception ex) {
            logger.warn("Failure while attempting to close existing hive metastore connection. May leak connection.", ex);
        }
        mClient.reconnect();
        partitions = mClient.listPartitions(dbName, tableName, (short) -1);
    }
    List<HiveTableWrapper.HivePartitionWrapper> hivePartitionWrappers = Lists.newArrayList();
    HiveTableWithColumnCache hiveTable = new HiveTableWithColumnCache(table, new ColumnListsCache(table));
    for (Partition partition : partitions) {
        hivePartitionWrappers.add(createPartitionWithSpecColumns(hiveTable, partition));
    }
    if (hivePartitionWrappers.isEmpty()) {
        hivePartitionWrappers = null;
    }
    return new HiveReadEntry(new HiveTableWrapper(hiveTable), hivePartitionWrappers);
}
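The two retry blocks above are the same pattern written twice: rethrow the "expected" metastore exceptions (NoSuchObjectException, MetaException) immediately, and treat any other TException as a possibly broken connection worth one close/reconnect cycle and a single retry. A generic sketch of that pattern; MetastoreCall and callWithOneRetry are illustrative names, not Drill APIs:

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.thrift.TException;

public final class RetryOnceSketch {

    // A Thrift metastore call that may fail with TException.
    @FunctionalInterface
    interface MetastoreCall<T> {
        T run(IMetaStoreClient client) throws TException;
    }

    // Expected exceptions propagate immediately; any other TException
    // triggers one close/reconnect cycle and a single retry.
    static <T> T callWithOneRetry(IMetaStoreClient client, MetastoreCall<T> call) throws TException {
        try {
            return call.run(client);
        } catch (NoSuchObjectException | MetaException e) {
            throw e;
        } catch (TException e) {
            try {
                client.close();
            } catch (Exception ignored) {
                // Best effort; the old connection may already be dead.
            }
            client.reconnect();
            return call.run(client);
        }
    }
}

With such a wrapper, each of the helper's two blocks collapses to a single call, e.g. Table table = callWithOneRetry(mClient, c -> c.getTable(dbName, tableName));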
Use of org.apache.hadoop.hive.metastore.api.NoSuchObjectException in project cdap by caskdata.
The class BaseHiveExploreService, method getTableInfo.
@Override
public TableInfo getTableInfo(String namespace, @Nullable String databaseName, String table)
    throws ExploreException, TableNotFoundException {
    startAndWait();
    // TODO check if the database user is allowed to access if security is enabled
    try {
        String db = databaseName != null ? databaseName : getHiveDatabase(namespace);
        Table tableInfo = getMetaStoreClient().getTable(db, table);
        List<FieldSchema> tableFields = tableInfo.getSd().getCols();
        // The table columns for partitioned tables may not be present
        // in the storage descriptor. If columns are missing, do a separate call for schema.
        if (tableFields == null || tableFields.isEmpty()) {
            // don't call .getSchema()... class not found exception if we do in the thrift code...
            tableFields = getMetaStoreClient().getFields(db, table);
        }
        ImmutableList.Builder<TableInfo.ColumnInfo> schemaBuilder = ImmutableList.builder();
        Set<String> fieldNames = Sets.newHashSet();
        for (FieldSchema column : tableFields) {
            schemaBuilder.add(new TableInfo.ColumnInfo(column.getName(), column.getType(), column.getComment()));
            fieldNames.add(column.getName());
        }
        ImmutableList.Builder<TableInfo.ColumnInfo> partitionKeysBuilder = ImmutableList.builder();
        for (FieldSchema column : tableInfo.getPartitionKeys()) {
            TableInfo.ColumnInfo columnInfo =
                new TableInfo.ColumnInfo(column.getName(), column.getType(), column.getComment());
            partitionKeysBuilder.add(columnInfo);
            // add partition keys to the schema if they are not already there,
            // since they show up when you do a 'describe <table>' command.
            if (!fieldNames.contains(column.getName())) {
                schemaBuilder.add(columnInfo);
            }
        }
        // it's a cdap generated table if it uses our storage handler, or if a property is set on the table.
        String cdapName = null;
        Map<String, String> tableParameters = tableInfo.getParameters();
        if (tableParameters != null) {
            cdapName = tableParameters.get(Constants.Explore.CDAP_NAME);
        }
        // tables created after CDAP 2.6 should set the "cdap.name" property, but older ones
        // do not. So also check if it uses a cdap storage handler.
        String storageHandler = tableInfo.getParameters().get("storage_handler");
        boolean isDatasetTable = cdapName != null
            || DatasetStorageHandler.class.getName().equals(storageHandler)
            || StreamStorageHandler.class.getName().equals(storageHandler);
        return new TableInfo(tableInfo.getTableName(), tableInfo.getDbName(), tableInfo.getOwner(),
                             (long) tableInfo.getCreateTime() * 1000, (long) tableInfo.getLastAccessTime() * 1000,
                             tableInfo.getRetention(), partitionKeysBuilder.build(), tableInfo.getParameters(),
                             tableInfo.getTableType(), schemaBuilder.build(), tableInfo.getSd().getLocation(),
                             tableInfo.getSd().getInputFormat(), tableInfo.getSd().getOutputFormat(),
                             tableInfo.getSd().isCompressed(), tableInfo.getSd().getNumBuckets(),
                             tableInfo.getSd().getSerdeInfo().getSerializationLib(),
                             tableInfo.getSd().getSerdeInfo().getParameters(), isDatasetTable);
    } catch (NoSuchObjectException e) {
        throw new TableNotFoundException(e);
    } catch (TException e) {
        throw new ExploreException(e);
    }
}
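The part of getTableInfo that merges partition keys into the column schema can be isolated into a small helper. A sketch using only the thrift Table and FieldSchema types; it assumes the Table was fetched from the metastore so that getSd().getCols() and getPartitionKeys() are populated, and describeColumns is an illustrative name:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;

public final class SchemaMergeSketch {

    // Returns the table's data columns followed by any partition keys that are
    // not already listed, mirroring how getTableInfo above builds the schema so
    // that it matches the output of a 'describe <table>' command.
    static List<FieldSchema> describeColumns(Table table) {
        List<FieldSchema> columns = new ArrayList<>(table.getSd().getCols());
        Set<String> seen = new HashSet<>();
        for (FieldSchema column : columns) {
            seen.add(column.getName());
        }
        for (FieldSchema key : table.getPartitionKeys()) {
            if (seen.add(key.getName())) {
                columns.add(key);
            }
        }
        return columns;
    }
}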