Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
In the class JdbcConnectorTableService, the method get:
/**
* {@inheritDoc}
*/
@Override
public TableInfo get(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name) {
    log.debug("Beginning to get table metadata for qualified name {} for request {}", name, context);
    try (Connection connection = this.dataSource.getConnection()) {
        final String database = name.getDatabaseName();
        connection.setSchema(database);
        final ImmutableList.Builder<FieldInfo> fields = ImmutableList.builder();
        try (ResultSet columns = this.getColumns(connection, name)) {
            while (columns.next()) {
                final String type = columns.getString("TYPE_NAME");
                final String size = columns.getString("COLUMN_SIZE");
                final String precision = columns.getString("DECIMAL_DIGITS");
                final String sourceType = this.buildSourceType(type, size, precision);
                final FieldInfo.FieldInfoBuilder fieldInfo = FieldInfo.builder()
                    .name(columns.getString("COLUMN_NAME"))
                    .sourceType(sourceType)
                    .type(this.typeConverter.toMetacatType(sourceType))
                    .comment(columns.getString("REMARKS"))
                    // Null-safe: IS_NULLABLE may be empty when the driver can't determine nullability.
                    .isNullable("YES".equals(columns.getString("IS_NULLABLE")))
                    .defaultValue(columns.getString("COLUMN_DEF"));
                if (size != null) {
                    fieldInfo.size(Integer.parseInt(size));
                }
                fields.add(fieldInfo.build());
            }
        }
        final List<FieldInfo> fieldInfos = fields.build();
        // A table with no columns that also fails the existence check does not exist.
        if (fieldInfos.isEmpty() && !exists(context, name)) {
            throw new TableNotFoundException(name);
        }
        // Set table details, reusing the already-built field list.
        final TableInfo result = TableInfo.builder().name(name).fields(fieldInfos).build();
        setTableInfoDetails(connection, result);
        log.debug("Finished getting table metadata for qualified name {} for request {}", name, context);
        return result;
    } catch (final SQLException se) {
        throw new ConnectorException(se.getMessage(), se);
    }
}
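The sourceType string passed to the type converter is assembled by buildSourceType from the JDBC metadata columns TYPE_NAME, COLUMN_SIZE, and DECIMAL_DIGITS. A minimal sketch of what such a helper could look like (hypothetical; the actual implementation in JdbcConnectorTableService may differ):

// Hypothetical sketch of a buildSourceType-style helper; not the actual metacat code.
// ("DECIMAL", "10", "2") -> "DECIMAL(10, 2)"; ("VARCHAR", "255", null) -> "VARCHAR(255)".
private String buildSourceType(final String typeName, final String size, final String precision) {
    if (size != null && precision != null) {
        return typeName + "(" + size + ", " + precision + ")";
    }
    if (size != null) {
        return typeName + "(" + size + ")";
    }
    return typeName;
}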
Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
In the class CassandraConnectorTableService, the method list:
/**
* {@inheritDoc}
*/
@Override
public List<TableInfo> list(
    @Nonnull @NonNull final ConnectorRequestContext context,
    @Nonnull @NonNull final QualifiedName name,
    @Nullable final QualifiedName prefix,
    @Nullable final Sort sort,
    @Nullable final Pageable pageable
) {
    final String keyspace = name.getDatabaseName();
    log.debug("Attempting to list tables in Cassandra keyspace {} for request {}", keyspace, context);
    try {
        final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace);
        if (keyspaceMetadata == null) {
            throw new DatabaseNotFoundException(name);
        }
        // TODO: Should we include views?
        final List<TableInfo> tables = Lists.newArrayList();
        for (final TableMetadata tableMetadata : keyspaceMetadata.getTables()) {
            // Skip tables whose names don't start with the requested prefix, if one was given.
            if (prefix != null && !tableMetadata.getName().startsWith(prefix.getTableName())) {
                continue;
            }
            tables.add(this.getTableInfo(name, tableMetadata));
        }
        // Sort by table name when a sort order is requested.
        if (sort != null) {
            final Comparator<TableInfo> tableComparator = Comparator.comparing(t -> t.getName().getTableName());
            ConnectorUtils.sort(tables, sort, tableComparator);
        }
        // Paging
        final List<TableInfo> pagedTables = ConnectorUtils.paginate(tables, pageable);
        log.debug("Listed {} tables in Cassandra keyspace {} for request {}", pagedTables.size(), keyspace, context);
        return pagedTables;
    } catch (final DriverException de) {
        log.error(de.getMessage(), de);
        throw this.getExceptionMapper().toConnectorException(de, name);
    }
}
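ConnectorUtils.paginate trims the sorted listing down to the requested page before returning. A rough sketch of that kind of offset/limit pagination (an assumption about its behavior; the getOffset/getLimit accessors on Pageable are also assumed, and metacat's real ConnectorUtils may differ):

// Hypothetical offset/limit pagination in the spirit of ConnectorUtils.paginate;
// not the actual metacat implementation.
static <T> List<T> paginate(final List<T> items, @Nullable final Pageable pageable) {
    if (pageable == null) {
        return items;
    }
    // Clamp the window to the list bounds so out-of-range pages yield an empty list.
    final int from = Math.min(pageable.getOffset(), items.size());
    final int to = Math.min(from + pageable.getLimit(), items.size());
    return new ArrayList<>(items.subList(from, to));
}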
Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
In the class HiveConnectorPartitionService, the method addUpdateDropPartitions:
protected void addUpdateDropPartitions(
    final QualifiedName tableQName,
    final Table table,
    final List<String> partitionNames,
    final List<PartitionInfo> addedPartitionInfos,
    final List<PartitionHolder> existingPartitionInfos,
    final Set<String> deletePartitionNames
) {
    final String databaseName = table.getDbName();
    final String tableName = table.getTableName();
    final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableQName, table);
    try {
        final List<Partition> existingPartitions = existingPartitionInfos.stream()
            .map(p -> hiveMetacatConverters.fromPartitionInfo(tableInfo, p.getPartitionInfo()))
            .collect(Collectors.toList());
        final List<Partition> addedPartitions = addedPartitionInfos.stream()
            .map(p -> hiveMetacatConverters.fromPartitionInfo(tableInfo, p))
            .collect(Collectors.toList());
        // If alterIfExists=true, alter the partitions that already exist.
        if (!existingPartitionInfos.isEmpty()) {
            copyTableSdToPartitionSd(existingPartitions, table);
            metacatHiveClient.alterPartitions(databaseName, tableName, existingPartitions);
        }
        // Copy the storage details from the table if the partition does not contain the details.
        copyTableSdToPartitionSd(addedPartitions, table);
        // Drop the partitions named in 'deletePartitionNames' and add the 'addedPartitionInfos' partitions.
        metacatHiveClient.addDropPartitions(databaseName, tableName, addedPartitions, Lists.newArrayList(deletePartitionNames));
    } catch (NoSuchObjectException exception) {
        if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
            throw new PartitionNotFoundException(tableQName, "", exception);
        } else {
            throw new TableNotFoundException(tableQName, exception);
        }
    } catch (MetaException | InvalidObjectException exception) {
        throw new InvalidMetaException("One or more partitions are invalid.", exception);
    } catch (AlreadyExistsException e) {
        throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed to save partitions for hive table %s", tableName), exception);
    }
}
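copyTableSdToPartitionSd backfills a partition's storage descriptor from the parent table when the partition does not carry its own. A simplified sketch of that idea using the Hive metastore Thrift types (hypothetical; the real helper in HiveConnectorPartitionService may differ):

// Hypothetical sketch of copying the table's storage descriptor onto partitions
// that lack one; not the actual metacat helper.
private void copyTableSdToPartitionSd(final List<Partition> partitions, final Table table) {
    for (final Partition partition : partitions) {
        final StorageDescriptor partitionSd = partition.getSd();
        if (partitionSd == null || partitionSd.getLocation() == null) {
            final StorageDescriptor sd = table.getSd().deepCopy();
            // Preserve the partition's own location field rather than inheriting
            // the table root as the partition's location.
            sd.setLocation(partitionSd == null ? null : partitionSd.getLocation());
            partition.setSd(sd);
        }
    }
}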