Example 6 with TableInfo

Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.

From the class HiveConnectorInfoConverter, method fromPartitionInfo.

/**
 * Converts from PartitionInfo to the connector (Hive) partition.
 *
 * @param tableInfo Metacat table info the partition belongs to
 * @param partition Metacat partition info
 * @return connector partition
 */
@Override
public Partition fromPartitionInfo(final TableInfo tableInfo, final PartitionInfo partition) {
    final QualifiedName name = partition.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    Map<String, String> metadata = partition.getMetadata();
    if (metadata == null) {
        // Can't use Collections.emptyMap() here: it is immutable and cannot be
        // modified when partitions are added in the embedded metastore.
        metadata = new HashMap<>();
    }
    final List<FieldInfo> fields = tableInfo.getFields();
    List<FieldSchema> fieldSchemas = Collections.emptyList();
    if (notNull(fields)) {
        fieldSchemas = fields.stream().filter(field -> !field.isPartitionKey()).map(this::metacatToHiveField).collect(Collectors.toList());
    }
    final StorageDescriptor sd = fromStorageInfo(partition.getSerde(), fieldSchemas);
    // using the table level serialization lib
    if (notNull(sd.getSerdeInfo()) && notNull(tableInfo.getSerde()) && Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
        sd.getSerdeInfo().setSerializationLib(tableInfo.getSerde().getSerializationLib());
    }
    final AuditInfo auditInfo = partition.getAudit();
    final int createTime = (notNull(auditInfo) && notNull(auditInfo.getCreatedDate())) ? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
    final int lastAccessTime = (notNull(auditInfo) && notNull(auditInfo.getLastModifiedDate())) ? dateToEpochSeconds(auditInfo.getLastModifiedDate()) : 0;
    if (null == name) {
        return new Partition(values, "", "", createTime, lastAccessTime, sd, metadata);
    }
    if (notNull(name.getPartitionName())) {
        for (String partialPartName : SLASH_SPLITTER.split(name.getPartitionName())) {
            final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
            Preconditions.checkState(nameValues.size() == 2, "Unrecognized partition name: " + name);
            values.add(nameValues.get(1));
        }
    }
    final String databaseName = notNull(name.getDatabaseName()) ? name.getDatabaseName() : "";
    final String tableName = notNull(name.getTableName()) ? name.getTableName() : "";
    return new Partition(values, databaseName, tableName, createTime, lastAccessTime, sd, metadata);
}
Also used : Date(java.util.Date) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) HashMap(java.util.HashMap) SerDeInfo(org.apache.hadoop.hive.metastore.api.SerDeInfo) StringUtils(org.apache.commons.lang3.StringUtils) Partition(org.apache.hadoop.hive.metastore.api.Partition) Strings(com.google.common.base.Strings) DatabaseInfo(com.netflix.metacat.common.server.connectors.model.DatabaseInfo) FieldInfo(com.netflix.metacat.common.server.connectors.model.FieldInfo) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) ConnectorInfoConverter(com.netflix.metacat.common.server.connectors.ConnectorInfoConverter) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) LinkedList(java.util.LinkedList) Splitter(com.google.common.base.Splitter) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) ViewInfo(com.netflix.metacat.common.server.connectors.model.ViewInfo) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) Table(org.apache.hadoop.hive.metastore.api.Table) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo) HiveTableUtil(com.netflix.metacat.connector.hive.util.HiveTableUtil) TableType(org.apache.hadoop.hive.metastore.TableType) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Database(org.apache.hadoop.hive.metastore.api.Database) Collections(java.util.Collections) StructField(org.apache.hadoop.hive.serde2.objectinspector.StructField)
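
The value-extraction loop above is worth seeing in isolation. Below is a minimal, self-contained sketch of how a SLASH_SPLITTER/EQUAL_SPLITTER pair decomposes a Hive-style partition name into its values; the splitter definitions here are assumptions modeled on the checkState(nameValues.size() == 2) guard, not copied from the converter.

import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.List;

public final class PartitionNameSketch {
    // Assumed equivalents of the converter's SLASH_SPLITTER and EQUAL_SPLITTER.
    private static final Splitter SLASH_SPLITTER = Splitter.on('/');
    private static final Splitter EQUAL_SPLITTER = Splitter.on('=');

    public static void main(final String[] args) {
        final List<String> values = new ArrayList<>();
        // A Hive partition name is a '/'-joined list of key=value pairs.
        for (final String part : SLASH_SPLITTER.split("dateint=20180101/hour=00")) {
            final List<String> kv = ImmutableList.copyOf(EQUAL_SPLITTER.split(part));
            // fromPartitionInfo keeps only the values, in partition-key order.
            values.add(kv.get(1));
        }
        System.out.println(values); // prints [20180101, 00]
    }
}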

Example 7 with TableInfo

Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.

From the class HiveConnectorFastTableService, method update.

/**
 * Update a table with the given metadata.
 *
 * If the table is an Iceberg table, lock it for the update so that no other request can update it
 * concurrently; if the table's metadata is invalid, throw an error.
 * If the table is not an Iceberg table, do a regular table update.
 *
 * @param requestContext The request context
 * @param tableInfo      The resource metadata
 */
@Override
public void update(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
    if (isIcebergTable(tableInfo)) {
        final QualifiedName tableName = tableInfo.getName();
        final Long tableId = directSqlTable.getTableId(tableName);
        try {
            log.debug("Locking Iceberg table {}", tableName);
            directSqlTable.lockIcebergTable(tableId, tableName);
            try {
                final TableInfo existingTableInfo = get(requestContext, tableInfo.getName());
                validateIcebergUpdate(existingTableInfo, tableInfo);
                final Table existingTable = getHiveMetacatConverters().fromTableInfo(existingTableInfo);
                super.update(requestContext, existingTable, tableInfo);
            } finally {
                directSqlTable.unlockIcebergTable(tableId);
                log.debug("Unlocked Iceberg table {}", tableName);
            }
        } catch (IllegalStateException e) {
            throw new TablePreconditionFailedException(tableName, e.getMessage());
        }
    } else {
        super.update(requestContext, tableInfo);
    }
}
Also used : Table(org.apache.hadoop.hive.metastore.api.Table) TablePreconditionFailedException(com.netflix.metacat.common.server.connectors.exception.TablePreconditionFailedException) QualifiedName(com.netflix.metacat.common.QualifiedName) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo)
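
The shape to copy here is lock, validate and update, then unlock in a finally block, so the lock is released even when validateIcebergUpdate throws. A minimal sketch of that discipline, with a hypothetical TableLock interface standing in for DirectSqlTable:

interface TableLock {
    void lock(long tableId);
    void unlock(long tableId);
}

final class GuardedUpdate {
    // Hold the per-table lock for the whole validate-and-update sequence;
    // the finally block guarantees release on both success and failure.
    static void updateUnderLock(final TableLock lock, final long tableId, final Runnable update) {
        lock.lock(tableId);
        try {
            update.run();
        } finally {
            lock.unlock(tableId);
        }
    }
}

The IllegalStateException handler sits outside this pattern: it converts a failed precondition check into the API-level TablePreconditionFailedException.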

Example 8 with TableInfo

Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.

From the class JdbcConnectorTableService, method list.

/**
 * {@inheritDoc}
 */
@Override
public List<TableInfo> list(@Nonnull final ConnectorRequestContext context, @Nonnull final QualifiedName name, @Nullable final QualifiedName prefix, @Nullable final Sort sort, @Nullable final Pageable pageable) {
    log.debug("Beginning to list table metadata for {} for request {}", name, context);
    final ImmutableList.Builder<TableInfo> builder = ImmutableList.builder();
    for (final QualifiedName tableName : this.listNames(context, name, prefix, sort, pageable)) {
        builder.add(this.get(context, tableName));
    }
    log.debug("Finished listing table metadata for {} for request {}", name, context);
    return builder.build();
}
Also used : ImmutableList(com.google.common.collect.ImmutableList) QualifiedName(com.netflix.metacat.common.QualifiedName) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo)
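
Stripped of connector details, list() is an enumerate-then-resolve fan-out: listNames produces the qualifying names, and each one is resolved with get. A generic sketch of the same pattern (the Function stand-in for get is hypothetical):

import com.google.common.collect.ImmutableList;
import java.util.List;
import java.util.function.Function;

final class ListViaNames {
    // Resolve each listed name through a per-name lookup and collect the results.
    static <N, T> List<T> list(final Iterable<N> names, final Function<N, T> get) {
        final ImmutableList.Builder<T> builder = ImmutableList.builder();
        for (final N name : names) {
            // One lookup per name: an N+1 access pattern.
            builder.add(get.apply(name));
        }
        return builder.build();
    }
}

The cost implication is that each listed table triggers its own metadata fetch, so a broad prefix can mean many round trips to the source database.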

Example 9 with TableInfo

Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.

From the class S3ConnectorTableService, method update.

@Override
public void update(@Nonnull final ConnectorRequestContext context, @Nonnull final TableInfo tableInfo) {
    log.debug("Start: Update table {}", tableInfo.getName());
    final QualifiedName tableName = tableInfo.getName();
    final Table table = tableDao.getBySourceDatabaseTableName(catalogName, tableName.getDatabaseName(), tableName.getTableName());
    if (table == null) {
        throw new TableNotFoundException(tableName);
    }
    // We can update the fields, the URI, or the full serde
    final Location newLocation = infoConverter.toLocation(tableInfo);
    Location location = table.getLocation();
    if (location == null) {
        location = new Location();
        location.setTable(table);
        table.setLocation(location);
    }
    if (newLocation.getUri() != null) {
        location.setUri(newLocation.getUri());
    }
    final Info newInfo = newLocation.getInfo();
    if (newInfo != null) {
        final Info info = location.getInfo();
        if (info == null) {
            location.setInfo(newInfo);
            newInfo.setLocation(location);
        } else {
            if (newInfo.getInputFormat() != null) {
                info.setInputFormat(newInfo.getInputFormat());
            }
            if (newInfo.getOutputFormat() != null) {
                info.setOutputFormat(newInfo.getOutputFormat());
            }
            if (newInfo.getOwner() != null) {
                info.setOwner(newInfo.getOwner());
            }
            if (newInfo.getSerializationLib() != null) {
                info.setSerializationLib(newInfo.getSerializationLib());
            }
            if (newInfo.getParameters() != null && !newInfo.getParameters().isEmpty()) {
                info.setParameters(newInfo.getParameters());
            }
        }
    }
    final Schema newSchema = newLocation.getSchema();
    if (newSchema != null) {
        final List<Field> newFields = newSchema.getFields();
        if (newFields != null && !newFields.isEmpty()) {
            final Schema schema = location.getSchema();
            if (schema == null) {
                location.setSchema(newSchema);
                newSchema.setLocation(location);
            } else {
                final List<Field> fields = schema.getFields();
                if (fields.isEmpty()) {
                    newFields.forEach(field -> {
                        field.setSchema(schema);
                        fields.add(field);
                    });
                } else {
                    // Re-anchor the incoming fields to this schema, fixing up
                    // position and type before swapping them in.
                    for (int i = 0; i < newFields.size(); i++) {
                        final Field newField = newFields.get(i);
                        newField.setPos(i);
                        newField.setSchema(schema);
                        if (newField.getType() == null) {
                            newField.setType(newField.getSourceType());
                        }
                    }
                    // Drop the old field rows, persist, then attach the new list.
                    schema.setFields(null);
                    fieldDao.delete(fields);
                    tableDao.save(table, true);
                    schema.setFields(newFields);
                }
            }
        }
    }
    log.debug("End: Update table {}", tableInfo.getName());
}
Also used : TableNotFoundException(com.netflix.metacat.common.server.connectors.exception.TableNotFoundException) Field(com.netflix.metacat.connector.s3.model.Field) Table(com.netflix.metacat.connector.s3.model.Table) QualifiedName(com.netflix.metacat.common.QualifiedName) Schema(com.netflix.metacat.connector.s3.model.Schema) Info(com.netflix.metacat.connector.s3.model.Info) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo) Location(com.netflix.metacat.connector.s3.model.Location)
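
The serde merge above follows a single rule: copy a field only when the incoming value is non-null (non-empty for the parameters map), so fields the caller did not supply keep their stored values. A minimal sketch of that rule, using a hypothetical POJO in place of the s3 Info model:

final class SerdeMergeSketch {
    // Hypothetical stand-in for the s3 Info model.
    static final class Info {
        String inputFormat;
        String outputFormat;
        String owner;
    }

    // Copy only the fields the caller actually supplied; null means "leave as-is".
    static void merge(final Info existing, final Info incoming) {
        if (incoming.inputFormat != null) {
            existing.inputFormat = incoming.inputFormat;
        }
        if (incoming.outputFormat != null) {
            existing.outputFormat = incoming.outputFormat;
        }
        if (incoming.owner != null) {
            existing.owner = incoming.owner;
        }
    }
}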

Example 10 with TableInfo

Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.

From the class CassandraConnectorTableService, method get.

/**
 * {@inheritDoc}
 */
@Override
public TableInfo get(@Nonnull @NonNull final ConnectorRequestContext context, @Nonnull @NonNull final QualifiedName name) {
    final String keyspace = name.getDatabaseName();
    final String table = name.getTableName();
    log.debug("Attempting to get metadata for Cassandra table {}.{} for request {}", keyspace, table, context);
    try {
        final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace);
        if (keyspaceMetadata == null) {
            throw new DatabaseNotFoundException(name);
        }
        final TableMetadata tableMetadata = keyspaceMetadata.getTable(table);
        if (tableMetadata == null) {
            throw new TableNotFoundException(name);
        }
        final TableInfo tableInfo = this.getTableInfo(name, tableMetadata);
        log.debug("Successfully got metadata for Cassandra table {}.{} for request {}", keyspace, table, context);
        return tableInfo;
    } catch (final DriverException de) {
        log.error(de.getMessage(), de);
        throw this.getExceptionMapper().toConnectorException(de, name);
    }
}
Also used : TableMetadata(com.datastax.driver.core.TableMetadata) TableNotFoundException(com.netflix.metacat.common.server.connectors.exception.TableNotFoundException) DatabaseNotFoundException(com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo) DriverException(com.datastax.driver.core.exceptions.DriverException) KeyspaceMetadata(com.datastax.driver.core.KeyspaceMetadata)
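
Two idioms carry this method: guard clauses that map missing driver metadata to specific not-found exceptions, and a catch block that routes every DriverException through the connector's exception mapper. A plain-map sketch of the guard-clause half (the exception classes here are hypothetical stand-ins, not the real connector types):

import java.util.Map;

final class MetadataLookupSketch {
    // Hypothetical stand-ins for DatabaseNotFoundException and TableNotFoundException.
    static final class DatabaseNotFound extends RuntimeException {
        DatabaseNotFound(final String name) {
            super(name);
        }
    }

    static final class TableNotFound extends RuntimeException {
        TableNotFound(final String name) {
            super(name);
        }
    }

    static String lookup(final Map<String, Map<String, String>> keyspaces,
                         final String keyspace, final String table) {
        final Map<String, String> tables = keyspaces.get(keyspace);
        if (tables == null) {
            // No keyspace: surface it as a database-level error.
            throw new DatabaseNotFound(keyspace);
        }
        final String metadata = tables.get(table);
        if (metadata == null) {
            // Keyspace exists but the table does not.
            throw new TableNotFound(keyspace + "." + table);
        }
        return metadata;
    }
}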

Aggregations

TableInfo (com.netflix.metacat.common.server.connectors.model.TableInfo)18 QualifiedName (com.netflix.metacat.common.QualifiedName)11 TableNotFoundException (com.netflix.metacat.common.server.connectors.exception.TableNotFoundException)11 Table (org.apache.hadoop.hive.metastore.api.Table)9 ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException)7 Strings (com.google.common.base.Strings)6 ImmutableList (com.google.common.collect.ImmutableList)6 Lists (com.google.common.collect.Lists)6 DatabaseNotFoundException (com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException)6 InvalidMetaException (com.netflix.metacat.common.server.connectors.exception.InvalidMetaException)6 PartitionInfo (com.netflix.metacat.common.server.connectors.model.PartitionInfo)6 List (java.util.List)6 Map (java.util.Map)6 InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException)6 MetaException (org.apache.hadoop.hive.metastore.api.MetaException)6 NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException)6 Partition (org.apache.hadoop.hive.metastore.api.Partition)6 TException (org.apache.thrift.TException)6 Pageable (com.netflix.metacat.common.dto.Pageable)4 Sort (com.netflix.metacat.common.dto.Sort)4