Example 11 with AuditInfo

use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.

the class DirectSqlDatabase method getDatabaseById.

private DatabaseInfo getDatabaseById(final Long id, final QualifiedName databaseName) {
    DatabaseInfo result = null;
    try {
        // Retrieve the database info record
        final SqlRowSet databaseRowSet = jdbcTemplate.queryForRowSet(SQL.GET_DATABASE, new Object[] { id }, new int[] { Types.BIGINT });
        if (databaseRowSet.first()) {
            final AuditInfo auditInfo = AuditInfo.builder().createdBy(databaseRowSet.getString(COL_OWNER)).build();
            // Retrieve the database params
            final Map<String, String> metadata = Maps.newHashMap();
            try {
                final SqlRowSet paramRowSet = jdbcTemplate.queryForRowSet(SQL.GET_DATABASE_PARAMS, new Object[] { id }, new int[] { Types.BIGINT });
                while (paramRowSet.next()) {
                    metadata.put(paramRowSet.getString(COL_PARAM_KEY), paramRowSet.getString(COL_PARAM_VALUE));
                }
            } catch (EmptyResultDataAccessException ignored) {
            }
            result = DatabaseInfo.builder().name(databaseName).uri(databaseRowSet.getString(COL_URI)).auditInfo(auditInfo).metadata(metadata).build();
        }
    } catch (EmptyResultDataAccessException e) {
        log.debug("Database {} not found.", databaseName);
        throw new DatabaseNotFoundException(databaseName);
    }
    return result;
}
Also used : SqlRowSet(org.springframework.jdbc.support.rowset.SqlRowSet) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) DatabaseInfo(com.netflix.metacat.common.server.connectors.model.DatabaseInfo) DatabaseNotFoundException(com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException) EmptyResultDataAccessException(org.springframework.dao.EmptyResultDataAccessException)
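
A minimal, self-contained sketch of the builder flow above. The catalog name, database name, URI, owner value, and the QualifiedName.ofDatabase factory call are illustrative assumptions, not values from the project:

import com.netflix.metacat.common.QualifiedName;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import com.netflix.metacat.common.server.connectors.model.DatabaseInfo;

import java.util.Collections;

public final class AuditInfoBuilderSketch {
    public static void main(final String[] args) {
        // Only the creator is known at this point, so only createdBy is set;
        // the remaining AuditInfo fields stay null until a later layer fills them.
        final AuditInfo auditInfo = AuditInfo.builder()
            .createdBy("hive") // hypothetical stand-in for the owner column value
            .build();
        // Attach the audit block to the DatabaseInfo, mirroring getDatabaseById.
        final DatabaseInfo databaseInfo = DatabaseInfo.builder()
            .name(QualifiedName.ofDatabase("prodhive", "example_db")) // hypothetical names
            .uri("file:/tmp/example_db.db") // hypothetical URI
            .auditInfo(auditInfo)
            .metadata(Collections.singletonMap("comment", "sketch"))
            .build();
        System.out.println(databaseInfo);
    }
}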

Example 12 with AuditInfo

use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.

the class HiveConnectorInfoConverter method fromTableInfo.

/**
 * Converts from TableInfo to the connector table.
 *
 * @param tableInfo Metacat table Info
 * @return connector table
 */
@Override
public Table fromTableInfo(final TableInfo tableInfo) {
    final QualifiedName name = tableInfo.getName();
    final String tableName = (name != null) ? name.getTableName() : "";
    final String databaseName = (name != null) ? name.getDatabaseName() : "";
    final StorageInfo storageInfo = tableInfo.getSerde();
    final String owner = (storageInfo != null && storageInfo.getOwner() != null) ? storageInfo.getOwner() : "";
    final AuditInfo auditInfo = tableInfo.getAudit();
    final int createTime = (auditInfo != null && auditInfo.getCreatedDate() != null) ? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
    final Map<String, String> params = (tableInfo.getMetadata() != null) ? tableInfo.getMetadata() : new HashMap<>();
    final List<FieldInfo> fields = tableInfo.getFields();
    List<FieldSchema> partitionFields = Collections.emptyList();
    List<FieldSchema> nonPartitionFields = Collections.emptyList();
    if (fields != null) {
        nonPartitionFields = Lists.newArrayListWithCapacity(fields.size());
        partitionFields = Lists.newArrayListWithCapacity(fields.size());
        for (FieldInfo fieldInfo : fields) {
            if (fieldInfo.isPartitionKey()) {
                partitionFields.add(metacatToHiveField(fieldInfo));
            } else {
                nonPartitionFields.add(metacatToHiveField(fieldInfo));
            }
        }
    }
    final StorageDescriptor sd = fromStorageInfo(storageInfo, nonPartitionFields);
    final ViewInfo viewInfo = tableInfo.getView();
    final String tableType = (null != viewInfo && !Strings.isNullOrEmpty(viewInfo.getViewOriginalText())) ? TableType.VIRTUAL_VIEW.name() : TableType.EXTERNAL_TABLE.name();
    return new Table(tableName,
        databaseName,
        owner,
        createTime,
        /* lastAccessTime */ 0,
        /* retention */ 0,
        sd,
        partitionFields,
        params,
        tableType.equals(TableType.VIRTUAL_VIEW.name()) ? tableInfo.getView().getViewOriginalText() : null,
        tableType.equals(TableType.VIRTUAL_VIEW.name()) ? tableInfo.getView().getViewExpandedText() : null,
        tableType);
}
Also used : AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) DirectSqlTable(com.netflix.metacat.connector.hive.sql.DirectSqlTable) Table(org.apache.hadoop.hive.metastore.api.Table) QualifiedName(com.netflix.metacat.common.QualifiedName) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) ViewInfo(com.netflix.metacat.common.server.connectors.model.ViewInfo) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) FieldInfo(com.netflix.metacat.common.server.connectors.model.FieldInfo)
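
The createTime passed to the Table constructor is epoch seconds, while AuditInfo.getCreatedDate() returns a java.util.Date. The dateToEpochSeconds helper is not shown in this excerpt; a minimal sketch of that conversion, assuming it simply truncates milliseconds to seconds (the class name is hypothetical), could look like:

import java.util.Date;
import java.util.concurrent.TimeUnit;

public final class EpochSecondsSketch {
    // Hive's metastore stores the table create time as seconds since the epoch,
    // so the millisecond value of the Date is truncated to whole seconds.
    static int dateToEpochSeconds(final Date date) {
        return (int) TimeUnit.MILLISECONDS.toSeconds(date.getTime());
    }

    public static void main(final String[] args) {
        System.out.println(dateToEpochSeconds(new Date()));
    }
}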

Example 13 with AuditInfo

use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.

the class HiveConnectorFastPartitionService method getIcebergPartitionInfos.

/**
 * Gets the iceberg table partition summary.
 *
 * @param tableInfo         table info
 * @param partitionsRequest partition request
 * @return a filtered, sorted, and paginated list of iceberg partition info with data metrics
 */
private List<PartitionInfo> getIcebergPartitionInfos(final TableInfo tableInfo, final PartitionListRequest partitionsRequest) {
    final QualifiedName tableName = tableInfo.getName();
    final org.apache.iceberg.Table icebergTable = this.icebergTableHandler.getIcebergTable(tableName, HiveTableUtil.getIcebergTableMetadataLocation(tableInfo), false).getTable();
    final Pageable pageable = partitionsRequest.getPageable();
    final Map<String, ScanSummary.PartitionMetrics> partitionMap = icebergTableHandler.getIcebergTablePartitionMap(tableName, partitionsRequest, icebergTable);
    final List<String> partitionIds = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final AuditInfo tableAuditInfo = tableInfo.getAudit();
    final List<PartitionInfo> filteredPartitionList = partitionMap.keySet().stream()
        .filter(partitionName -> partitionIds == null || partitionIds.contains(partitionName))
        .map(partitionName -> PartitionInfo.builder()
            .name(QualifiedName.ofPartition(tableName.getCatalogName(),
                tableName.getDatabaseName(), tableName.getTableName(), partitionName))
            .serde(StorageInfo.builder()
                .uri(getIcebergPartitionURI(tableName.getDatabaseName(), tableName.getTableName(),
                    partitionName, partitionMap.get(partitionName).dataTimestampMillis()))
                .build())
            .dataMetrics(icebergTableHandler.getDataMetadataFromIcebergMetrics(partitionMap.get(partitionName)))
            .auditInfo(AuditInfo.builder()
                .createdBy(tableAuditInfo.getCreatedBy())
                .createdDate(fromEpochMilliToDate(partitionMap.get(partitionName).dataTimestampMillis()))
                .lastModifiedDate(fromEpochMilliToDate(partitionMap.get(partitionName).dataTimestampMillis()))
                .build())
            .build())
        .collect(Collectors.toList());
    if (sort != null) {
        if (sort.hasSort() && sort.getSortBy().equalsIgnoreCase(DirectSqlGetPartition.FIELD_DATE_CREATED)) {
            final Comparator<PartitionInfo> dateCreatedComparator = Comparator.comparing(p -> p.getAudit() != null ? p.getAudit().getCreatedDate() : null, Comparator.nullsLast(Date::compareTo));
            ConnectorUtils.sort(filteredPartitionList, sort, dateCreatedComparator);
        } else {
            // Sort using the partition name by default
            final Comparator<PartitionInfo> nameComparator = Comparator.comparing(p -> p.getName().toString());
            ConnectorUtils.sort(filteredPartitionList, sort, nameComparator);
        }
    }
    return ConnectorUtils.paginate(filteredPartitionList, pageable);
}
Also used : StringUtils(org.apache.commons.lang.StringUtils) HiveConfigConstants(com.netflix.metacat.connector.hive.util.HiveConfigConstants) Date(java.util.Date) AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) SerDeInfo(org.apache.hadoop.hive.metastore.api.SerDeInfo) Warehouse(org.apache.hadoop.hive.metastore.Warehouse) InvalidMetaException(com.netflix.metacat.common.server.connectors.exception.InvalidMetaException) PartitionInfo(com.netflix.metacat.common.server.connectors.model.PartitionInfo) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) ConnectorContext(com.netflix.metacat.common.server.connectors.ConnectorContext) StorageInfo(com.netflix.metacat.common.server.connectors.model.StorageInfo) HiveConnectorPartitionService(com.netflix.metacat.connector.hive.HiveConnectorPartitionService) HiveConnectorInfoConverter(com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) Nonnull(javax.annotation.Nonnull) ConnectorRequestContext(com.netflix.metacat.common.server.connectors.ConnectorRequestContext) Nullable(javax.annotation.Nullable) Pageable(com.netflix.metacat.common.dto.Pageable) Set(java.util.Set) QualifiedName(com.netflix.metacat.common.QualifiedName) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) IMetacatHiveClient(com.netflix.metacat.connector.hive.IMetacatHiveClient) Table(org.apache.hadoop.hive.metastore.api.Table) TimeUnit(java.util.concurrent.TimeUnit) ScanSummary(org.apache.iceberg.ScanSummary) HiveMetrics(com.netflix.metacat.connector.hive.monitoring.HiveMetrics) Slf4j(lombok.extern.slf4j.Slf4j) List(java.util.List) IcebergTableHandler(com.netflix.metacat.connector.hive.iceberg.IcebergTableHandler) TableInfo(com.netflix.metacat.common.server.connectors.model.TableInfo) HiveTableUtil(com.netflix.metacat.connector.hive.util.HiveTableUtil) PartitionUtil(com.netflix.metacat.connector.hive.util.PartitionUtil) Registry(com.netflix.spectator.api.Registry) PartitionListRequest(com.netflix.metacat.common.server.connectors.model.PartitionListRequest) VisibleForTesting(com.google.common.annotations.VisibleForTesting) ConnectorUtils(com.netflix.metacat.common.server.connectors.ConnectorUtils) Comparator(java.util.Comparator) Sort(com.netflix.metacat.common.dto.Sort)
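
The date-based sort above relies on Comparator.nullsLast so partitions whose audit info lacks a created date sort after the dated ones. A standalone sketch of the same composition, using only the JDK (the sample dates are hypothetical):

import java.util.Arrays;
import java.util.Comparator;
import java.util.Date;
import java.util.List;

public final class NullsLastComparatorSketch {
    public static void main(final String[] args) {
        // Entries with an unknown created date (null) should sort after dated ones.
        final List<Date> createdDates = Arrays.asList(new Date(2000L), null, new Date(1000L));
        createdDates.sort(Comparator.nullsLast(Date::compareTo));
        System.out.println(createdDates); // the two dates in ascending order, then null
    }
}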

Example 14 with AuditInfo

use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.

the class MySqlConnectorTableService method setTableInfoDetails.

/**
 * {@inheritDoc}
 */
@Override
protected void setTableInfoDetails(final Connection connection, final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    try (final PreparedStatement statement = connection.prepareStatement(SQL_GET_AUDIT_INFO)) {
        statement.setString(1, tableName.getDatabaseName());
        statement.setString(2, tableName.getTableName());
        try (final ResultSet resultSet = statement.executeQuery()) {
            if (resultSet.next()) {
                final AuditInfo auditInfo = AuditInfo.builder().createdDate(resultSet.getDate(COL_CREATE_TIME)).lastModifiedDate(resultSet.getDate(COL_UPDATE_TIME)).build();
                tableInfo.setAudit(auditInfo);
            }
        }
    } catch (final Exception ignored) {
        log.info("Ignoring. Error getting the audit info for table {}", tableName);
    }
}
Also used : AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) QualifiedName(com.netflix.metacat.common.QualifiedName) ResultSet(java.sql.ResultSet) PreparedStatement(java.sql.PreparedStatement)
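
SQL_GET_AUDIT_INFO is not shown in this excerpt. Given the create_time and update_time columns read above and the two bind parameters (schema, then table), one plausible shape, stated here as an assumption rather than the verified constant, is a lookup against MySQL's information_schema:

final class MySqlAuditSqlSketch {
    // Assumed shape of the query, not the verified constant from the connector:
    // it matches the two bind parameters (schema, then table) and the
    // create_time / update_time columns read in setTableInfoDetails above.
    static final String SQL_GET_AUDIT_INFO =
        "SELECT create_time, update_time FROM information_schema.tables"
            + " WHERE table_schema = ? AND table_name = ?";
}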

Example 15 with AuditInfo

use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.

the class SnowflakeConnectorTableService method setTableInfoDetails.

/**
 * {@inheritDoc}
 */
@Override
protected void setTableInfoDetails(final Connection connection, final TableInfo tableInfo) {
    final QualifiedName tableName = getSnowflakeName(tableInfo.getName());
    try (PreparedStatement statement = connection.prepareStatement(SQL_GET_AUDIT_INFO)) {
        statement.setString(1, tableName.getDatabaseName());
        statement.setString(2, tableName.getDatabaseName());
        statement.setString(3, tableName.getTableName());
        try (ResultSet resultSet = statement.executeQuery()) {
            if (resultSet.next()) {
                final AuditInfo auditInfo = AuditInfo.builder().createdDate(resultSet.getDate(COL_CREATED)).lastModifiedDate(resultSet.getDate(COL_LAST_ALTERED)).build();
                tableInfo.setAudit(auditInfo);
            }
        }
    } catch (final Exception ignored) {
        log.info("Ignoring. Error getting the audit info for table {}", tableName);
    }
}
Also used : AuditInfo(com.netflix.metacat.common.server.connectors.model.AuditInfo) QualifiedName(com.netflix.metacat.common.QualifiedName) ResultSet(java.sql.ResultSet) PreparedStatement(java.sql.PreparedStatement) SQLException(java.sql.SQLException)
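
This override mirrors the MySQL one above apart from the column names and bind parameters, so the ResultSet-to-AuditInfo mapping could be factored into a shared helper. A hypothetical refactor sketch, not code from the project:

import java.sql.ResultSet;
import java.sql.SQLException;

import com.netflix.metacat.common.server.connectors.model.AuditInfo;

final class AuditInfoResultSetSketch {
    // Hypothetical shared helper: maps two date columns from the current row
    // into the AuditInfo value object used by both overrides above.
    static AuditInfo fromResultSet(final ResultSet resultSet,
                                   final String createdColumn,
                                   final String lastModifiedColumn) throws SQLException {
        return AuditInfo.builder()
            .createdDate(resultSet.getDate(createdColumn))
            .lastModifiedDate(resultSet.getDate(lastModifiedColumn))
            .build();
    }
}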

Aggregations

AuditInfo (com.netflix.metacat.common.server.connectors.model.AuditInfo): 15
QualifiedName (com.netflix.metacat.common.QualifiedName): 11
StorageInfo (com.netflix.metacat.common.server.connectors.model.StorageInfo): 10
PartitionInfo (com.netflix.metacat.common.server.connectors.model.PartitionInfo): 7
List (java.util.List): 6
Map (java.util.Map): 6
Collectors (java.util.stream.Collectors): 6
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 6
Table (org.apache.hadoop.hive.metastore.api.Table): 6
Strings (com.google.common.base.Strings): 5
Lists (com.google.common.collect.Lists): 5
TableInfo (com.netflix.metacat.common.server.connectors.model.TableInfo): 5
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 4
Pageable (com.netflix.metacat.common.dto.Pageable): 4
Sort (com.netflix.metacat.common.dto.Sort): 4
ConnectorContext (com.netflix.metacat.common.server.connectors.ConnectorContext): 4
DatabaseInfo (com.netflix.metacat.common.server.connectors.model.DatabaseInfo): 4
FieldInfo (com.netflix.metacat.common.server.connectors.model.FieldInfo): 4
PartitionListRequest (com.netflix.metacat.common.server.connectors.model.PartitionListRequest): 4
Instant (java.time.Instant): 4