Use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.
The class DirectSqlDatabase, method getDatabaseById.
private DatabaseInfo getDatabaseById(final Long id, final QualifiedName databaseName) {
    DatabaseInfo result = null;
    try {
        // Retrieve the database info record
        final SqlRowSet databaseRowSet = jdbcTemplate.queryForRowSet(
            SQL.GET_DATABASE, new Object[] {id}, new int[] {Types.BIGINT});
        if (databaseRowSet.first()) {
            final AuditInfo auditInfo =
                AuditInfo.builder().createdBy(databaseRowSet.getString(COL_OWNER)).build();
            // Retrieve the database params
            final Map<String, String> metadata = Maps.newHashMap();
            try {
                final SqlRowSet paramRowSet = jdbcTemplate.queryForRowSet(
                    SQL.GET_DATABASE_PARAMS, new Object[] {id}, new int[] {Types.BIGINT});
                while (paramRowSet.next()) {
                    metadata.put(paramRowSet.getString(COL_PARAM_KEY),
                        paramRowSet.getString(COL_PARAM_VALUE));
                }
            } catch (EmptyResultDataAccessException ignored) {
            }
            result = DatabaseInfo.builder()
                .name(databaseName)
                .uri(databaseRowSet.getString(COL_URI))
                .auditInfo(auditInfo)
                .metadata(metadata)
                .build();
        }
    } catch (EmptyResultDataAccessException e) {
        log.debug("Database {} not found.", databaseName);
        throw new DatabaseNotFoundException(databaseName);
    }
    return result;
}
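For reference, here is a minimal sketch of populating the AuditInfo model on its own. The builder fields shown are the ones that appear across the snippets on this page; lastModifiedBy is inferred by symmetry with createdBy and should be treated as an assumption:

import java.util.Date;
import com.netflix.metacat.common.server.connectors.model.AuditInfo;

final Date now = new Date();
final AuditInfo auditInfo = AuditInfo.builder()
    .createdBy("metacat")          // principal that created the object
    .createdDate(now)              // creation timestamp
    .lastModifiedBy("metacat")     // assumed field, mirroring createdBy
    .lastModifiedDate(now)         // last-modification timestamp
    .build();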
Use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.
The class HiveConnectorInfoConverter, method fromTableInfo.
/**
 * Converts from TableInfo to the connector table.
 *
 * @param tableInfo Metacat table info
 * @return connector table
 */
@Override
public Table fromTableInfo(final TableInfo tableInfo) {
    final QualifiedName name = tableInfo.getName();
    final String tableName = (name != null) ? name.getTableName() : "";
    final String databaseName = (name != null) ? name.getDatabaseName() : "";
    final StorageInfo storageInfo = tableInfo.getSerde();
    final String owner = (storageInfo != null && storageInfo.getOwner() != null)
        ? storageInfo.getOwner() : "";
    final AuditInfo auditInfo = tableInfo.getAudit();
    final int createTime = (auditInfo != null && auditInfo.getCreatedDate() != null)
        ? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
    final Map<String, String> params =
        (tableInfo.getMetadata() != null) ? tableInfo.getMetadata() : new HashMap<>();
    final List<FieldInfo> fields = tableInfo.getFields();
    List<FieldSchema> partitionFields = Collections.emptyList();
    List<FieldSchema> nonPartitionFields = Collections.emptyList();
    if (fields != null) {
        nonPartitionFields = Lists.newArrayListWithCapacity(fields.size());
        partitionFields = Lists.newArrayListWithCapacity(fields.size());
        for (FieldInfo fieldInfo : fields) {
            if (fieldInfo.isPartitionKey()) {
                partitionFields.add(metacatToHiveField(fieldInfo));
            } else {
                nonPartitionFields.add(metacatToHiveField(fieldInfo));
            }
        }
    }
    final StorageDescriptor sd = fromStorageInfo(storageInfo, nonPartitionFields);
    final ViewInfo viewInfo = tableInfo.getView();
    final boolean isView = viewInfo != null && !Strings.isNullOrEmpty(viewInfo.getViewOriginalText());
    final String tableType = isView ? TableType.VIRTUAL_VIEW.name() : TableType.EXTERNAL_TABLE.name();
    return new Table(tableName, databaseName, owner, createTime, 0, 0, sd, partitionFields, params,
        isView ? viewInfo.getViewOriginalText() : null,
        isView ? viewInfo.getViewExpandedText() : null,
        tableType);
}
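The dateToEpochSeconds helper is referenced above but not shown on this page. A plausible implementation, assuming it simply truncates a java.util.Date to whole epoch seconds for Hive's int createTime field:

private static int dateToEpochSeconds(final Date date) {
    // Hive's Table.createTime is an int holding seconds since the epoch.
    return Math.toIntExact(date.toInstant().getEpochSecond());
}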
Use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.
The class HiveConnectorFastPartitionService, method getIcebergPartitionInfos.
/**
 * Gets the Iceberg table partition summary.
 *
 * @param tableInfo         table info
 * @param partitionsRequest partition request
 * @return Iceberg partition name and metrics mapping
 */
private List<PartitionInfo> getIcebergPartitionInfos(
    final TableInfo tableInfo, final PartitionListRequest partitionsRequest) {
    final QualifiedName tableName = tableInfo.getName();
    final org.apache.iceberg.Table icebergTable = this.icebergTableHandler
        .getIcebergTable(tableName, HiveTableUtil.getIcebergTableMetadataLocation(tableInfo), false)
        .getTable();
    final Pageable pageable = partitionsRequest.getPageable();
    final Map<String, ScanSummary.PartitionMetrics> partitionMap =
        icebergTableHandler.getIcebergTablePartitionMap(tableName, partitionsRequest, icebergTable);
    final List<String> partitionIds = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final AuditInfo tableAuditInfo = tableInfo.getAudit();
    final List<PartitionInfo> filteredPartitionList = partitionMap.keySet().stream()
        .filter(partitionName -> partitionIds == null || partitionIds.contains(partitionName))
        .map(partitionName -> PartitionInfo.builder()
            .name(QualifiedName.ofPartition(tableName.getCatalogName(),
                tableName.getDatabaseName(), tableName.getTableName(), partitionName))
            .serde(StorageInfo.builder()
                .uri(getIcebergPartitionURI(tableName.getDatabaseName(), tableName.getTableName(),
                    partitionName, partitionMap.get(partitionName).dataTimestampMillis()))
                .build())
            .dataMetrics(icebergTableHandler
                .getDataMetadataFromIcebergMetrics(partitionMap.get(partitionName)))
            .auditInfo(AuditInfo.builder()
                .createdBy(tableAuditInfo.getCreatedBy())
                .createdDate(fromEpochMilliToDate(partitionMap.get(partitionName).dataTimestampMillis()))
                .lastModifiedDate(fromEpochMilliToDate(partitionMap.get(partitionName).dataTimestampMillis()))
                .build())
            .build())
        .collect(Collectors.toList());
    if (sort != null) {
        if (sort.hasSort() && sort.getSortBy().equalsIgnoreCase(DirectSqlGetPartition.FIELD_DATE_CREATED)) {
            final Comparator<PartitionInfo> dateCreatedComparator = Comparator.comparing(
                p -> p.getAudit() != null ? p.getAudit().getCreatedDate() : null,
                Comparator.nullsLast(Date::compareTo));
            ConnectorUtils.sort(filteredPartitionList, sort, dateCreatedComparator);
        } else {
            // Sort by partition name by default
            final Comparator<PartitionInfo> nameComparator =
                Comparator.comparing(p -> p.getName().toString());
            ConnectorUtils.sort(filteredPartitionList, sort, nameComparator);
        }
    }
    return ConnectorUtils.paginate(filteredPartitionList, pageable);
}
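Likewise, fromEpochMilliToDate is referenced but not defined on this page. A minimal sketch, assuming it wraps epoch milliseconds in a java.util.Date and tolerates a missing timestamp:

private static Date fromEpochMilliToDate(final Long millis) {
    // Date(long) interprets its argument as milliseconds since the epoch.
    return millis == null ? null : new Date(millis);
}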
Use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.
The class MySqlConnectorTableService, method setTableInfoDetails.
/**
 * {@inheritDoc}
 */
@Override
protected void setTableInfoDetails(final Connection connection, final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    try (final PreparedStatement statement = connection.prepareStatement(SQL_GET_AUDIT_INFO)) {
        statement.setString(1, tableName.getDatabaseName());
        statement.setString(2, tableName.getTableName());
        try (final ResultSet resultSet = statement.executeQuery()) {
            if (resultSet.next()) {
                final AuditInfo auditInfo = AuditInfo.builder()
                    .createdDate(resultSet.getDate(COL_CREATE_TIME))
                    .lastModifiedDate(resultSet.getDate(COL_UPDATE_TIME))
                    .build();
                tableInfo.setAudit(auditInfo);
            }
        }
    } catch (final Exception ignored) {
        log.info("Ignoring. Error getting the audit info for table {}", tableName);
    }
}
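The SQL_GET_AUDIT_INFO constant is not shown here. Since the caller binds the database name and table name, a plausible shape for it queries MySQL's information_schema; the actual constant in MySqlConnectorTableService may differ:

// Assumed query shape; information_schema.tables exposes CREATE_TIME and UPDATE_TIME in MySQL.
private static final String SQL_GET_AUDIT_INFO =
    "SELECT create_time, update_time FROM information_schema.tables"
        + " WHERE table_schema = ? AND table_name = ?";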
Use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.
The class SnowflakeConnectorTableService, method setTableInfoDetails.
/**
 * {@inheritDoc}
 */
@Override
protected void setTableInfoDetails(final Connection connection, final TableInfo tableInfo) {
    final QualifiedName tableName = getSnowflakeName(tableInfo.getName());
    try (PreparedStatement statement = connection.prepareStatement(SQL_GET_AUDIT_INFO)) {
        statement.setString(1, tableName.getDatabaseName());
        statement.setString(2, tableName.getDatabaseName());
        statement.setString(3, tableName.getTableName());
        try (ResultSet resultSet = statement.executeQuery()) {
            if (resultSet.next()) {
                final AuditInfo auditInfo = AuditInfo.builder()
                    .createdDate(resultSet.getDate(COL_CREATED))
                    .lastModifiedDate(resultSet.getDate(COL_LAST_ALTERED))
                    .build();
                tableInfo.setAudit(auditInfo);
            }
        }
    } catch (final Exception ignored) {
        log.info("Ignoring. Error getting the audit info for table {}", tableName);
    }
}
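Here too the SQL_GET_AUDIT_INFO constant is not shown. Snowflake's information_schema.tables exposes CREATED and LAST_ALTERED columns, and the caller binds the database name twice, which suggests two schema-level predicates. A hedged sketch only; the real constant may differ:

// Assumed query shape; the double database-name binding is matched with two predicates here.
private static final String SQL_GET_AUDIT_INFO =
    "SELECT created, last_altered FROM information_schema.tables"
        + " WHERE table_catalog = ? AND table_schema = ? AND table_name = ?";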