Use of com.netflix.metacat.common.QualifiedName in project metacat by Netflix.
From the class MysqlUserMetadataService, method deleteDefinitionMetadatas.
@Override
public void deleteDefinitionMetadatas(@Nonnull final List<QualifiedName> names) {
    try {
        final Connection conn = poolingDataSource.getConnection();
        try {
            final List<List<QualifiedName>> subLists =
                Lists.partition(names, config.getUserMetadataMaxInClauseItems());
            for (List<QualifiedName> subNames : subLists) {
                _deleteDefinitionMetadatas(conn, subNames);
            }
            conn.commit();
        } catch (SQLException e) {
            conn.rollback();
            throw e;
        } finally {
            conn.close();
        }
    } catch (SQLException e) {
        log.error("Sql exception", e);
        throw new UserMetadataServiceException(
            String.format("Failed deleting the definition metadata for %s", names), e);
    }
}
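The method partitions the incoming names so that no single DELETE statement exceeds the configured maximum number of IN-clause items, and commits only after every batch succeeds. A minimal sketch of what the private _deleteDefinitionMetadatas helper might look like; the table name, column name, and body below are assumptions for illustration, not the actual metacat implementation:

// Hypothetical sketch: delete one batch of rows whose name column matches
// the serialized QualifiedNames. Requires java.sql.PreparedStatement and
// java.util.Collections. Table and column names are assumed.
private void _deleteDefinitionMetadatas(final Connection conn, final List<QualifiedName> names)
    throws SQLException {
    final String placeholders = String.join(",", Collections.nCopies(names.size(), "?"));
    final String sql = "DELETE FROM definition_metadata WHERE name IN (" + placeholders + ")";
    try (PreparedStatement stmt = conn.prepareStatement(sql)) {
        for (int i = 0; i < names.size(); i++) {
            stmt.setString(i + 1, names.get(i).toString());
        }
        stmt.executeUpdate();
    }
}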
Use of com.netflix.metacat.common.QualifiedName in project metacat by Netflix.
From the class JdbcConnectorDatabaseService, method listNames.
/**
 * {@inheritDoc}
 */
@Override
public List<QualifiedName> listNames(
    @Nonnull final ConnectorContext context,
    @Nonnull final QualifiedName name,
    @Nullable final QualifiedName prefix,
    @Nullable final Sort sort,
    @Nullable final Pageable pageable
) {
    final String catalogName = name.getCatalogName();
    log.debug("Beginning to list database names for catalog {} for request {}", catalogName, context);
    try (final Connection connection = this.dataSource.getConnection()) {
        final DatabaseMetaData metaData = connection.getMetaData();
        final List<QualifiedName> names = Lists.newArrayList();
        try (
            final ResultSet schemas = prefix == null || StringUtils.isEmpty(prefix.getDatabaseName())
                ? metaData.getSchemas()
                : metaData.getSchemas(
                    connection.getCatalog(),
                    prefix.getDatabaseName() + JdbcConnectorUtils.MULTI_CHARACTER_SEARCH)
        ) {
            while (schemas.next()) {
                final String schemaName = schemas.getString("TABLE_SCHEM").toLowerCase(Locale.ENGLISH);
                // skip internal schemas
                if (!schemaName.equals("information_schema")) {
                    names.add(QualifiedName.ofDatabase(name.getCatalogName(), schemaName));
                }
            }
        }
        // Does user want sorting?
        if (sort != null) {
            // We can only really sort by the database name at this level so ignore SortBy field
            final Comparator<QualifiedName> comparator = Comparator.comparing(QualifiedName::getDatabaseName);
            JdbcConnectorUtils.sort(names, sort, comparator);
        }
        // Does user want pagination?
        final List<QualifiedName> results = JdbcConnectorUtils.paginate(names, pageable);
        log.debug("Finished listing database names for catalog {} for request {}", catalogName, context);
        return results;
    } catch (final SQLException se) {
        throw this.exceptionMapper.toConnectorException(se, name);
    }
}
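The prefix lookup leans on JDBC's pattern matching in DatabaseMetaData.getSchemas, with JdbcConnectorUtils.MULTI_CHARACTER_SEARCH appended as a wildcard suffix, while sorting and pagination happen in memory after the fetch. A minimal sketch of how that constant and the paginate call used above might be defined; the paginate body and the Pageable accessors are assumptions for illustration, not the actual metacat code:

// Hypothetical sketch: "%" is the multi-character wildcard in JDBC metadata
// pattern arguments; pagination is applied in memory over the fetched names.
public static final String MULTI_CHARACTER_SEARCH = "%";

// Assumes Pageable exposes getOffset() and getLimit() accessors.
public static <T> List<T> paginate(final List<T> elements, @Nullable final Pageable pageable) {
    if (pageable == null) {
        return elements;
    }
    final int from = Math.min(pageable.getOffset(), elements.size());
    final int to = Math.min(from + pageable.getLimit(), elements.size());
    return Lists.newArrayList(elements.subList(from, to));
}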
Use of com.netflix.metacat.common.QualifiedName in project metacat by Netflix.
From the class JdbcConnectorDatabaseService, method list.
/**
 * {@inheritDoc}
 */
@Override
public List<DatabaseInfo> list(
    @Nonnull final ConnectorContext context,
    @Nonnull final QualifiedName name,
    @Nullable final QualifiedName prefix,
    @Nullable final Sort sort,
    @Nullable final Pageable pageable
) {
    final String catalogName = name.getCatalogName();
    log.debug("Beginning to list database metadata for catalog {} for request {}", catalogName, context);
    final ImmutableList.Builder<DatabaseInfo> builder = ImmutableList.builder();
    for (final QualifiedName dbName : this.listNames(context, name, prefix, sort, pageable)) {
        builder.add(this.get(context, dbName));
    }
    log.debug("Finished listing database metadata for catalog {} for request {}", catalogName, context);
    return builder.build();
}
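This method delegates name listing to listNames and then fetches each database's metadata with one get call per name, so large catalogs pay a round trip per database. A hypothetical invocation, assuming a configured databaseService instance and connector context; the catalog name "mysql" is illustrative only:

// Hypothetical usage: list metadata for every database in a catalog.
// Passing null for prefix, sort, and pageable returns everything unordered.
final QualifiedName catalog = QualifiedName.ofCatalog("mysql");
final List<DatabaseInfo> databases = databaseService.list(context, catalog, null, null, null);
databases.forEach(db -> log.info("Found database {}", db.getName()));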
Use of com.netflix.metacat.common.QualifiedName in project metacat by Netflix.
From the class HiveConnectorInfoConverter, method toTableInfo.
/**
 * Converts a Hive table to a Metacat TableInfo.
 *
 * @param name  qualified name of the table
 * @param table connector table
 * @return Metacat table info
 */
@Override
public TableInfo toTableInfo(@Nonnull @NonNull final QualifiedName name, @Nonnull @NonNull final Table table) {
    // Copy into a mutable list: when the storage descriptor is null the
    // fallback Collections.emptyList() is immutable, and the loop below may
    // need to back-fill columns from the table's deserializer.
    final List<FieldSchema> nonPartitionColumns = Lists.newArrayList(
        table.getSd() != null ? table.getSd().getCols() : Collections.<FieldSchema>emptyList());
    // ignore all exceptions while back-filling the columns
    try {
        if (nonPartitionColumns.isEmpty()) {
            for (StructField field : HiveTableUtil.getTableStructFields(table)) {
                final FieldSchema fieldSchema = new FieldSchema(
                    field.getFieldName(),
                    field.getFieldObjectInspector().getTypeName(),
                    field.getFieldComment());
                nonPartitionColumns.add(fieldSchema);
            }
        }
    } catch (final Exception e) {
        log.error(e.getMessage(), e);
    }
    final List<FieldSchema> partitionColumns = table.getPartitionKeys();
    final Date creationDate = table.isSetCreateTime() ? epochSecondsToDate(table.getCreateTime()) : null;
    final List<FieldInfo> allFields =
        Lists.newArrayListWithCapacity(nonPartitionColumns.size() + partitionColumns.size());
    nonPartitionColumns.stream().map(field -> hiveToMetacatField(field, false)).forEachOrdered(allFields::add);
    partitionColumns.stream().map(field -> hiveToMetacatField(field, true)).forEachOrdered(allFields::add);
    final AuditInfo auditInfo = AuditInfo.builder().createdDate(creationDate).build();
    return TableInfo.builder()
        .serde(toStorageInfo(table.getSd(), table.getOwner()))
        .fields(allFields)
        .metadata(table.getParameters())
        .name(name)
        .auditInfo(auditInfo)
        .build();
}
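Hive stores timestamps such as createTime as epoch seconds, while Metacat's AuditInfo carries a java.util.Date. A plausible sketch of the epochSecondsToDate helper called above; the actual body in metacat may differ:

// Hypothetical helper: converts Hive's epoch-second timestamps to Date.
// Requires java.time.Instant and java.util.Date.
private static Date epochSecondsToDate(final long seconds) {
    return Date.from(Instant.ofEpochSecond(seconds));
}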
Use of com.netflix.metacat.common.QualifiedName in project metacat by Netflix.
From the class HiveConnectorInfoConverter, method fromPartitionInfo.
/**
 * Converts from a Metacat PartitionInfo to the connector partition.
 *
 * @param tableInfo Metacat table info the partition belongs to
 * @param partition Metacat partition info
 * @return connector partition
 */
@Override
public Partition fromPartitionInfo(
    @Nonnull @NonNull final TableInfo tableInfo,
    @Nonnull @NonNull final PartitionInfo partition
) {
    final QualifiedName name = partition.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    Map<String, String> metadata = partition.getMetadata();
    if (metadata == null) {
        // Can't use Collections.emptyMap() here: it is immutable, and the map
        // may later be modified when parts are added in the embedded client.
        metadata = new HashMap<>();
    }
    final List<FieldInfo> fields = tableInfo.getFields();
    List<FieldSchema> fieldSchemas = Collections.emptyList();
    if (notNull(fields)) {
        fieldSchemas = fields.stream()
            .filter(field -> !field.isPartitionKey())
            .map(this::metacatToHiveField)
            .collect(Collectors.toList());
    }
    final StorageDescriptor sd = fromStorageInfo(partition.getSerde(), fieldSchemas);
    // fall back to the table-level serialization lib when the partition has none
    if (notNull(sd.getSerdeInfo()) && notNull(tableInfo.getSerde())
        && Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
        sd.getSerdeInfo().setSerializationLib(tableInfo.getSerde().getSerializationLib());
    }
    final AuditInfo auditInfo = partition.getAudit();
    final int createTime = (notNull(auditInfo) && notNull(auditInfo.getCreatedDate()))
        ? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
    final int lastAccessTime = (notNull(auditInfo) && notNull(auditInfo.getLastModifiedDate()))
        ? dateToEpochSeconds(auditInfo.getLastModifiedDate()) : 0;
    if (null == name) {
        return new Partition(values, "", "", createTime, lastAccessTime, sd, metadata);
    }
    if (notNull(name.getPartitionName())) {
        for (String partialPartName : SLASH_SPLITTER.split(partition.getName().getPartitionName())) {
            final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
            Preconditions.checkState(nameValues.size() == 2,
                "Unrecognized partition name: " + partition.getName());
            values.add(nameValues.get(1));
        }
    }
    final String databaseName = notNull(name.getDatabaseName()) ? name.getDatabaseName() : "";
    final String tableName = notNull(name.getTableName()) ? name.getTableName() : "";
    return new Partition(values, databaseName, tableName, createTime, lastAccessTime, sd, metadata);
}