Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
The class HiveConnectorInfoConverter, method fromPartitionInfo.
/**
 * Converts a Metacat PartitionInfo to the connector (Hive) partition.
 *
 * @param tableInfo Metacat table info for the partition's parent table
 * @param partition Metacat partition info
 * @return connector partition
 */
@Override
public Partition fromPartitionInfo(final TableInfo tableInfo, final PartitionInfo partition) {
    final QualifiedName name = partition.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    Map<String, String> metadata = partition.getMetadata();
    if (metadata == null) {
        // Collections.emptyMap() can't be used here: it is immutable and
        // can't be modified when parts are added in the embedded client.
        metadata = new HashMap<>();
    }
    final List<FieldInfo> fields = tableInfo.getFields();
    List<FieldSchema> fieldSchemas = Collections.emptyList();
    if (notNull(fields)) {
        fieldSchemas = fields.stream()
            .filter(field -> !field.isPartitionKey())
            .map(this::metacatToHiveField)
            .collect(Collectors.toList());
    }
    final StorageDescriptor sd = fromStorageInfo(partition.getSerde(), fieldSchemas);
    // Fall back to the table-level serialization lib when the partition doesn't set one.
    if (notNull(sd.getSerdeInfo()) && notNull(tableInfo.getSerde())
            && Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
        sd.getSerdeInfo().setSerializationLib(tableInfo.getSerde().getSerializationLib());
    }
    final AuditInfo auditInfo = partition.getAudit();
    final int createTime = (notNull(auditInfo) && notNull(auditInfo.getCreatedDate()))
            ? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
    final int lastAccessTime = (notNull(auditInfo) && notNull(auditInfo.getLastModifiedDate()))
            ? dateToEpochSeconds(auditInfo.getLastModifiedDate()) : 0;
    if (null == name) {
        return new Partition(values, "", "", createTime, lastAccessTime, sd, metadata);
    }
    if (notNull(name.getPartitionName())) {
        for (final String partialPartName : SLASH_SPLITTER.split(partition.getName().getPartitionName())) {
            final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
            Preconditions.checkState(nameValues.size() == 2,
                "Unrecognized partition name: " + partition.getName());
            values.add(nameValues.get(1));
        }
    }
    final String databaseName = notNull(name.getDatabaseName()) ? name.getDatabaseName() : "";
    final String tableName = notNull(name.getTableName()) ? name.getTableName() : "";
    return new Partition(values, databaseName, tableName, createTime, lastAccessTime, sd, metadata);
}
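The partition-name parsing at the end of fromPartitionInfo is easy to exercise in isolation. Below is a self-contained sketch of that loop; the splitter definitions are assumptions inferred from the names SLASH_SPLITTER and EQUAL_SPLITTER (the real constants live in the converter class).

import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;

import java.util.ArrayList;
import java.util.List;

public final class PartitionNameParseDemo {
    // Assumed equivalents of the converter's SLASH_SPLITTER and EQUAL_SPLITTER.
    private static final Splitter SLASH_SPLITTER = Splitter.on('/');
    private static final Splitter EQUAL_SPLITTER = Splitter.on('=');

    public static void main(final String[] args) {
        final List<String> values = new ArrayList<>();
        // A Hive-style partition name is a list of key=value pairs joined by '/'.
        for (final String part : SLASH_SPLITTER.split("dateint=20240101/hour=00")) {
            final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(part));
            if (nameValues.size() != 2) {
                throw new IllegalStateException("Unrecognized partition name: " + part);
            }
            values.add(nameValues.get(1)); // keep the value, drop the key
        }
        System.out.println(values); // prints [20240101, 00]
    }
}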
Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
The class HiveConnectorFastTableService, method update.
/**
 * Updates a table with the given metadata.
 *
 * If the table is an Iceberg table, it is locked for the update so that no other request can
 * modify it concurrently; if the metadata is invalid, an error is thrown.
 * If the table is not an Iceberg table, a regular table update is performed.
 *
 * @param requestContext The request context
 * @param tableInfo      The resource metadata
 */
@Override
public void update(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
    if (isIcebergTable(tableInfo)) {
        final QualifiedName tableName = tableInfo.getName();
        final Long tableId = directSqlTable.getTableId(tableName);
        try {
            log.debug("Locking Iceberg table {}", tableName);
            directSqlTable.lockIcebergTable(tableId, tableName);
            try {
                // Re-read the current state under the lock so validation
                // compares against what is actually stored.
                final TableInfo existingTableInfo = get(requestContext, tableInfo.getName());
                validateIcebergUpdate(existingTableInfo, tableInfo);
                final Table existingTable = getHiveMetacatConverters().fromTableInfo(existingTableInfo);
                super.update(requestContext, existingTable, tableInfo);
            } finally {
                directSqlTable.unlockIcebergTable(tableId);
                log.debug("Unlocked Iceberg table {}", tableName);
            }
        } catch (final IllegalStateException e) {
            throw new TablePreconditionFailedException(tableName, e.getMessage());
        }
    } else {
        super.update(requestContext, tableInfo);
    }
}
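This method follows a lock/validate/write/unlock sequence. The sketch below shows the same discipline in miniature, with a JVM ReentrantLock standing in for directSqlTable's database-level lock (the names are placeholders, not metacat code).

import java.util.concurrent.locks.ReentrantLock;

public final class LockedUpdateDemo {
    private static final ReentrantLock LOCK = new ReentrantLock();

    // Stand-in for lockIcebergTable / unlockIcebergTable around a validated write.
    public static void update(final Runnable validateAndWrite) {
        LOCK.lock(); // acquire before reading the current state
        try {
            validateAndWrite.run(); // validate against the current state, then write
        } finally {
            LOCK.unlock(); // always release, even if validation throws
        }
    }

    public static void main(final String[] args) {
        update(() -> System.out.println("validated and updated"));
    }
}

As in the snippet above, the release sits in a finally block: the unlock must run even when validation throws, otherwise the table would stay locked for all subsequent requests.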
Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
The class JdbcConnectorTableService, method list.
/**
* {@inheritDoc}
*/
@Override
public List<TableInfo> list(
    @Nonnull final ConnectorRequestContext context,
    @Nonnull final QualifiedName name,
    @Nullable final QualifiedName prefix,
    @Nullable final Sort sort,
    @Nullable final Pageable pageable
) {
    log.debug("Beginning to list table metadata for {} for request {}", name, context);
    final ImmutableList.Builder<TableInfo> builder = ImmutableList.builder();
    for (final QualifiedName tableName : this.listNames(context, name, prefix, sort, pageable)) {
        builder.add(this.get(context, tableName));
    }
    log.debug("Finished listing table metadata for {} for request {}", name, context);
    return builder.build();
}
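list composes two calls: listNames resolves the (optionally filtered, sorted, and paginated) table names, and get fetches the metadata for each one, so listing N tables costs N lookups. The accumulate-then-freeze pattern it uses is sketched below; the toUpperCase call is a placeholder for this.get(context, tableName).

import com.google.common.collect.ImmutableList;

import java.util.Arrays;
import java.util.List;

public final class BuilderDemo {
    public static void main(final String[] args) {
        final ImmutableList.Builder<String> builder = ImmutableList.builder();
        for (final String name : Arrays.asList("db.table_a", "db.table_b")) {
            builder.add(name.toUpperCase()); // placeholder for a per-name metadata fetch
        }
        final List<String> tables = builder.build(); // immutable snapshot
        System.out.println(tables); // prints [DB.TABLE_A, DB.TABLE_B]
    }
}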
Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
The class S3ConnectorTableService, method update.
@Override
public void update(@Nonnull final ConnectorRequestContext context, @Nonnull final TableInfo tableInfo) {
    log.debug("Start: Update table {}", tableInfo.getName());
    final QualifiedName tableName = tableInfo.getName();
    final Table table = tableDao.getBySourceDatabaseTableName(
        catalogName, tableName.getDatabaseName(), tableName.getTableName());
    if (table == null) {
        throw new TableNotFoundException(tableName);
    }
    // We can update the fields, the URI, or the full serde.
    final Location newLocation = infoConverter.toLocation(tableInfo);
    Location location = table.getLocation();
    if (location == null) {
        location = new Location();
        location.setTable(table);
        table.setLocation(location);
    }
    if (newLocation.getUri() != null) {
        location.setUri(newLocation.getUri());
    }
    final Info newInfo = newLocation.getInfo();
    if (newInfo != null) {
        final Info info = location.getInfo();
        if (info == null) {
            location.setInfo(newInfo);
            newInfo.setLocation(location);
        } else {
            // Merge: only non-null incoming values overwrite the stored serde info.
            if (newInfo.getInputFormat() != null) {
                info.setInputFormat(newInfo.getInputFormat());
            }
            if (newInfo.getOutputFormat() != null) {
                info.setOutputFormat(newInfo.getOutputFormat());
            }
            if (newInfo.getOwner() != null) {
                info.setOwner(newInfo.getOwner());
            }
            if (newInfo.getSerializationLib() != null) {
                info.setSerializationLib(newInfo.getSerializationLib());
            }
            if (newInfo.getParameters() != null && !newInfo.getParameters().isEmpty()) {
                info.setParameters(newInfo.getParameters());
            }
        }
    }
    final Schema newSchema = newLocation.getSchema();
    if (newSchema != null) {
        final List<Field> newFields = newSchema.getFields();
        if (newFields != null && !newFields.isEmpty()) {
            final Schema schema = location.getSchema();
            if (schema == null) {
                location.setSchema(newSchema);
                newSchema.setLocation(location);
            } else {
                final List<Field> fields = schema.getFields();
                if (fields.isEmpty()) {
                    newFields.forEach(field -> {
                        field.setSchema(schema);
                        fields.add(field);
                    });
                } else {
                    for (int i = 0; i < newFields.size(); i++) {
                        final Field newField = newFields.get(i);
                        newField.setPos(i);
                        newField.setSchema(schema);
                        if (newField.getType() == null) {
                            newField.setType(newField.getSourceType());
                        }
                    }
                    // Replace the schema wholesale: detach and delete the old
                    // fields, persist the table, then attach the new fields.
                    schema.setFields(null);
                    fieldDao.delete(fields);
                    tableDao.save(table, true);
                    schema.setFields(newFields);
                }
            }
        }
    }
    log.debug("End: Update table {}", tableInfo.getName());
}
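Most of the Info branch above is a null-guarded merge: an incoming field overwrites the stored value only when it is actually set. Reduced to its essence (a hypothetical helper, not metacat code):

public final class NullGuardedMergeDemo {
    // Keep the current value unless the incoming patch supplies a replacement.
    static String merge(final String current, final String incoming) {
        return incoming != null ? incoming : current;
    }

    public static void main(final String[] args) {
        // An omitted (null) field leaves the stored value untouched...
        System.out.println(merge("org.apache.hadoop.mapred.TextInputFormat", null));
        // ...while a supplied field overwrites it.
        System.out.println(merge("old-owner", "new-owner"));
    }
}

The schema branch is the one exception: when both old and new fields exist, the old field rows are deleted and the new list is attached wholesale rather than merged field by field.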
Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
The class CassandraConnectorTableService, method get.
/**
* {@inheritDoc}
*/
@Override
public TableInfo get(@Nonnull @NonNull final ConnectorRequestContext context,
                     @Nonnull @NonNull final QualifiedName name) {
    final String keyspace = name.getDatabaseName();
    final String table = name.getTableName();
    log.debug("Attempting to get metadata for Cassandra table {}.{} for request {}", keyspace, table, context);
    try {
        final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace);
        if (keyspaceMetadata == null) {
            throw new DatabaseNotFoundException(name);
        }
        final TableMetadata tableMetadata = keyspaceMetadata.getTable(table);
        if (tableMetadata == null) {
            throw new TableNotFoundException(name);
        }
        final TableInfo tableInfo = this.getTableInfo(name, tableMetadata);
        log.debug("Successfully got metadata for Cassandra table {}.{} for request {}", keyspace, table, context);
        return tableInfo;
    } catch (final DriverException de) {
        log.error(de.getMessage(), de);
        throw this.getExceptionMapper().toConnectorException(de, name);
    }
}
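The lookup chain cluster metadata → keyspace → table is the standard Cassandra Java driver 3.x metadata API. A hedged, standalone sketch (the contact point and names are placeholders):

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.KeyspaceMetadata;
import com.datastax.driver.core.TableMetadata;

public final class CassandraMetadataDemo {
    public static void main(final String[] args) {
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build()) {
            final KeyspaceMetadata keyspace = cluster.getMetadata().getKeyspace("my_keyspace");
            if (keyspace == null) {
                // The service above maps this case to DatabaseNotFoundException.
                throw new IllegalStateException("keyspace not found");
            }
            final TableMetadata table = keyspace.getTable("my_table");
            if (table == null) {
                // The service above maps this case to TableNotFoundException.
                throw new IllegalStateException("table not found");
            }
            System.out.println(table.exportAsString()); // prints the table's CQL definition
        }
    }
}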