Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
The class S3ConnectorTableService, method update.
@Override
public void update(@Nonnull final ConnectorContext context, @Nonnull final TableInfo tableInfo) {
    log.debug("Start: Update table {}", tableInfo.getName());
    final QualifiedName tableName = tableInfo.getName();
    final Table table = tableDao.getBySourceDatabaseTableName(catalogName, tableName.getDatabaseName(), tableName.getTableName());
    if (table == null) {
        throw new TableNotFoundException(tableName);
    }
    // We can update the fields, the URI, or the full serde.
    final Location newLocation = infoConverter.toLocation(tableInfo);
    Location location = table.getLocation();
    if (location == null) {
        location = new Location();
        location.setTable(table);
        table.setLocation(location);
    }
    if (newLocation.getUri() != null) {
        location.setUri(newLocation.getUri());
    }
    final Info newInfo = newLocation.getInfo();
    if (newInfo != null) {
        final Info info = location.getInfo();
        if (info == null) {
            location.setInfo(newInfo);
            newInfo.setLocation(location);
        } else {
            if (newInfo.getInputFormat() != null) {
                info.setInputFormat(newInfo.getInputFormat());
            }
            if (newInfo.getOutputFormat() != null) {
                info.setOutputFormat(newInfo.getOutputFormat());
            }
            if (newInfo.getOwner() != null) {
                info.setOwner(newInfo.getOwner());
            }
            if (newInfo.getSerializationLib() != null) {
                info.setSerializationLib(newInfo.getSerializationLib());
            }
            if (newInfo.getParameters() != null && !newInfo.getParameters().isEmpty()) {
                info.setParameters(newInfo.getParameters());
            }
        }
    }
    final Schema newSchema = newLocation.getSchema();
    if (newSchema != null) {
        final List<Field> newFields = newSchema.getFields();
        if (newFields != null && !newFields.isEmpty()) {
            final Schema schema = location.getSchema();
            if (schema == null) {
                location.setSchema(newSchema);
                newSchema.setLocation(location);
            } else {
                final List<Field> fields = schema.getFields();
                if (fields.isEmpty()) {
                    newFields.forEach(field -> {
                        field.setSchema(schema);
                        fields.add(field);
                    });
                } else {
                    for (int i = 0; i < newFields.size(); i++) {
                        final Field newField = newFields.get(i);
                        newField.setPos(i);
                        newField.setSchema(schema);
                        if (newField.getType() == null) {
                            newField.setType(newField.getSourceType());
                        }
                    }
                    schema.setFields(null);
                    fieldDao.delete(fields);
                    tableDao.save(table, true);
                    schema.setFields(newFields);
                }
            }
        }
    }
    log.debug("End: Update table {}", tableInfo.getName());
}
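The serde merge above only overwrites an attribute when the incoming value is non-null (and, for parameters, non-empty), so a sparsely populated TableInfo leaves the rest of the stored location untouched. Below is a minimal standalone sketch of that null-guarded merge pattern; the SerdeInfo stand-in class and the merge helper are illustrative assumptions, not metacat types.

import java.util.Map;

// Stand-in for the persisted serde metadata; illustrative only, not the metacat Info entity.
final class SerdeInfo {
    String inputFormat;
    String outputFormat;
    String owner;
    String serializationLib;
    Map<String, String> parameters;
}

final class SerdeMerger {
    // Mirrors the update() behavior above: copy a field only when the caller supplied it.
    static void merge(final SerdeInfo existing, final SerdeInfo incoming) {
        if (incoming.inputFormat != null) {
            existing.inputFormat = incoming.inputFormat;
        }
        if (incoming.outputFormat != null) {
            existing.outputFormat = incoming.outputFormat;
        }
        if (incoming.owner != null) {
            existing.owner = incoming.owner;
        }
        if (incoming.serializationLib != null) {
            existing.serializationLib = incoming.serializationLib;
        }
        if (incoming.parameters != null && !incoming.parameters.isEmpty()) {
            existing.parameters = incoming.parameters;
        }
    }
}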
Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
The class HiveConnectorPartitionService, method getPartitions.
/**
* {@inheritDoc}.
*/
@Override
public List<PartitionInfo> getPartitions(@Nonnull @NonNull final ConnectorContext requestContext, @Nonnull @NonNull final QualifiedName tableName, @Nonnull @NonNull final PartitionListRequest partitionsRequest) {
    try {
        final List<Partition> partitions = getPartitions(tableName, partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(), partitionsRequest.getSort(), partitionsRequest.getPageable());
        final Table table = metacatHiveClient.getTableByName(tableName.getDatabaseName(), tableName.getTableName());
        final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableName, table);
        final List<PartitionInfo> partitionInfos = new ArrayList<>();
        for (Partition partition : partitions) {
            partitionInfos.add(hiveMetacatConverters.toPartitionInfo(tableInfo, partition));
        }
        return partitionInfos;
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed get partitions for hive table %s", tableName), e);
    }
}
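A minimal caller-side sketch of how getPartitions might be used, assuming the partition service, request context, table name, and PartitionListRequest are already wired up by the surrounding application; only the method signature above is taken from the source.

// Hypothetical caller; partitionService, requestContext, tableName and partitionsRequest
// are assumed to be provided elsewhere.
try {
    final List<PartitionInfo> partitions =
            partitionService.getPartitions(requestContext, tableName, partitionsRequest);
    partitions.forEach(partition -> log.info("Found partition {}", partition.getName()));
} catch (TableNotFoundException e) {
    // Hive's NoSuchObjectException is surfaced as a connector-level TableNotFoundException.
    log.warn("Table {} does not exist", tableName, e);
}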
Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
The class HiveConnectorPartitionService, method savePartitions.
/**
* {@inheritDoc}.
*/
@Override
public PartitionsSaveResponse savePartitions(@Nonnull @NonNull final ConnectorContext requestContext, @Nonnull @NonNull final QualifiedName tableName, @Nonnull @NonNull final PartitionsSaveRequest partitionsSaveRequest) {
    final String databasename = tableName.getDatabaseName();
    final String tablename = tableName.getTableName();
    // New partitions
    final List<Partition> hivePartitions = Lists.newArrayList();
    try {
        final Table table = metacatHiveClient.getTableByName(databasename, tablename);
        final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
        // New partition ids
        final List<String> addedPartitionIds = Lists.newArrayList();
        // Updated partition ids
        final List<String> existingPartitionIds = Lists.newArrayList();
        // Existing partitions
        final List<Partition> existingHivePartitions = Lists.newArrayList();
        // Existing partition map
        Map<String, Partition> existingPartitionMap = Collections.emptyMap();
        if (partitionsSaveRequest.getCheckIfExists()) {
            final List<String> partitionNames = partitionInfos.stream().map(partition -> {
                final String partitionName = partition.getName().getPartitionName();
                PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
                return partitionName;
            }).collect(Collectors.toList());
            existingPartitionMap = getPartitionsByNames(table, partitionNames);
        }
        final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableName, table);
        for (PartitionInfo partitionInfo : partitionInfos) {
            final String partitionName = partitionInfo.getName().getPartitionName();
            final Partition hivePartition = existingPartitionMap.get(partitionName);
            if (hivePartition == null) {
                addedPartitionIds.add(partitionName);
                hivePartitions.add(hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo));
            } else {
                // Only alter the existing partition when alterIfExists is set.
                if (partitionsSaveRequest.getAlterIfExists()) {
                    final Partition existingPartition = hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo);
                    existingPartitionIds.add(partitionName);
                    existingPartition.setParameters(hivePartition.getParameters());
                    existingPartition.setCreateTime(hivePartition.getCreateTime());
                    existingPartition.setLastAccessTime(hivePartition.getLastAccessTime());
                    existingHivePartitions.add(existingPartition);
                }
            }
        }
        final Set<String> deletePartitionIds = Sets.newHashSet();
        if (!partitionsSaveRequest.getAlterIfExists()) {
            deletePartitionIds.addAll(existingPartitionIds);
        }
        if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
            deletePartitionIds.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
        }
        if (partitionsSaveRequest.getAlterIfExists() && !existingHivePartitions.isEmpty()) {
            copyTableSdToPartitionSd(existingHivePartitions, table);
            metacatHiveClient.alterPartitions(databasename, tablename, existingHivePartitions);
        }
        copyTableSdToPartitionSd(hivePartitions, table);
        metacatHiveClient.addDropPartitions(databasename, tablename, hivePartitions, Lists.newArrayList(deletePartitionIds));
        final PartitionsSaveResponse result = new PartitionsSaveResponse();
        result.setAdded(addedPartitionIds);
        result.setUpdated(existingPartitionIds);
        return result;
    } catch (NoSuchObjectException exception) {
        if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
            throw new PartitionNotFoundException(tableName, "", exception);
        } else {
            throw new TableNotFoundException(tableName, exception);
        }
    } catch (MetaException | InvalidObjectException exception) {
        throw new InvalidMetaException("One or more partitions are invalid.", exception);
    } catch (AlreadyExistsException e) {
        final List<String> ids = getFakePartitionName(hivePartitions);
        throw new PartitionAlreadyExistsException(tableName, ids, e);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed savePartitions hive table %s", tableName), exception);
    }
}
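At its core, savePartitions classifies the requested partition names into additions and in-place alterations, plus an explicit delete set. The standalone sketch below mirrors that control flow with plain strings instead of Hive Partition objects; the class and its names are illustrative assumptions, not metacat code.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Simplified stand-in for the classification done inside savePartitions above.
final class PartitionSavePlan {
    final List<String> added = new ArrayList<>();    // partitions to create
    final List<String> altered = new ArrayList<>();  // partitions to alter in place
    final Set<String> dropped = new HashSet<>();     // partitions to drop

    PartitionSavePlan(final Set<String> existing, final List<String> requested,
                      final boolean alterIfExists, final List<String> explicitDeletes) {
        for (final String name : requested) {
            if (!existing.contains(name)) {
                added.add(name);
            } else if (alterIfExists) {
                altered.add(name);
            }
            // Otherwise (the partition exists and alterIfExists is false) it is left
            // untouched, matching the control flow above.
        }
        if (explicitDeletes != null) {
            dropped.addAll(explicitDeletes);    // partitionIdsForDeletes from the request
        }
    }
}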
Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
The class CassandraConnectorTableService, method get.
/**
* {@inheritDoc}
*/
@Override
public TableInfo get(@Nonnull @NonNull final ConnectorContext context, @Nonnull @NonNull final QualifiedName name) {
    final String keyspace = name.getDatabaseName();
    final String table = name.getTableName();
    log.debug("Attempting to get metadata for Cassandra table {}.{} for request {}", keyspace, table, context);
    try {
        final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace);
        if (keyspaceMetadata == null) {
            throw new DatabaseNotFoundException(name);
        }
        final TableMetadata tableMetadata = keyspaceMetadata.getTable(table);
        if (tableMetadata == null) {
            throw new TableNotFoundException(name);
        }
        final TableInfo tableInfo = this.getTableInfo(name, tableMetadata);
        log.debug("Successfully got metadata for Cassandra table {}.{} for request {}", keyspace, table, context);
        return tableInfo;
    } catch (final DriverException de) {
        log.error(de.getMessage(), de);
        throw this.getExceptionMapper().toConnectorException(de, name);
    }
}
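A missing keyspace maps to DatabaseNotFoundException, a missing table to TableNotFoundException, and driver failures go through the connector's exception mapper. A hedged caller-side sketch follows; QualifiedName.ofTable is assumed as the factory method, and the catalog, keyspace, and table values are placeholders.

// Hypothetical caller; tableService and context are assumed to be wired elsewhere,
// and QualifiedName.ofTable is an assumed factory method.
final QualifiedName name = QualifiedName.ofTable("cass-prod", "my_keyspace", "my_table");
try {
    final TableInfo info = tableService.get(context, name);
    log.info("Fetched metadata for {}", info.getName());
} catch (DatabaseNotFoundException | TableNotFoundException e) {
    log.warn("{} is not present in Cassandra", name, e);
}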
Use of com.netflix.metacat.common.server.connectors.model.TableInfo in project metacat by Netflix.
The class CassandraConnectorTableService, method list.
/**
* {@inheritDoc}
*/
@Override
public List<TableInfo> list(@Nonnull @NonNull final ConnectorContext context, @Nonnull @NonNull final QualifiedName name, @Nullable final QualifiedName prefix, @Nullable final Sort sort, @Nullable final Pageable pageable) {
    final String keyspace = name.getDatabaseName();
    log.debug("Attempting to list tables in Cassandra keyspace {} for request {}", keyspace, context);
    try {
        final KeyspaceMetadata keyspaceMetadata = this.getCluster().getMetadata().getKeyspace(keyspace);
        if (keyspaceMetadata == null) {
            throw new DatabaseNotFoundException(name);
        }
        // TODO: Should we include views?
        final List<TableInfo> tables = Lists.newArrayList();
        for (final TableMetadata tableMetadata : keyspaceMetadata.getTables()) {
            if (prefix != null && !tableMetadata.getName().startsWith(prefix.getTableName())) {
                continue;
            }
            tables.add(this.getTableInfo(name, tableMetadata));
        }
        // Sort
        if (sort != null) {
            final Comparator<TableInfo> tableComparator = Comparator.comparing((t) -> t.getName().getTableName());
            ConnectorUtils.sort(tables, sort, tableComparator);
        }
        // Paging
        final List<TableInfo> pagedTables = ConnectorUtils.paginate(tables, pageable);
        log.debug("Listed {} tables in Cassandra keyspace {} for request {}", pagedTables.size(), keyspace, context);
        return pagedTables;
    } catch (final DriverException de) {
        log.error(de.getMessage(), de);
        throw this.getExceptionMapper().toConnectorException(de, name);
    }
}
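Both sort and pageable are @Nullable, so a caller can list a keyspace with just an optional prefix filter. A minimal sketch under those assumptions; the QualifiedName factory methods and the placeholder names are assumptions for illustration.

// Hypothetical caller; passing null for sort and pageable is allowed per the
// @Nullable annotations above. The QualifiedName factories are assumed.
final QualifiedName keyspace = QualifiedName.ofDatabase("cass-prod", "my_keyspace");
final QualifiedName prefix = QualifiedName.ofTable("cass-prod", "my_keyspace", "user_");
final List<TableInfo> tables = tableService.list(context, keyspace, prefix, null, null);
tables.forEach(t -> log.info("Found table {}", t.getName()));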