Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
The class TableServiceImpl, method rename.
@Override
public void rename(@Nonnull final QualifiedName oldName, @Nonnull final QualifiedName newName,
                   final boolean isMView) {
    validate(oldName);
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    final ConnectorTableService service = connectorManager.getTableService(oldName.getCatalogName());
    final TableDto oldTable = get(oldName, true).orElseThrow(() -> new TableNotFoundException(oldName));
    if (oldTable != null) {
        // Ignore if the operation is not supported, so that we can at least go ahead and save the user metadata
        eventBus.postSync(new MetacatRenameTablePreEvent(oldName, metacatRequestContext, this, newName));
        try {
            log.info("Renaming {} {} to {}", isMView ? "view" : "table", oldName, newName);
            final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
            service.rename(connectorContext, oldName, newName);
        } catch (UnsupportedOperationException ignored) {
        }
        userMetadataService.renameDefinitionMetadataKey(oldName, newName);
        tagService.rename(oldName, newName.getTableName());
        final TableDto dto = get(newName, true).orElseThrow(() -> new IllegalStateException("should exist"));
        eventBus.postAsync(new MetacatRenameTablePostEvent(oldName, metacatRequestContext, this, oldTable, dto));
    }
}
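As a caller-side sketch (illustrative only: the `tableService` reference, the `QualifiedName.ofTable` factory usage, and the catalog/database/table names are assumptions, not taken from the snippet above), a rename driven through this method might look like:

    // Hypothetical caller: rename a table while keeping its definition metadata and tags (illustrative names).
    final QualifiedName oldName = QualifiedName.ofTable("prodhive", "reporting", "daily_clicks");
    final QualifiedName newName = QualifiedName.ofTable("prodhive", "reporting", "daily_clicks_v2");
    // isMView = false: this is a plain table rename, not a materialized view rename.
    tableService.rename(oldName, newName, false);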
Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
The class HiveConnectorPartitionService, method savePartitions.
/**
 * {@inheritDoc}.
 */
@Override
public PartitionsSaveResponse savePartitions(@Nonnull @NonNull final ConnectorContext requestContext,
                                             @Nonnull @NonNull final QualifiedName tableName,
                                             @Nonnull @NonNull final PartitionsSaveRequest partitionsSaveRequest) {
    final String databasename = tableName.getDatabaseName();
    final String tablename = tableName.getTableName();
    // New partitions
    final List<Partition> hivePartitions = Lists.newArrayList();
    try {
        final Table table = metacatHiveClient.getTableByName(databasename, tablename);
        final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
        // New partition ids
        final List<String> addedPartitionIds = Lists.newArrayList();
        // Updated partition ids
        final List<String> existingPartitionIds = Lists.newArrayList();
        // Existing partitions
        final List<Partition> existingHivePartitions = Lists.newArrayList();
        // Existing partition map
        Map<String, Partition> existingPartitionMap = Collections.emptyMap();
        if (partitionsSaveRequest.getCheckIfExists()) {
            final List<String> partitionNames = partitionInfos.stream().map(partition -> {
                final String partitionName = partition.getName().getPartitionName();
                PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
                return partitionName;
            }).collect(Collectors.toList());
            existingPartitionMap = getPartitionsByNames(table, partitionNames);
        }
        final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableName, table);
        for (PartitionInfo partitionInfo : partitionInfos) {
            final String partitionName = partitionInfo.getName().getPartitionName();
            final Partition hivePartition = existingPartitionMap.get(partitionName);
            if (hivePartition == null) {
                addedPartitionIds.add(partitionName);
                hivePartitions.add(hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo));
            } else {
                // The partition already exists: alter it in place when alterIfExists is set,
                // otherwise treat it as an update by dropping and re-adding it below.
                if (partitionsSaveRequest.getAlterIfExists()) {
                    final Partition existingPartition = hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo);
                    existingPartitionIds.add(partitionName);
                    existingPartition.setParameters(hivePartition.getParameters());
                    existingPartition.setCreateTime(hivePartition.getCreateTime());
                    existingPartition.setLastAccessTime(hivePartition.getLastAccessTime());
                    existingHivePartitions.add(existingPartition);
                } else {
                    existingPartitionIds.add(partitionName);
                    hivePartitions.add(hiveMetacatConverters.fromPartitionInfo(tableInfo, partitionInfo));
                }
            }
        }
        final Set<String> deletePartitionIds = Sets.newHashSet();
        if (!partitionsSaveRequest.getAlterIfExists()) {
            deletePartitionIds.addAll(existingPartitionIds);
        }
        if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
            deletePartitionIds.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
        }
        if (partitionsSaveRequest.getAlterIfExists() && !existingHivePartitions.isEmpty()) {
            copyTableSdToPartitionSd(existingHivePartitions, table);
            metacatHiveClient.alterPartitions(databasename, tablename, existingHivePartitions);
        }
        copyTableSdToPartitionSd(hivePartitions, table);
        metacatHiveClient.addDropPartitions(databasename, tablename, hivePartitions, Lists.newArrayList(deletePartitionIds));
        final PartitionsSaveResponse result = new PartitionsSaveResponse();
        result.setAdded(addedPartitionIds);
        result.setUpdated(existingPartitionIds);
        return result;
    } catch (NoSuchObjectException exception) {
        if (exception.getMessage() != null && exception.getMessage().startsWith("Partition doesn't exist")) {
            throw new PartitionNotFoundException(tableName, "", exception);
        } else {
            throw new TableNotFoundException(tableName, exception);
        }
    } catch (MetaException | InvalidObjectException exception) {
        throw new InvalidMetaException("One or more partitions are invalid.", exception);
    } catch (AlreadyExistsException e) {
        final List<String> ids = getFakePartitionName(hivePartitions);
        throw new PartitionAlreadyExistsException(tableName, ids, e);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed savePartitions hive table %s", tableName), exception);
    }
}
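A rough caller-side sketch of the request this method consumes is shown below. The builder and setter names (`PartitionInfo.builder()`, `setPartitions`, `setCheckIfExists`, `setAlterIfExists`) are assumed from the getters used above and may differ from the actual DTO API; the catalog/table names and the `partitionService`/`connectorContext` references are illustrative.

    // Illustrative only: save one partition, altering it in place if it already exists.
    final QualifiedName tableName = QualifiedName.ofTable("prodhive", "reporting", "daily_clicks");
    final PartitionInfo partition = PartitionInfo.builder()                      // assumed builder
        .name(QualifiedName.ofPartition("prodhive", "reporting", "daily_clicks", "dateint=20240101"))
        .build();
    final PartitionsSaveRequest request = new PartitionsSaveRequest();
    request.setPartitions(Lists.newArrayList(partition));                        // assumed setters
    request.setCheckIfExists(true);   // look up existing partitions before deciding add vs. update
    request.setAlterIfExists(true);   // alter existing partitions instead of dropping and re-adding them
    final PartitionsSaveResponse response =
        partitionService.savePartitions(connectorContext, tableName, request);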
Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
The class HiveConnectorDatabaseService, method list.
/**
 * {@inheritDoc}.
 */
@Override
public List<DatabaseInfo> list(@Nonnull @NonNull final ConnectorContext requestContext,
                               @Nonnull @NonNull final QualifiedName name,
                               @Nullable final QualifiedName prefix,
                               @Nullable final Sort sort,
                               @Nullable final Pageable pageable) {
    try {
        final List<DatabaseInfo> databaseInfos = Lists.newArrayList();
        for (String databaseName : metacatHiveClient.getAllDatabases()) {
            final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), databaseName);
            if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
                continue;
            }
            databaseInfos.add(DatabaseInfo.builder().name(qualifiedName).build());
        }
        // Supporting sort by name only
        if (sort != null) {
            ConnectorUtils.sort(databaseInfos, sort, Comparator.comparing(p -> p.getName().getDatabaseName()));
        }
        return ConnectorUtils.paginate(databaseInfos, pageable);
    } catch (MetaException exception) {
        throw new InvalidMetaException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed list hive database %s", name), exception);
    }
}
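For orientation, a caller might invoke this as follows; the `QualifiedName.ofCatalog` factory, the catalog name, and the `databaseService`/`connectorContext` references are illustrative assumptions rather than code from the snippet above.

    // Illustrative only: list databases in the "prodhive" catalog whose qualified names start with "prodhive/rep",
    // without sorting or pagination (both parameters are @Nullable).
    final List<DatabaseInfo> databases = databaseService.list(
        connectorContext,
        QualifiedName.ofCatalog("prodhive"),
        QualifiedName.ofDatabase("prodhive", "rep"),
        null,
        null);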
Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
The class HiveConnectorFastPartitionService, method getPartitionKeys.
/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionKeys(@Nonnull @NonNull final ConnectorContext requestContext,
                                     @Nonnull @NonNull final QualifiedName tableName,
                                     @Nonnull @NonNull final PartitionListRequest partitionsRequest) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<String, String>();
    tags.put("request", "getPartitionKeys");
    final List<String> result;
    final List<String> partitionNames = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final Pageable pageable = partitionsRequest.getPageable();
    final String filterExpression = partitionsRequest.getFilter();
    if (filterExpression != null) {
        final FilterPartition filter = new FilterPartition();
        // batch exists
        final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
        final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
        // Handler for reading the result set
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                final String name = rs.getString("name");
                final String uri = rs.getString("uri");
                final long createdDate = rs.getLong(FIELD_DATE_CREATED);
                Map<String, String> values = null;
                if (hasDateCreated) {
                    values = Maps.newHashMap();
                    values.put(FIELD_DATE_CREATED, createdDate + "");
                }
                if (Strings.isNullOrEmpty(filterExpression)
                    || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                    names.add(name);
                }
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), filterExpression,
            partitionNames, SQL_GET_PARTITIONS_WITH_KEY_URI, handler, sort, pageable);
    } else {
        // Handler for reading the result set
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                names.add(rs.getString("name"));
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), null,
            partitionNames, SQL_GET_PARTITIONS_WITH_KEY, handler, sort, pageable);
    }
    final long duration = registry.clock().monotonicTime() - start;
    log.debug("### Time taken to complete getPartitionKeys is {} ms", duration);
    this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    return result;
}
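A minimal caller-side sketch, assuming `PartitionListRequest` exposes setters matching the getters read above (`setFilter`, and so on) and that `fastPartitionService`, `connectorContext`, and `tableName` are in scope; all of these names, as well as the filter expression, are illustrative.

    // Illustrative only: fetch the names of partitions matching a filter expression.
    final PartitionListRequest listRequest = new PartitionListRequest();
    listRequest.setFilter("dateint>=20240101");   // assumed setter backing getFilter(); illustrative filter
    final List<String> partitionKeys =
        fastPartitionService.getPartitionKeys(connectorContext, tableName, listRequest);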
Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
The class HiveConnectorFastPartitionService, method getPartitionNames.
/**
 * getPartitionNames.
 *
 * @param context      connector context
 * @param uris         list of partition URIs to look up
 * @param prefixSearch if true, URIs are matched as prefixes; otherwise they must match exactly
 * @return map of URI to the partition names located at that URI
 */
@Override
public Map<String, List<QualifiedName>> getPartitionNames(@Nonnull final ConnectorContext context,
                                                          @Nonnull final List<String> uris,
                                                          final boolean prefixSearch) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<String, String>();
    tags.put("request", HiveMetrics.getPartitionNames.name());
    final Map<String, List<QualifiedName>> result = Maps.newHashMap();
    // Get data source
    final DataSource dataSource = DataSourceManager.get().get(catalogName);
    // Create the sql
    final StringBuilder queryBuilder = new StringBuilder(SQL_GET_PARTITION_NAMES_BY_URI);
    final List<String> params = Lists.newArrayList();
    if (prefixSearch) {
        queryBuilder.append(" 1=2");
        uris.forEach(uri -> {
            queryBuilder.append(" or location like ?");
            params.add(uri + "%");
        });
    } else {
        queryBuilder.append(" location in (");
        Joiner.on(',').appendTo(queryBuilder, uris.stream().map(uri -> "?").collect(Collectors.toList()));
        queryBuilder.append(")");
        params.addAll(uris);
    }
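    // For illustration (assumed shape only; the contents of SQL_GET_PARTITION_NAMES_BY_URI are not shown here),
    // the statement built above ends with:
    //   prefixSearch == true : ... 1=2 or location like ? or location like ? ...
    //   prefixSearch == false: ... location in (?, ?, ...)
    // The "1=2" seed is always false, so every URI clause can be appended uniformly as "or location like ?".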
    // Handler for reading the result set
    final ResultSetHandler<Map<String, List<QualifiedName>>> handler = rs -> {
        while (rs.next()) {
            final String schemaName = rs.getString("schema_name");
            final String tableName = rs.getString("table_name");
            final String partitionName = rs.getString("partition_name");
            final String uri = rs.getString("location");
            final List<QualifiedName> partitionNames = result.get(uri);
            final QualifiedName qualifiedName = QualifiedName.ofPartition(catalogName, schemaName, tableName, partitionName);
            if (partitionNames == null) {
                result.put(uri, Lists.newArrayList(qualifiedName));
            } else {
                partitionNames.add(qualifiedName);
            }
        }
        return result;
    };
    try (Connection conn = dataSource.getConnection()) {
        new QueryRunner().query(conn, queryBuilder.toString(), handler, params.toArray());
    } catch (SQLException e) {
        Throwables.propagate(e);
    } finally {
        final long duration = registry.clock().monotonicTime() - start;
        log.debug("### Time taken to complete getPartitionNames is {} ms", duration);
        this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    }
    return result;
}
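Finally, a hedged sketch of the reverse lookup this method provides; the storage path and the `fastPartitionService`/`connectorContext` references are illustrative assumptions.

    // Illustrative only: find which partitions point at a given storage location, matching by prefix.
    final List<String> uris =
        Lists.newArrayList("s3://warehouse/reporting/daily_clicks/dateint=20240101");
    final Map<String, List<QualifiedName>> partitionsByUri =
        fastPartitionService.getPartitionNames(connectorContext, uris, true);
    partitionsByUri.forEach((uri, partitions) -> log.info("{} -> {}", uri, partitions));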