Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
The class DatabaseServiceImpl, method create.
@Override
public DatabaseDto create(@Nonnull final QualifiedName name, @Nonnull final DatabaseDto dto) {
    validate(name);
    log.info("Creating schema {}", name);
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    // Notify listeners synchronously before the database is created
    eventBus.postSync(new MetacatCreateDatabasePreEvent(name, metacatRequestContext, this));
    // Translate the request context into a connector-level ConnectorContext
    final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
    connectorManager.getDatabaseService(name.getCatalogName())
        .create(connectorContext, converterUtil.fromDatabaseDto(dto));
    if (dto.getDefinitionMetadata() != null) {
        log.info("Saving user metadata for schema {}", name);
        userMetadataService.saveDefinitionMetadata(name, metacatRequestContext.getUserName(),
            Optional.of(dto.getDefinitionMetadata()), true);
    }
    final DatabaseDto createdDto = get(name, dto.getDefinitionMetadata() != null);
    eventBus.postAsync(new MetacatCreateDatabasePostEvent(name, metacatRequestContext, this, createdDto));
    return createdDto;
}
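For illustration, a minimal sketch of how a caller might invoke this method. QualifiedName.ofDatabase is a factory in metacat-common; the catalog and database names, the databaseService reference, and the Lombok-generated DatabaseDto setter are assumptions for the example, not part of the source above.

// databaseService: assumed to be a wired DatabaseServiceImpl instance
final QualifiedName name = QualifiedName.ofDatabase("prodhive", "reporting"); // hypothetical catalog/database
final DatabaseDto dto = new DatabaseDto();
dto.setName(name); // assumes the Lombok-generated setter on DatabaseDto
final DatabaseDto created = databaseService.create(name, dto);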
Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
The class DatabaseServiceImpl, method get.
@Override
public DatabaseDto get(@Nonnull final QualifiedName name, final boolean includeUserMetadata) {
    validate(name);
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    final MetacatCatalogConfig config = connectorManager.getCatalogConfig(name.getCatalogName());
    final ConnectorDatabaseService service = connectorManager.getDatabaseService(name.getCatalogName());
    final ConnectorTableService tableService = connectorManager.getTableService(name.getCatalogName());
    final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
    final List<QualifiedName> tableNames = tableService.listNames(connectorContext, name, null, null, null);
    List<QualifiedName> viewNames = Collections.emptyList();
    if (config.isIncludeViewsWithTables()) {
        // TODO JdbcMetadata returns ImmutableList.of() for views. We should change it to fetch views.
        try {
            viewNames = service.listViewNames(connectorContext, name);
        } catch (UnsupportedOperationException ignored) {
        }
    }
    // Check to see if schema exists
    if (tableNames.isEmpty() && viewNames.isEmpty() && !exists(name)) {
        throw new DatabaseNotFoundException(name);
    }
    final DatabaseDto dto = converterUtil.toDatabaseDto(service.get(connectorContext, name));
    dto.setType(config.getType());
    dto.setTables(Stream.concat(tableNames.stream(), viewNames.stream())
        .map(QualifiedName::getTableName)
        .sorted(String.CASE_INSENSITIVE_ORDER)
        .collect(Collectors.toList()));
    if (includeUserMetadata) {
        log.info("Populate user metadata for schema {}", name);
        userMetadataService.populateMetadata(dto);
    }
    return dto;
}
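A minimal sketch of a call to get, again with illustrative names; the getTables accessor is assumed Lombok-generated, and its contents are populated by the method above.

final DatabaseDto dto = databaseService.get(QualifiedName.ofDatabase("prodhive", "reporting"), true); // hypothetical names
// getTables() holds table and view names, merged and sorted case-insensitively by get()
dto.getTables().forEach(tableName -> log.info("table: {}", tableName));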
Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
The class PartitionServiceImpl, method list.
@Override
public List<PartitionDto> list(final QualifiedName name, final String filter, final List<String> partitionNames,
    final Sort sort, final Pageable pageable, final boolean includeUserDefinitionMetadata,
    final boolean includeUserDataMetadata, final boolean includePartitionDetails) {
    if (Strings.isNullOrEmpty(filter)
        && (pageable == null || !pageable.isPageable())
        && (partitionNames == null || partitionNames.isEmpty())
        && config.getQualifiedNamesToThrowErrorWhenNoFilterOnListPartitions().contains(name)) {
        throw new IllegalArgumentException(String.format("No filter or limit specified for table %s", name));
    }
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    final ConnectorPartitionService service = connectorManager.getPartitionService(name.getCatalogName());
    final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto();
    requestDto.setFilter(filter);
    requestDto.setIncludePartitionDetails(includePartitionDetails);
    requestDto.setPartitionNames(partitionNames);
    final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
    final List<PartitionInfo> resultInfo = service.getPartitions(connectorContext, name,
        converterUtil.toPartitionListRequest(requestDto, pageable, sort));
    List<PartitionDto> result = Lists.newArrayList();
    if (resultInfo != null && !resultInfo.isEmpty()) {
        result = resultInfo.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
        final List<QualifiedName> names = Lists.newArrayList();
        final List<String> uris = Lists.newArrayList();
        result.forEach(partitionDto -> {
            names.add(partitionDto.getName());
            uris.add(partitionDto.getDataUri());
        });
        registry.gauge(this.partitionGetCountId.withTags(new HashMap<>(name.parts())), result.size());
        log.info("Got {} partitions for {} using filter: {} and partition names: {}",
            result.size(), name, filter, partitionNames);
        if (includeUserDefinitionMetadata || includeUserDataMetadata) {
            final List<ListenableFuture<Map<String, ObjectNode>>> futures = Lists.newArrayList();
            futures.add(threadServiceManager.getExecutor().submit(() -> includeUserDefinitionMetadata
                ? userMetadataService.getDefinitionMetadataMap(names) : Maps.newHashMap()));
            futures.add(threadServiceManager.getExecutor().submit(() -> includeUserDataMetadata
                ? userMetadataService.getDataMetadataMap(uris) : Maps.newHashMap()));
            try {
                final List<Map<String, ObjectNode>> metadataResults =
                    Futures.successfulAsList(futures).get(1, TimeUnit.HOURS);
                final Map<String, ObjectNode> definitionMetadataMap = metadataResults.get(0);
                final Map<String, ObjectNode> dataMetadataMap = metadataResults.get(1);
                result.forEach(partitionDto -> userMetadataService.populateMetadata(partitionDto,
                    definitionMetadataMap.get(partitionDto.getName().toString()),
                    dataMetadataMap.get(partitionDto.getDataUri())));
            } catch (Exception e) {
                Throwables.propagate(e);
            }
        }
    }
    return result;
}
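A minimal sketch of a list call. The table name and filter expression are hypothetical; the boolean arguments follow the signature above (definition metadata, data metadata, partition details), and partitionService is assumed to be a wired PartitionServiceImpl.

final List<PartitionDto> partitions = partitionService.list(
    QualifiedName.ofTable("prodhive", "reporting", "events"), // hypothetical table
    "dateint>=20240101",                                      // hypothetical filter expression
    null,                                                     // no explicit partition names
    null, null,                                               // no sort or paging
    true,                                                     // include user definition metadata
    false,                                                    // skip data metadata
    false);                                                   // skip partition details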
Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
The class S3ConnectorPartitionService, method savePartitions.
@Override
public PartitionsSaveResponse savePartitions(@Nonnull final ConnectorContext context,
    @Nonnull final QualifiedName tableName, @Nonnull final PartitionsSaveRequest partitionsSaveRequest) {
    log.debug("Start: Save partitions for table {}", tableName);
    // Table
    final Table table = getTable(tableName);
    // New partition ids
    final List<String> addedPartitionIds = Lists.newArrayList();
    // Updated partition ids
    final List<String> existingPartitionIds = Lists.newArrayList();
    // Existing partitions, keyed by partition name
    Map<String, Partition> existingPartitionMap = Maps.newHashMap();
    if (partitionsSaveRequest.getCheckIfExists()) {
        final List<String> partitionNames = partitionsSaveRequest.getPartitions().stream().map(partition -> {
            final String partitionName = partition.getName().getPartitionName();
            PartitionUtil.validatePartitionName(partitionName, infoConverter.partitionKeys(table));
            return partitionName;
        }).collect(Collectors.toList());
        existingPartitionMap = getPartitionsByNames(table.getId(), partitionNames);
    }
    // New partitions
    final List<Partition> s3Partitions = Lists.newArrayList();
    for (PartitionInfo partition : partitionsSaveRequest.getPartitions()) {
        final String partitionName = partition.getName().getPartitionName();
        final Partition s3Partition = existingPartitionMap.get(partitionName);
        if (s3Partition == null) {
            addedPartitionIds.add(partitionName);
            s3Partitions.add(infoConverter.toPartition(table, partition));
        } else {
            // Only update an existing partition when its URI has changed
            final String partitionUri = infoConverter.getUri(partition);
            final String s3PartitionUri = s3Partition.getUri();
            if (partitionUri != null && !partitionUri.equals(s3PartitionUri)) {
                s3Partition.setUri(partitionUri);
                existingPartitionIds.add(partitionName);
                s3Partitions.add(s3Partition);
            }
        }
    }
    final List<String> partitionIdsForDeletes = partitionsSaveRequest.getPartitionIdsForDeletes();
    if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
        partitionDao.deleteByNames(catalogName, tableName.getDatabaseName(), tableName.getTableName(),
            partitionIdsForDeletes);
    }
    partitionDao.save(s3Partitions);
    log.debug("End: Save partitions for table {}", tableName);
    return PartitionsSaveResponse.builder().added(addedPartitionIds).updated(existingPartitionIds).build();
}
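A minimal sketch of how a caller might drive savePartitions. The ConnectorContext is obtained via converterUtil.toConnectorContext as in the service-layer examples above; the PartitionsSaveRequest setters and the response getters are assumed Lombok-generated, and the table name and partitionInfos list are illustrative.

final ConnectorContext connectorContext = converterUtil.toConnectorContext(MetacatContextManager.getContext());
final PartitionsSaveRequest request = new PartitionsSaveRequest();
request.setCheckIfExists(true);        // compare against existing partitions before saving
request.setPartitions(partitionInfos); // partitionInfos: a pre-built List<PartitionInfo>
final PartitionsSaveResponse response = s3PartitionService.savePartitions(
    connectorContext, QualifiedName.ofTable("s3", "reporting", "events"), request); // hypothetical names
log.info("added={}, updated={}", response.getAdded(), response.getUpdated());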