Use of com.netflix.metacat.common.MetacatRequestContext in project metacat by Netflix.
From the class PartitionServiceImpl, the method getPartitionUris:
@Override
public List<String> getPartitionUris(final QualifiedName name, final String filter,
        final List<String> partitionNames, final Sort sort, final Pageable pageable) {
    List<String> result = Lists.newArrayList();
    if (tableService.exists(name)) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorPartitionService service = connectorManager.getPartitionService(name.getCatalogName());
        final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto();
        requestDto.setFilter(filter);
        requestDto.setPartitionNames(partitionNames);
        final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
        try {
            result = service.getPartitionUris(connectorContext, name,
                converterUtil.toPartitionListRequest(requestDto, pageable, sort));
        } catch (final UnsupportedOperationException uoe) {
            log.info("Catalog {} doesn't support getPartitionUris. Ignoring.", name.getCatalogName());
        }
    }
    return result;
}
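A minimal caller sketch, not taken from the project: it assumes an injected PartitionService named partitionService, an SLF4J logger named log, and that the converter tolerates null sort, paging, and partition-name arguments. The catalog, database, table, and filter values are purely illustrative.

// Illustrative caller (names and filter syntax are hypothetical).
final QualifiedName table = QualifiedName.ofTable("prodhive", "example_db", "example_table");
final List<String> uris = partitionService.getPartitionUris(
    table,
    "dateint>=20230101", // partition filter expression
    null,                // no explicit partition names
    null,                // no sort
    null);               // no paging
uris.forEach(uri -> log.info("Partition uri: {}", uri));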
Use of com.netflix.metacat.common.MetacatRequestContext in project metacat by Netflix.
From the class TableServiceImpl, the method get:
@Override
public Optional<TableDto> get(@Nonnull final QualifiedName name, final boolean includeInfo,
        final boolean includeDefinitionMetadata, final boolean includeDataMetadata) {
    validate(name);
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
    final ConnectorTableService service = connectorManager.getTableService(name.getCatalogName());
    final TableDto table;
    if (includeInfo) {
        try {
            table = converterUtil.toTableDto(service.get(connectorContext, name));
        } catch (NotFoundException ignored) {
            return Optional.empty();
        }
    } else {
        table = new TableDto();
        table.setName(name);
    }
    if (includeDefinitionMetadata) {
        final Optional<ObjectNode> definitionMetadata = userMetadataService.getDefinitionMetadata(name);
        if (definitionMetadata.isPresent()) {
            table.setDefinitionMetadata(definitionMetadata.get());
        }
    }
    if (includeDataMetadata) {
        TableDto dto = table;
        if (!includeInfo) {
            try {
                dto = converterUtil.toTableDto(service.get(connectorContext, name));
            } catch (NotFoundException ignored) {
            }
        }
        if (dto != null && dto.getSerde() != null) {
            final Optional<ObjectNode> dataMetadata = userMetadataService.getDataMetadata(dto.getSerde().getUri());
            if (dataMetadata.isPresent()) {
                table.setDataMetadata(dataMetadata.get());
            }
        }
    }
    return Optional.of(table);
}
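A hedged caller sketch, assuming an injected TableService named tableService and an SLF4J logger named log; the qualified name is illustrative. The three booleans request the connector's table info and the definition metadata while skipping data metadata.

// Illustrative caller (catalog/database/table names are hypothetical).
final QualifiedName name = QualifiedName.ofTable("prodhive", "example_db", "example_table");
final Optional<TableDto> table = tableService.get(name, true, true, false);
table.ifPresent(dto -> log.info("Definition metadata for {}: {}", name, dto.getDefinitionMetadata()));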
Use of com.netflix.metacat.common.MetacatRequestContext in project metacat by Netflix.
From the class MetadataService, the method processDeletedDataMetadata:
/**
 * Deletes all the data metadata marked for deletion.
 */
public void processDeletedDataMetadata() {
    // Get the data metadata that were marked deleted a number of days back
    // Check if the uri is being used
    // If uri is not used then delete the entry from data_metadata
    log.info("Start deleting data metadata");
    try {
        final DateTime priorTo = DateTime.now().minusDays(config.getDataMetadataDeleteMarkerLifetimeInDays());
        final int limit = 100000;
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        while (true) {
            final List<String> urisToDelete =
                userMetadataService.getDeletedDataMetadataUris(priorTo.toDate(), 0, limit);
            log.info("Count of deleted marked data metadata: {}", urisToDelete.size());
            if (urisToDelete.size() > 0) {
                final List<String> uris = urisToDelete.parallelStream()
                    .filter(uri -> !uri.contains("="))
                    .map(uri -> userMetadataService.getDescendantDataUris(uri))
                    .flatMap(Collection::stream)
                    .collect(Collectors.toList());
                uris.addAll(urisToDelete);
                log.info("Count of deleted marked data metadata (including descendants) : {}", uris.size());
                final List<List<String>> subListsUris = Lists.partition(uris, 1000);
                subListsUris.parallelStream().forEach(subUris -> {
                    MetacatContextManager.setContext(metacatRequestContext);
                    final Map<String, List<QualifiedName>> uriPartitionQualifiedNames =
                        partitionService.getQualifiedNames(subUris, false);
                    final Map<String, List<QualifiedName>> uriTableQualifiedNames =
                        tableService.getQualifiedNames(subUris, false);
                    final Map<String, List<QualifiedName>> uriQualifiedNames =
                        Stream.concat(uriPartitionQualifiedNames.entrySet().stream(),
                                uriTableQualifiedNames.entrySet().stream())
                            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (a, b) -> {
                                final List<QualifiedName> subNames = Lists.newArrayList(a);
                                subNames.addAll(b);
                                return subNames;
                            }));
                    final List<String> canDeleteMetadataForUris = subUris.parallelStream()
                        .filter(s -> !Strings.isNullOrEmpty(s))
                        .filter(s -> uriQualifiedNames.get(s) == null || uriQualifiedNames.get(s).size() == 0)
                        .collect(Collectors.toList());
                    log.info("Start deleting data metadata: {}", canDeleteMetadataForUris.size());
                    userMetadataService.deleteDataMetadatas(canDeleteMetadataForUris);
                    userMetadataService.deleteDataMetadataDeletes(subUris);
                    MetacatContextManager.removeContext();
                });
            }
            if (urisToDelete.size() < limit) {
                break;
            }
        }
    } catch (Exception e) {
        registry.counter(Metrics.CounterDeleteMetaData.name()).increment();
        log.warn("Failed deleting data metadata", e);
    }
    log.info("End deleting data metadata");
}
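Note how the request context is re-installed inside the parallel stream: worker threads do not see the caller's thread-local state, so the captured MetacatRequestContext is set before each batch is processed and removed afterwards. A stripped-down sketch of that pattern follows; the try/finally is a defensive addition not present in the method above, and doWork and batches are hypothetical placeholders.

final MetacatRequestContext context = MetacatContextManager.getContext();
batches.parallelStream().forEach(batch -> {
    // Copy the caller's context onto the pooled worker thread.
    MetacatContextManager.setContext(context);
    try {
        doWork(batch); // hypothetical per-batch work that reads the context
    } finally {
        // Clear the thread-local so pooled threads don't leak the context.
        MetacatContextManager.removeContext();
    }
});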
Use of com.netflix.metacat.common.MetacatRequestContext in project metacat by Netflix.
From the class DatabaseServiceImpl, the method get:
@Override
public DatabaseDto get(@Nonnull final QualifiedName name, final boolean includeUserMetadata) {
    validate(name);
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    final MetacatCatalogConfig config = connectorManager.getCatalogConfig(name.getCatalogName());
    final ConnectorDatabaseService service = connectorManager.getDatabaseService(name.getCatalogName());
    final ConnectorTableService tableService = connectorManager.getTableService(name.getCatalogName());
    final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
    final List<QualifiedName> tableNames = tableService.listNames(connectorContext, name, null, null, null);
    List<QualifiedName> viewNames = Collections.emptyList();
    if (config.isIncludeViewsWithTables()) {
        // TODO JdbcMetadata returns ImmutableList.of() for views. We should change it to fetch views.
        try {
            viewNames = service.listViewNames(connectorContext, name);
        } catch (UnsupportedOperationException ignored) {
        }
    }
    // Check to see if schema exists
    if (tableNames.isEmpty() && viewNames.isEmpty() && !exists(name)) {
        throw new DatabaseNotFoundException(name);
    }
    final DatabaseDto dto = converterUtil.toDatabaseDto(service.get(connectorContext, name));
    dto.setType(connectorManager.getCatalogConfig(name).getType());
    dto.setTables(Stream.concat(tableNames.stream(), viewNames.stream())
        .map(QualifiedName::getTableName)
        .sorted(String.CASE_INSENSITIVE_ORDER)
        .collect(Collectors.toList()));
    if (includeUserMetadata) {
        log.info("Populate user metadata for schema {}", name);
        userMetadataService.populateMetadata(dto);
    }
    return dto;
}
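A hedged caller sketch, assuming an injected DatabaseService named databaseService and an SLF4J logger named log; the catalog and database names are illustrative.

// Illustrative caller: fetch the database with user metadata and list its table names.
final QualifiedName dbName = QualifiedName.ofDatabase("prodhive", "example_db");
final DatabaseDto database = databaseService.get(dbName, true);
database.getTables().forEach(tableName -> log.info("Table: {}", tableName));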
Use of com.netflix.metacat.common.MetacatRequestContext in project metacat by Netflix.
From the class PartitionServiceImpl, the method list:
@Override
public List<PartitionDto> list(final QualifiedName name, final String filter, final List<String> partitionNames,
        final Sort sort, final Pageable pageable, final boolean includeUserDefinitionMetadata,
        final boolean includeUserDataMetadata, final boolean includePartitionDetails) {
    if (Strings.isNullOrEmpty(filter)
        && (pageable == null || !pageable.isPageable())
        && (partitionNames == null || partitionNames.isEmpty())
        && config.getQualifiedNamesToThrowErrorWhenNoFilterOnListPartitions().contains(name)) {
        throw new IllegalArgumentException(String.format("No filter or limit specified for table %s", name));
    }
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    final ConnectorPartitionService service = connectorManager.getPartitionService(name.getCatalogName());
    final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto();
    requestDto.setFilter(filter);
    requestDto.setIncludePartitionDetails(includePartitionDetails);
    requestDto.setPartitionNames(partitionNames);
    final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
    final List<PartitionInfo> resultInfo = service.getPartitions(connectorContext, name,
        converterUtil.toPartitionListRequest(requestDto, pageable, sort));
    List<PartitionDto> result = Lists.newArrayList();
    if (resultInfo != null && !resultInfo.isEmpty()) {
        result = resultInfo.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
        final List<QualifiedName> names = Lists.newArrayList();
        final List<String> uris = Lists.newArrayList();
        result.forEach(partitionDto -> {
            names.add(partitionDto.getName());
            uris.add(partitionDto.getDataUri());
        });
        registry.gauge(this.partitionGetCountId.withTags(new HashMap<>(name.parts())), result.size());
        log.info("Got {} partitions for {} using filter: {} and partition names: {}",
            result.size(), name, filter, partitionNames);
        if (includeUserDefinitionMetadata || includeUserDataMetadata) {
            final List<ListenableFuture<Map<String, ObjectNode>>> futures = Lists.newArrayList();
            futures.add(threadServiceManager.getExecutor().submit(() ->
                includeUserDefinitionMetadata ? userMetadataService.getDefinitionMetadataMap(names) : Maps.newHashMap()));
            futures.add(threadServiceManager.getExecutor().submit(() ->
                includeUserDataMetadata ? userMetadataService.getDataMetadataMap(uris) : Maps.newHashMap()));
            try {
                final List<Map<String, ObjectNode>> metadataResults =
                    Futures.successfulAsList(futures).get(1, TimeUnit.HOURS);
                final Map<String, ObjectNode> definitionMetadataMap = metadataResults.get(0);
                final Map<String, ObjectNode> dataMetadataMap = metadataResults.get(1);
                result.forEach(partitionDto -> userMetadataService.populateMetadata(partitionDto,
                    definitionMetadataMap.get(partitionDto.getName().toString()),
                    dataMetadataMap.get(partitionDto.getDataUri())));
            } catch (Exception e) {
                Throwables.propagate(e);
            }
        }
    }
    return result;
}
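A hedged caller sketch, assuming an injected PartitionService named partitionService; the qualified name and filter are illustrative. Supplying a filter keeps the request clear of the no-filter guard at the top of the method.

// Illustrative caller (names and filter syntax are hypothetical).
final QualifiedName table = QualifiedName.ofTable("prodhive", "example_db", "example_table");
final List<PartitionDto> partitions = partitionService.list(
    table,
    "dateint=20230101", // filter, so the no-filter guard above does not trigger
    null,               // no explicit partition names
    null,               // no sort
    null,               // no paging
    true,               // include user definition metadata
    false,              // skip user data metadata
    false);             // skip extra partition details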