Use of com.netflix.metacat.common.server.connectors.model.PartitionInfo in project metacat by Netflix.
The class S3ConnectorPartitionService, method _getConnectorPartitions.
@SuppressWarnings("checkstyle:methodname")
private List<PartitionInfo> _getConnectorPartitions(final QualifiedName tableName,
    final String filterExpression, final List<String> partitionIds, final Sort sort,
    final Pageable pageable, final boolean includePartitionDetails) {
    // Does the filter expression reference the batch id field?
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression)
        && filterExpression.contains(FIELD_BATCHID);
    // Support for dateCreated
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression)
        && filterExpression.contains(FIELD_DATE_CREATED);
    String dateCreatedSqlCriteria = null;
    if (hasDateCreated) {
        dateCreatedSqlCriteria = getDateCreatedSqlCriteria(filterExpression);
    }
    // Table
    final Table table = getTable(tableName);
    final Collection<String> singlePartitionExprs = getSinglePartitionExprs(filterExpression);
    // When a filter expression is present, fetch unpaginated and paginate after filtering.
    final List<Partition> partitions = partitionDao.getPartitions(table.getId(), partitionIds,
        singlePartitionExprs, dateCreatedSqlCriteria, sort,
        Strings.isNullOrEmpty(filterExpression) ? pageable : null);
    final FilterPartition filter = new FilterPartition();
    List<PartitionInfo> result = partitions.stream().filter(partition -> {
        Map<String, String> values = null;
        if (hasDateCreated) {
            values = Maps.newHashMap();
            values.put(FIELD_DATE_CREATED, (partition.getCreatedDate().getTime() / 1000) + "");
        }
        return Strings.isNullOrEmpty(filterExpression)
            || filter.evaluatePartitionExpression(filterExpression, partition.getName(),
            partition.getUri(), isBatched, values);
    }).map(partition -> infoConverter.toPartitionInfo(tableName, table, partition))
        .collect(Collectors.toList());
    // Apply offset/limit in memory, since the DAO query was not paginated in the filtered case.
    if (pageable != null && pageable.isPageable() && !Strings.isNullOrEmpty(filterExpression)) {
        int limit = pageable.getOffset() + pageable.getLimit();
        if (result.size() < limit) {
            limit = result.size();
        }
        if (pageable.getOffset() > limit) {
            result = Lists.newArrayList();
        } else {
            result = result.subList(pageable.getOffset(), limit);
        }
    }
    return result;
}
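When a filter expression is present, the DAO call above is deliberately not paginated, so offset and limit are applied in memory after filtering. A minimal standalone sketch of that sublist arithmetic; the PageUtil class and page method are hypothetical, not part of metacat:

import java.util.Collections;
import java.util.List;

final class PageUtil {
    private PageUtil() {
    }

    // Mirrors the post-filter pagination above: clamp the end index to the
    // result size, and return an empty page when the offset falls beyond it.
    static <T> List<T> page(final List<T> result, final int offset, final int limit) {
        int end = offset + limit;
        if (end > result.size()) {
            end = result.size();
        }
        return offset > end ? Collections.<T>emptyList() : result.subList(offset, end);
    }
}

For example, page(result, 10, 5) on a 12-element list returns elements 10 and 11, while page(result, 20, 5) returns an empty list.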
Use of com.netflix.metacat.common.server.connectors.model.PartitionInfo in project metacat by Netflix.
The class DruidConnectorPartitionService, method getPartitions.
/**
 * {@inheritDoc}
 */
@Override
public List<PartitionInfo> getPartitions(final ConnectorRequestContext context,
    final QualifiedName name, final PartitionListRequest partitionsRequest) {
    final ObjectNode node = this.druidClient.getAllDataByName(name.getTableName());
    final DataSource dataSource = DruidConverterUtil.getDatasourceFromAllSegmentJsonObject(node);
    final List<PartitionInfo> partitionInfos = new ArrayList<>();
    dataSource.getSegmentList().forEach(
        p -> partitionInfos.add(this.druidConnectorInfoConverter.getPartitionInfoFromSegment(p)));
    return partitionInfos;
}
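The forEach-and-add loop above can equivalently be written as a stream pipeline. A sketch with the same behavior, reusing the dataSource and converter from the method above and assuming java.util.stream.Collectors is imported:

final List<PartitionInfo> partitionInfos = dataSource.getSegmentList().stream()
    .map(this.druidConnectorInfoConverter::getPartitionInfoFromSegment)
    .collect(Collectors.toList());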
Use of com.netflix.metacat.common.server.connectors.model.PartitionInfo in project metacat by Netflix.
The class S3ConnectorPartitionService, method savePartitions.
@Override
public PartitionsSaveResponse savePartitions(@Nonnull final ConnectorContext context,
    @Nonnull final QualifiedName tableName,
    @Nonnull final PartitionsSaveRequest partitionsSaveRequest) {
    log.debug("Start: Save partitions for table {}", tableName);
    // Table
    final Table table = getTable(tableName);
    // New partition ids
    final List<String> addedPartitionIds = Lists.newArrayList();
    // Updated partition ids
    final List<String> existingPartitionIds = Lists.newArrayList();
    // Existing partitions, keyed by partition name
    Map<String, Partition> existingPartitionMap = Maps.newHashMap();
    if (partitionsSaveRequest.getCheckIfExists()) {
        final List<String> partitionNames = partitionsSaveRequest.getPartitions().stream()
            .map(partition -> {
                final String partitionName = partition.getName().getPartitionName();
                PartitionUtil.validatePartitionName(partitionName, infoConverter.partitionKeys(table));
                return partitionName;
            }).collect(Collectors.toList());
        existingPartitionMap = getPartitionsByNames(table.getId(), partitionNames);
    }
    // New partitions
    final List<Partition> s3Partitions = Lists.newArrayList();
    for (PartitionInfo partition : partitionsSaveRequest.getPartitions()) {
        final String partitionName = partition.getName().getPartitionName();
        final Partition s3Partition = existingPartitionMap.get(partitionName);
        if (s3Partition == null) {
            addedPartitionIds.add(partitionName);
            s3Partitions.add(infoConverter.toPartition(table, partition));
        } else {
            // Only rewrite an existing partition when its URI has changed.
            final String partitionUri = infoConverter.getUri(partition);
            final String s3PartitionUri = s3Partition.getUri();
            if (partitionUri != null && !partitionUri.equals(s3PartitionUri)) {
                s3Partition.setUri(partitionUri);
                existingPartitionIds.add(partitionName);
                s3Partitions.add(s3Partition);
            }
        }
    }
    final List<String> partitionIdsForDeletes = partitionsSaveRequest.getPartitionIdsForDeletes();
    if (partitionIdsForDeletes != null && !partitionIdsForDeletes.isEmpty()) {
        partitionDao.deleteByNames(catalogName, tableName.getDatabaseName(),
            tableName.getTableName(), partitionIdsForDeletes);
    }
    partitionDao.save(s3Partitions);
    log.debug("End: Save partitions for table {}", tableName);
    return PartitionsSaveResponse.builder().added(addedPartitionIds)
        .updated(existingPartitionIds).build();
}
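A hedged usage sketch of the caller side. The setter and getter names are assumed from the request fields and builder fields used above, and partitionInfos, staleIds, connectorContext, and s3ConnectorPartitionService are illustrative names standing in for caller state:

// Illustrative only: build a save request, submit it, and read back the diff.
final PartitionsSaveRequest request = new PartitionsSaveRequest();
request.setPartitions(partitionInfos);        // partitions to add or update
request.setCheckIfExists(true);               // diff against existing partitions by name
request.setPartitionIdsForDeletes(staleIds);  // optional deletes in the same call

final PartitionsSaveResponse response =
    s3ConnectorPartitionService.savePartitions(connectorContext, tableName, request);
// "added" holds names that were new; "updated" holds names whose URI changed.
log.info("added={} updated={}", response.getAdded(), response.getUpdated());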
Use of com.netflix.metacat.common.server.connectors.model.PartitionInfo in project metacat by Netflix.
The class PartitionServiceImpl, method list.
@Override
public List<PartitionDto> list(final QualifiedName name, final String filter,
    final List<String> partitionNames, final Sort sort, final Pageable pageable,
    final boolean includeUserDefinitionMetadata, final boolean includeUserDataMetadata,
    final boolean includePartitionDetails) {
    // Guard: for configured tables, refuse unbounded partition listings.
    if (Strings.isNullOrEmpty(filter) && (pageable == null || !pageable.isPageable())
        && (partitionNames == null || partitionNames.isEmpty())
        && config.getQualifiedNamesToThrowErrorWhenNoFilterOnListPartitions().contains(name)) {
        throw new IllegalArgumentException(
            String.format("No filter or limit specified for table %s", name));
    }
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    final ConnectorPartitionService service =
        connectorManager.getPartitionService(name.getCatalogName());
    final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto();
    requestDto.setFilter(filter);
    requestDto.setIncludePartitionDetails(includePartitionDetails);
    requestDto.setPartitionNames(partitionNames);
    final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
    final List<PartitionInfo> resultInfo = service.getPartitions(connectorContext, name,
        converterUtil.toPartitionListRequest(requestDto, pageable, sort));
    List<PartitionDto> result = Lists.newArrayList();
    if (resultInfo != null && !resultInfo.isEmpty()) {
        result = resultInfo.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
        final List<QualifiedName> names = Lists.newArrayList();
        final List<String> uris = Lists.newArrayList();
        result.forEach(partitionDto -> {
            names.add(partitionDto.getName());
            uris.add(partitionDto.getDataUri());
        });
        registry.gauge(this.partitionGetCountId.withTags(new HashMap<>(name.parts())), result.size());
        log.info("Got {} partitions for {} using filter: {} and partition names: {}",
            result.size(), name, filter, partitionNames);
        if (includeUserDefinitionMetadata || includeUserDataMetadata) {
            // Fetch definition metadata and data metadata in parallel.
            final List<ListenableFuture<Map<String, ObjectNode>>> futures = Lists.newArrayList();
            futures.add(threadServiceManager.getExecutor().submit(
                () -> includeUserDefinitionMetadata
                    ? userMetadataService.getDefinitionMetadataMap(names) : Maps.newHashMap()));
            futures.add(threadServiceManager.getExecutor().submit(
                () -> includeUserDataMetadata
                    ? userMetadataService.getDataMetadataMap(uris) : Maps.newHashMap()));
            try {
                final List<Map<String, ObjectNode>> metadataResults =
                    Futures.successfulAsList(futures).get(1, TimeUnit.HOURS);
                final Map<String, ObjectNode> definitionMetadataMap = metadataResults.get(0);
                final Map<String, ObjectNode> dataMetadataMap = metadataResults.get(1);
                result.forEach(partitionDto -> userMetadataService.populateMetadata(partitionDto,
                    definitionMetadataMap.get(partitionDto.getName().toString()),
                    dataMetadataMap.get(partitionDto.getDataUri())));
            } catch (Exception e) {
                Throwables.propagate(e);
            }
        }
    }
    return result;
}
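The metadata lookup above fans out two tasks and joins them with Futures.successfulAsList, which preserves submission order and substitutes null for any future that failed. A self-contained sketch of that Guava pattern with placeholder tasks in place of the metacat services:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public final class ParallelFetch {
    public static void main(final String[] args) throws Exception {
        final ListeningExecutorService executor =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
        // Two independent lookups, submitted concurrently.
        final ListenableFuture<String> definitions = executor.submit(() -> "definition-metadata");
        final ListenableFuture<String> data = executor.submit(() -> "data-metadata");
        // Join with a timeout; indexes 0 and 1 match the submission order.
        final List<String> results =
            Futures.successfulAsList(definitions, data).get(1, TimeUnit.HOURS);
        System.out.println(results.get(0) + " / " + results.get(1));
        executor.shutdownNow();
    }
}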
Use of com.netflix.metacat.common.server.connectors.model.PartitionInfo in project metacat by Netflix.
The class HiveConnectorPartitionService, method getPartitions.
/**
 * {@inheritDoc}
 */
@Override
public List<PartitionInfo> getPartitions(@Nonnull @NonNull final ConnectorContext requestContext,
    @Nonnull @NonNull final QualifiedName tableName,
    @Nonnull @NonNull final PartitionListRequest partitionsRequest) {
    try {
        final List<Partition> partitions = getPartitions(tableName, partitionsRequest.getFilter(),
            partitionsRequest.getPartitionNames(), partitionsRequest.getSort(),
            partitionsRequest.getPageable());
        final Table table = metacatHiveClient.getTableByName(tableName.getDatabaseName(),
            tableName.getTableName());
        final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableName, table);
        final List<PartitionInfo> partitionInfos = new ArrayList<>();
        for (Partition partition : partitions) {
            partitionInfos.add(hiveMetacatConverters.toPartitionInfo(tableInfo, partition));
        }
        return partitionInfos;
    } catch (NoSuchObjectException exception) {
        // Translate metastore exceptions into metacat connector exceptions.
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed get partitions for hive table %s", tableName), e);
    }
}
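A hedged caller-side sketch showing the contract that this exception translation establishes; hivePartitionService, requestContext, and listRequest are illustrative names, not metacat identifiers:

try {
    final List<PartitionInfo> partitions =
        hivePartitionService.getPartitions(requestContext, tableName, listRequest);
    partitions.forEach(p -> log.info("partition: {}", p.getName()));
} catch (TableNotFoundException e) {
    // The metastore's NoSuchObjectException surfaces as this.
    log.warn("Table {} does not exist", tableName);
} catch (InvalidMetaException e) {
    // MetaException / InvalidObjectException: the stored metadata is unusable.
    log.error("Bad metadata for {}", tableName, e);
} catch (ConnectorException e) {
    // Any other Thrift failure while talking to the metastore.
    log.error("Metastore call failed for {}", tableName, e);
}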