Use of com.netflix.metacat.common.dto.Pageable in the Netflix metacat project: class HiveConnectorPartitionService, method getPartitionKeys.
/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionKeys(
    @Nonnull @NonNull final ConnectorContext requestContext,
    @Nonnull @NonNull final QualifiedName tableName,
    @Nonnull @NonNull final PartitionListRequest partitionsRequest
) {
    final String filterExpression = partitionsRequest.getFilter();
    final List<String> partitionIds = partitionsRequest.getPartitionNames();
    List<String> names = Lists.newArrayList();
    final Pageable pageable = partitionsRequest.getPageable();
    try {
        if (filterExpression != null || (partitionIds != null && !partitionIds.isEmpty())) {
            // A filter expression or an explicit partition-id list is present:
            // resolve the matching partitions (pagination is applied inside
            // getPartitions) and derive each partition's name from the table schema.
            final Table table = metacatHiveClient.getTableByName(
                tableName.getDatabaseName(), tableName.getTableName());
            for (final Partition partition : getPartitions(
                tableName, filterExpression, partitionIds, partitionsRequest.getSort(), pageable)) {
                names.add(getNameOfPartition(table, partition));
            }
        } else {
            // No filter: fetch every partition name and paginate in memory.
            names = metacatHiveClient.getPartitionNames(
                tableName.getDatabaseName(), tableName.getTableName());
            return ConnectorUtils.paginate(names, pageable);
        }
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(
            String.format("Failed to get partition keys for hive table %s", tableName), e);
    }
    return names;
}
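The unfiltered branch delegates to ConnectorUtils.paginate, which trims the full name list according to the request's Pageable. Below is a minimal sketch of that offset/limit trimming, assuming Pageable exposes Integer getLimit() and getOffset() accessors (the Pageable(limit, offset) constructor and setOffset call in the next example suggest a plain bean); paginateNames is a hypothetical helper, not the metacat API:

import com.netflix.metacat.common.dto.Pageable;
import java.util.Collections;
import java.util.List;

final class PaginateSketch {
    // Hypothetical stand-in for ConnectorUtils.paginate(names, pageable):
    // skip `offset` items, then cap the result at `limit`.
    static List<String> paginateNames(final List<String> names, final Pageable pageable) {
        if (pageable == null || pageable.getLimit() == null) {
            return names; // no paging requested
        }
        final int offset = pageable.getOffset() == null ? 0 : pageable.getOffset();
        if (offset >= names.size()) {
            return Collections.emptyList();
        }
        final int end = Math.min(names.size(), offset + pageable.getLimit());
        return names.subList(offset, end);
    }
}

Returning a subList view keeps the sketch allocation-free; a caller that mutates the result would need a defensive copy.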
Use of com.netflix.metacat.common.dto.Pageable in the Netflix metacat project: class ElasticSearchMetacatRefresh, method _processPartitions.
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processPartitions(final List<QualifiedName> qNames) {
    final List<QualifiedName> excludeQualifiedNames =
        config.getElasticSearchRefreshExcludeQualifiedNames();
    final List<String> tables = elasticSearchUtil.getTableIdsByCatalogs(
        ElasticSearchDoc.Type.table.name(), qNames, excludeQualifiedNames);
    final List<ListenableFuture<ListenableFuture<Void>>> futures = tables.stream()
        .map(s -> service.submit(() -> {
            final QualifiedName tableName = QualifiedName.fromString(s, false);
            final List<ListenableFuture<Void>> indexFutures = Lists.newArrayList();
            int offset = 0;
            int count;
            final Sort sort;
            if ("s3".equals(tableName.getCatalogName())
                || "aegisthus".equals(tableName.getCatalogName())) {
                sort = new Sort("id", SortOrder.ASC);
            } else {
                sort = new Sort("part_id", SortOrder.ASC);
            }
            final Pageable pageable = new Pageable(10000, offset);
            do {
                // Fetch one page of partitions, index it in chunks of 1000,
                // then advance the offset; a short page ends the loop.
                final List<PartitionDto> partitionDtos = partitionService.list(
                    tableName, null, null, sort, pageable, true, true, true);
                count = partitionDtos.size();
                if (!partitionDtos.isEmpty()) {
                    final List<List<PartitionDto>> partitionedPartitionDtos =
                        Lists.partition(partitionDtos, 1000);
                    partitionedPartitionDtos.forEach(subPartitionsDtos ->
                        indexFutures.add(indexPartitionDtos(tableName, subPartitionsDtos)));
                    offset = offset + count;
                    pageable.setOffset(offset);
                }
            } while (count == 10000);
            return Futures.transform(
                Futures.successfulAsList(indexFutures), Functions.constant((Void) null));
        }))
        .collect(Collectors.toList());
    final ListenableFuture<Void> processPartitionsFuture =
        Futures.transformAsync(Futures.successfulAsList(futures), input -> {
            final List<ListenableFuture<Void>> inputFuturesWithoutNulls =
                input.stream().filter(NOT_NULL).collect(Collectors.toList());
            return Futures.transform(
                Futures.successfulAsList(inputFuturesWithoutNulls), Functions.constant(null));
        });
    return Futures.transformAsync(processPartitionsFuture, input -> {
        // All partitions indexed: refresh the index, then clean up stale
        // partition documents for each table.
        elasticSearchUtil.refresh();
        final List<ListenableFuture<Void>> cleanUpFutures = tables.stream()
            .map(s -> service.submit(() ->
                partitionsCleanUp(QualifiedName.fromString(s, false), excludeQualifiedNames)))
            .collect(Collectors.toList());
        return Futures.transform(Futures.successfulAsList(cleanUpFutures), Functions.constant(null));
    });
}
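The do/while above is a standard offset-paging scan: request pages of 10,000 partitions, advance the Pageable offset by the number of rows returned, and stop at the first short page. Here is a condensed sketch of that loop, with fetchPage as a hypothetical stand-in for partitionService.list:

import com.netflix.metacat.common.dto.Pageable;
import java.util.List;
import java.util.function.Function;

final class PagedScanSketch {
    private static final int PAGE_SIZE = 10000;

    // fetchPage is a hypothetical stand-in for partitionService.list(...),
    // returning at most PAGE_SIZE rows for the given Pageable.
    static <T> int scanAll(final Function<Pageable, List<T>> fetchPage) {
        final Pageable pageable = new Pageable(PAGE_SIZE, 0);
        int total = 0;
        int count;
        do {
            final List<T> page = fetchPage.apply(pageable);
            count = page.size();
            total += count;
            pageable.setOffset(total); // advance past the rows already seen
        } while (count == PAGE_SIZE); // a short (or empty) page means we are done
        return total;
    }
}

Note that when the final page is exactly full, the loop issues one extra call that returns an empty page before terminating, matching the behavior of _processPartitions.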