Use of com.google.common.util.concurrent.Futures in project metacat by Netflix.
Class ElasticSearchRefresh, method _processPartitions:
@SuppressWarnings("checkstyle:methodname")
private ListenableFuture<Void> _processPartitions(final List<QualifiedName> qNames) {
    final List<QualifiedName> excludeQualifiedNames = config.getElasticSearchRefreshExcludeQualifiedNames();
    final List<String> tables = elasticSearchUtil.getTableIdsByCatalogs(
        ElasticSearchDoc.Type.table.name(), qNames, excludeQualifiedNames);
    final List<ListenableFuture<ListenableFuture<Void>>> futures = tables.stream()
        .map(s -> service.submit(() -> {
            final QualifiedName tableName = QualifiedName.fromString(s, false);
            final List<ListenableFuture<Void>> indexFutures = Lists.newArrayList();
            int offset = 0;
            int count;
            final Sort sort;
            if ("s3".equals(tableName.getCatalogName()) || "aegisthus".equals(tableName.getCatalogName())) {
                sort = new Sort("id", SortOrder.ASC);
            } else {
                sort = new Sort("part_id", SortOrder.ASC);
            }
            final Pageable pageable = new Pageable(10000, offset);
            do {
                final List<PartitionDto> partitionDtos = partitionService.list(
                    tableName, sort, pageable, true, true,
                    new GetPartitionsRequestDto(null, null, true, true));
                count = partitionDtos.size();
                if (!partitionDtos.isEmpty()) {
                    final List<List<PartitionDto>> partitionedPartitionDtos = Lists.partition(partitionDtos, 1000);
                    partitionedPartitionDtos.forEach(
                        subPartitionsDtos -> indexFutures.add(indexPartitionDtos(tableName, subPartitionsDtos)));
                    offset = offset + count;
                    pageable.setOffset(offset);
                }
            } while (count == 10000);
            return Futures.transform(
                Futures.successfulAsList(indexFutures), Functions.constant((Void) null), defaultService);
        }))
        .collect(Collectors.toList());
    final ListenableFuture<Void> processPartitionsFuture = Futures.transformAsync(
        Futures.successfulAsList(futures), input -> {
            final List<ListenableFuture<Void>> inputFuturesWithoutNulls =
                input.stream().filter(NOT_NULL).collect(Collectors.toList());
            return Futures.transform(
                Futures.successfulAsList(inputFuturesWithoutNulls), Functions.constant(null), defaultService);
        }, defaultService);
    return Futures.transformAsync(processPartitionsFuture, input -> {
        elasticSearchUtil.refresh();
        final List<ListenableFuture<Void>> cleanUpFutures = tables.stream()
            .map(s -> service.submit(
                () -> partitionsCleanUp(QualifiedName.fromString(s, false), excludeQualifiedNames)))
            .collect(Collectors.toList());
        return Futures.transform(
            Futures.successfulAsList(cleanUpFutures), Functions.constant(null), defaultService);
    }, defaultService);
}
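This method fans indexing work out per table, gathers the per-table futures with Futures.successfulAsList (which, unlike allAsList, never fails: inputs that failed or were cancelled appear as nulls in the combined result), and flattens the nested futures with transformAsync. That is why the NOT_NULL filter above exists: it drops the entries for tables whose submission failed. A minimal, self-contained sketch of that fan-out/gather shape, with made-up task bodies (string lengths standing in for indexing work):

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;

public final class FanOutGatherSketch {
    public static void main(String[] args) throws Exception {
        final ListeningExecutorService service =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        // Fan out: one task per input, each returning a ListenableFuture.
        final List<ListenableFuture<Integer>> futures = ImmutableList.of("a", "bb", "ccc").stream()
            .map(s -> service.submit(() -> s.length()))
            .collect(Collectors.toList());
        // Gather: successfulAsList substitutes null for any failed input
        // instead of failing the combined future.
        final ListenableFuture<List<Integer>> gathered = Futures.successfulAsList(futures);
        // Collapse the result to Void once everything has settled.
        final ListenableFuture<Void> done =
            Futures.transform(gathered, ignored -> (Void) null, MoreExecutors.directExecutor());
        done.get();
        service.shutdown();
    }
}

successfulAsList fits the refresh use case because one table failing to index should not abort the refresh of every other table.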
Use of com.google.common.util.concurrent.Futures in project metacat by Netflix.
Class DirectSqlGetPartition, method getPartitions:
private List<PartitionHolder> getPartitions(
    final String databaseName,
    final String tableName,
    @Nullable final List<String> partitionIds,
    @Nullable final String filterExpression,
    @Nullable final Sort sort,
    @Nullable final Pageable pageable,
    final boolean includePartitionDetails,
    final boolean forceDisableAudit) {
    final FilterPartition filter =
        config.escapePartitionNameOnFilter() ? new HiveFilterPartition() : new FilterPartition();
    // batch exists
    final boolean isBatched =
        !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated =
        !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    // Handler for reading the result set
    final ResultSetExtractor<List<PartitionHolder>> handler = rs -> {
        final List<PartitionHolder> result = Lists.newArrayList();
        final QualifiedName tableQName = QualifiedName.ofTable(catalogName, databaseName, tableName);
        int noOfRows = 0;
        while (rs.next()) {
            noOfRows++;
            final String name = rs.getString("name");
            final String uri = rs.getString("uri");
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, createdDate + "");
            }
            if (Strings.isNullOrEmpty(filterExpression)
                || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                final Long id = rs.getLong("id");
                final Long sdId = rs.getLong("sd_id");
                final Long serdeId = rs.getLong("serde_id");
                final String inputFormat = rs.getString("input_format");
                final String outputFormat = rs.getString("output_format");
                final String serializationLib = rs.getString("slib");
                final StorageInfo storageInfo = new StorageInfo();
                storageInfo.setUri(uri);
                storageInfo.setInputFormat(inputFormat);
                storageInfo.setOutputFormat(outputFormat);
                storageInfo.setSerializationLib(serializationLib);
                final AuditInfo auditInfo = new AuditInfo();
                auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                result.add(new PartitionHolder(id, sdId, serdeId,
                    PartitionInfo.builder()
                        .name(QualifiedName.ofPartition(catalogName, databaseName, tableName, name))
                        .auditInfo(auditInfo)
                        .serde(storageInfo)
                        .build()));
            }
            // Fail if the number of partitions exceeds the threshold limit.
            if (result.size() > config.getMaxPartitionsThreshold()) {
                registry.counter(registry
                    .createId(HiveMetrics.CounterHiveGetPartitionsExceedThresholdFailure.getMetricName())
                    .withTags(tableQName.parts())).increment();
                final String message = String.format(
                    "Number of partitions queried for table %s exceeded the threshold %d",
                    tableQName, config.getMaxPartitionsThreshold());
                log.warn(message);
                throw new IllegalArgumentException(message);
            }
        }
        registry.gauge(registry
            .createId(HiveMetrics.GaugePreExpressionFilterGetPartitionsCount.getMetricName())
            .withTags(tableQName.parts())).set(noOfRows);
        return result;
    };
    final List<PartitionHolder> partitions = this.getHandlerResults(
        databaseName, tableName, filterExpression, partitionIds,
        SQL.SQL_GET_PARTITIONS, handler, sort, pageable, forceDisableAudit);
    if (includePartitionDetails && !partitions.isEmpty()) {
        final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
        for (PartitionHolder partitionHolder : partitions) {
            partIds.add(partitionHolder.getId());
            sdIds.add(partitionHolder.getSdId());
            serdeIds.add(partitionHolder.getSerdeId());
        }
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
        futures.add(threadServiceManager.getExecutor().submit(
            () -> populateParameters(partIds, SQL.SQL_GET_PARTITION_PARAMS, "part_id", partitionParams)));
        final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
        if (!sdIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(
                () -> populateParameters(sdIds, SQL.SQL_GET_SD_PARAMS, "sd_id", sdParams)));
        }
        final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
        if (!serdeIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(
                () -> populateParameters(serdeIds, SQL.SQL_GET_SERDE_PARAMS, "serde_id", serdeParams)));
        }
        ListenableFuture<List<Void>> future = null;
        try {
            future = Futures.allAsList(futures);
            final int getPartitionsDetailsTimeout = Integer.parseInt(
                configuration.getOrDefault(HiveConfigConstants.GET_PARTITION_DETAILS_TIMEOUT, "120"));
            future.get(getPartitionsDetailsTimeout, TimeUnit.SECONDS);
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            try {
                if (future != null) {
                    future.cancel(true);
                }
            } catch (Exception ignored) {
                log.warn("Failed cancelling the task that gets the partition details.");
            }
            Throwables.propagate(e);
        }
        for (PartitionHolder partitionHolder : partitions) {
            partitionHolder.getPartitionInfo().setMetadata(partitionParams.get(partitionHolder.getId()));
            partitionHolder.getPartitionInfo().getSerde().setParameters(sdParams.get(partitionHolder.getSdId()));
            partitionHolder.getPartitionInfo().getSerde()
                .setSerdeInfoParameters(serdeParams.get(partitionHolder.getSerdeId()));
        }
    }
    return partitions;
}
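The detail-loading step runs the three parameter queries in parallel, gathers them with Futures.allAsList, and bounds the overall wait with future.get(timeout, unit), cancelling the batch if it does not finish in time. A stripped-down sketch of that gather-with-deadline shape, with invented task bodies and the 120-second default hard-coded (the real method reads it from configuration); the sketch rethrows directly rather than using the deprecated Throwables.propagate:

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public final class BoundedGatherSketch {
    public static void main(String[] args) {
        final ListeningExecutorService executor =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(3));
        final List<ListenableFuture<String>> futures = ImmutableList.of(
            executor.submit(() -> "partition params"),
            executor.submit(() -> "sd params"),
            executor.submit(() -> "serde params"));
        // allAsList fails fast if any input fails; the get() below also bounds the wait.
        final ListenableFuture<List<String>> all = Futures.allAsList(futures);
        try {
            System.out.println(all.get(120, TimeUnit.SECONDS));
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            // On timeout or failure, cancel outstanding work so threads don't keep running.
            all.cancel(true);
            throw new RuntimeException(e);
        } finally {
            executor.shutdown();
        }
    }
}

Cancelling the combined future on failure, as the metacat code does, matters because allAsList leaves its remaining inputs running unless it is cancelled with mayInterruptIfRunning.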
Use of com.google.common.util.concurrent.Futures in project genius by opendaylight.
Class OvsInterfaceConfigAddHelper, method addConfiguration:
public List<ListenableFuture<Void>> addConfiguration(ParentRefs parentRefs, Interface interfaceNew) {
    List<ListenableFuture<Void>> futures = new ArrayList<>();
    // TODO Disentangle the transactions
    futures.add(txRunner.callWithNewWriteOnlyTransactionAndSubmit(configTx -> {
        futures.add(txRunner.callWithNewWriteOnlyTransactionAndSubmit(operTx -> {
            IfTunnel ifTunnel = interfaceNew.getAugmentation(IfTunnel.class);
            if (ifTunnel != null) {
                addTunnelConfiguration(parentRefs, interfaceNew, ifTunnel, configTx, operTx, futures);
            } else {
                addVlanConfiguration(interfaceNew, parentRefs, configTx, operTx, futures);
            }
        }));
    }));
    return futures;
}
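The TODO hints at the awkward shape here: the futures list is mutated from inside the transaction callbacks, which is only safe if txRunner invokes each callback synchronously before returning the commit future (an assumption this snippet appears to lean on). A Futures-only way to sequence two dependent asynchronous steps without sharing a mutable list is transformAsync; a minimal sketch under that framing, with hypothetical println bodies standing in for the config and oper datastore writes:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public final class ChainedStepsSketch {
    public static void main(String[] args) throws Exception {
        final ListeningExecutorService service =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        // First step: stand-in for committing the config transaction.
        final ListenableFuture<Void> configDone = service.<Void>submit(() -> {
            System.out.println("write config datastore");
            return null;
        });
        // Second step starts only after the first commit future completes.
        final ListenableFuture<Void> operDone = Futures.transformAsync(
            configDone,
            ignored -> service.<Void>submit(() -> {
                System.out.println("write oper datastore");
                return null;
            }),
            MoreExecutors.directExecutor());
        operDone.get();
        service.shutdown();
    }
}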
Use of com.google.common.util.concurrent.Futures in project intellij by bazelbuild.
Class PrefetchServiceImpl, method prefetchFiles:
private ListenableFuture<?> prefetchFiles(
    Project project,
    Set<File> excludeDirectories,
    Collection<File> files,
    boolean refetchCachedFiles,
    boolean fetchFileTypes) {
    if (files.isEmpty() || !enabled(project)) {
        return Futures.immediateFuture(null);
    }
    if (!refetchCachedFiles) {
        long startTime = System.currentTimeMillis();
        // ignore recently fetched files
        files = files.stream().filter(file -> shouldPrefetch(file, startTime)).collect(Collectors.toList());
    }
    FileOperationProvider provider = FileOperationProvider.getInstance();
    List<ListenableFuture<File>> canonicalFiles = files.stream()
        .map(file -> FetchExecutor.EXECUTOR.submit(() -> toCanonicalFile(provider, file)))
        .collect(Collectors.toList());
    List<ListenableFuture<?>> futures = Lists.newArrayList();
    for (Prefetcher prefetcher : Prefetcher.EP_NAME.getExtensions()) {
        futures.add(prefetcher.prefetchFiles(
            project, excludeDirectories, canonicalFiles, FetchExecutor.EXECUTOR, fetchFileTypes));
    }
    return Futures.allAsList(futures);
}
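Two idioms from this method are worth isolating: Futures.immediateFuture(null) gives callers an already-completed future on the early-exit path, so they never have to null-check, and Futures.allAsList collapses the per-prefetcher futures into one future that completes when all of them do. A compact sketch of both, with hypothetical names (a prefetch helper, strings standing in for File objects):

import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;

public final class PrefetchSketch {
    static ListenableFuture<?> prefetch(List<String> files, ListeningExecutorService executor) {
        if (files.isEmpty()) {
            // Nothing to do: return a completed future instead of null.
            return Futures.immediateFuture(null);
        }
        List<ListenableFuture<?>> futures = new ArrayList<>();
        for (String file : files) {
            futures.add(executor.submit(() -> System.out.println("prefetching " + file)));
        }
        // The combined future completes once every per-file fetch completes.
        return Futures.allAsList(futures);
    }

    public static void main(String[] args) throws Exception {
        final ListeningExecutorService executor =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
        prefetch(ImmutableList.of("a.txt", "b.txt"), executor).get();
        executor.shutdown();
    }
}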
Use of com.google.common.util.concurrent.Futures in project intellij by bazelbuild.
Class BlazeIdeInterfaceAspectsImpl, method updateState:
@Nullable
static State updateState(
    Project project,
    BlazeContext parentContext,
    @Nullable State prevState,
    ImmutableMap<File, Long> fileState,
    BlazeConfigurationHandler configHandler,
    WorkspaceLanguageSettings workspaceLanguageSettings,
    ImportRoots importRoots,
    AspectStrategy aspectStrategy,
    List<File> newFiles,
    List<File> removedFiles,
    boolean mergeWithOldState) {
    Result<State> result = Scope.push(parentContext, (ScopedFunction<Result<State>>) context -> {
        context.push(new TimingScope("UpdateTargetMap", EventType.Other));
        ImmutableMap<File, Long> nextFileState = fileState;
        if (mergeWithOldState && prevState != null) {
            ImmutableMap.Builder<File, Long> fileStateBuilder =
                ImmutableMap.<File, Long>builder().putAll(fileState);
            for (Map.Entry<File, Long> entry : prevState.fileState.entrySet()) {
                if (!fileState.containsKey(entry.getKey())) {
                    fileStateBuilder.put(entry);
                }
            }
            nextFileState = fileStateBuilder.build();
        }
        State state = new State();
        state.fileState = nextFileState;
        state.workspaceLanguageSettings = workspaceLanguageSettings;
        state.aspectStrategyName = aspectStrategy.getName();
        Map<TargetKey, TargetIdeInfo> targetMap = Maps.newHashMap();
        if (prevState != null) {
            targetMap.putAll(prevState.targetMap.map());
            state.fileToTargetMapKey.putAll(prevState.fileToTargetMapKey);
        }
        if (!mergeWithOldState) {
            for (File removedFile : removedFiles) {
                TargetKey key = state.fileToTargetMapKey.remove(removedFile);
                if (key != null) {
                    targetMap.remove(key);
                }
            }
        }
        AtomicLong totalSizeLoaded = new AtomicLong(0);
        Set<LanguageClass> ignoredLanguages = Sets.newConcurrentHashSet();
        ListeningExecutorService executor = BlazeExecutor.getInstance().getExecutor();
        List<ListenableFuture<TargetFilePair>> futures = Lists.newArrayList();
        for (File file : newFiles) {
            futures.add(executor.submit(() -> {
                totalSizeLoaded.addAndGet(file.length());
                IntellijIdeInfo.TargetIdeInfo message = aspectStrategy.readAspectFile(file);
                TargetIdeInfo target =
                    protoToTarget(workspaceLanguageSettings, importRoots, message, ignoredLanguages);
                return new TargetFilePair(file, target);
            }));
        }
        Set<TargetKey> newTargets = new HashSet<>();
        Set<String> configurations = new LinkedHashSet<>();
        configurations.add(configHandler.defaultConfigurationPathComponent);
        int duplicateTargetLabels = 0;
        try {
            for (TargetFilePair targetFilePair : Futures.allAsList(futures).get()) {
                if (targetFilePair.target != null) {
                    File file = targetFilePair.file;
                    String config = configHandler.getConfigurationPathComponent(file);
                    configurations.add(config);
                    TargetKey key = targetFilePair.target.key;
                    if (targetMap.putIfAbsent(key, targetFilePair.target) == null) {
                        state.fileToTargetMapKey.forcePut(file, key);
                    } else {
                        if (!newTargets.add(key)) {
                            duplicateTargetLabels++;
                        }
                        if (Objects.equals(config, configHandler.defaultConfigurationPathComponent)) {
                            targetMap.put(key, targetFilePair.target);
                            state.fileToTargetMapKey.forcePut(file, key);
                        }
                    }
                }
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return Result.error(null);
        } catch (ExecutionException e) {
            return Result.error(e);
        }
        context.output(PrintOutput.log(String.format(
            "Loaded %d aspect files, total size %dkB", newFiles.size(), totalSizeLoaded.get() / 1024)));
        if (duplicateTargetLabels > 0) {
            context.output(new PerformanceWarning(String.format(
                "There were %d duplicate rules, built with the following configurations: %s.\n"
                    + "Your IDE sync is slowed down by ~%d%%.",
                duplicateTargetLabels, configurations, (100 * duplicateTargetLabels / targetMap.size()))));
        }
        ignoredLanguages.retainAll(
            LanguageSupport.availableAdditionalLanguages(workspaceLanguageSettings.getWorkspaceType()));
        warnIgnoredLanguages(project, context, ignoredLanguages);
        state.targetMap = new TargetMap(ImmutableMap.copyOf(targetMap));
        return Result.of(state);
    });
    if (result.error != null) {
        logger.error(result.error);
        return null;
    }
    return result.result;
}
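The gather step above blocks on Futures.allAsList(futures).get() and handles the two failure modes separately: on InterruptedException it restores the thread's interrupt flag before giving up, and on ExecutionException it surfaces the underlying error. A reduced sketch of that blocking-gather shape, with squaring standing in for parsing an aspect file:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;

public final class BlockingGatherSketch {
    public static void main(String[] args) {
        final ListeningExecutorService executor =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        final List<ListenableFuture<Integer>> futures = new ArrayList<>();
        for (int i = 0; i < 8; i++) {
            final int n = i;
            // Stand-in for reading and parsing one aspect file.
            futures.add(executor.submit(() -> n * n));
        }
        try {
            // Block until all tasks finish; allAsList fails if any task throws.
            for (int square : Futures.allAsList(futures).get()) {
                System.out.println(square);
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();  // preserve the interrupt for callers
        } catch (ExecutionException e) {
            throw new RuntimeException(e.getCause());
        } finally {
            executor.shutdown();
        }
    }
}

Swallowing the InterruptedException without re-interrupting would silently strip the cancellation signal, which is why the original code calls Thread.currentThread().interrupt() before returning an error result.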