Use of com.google.common.util.concurrent.Futures in project druid by druid-io.
The class SeekableStreamSupervisor, method verifyAndMergeCheckpoints.
/**
 * This method does two things:
 * 1. Ensures the checkpoint information in the taskGroup is consistent with that of the tasks; if not, kills the
 *    inconsistent tasks.
 * 2. Truncates the checkpoints in the taskGroup for which segments have already been published, so that any newly
 *    created tasks for the taskGroup start indexing from after the latest published sequences.
 */
private void verifyAndMergeCheckpoints(final TaskGroup taskGroup) {
  final int groupId = taskGroup.groupId;
  final List<Pair<String, TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>>> taskSequences = new ArrayList<>();
  final List<ListenableFuture<TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>>> futures = new ArrayList<>();
  final List<String> taskIds = new ArrayList<>();

  for (String taskId : taskGroup.taskIds()) {
    final ListenableFuture<TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>> checkpointsFuture =
        taskClient.getCheckpointsAsync(taskId, true);
    futures.add(checkpointsFuture);
    taskIds.add(taskId);
  }

  try {
    List<TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>>> futuresResult =
        Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int i = 0; i < futuresResult.size(); i++) {
      final TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>> checkpoints = futuresResult.get(i);
      final String taskId = taskIds.get(i);
      if (checkpoints == null) {
        try {
          // catch the exception in the failed future (successfulAsList returns null entries for failed futures)
          futures.get(i).get();
        } catch (Exception e) {
          stateManager.recordThrowableEvent(e);
          log.error(e, "Problem while getting checkpoints for task [%s], killing the task", taskId);
          killTask(taskId, "Exception[%s] while getting checkpoints", e.getClass());
          taskGroup.tasks.remove(taskId);
        }
      } else if (checkpoints.isEmpty()) {
        log.warn("Ignoring task [%s], as it probably has not started running yet", taskId);
      } else {
        taskSequences.add(new Pair<>(taskId, checkpoints));
      }
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }

  final DataSourceMetadata rawDataSourceMetadata = indexerMetadataStorageCoordinator.retrieveDataSourceMetadata(dataSource);
  if (rawDataSourceMetadata != null && !checkSourceMetadataMatch(rawDataSourceMetadata)) {
    throw new IAE("Datasource metadata instance does not match required, found instance of [%s]", rawDataSourceMetadata.getClass());
  }

  @SuppressWarnings("unchecked")
  final SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType> latestDataSourceMetadata =
      (SeekableStreamDataSourceMetadata<PartitionIdType, SequenceOffsetType>) rawDataSourceMetadata;
  final boolean hasValidOffsetsFromDb = latestDataSourceMetadata != null
      && latestDataSourceMetadata.getSeekableStreamSequenceNumbers() != null
      && ioConfig.getStream().equals(latestDataSourceMetadata.getSeekableStreamSequenceNumbers().getStream());
  final Map<PartitionIdType, SequenceOffsetType> latestOffsetsFromDb;
  if (hasValidOffsetsFromDb) {
    latestOffsetsFromDb = latestDataSourceMetadata.getSeekableStreamSequenceNumbers().getPartitionSequenceNumberMap();
  } else {
    latestOffsetsFromDb = null;
  }

  // order tasks of this taskGroup by the latest sequenceId
  taskSequences.sort((o1, o2) -> o2.rhs.firstKey().compareTo(o1.rhs.firstKey()));

  final Set<String> tasksToKill = new HashSet<>();
  final AtomicInteger earliestConsistentSequenceId = new AtomicInteger(-1);
  int taskIndex = 0;

  while (taskIndex < taskSequences.size()) {
    TreeMap<Integer, Map<PartitionIdType, SequenceOffsetType>> taskCheckpoints = taskSequences.get(taskIndex).rhs;
    String taskId = taskSequences.get(taskIndex).lhs;
    if (earliestConsistentSequenceId.get() == -1) {
      // find the first task whose checkpoints are consistent with the latest sequences persisted in the metadata store
      if (taskCheckpoints.entrySet().stream().anyMatch(
              sequenceCheckpoint -> sequenceCheckpoint.getValue().entrySet().stream().allMatch(partitionOffset -> {
                OrderedSequenceNumber<SequenceOffsetType> sequence = makeSequenceNumber(partitionOffset.getValue());
                OrderedSequenceNumber<SequenceOffsetType> latestOffset = makeSequenceNumber(
                    latestOffsetsFromDb == null
                        ? partitionOffset.getValue()
                        : latestOffsetsFromDb.getOrDefault(partitionOffset.getKey(), partitionOffset.getValue())
                );
                return sequence.compareTo(latestOffset) == 0;
              }) && earliestConsistentSequenceId.compareAndSet(-1, sequenceCheckpoint.getKey()))
          || (pendingCompletionTaskGroups.getOrDefault(groupId, new CopyOnWriteArrayList<>()).size() > 0
              && earliestConsistentSequenceId.compareAndSet(-1, taskCheckpoints.firstKey()))) {
        final SortedMap<Integer, Map<PartitionIdType, SequenceOffsetType>> latestCheckpoints =
            new TreeMap<>(taskCheckpoints.tailMap(earliestConsistentSequenceId.get()));
        log.info("Setting taskGroup sequences to [%s] for group [%d]", latestCheckpoints, groupId);
        taskGroup.checkpointSequences.clear();
        taskGroup.checkpointSequences.putAll(latestCheckpoints);
      } else {
        log.debug("Adding task [%s] to kill list, checkpoints [%s], latest offsets from DB [%s]", taskId, taskCheckpoints, latestOffsetsFromDb);
        tasksToKill.add(taskId);
      }
    } else {
      // check consistency with taskGroup sequences
      if (taskCheckpoints.get(taskGroup.checkpointSequences.firstKey()) == null
          || !(taskCheckpoints.get(taskGroup.checkpointSequences.firstKey())
              .equals(taskGroup.checkpointSequences.firstEntry().getValue()))
          || taskCheckpoints.tailMap(taskGroup.checkpointSequences.firstKey()).size() != taskGroup.checkpointSequences.size()) {
        log.debug("Adding task [%s] to kill list, checkpoints [%s], taskGroup checkpoints [%s]", taskId, taskCheckpoints, taskGroup.checkpointSequences);
        tasksToKill.add(taskId);
      }
    }
    taskIndex++;
  }

  if ((tasksToKill.size() > 0 && tasksToKill.size() == taskGroup.tasks.size())
      || (taskGroup.tasks.size() == 0 && pendingCompletionTaskGroups.getOrDefault(groupId, new CopyOnWriteArrayList<>()).size() == 0)) {
    // all tasks are being killed, or no tasks are left in the group:
    // clear state about the taskGroup so that the latest sequence information is fetched from the metadata store
    log.warn("Clearing task group [%d] information as no valid tasks are left in the group", groupId);
    activelyReadingTaskGroups.remove(groupId);
    for (PartitionIdType partitionId : taskGroup.startingSequences.keySet()) {
      partitionOffsets.put(partitionId, getNotSetMarker());
    }
  }

  taskSequences.stream().filter(taskIdSequences -> tasksToKill.contains(taskIdSequences.lhs)).forEach(sequenceCheckpoint -> {
    killTask(
        sequenceCheckpoint.lhs,
        "Killing task [%s], as its checkpoints [%s] are not consistent with group checkpoints [%s] or latest persisted sequences in metadata store [%s]",
        sequenceCheckpoint.lhs,
        sequenceCheckpoint.rhs,
        taskGroup.checkpointSequences,
        latestOffsetsFromDb
    );
    taskGroup.tasks.remove(sequenceCheckpoint.lhs);
  });
}
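The pattern to note above is Futures.successfulAsList: the combined future never fails as a whole; instead, each failed input is represented by a null entry in the result list, in the same order as the input futures, so the caller must re-get the individual future to surface its exception. Below is a minimal, self-contained sketch of this pattern; the class name and values are illustrative, not part of the Druid code.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

public class SuccessfulAsListSketch {
  public static void main(String[] args) throws Exception {
    List<ListenableFuture<String>> futures = new ArrayList<>();
    futures.add(Futures.immediateFuture("checkpoint-A"));
    futures.add(Futures.immediateFailedFuture(new IllegalStateException("task died")));

    // successfulAsList never fails as a whole; each failed input becomes a null entry,
    // and results are in the same order as the input futures
    List<String> results = Futures.successfulAsList(futures).get(30, TimeUnit.SECONDS);

    for (int i = 0; i < results.size(); i++) {
      if (results.get(i) == null) {
        try {
          futures.get(i).get(); // re-get the individual future to surface its exception
        } catch (Exception e) {
          System.out.println("future " + i + " failed: " + e.getCause());
        }
      } else {
        System.out.println("future " + i + " -> " + results.get(i));
      }
    }
  }
}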
Use of com.google.common.util.concurrent.Futures in project druid by druid-io.
The class SeekableStreamSupervisor, method checkTaskDuration.
private void checkTaskDuration() throws ExecutionException, InterruptedException, TimeoutException {
  final List<ListenableFuture<Map<PartitionIdType, SequenceOffsetType>>> futures = new ArrayList<>();
  final List<Integer> futureGroupIds = new ArrayList<>();

  for (Entry<Integer, TaskGroup> entry : activelyReadingTaskGroups.entrySet()) {
    Integer groupId = entry.getKey();
    TaskGroup group = entry.getValue();

    // find the longest-running task in this group
    DateTime earliestTaskStart = DateTimes.nowUtc();
    for (TaskData taskData : group.tasks.values()) {
      if (taskData.startTime != null && earliestTaskStart.isAfter(taskData.startTime)) {
        earliestTaskStart = taskData.startTime;
      }
    }

    boolean stopTasksEarly = false;
    if (earlyStopTime != null && (earlyStopTime.isBeforeNow() || earlyStopTime.isEqualNow())) {
      log.info("Early stop requested - signalling tasks to complete");
      earlyStopTime = null;
      stopTasksEarly = true;
    }

    // if this group has run longer than the configured duration, signal all tasks in the group to persist
    if (earliestTaskStart.plus(ioConfig.getTaskDuration()).isBeforeNow() || stopTasksEarly) {
      log.info("Task group [%d] has run for [%s]", groupId, ioConfig.getTaskDuration());
      futureGroupIds.add(groupId);
      futures.add(checkpointTaskGroup(group, true));
    }
  }

  List<Map<PartitionIdType, SequenceOffsetType>> results =
      Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
  for (int j = 0; j < results.size(); j++) {
    Integer groupId = futureGroupIds.get(j);
    TaskGroup group = activelyReadingTaskGroups.get(groupId);
    Map<PartitionIdType, SequenceOffsetType> endOffsets = results.get(j);

    if (endOffsets != null) {
      // set a timeout and put this group in pendingCompletionTaskGroups so that it can be monitored for completion
      group.completionTimeout = DateTimes.nowUtc().plus(ioConfig.getCompletionTimeout());
      pendingCompletionTaskGroups.computeIfAbsent(groupId, k -> new CopyOnWriteArrayList<>()).add(group);

      boolean endOffsetsAreInvalid = false;
      for (Entry<PartitionIdType, SequenceOffsetType> entry : endOffsets.entrySet()) {
        if (entry.getValue().equals(getEndOfPartitionMarker())) {
          log.info("Got end-of-partition marker for partition [%s] in checkTaskDuration, not updating partition offset.", entry.getKey());
          endOffsetsAreInvalid = true;
        }
      }

      // If any endOffset values are invalid, treat the entire set as invalid as a safety measure, so that
      // the offsets are refetched from metadata.
      if (!endOffsetsAreInvalid) {
        for (Entry<PartitionIdType, SequenceOffsetType> entry : endOffsets.entrySet()) {
          partitionOffsets.put(entry.getKey(), entry.getValue());
        }
      } else {
        for (Entry<PartitionIdType, SequenceOffsetType> entry : endOffsets.entrySet()) {
          partitionOffsets.put(entry.getKey(), getNotSetMarker());
        }
      }
    } else {
      for (String id : group.taskIds()) {
        killTask(id, "All tasks in group [%s] failed to transition to publishing state", groupId);
      }
      // reset partition offsets to the not-set marker so that the latest sequences from the metadata store
      // are used as start sequences, in case tasks did some successful incremental handoffs
      for (PartitionIdType partitionId : group.startingSequences.keySet()) {
        partitionOffsets.put(partitionId, getNotSetMarker());
      }
    }

    // remove this task group from the list of current task groups now that it has been handled
    activelyReadingTaskGroups.remove(groupId);
  }
}
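One subtlety explains the TimeoutException in the method signature: although successfulAsList tolerates failed inputs, the blocking get(timeout, unit) on the combined future still times out if any input future has not completed. A small sketch of that behavior, with an illustrative timeout value:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class SuccessfulAsListTimeoutSketch {
  public static void main(String[] args) throws Exception {
    SettableFuture<String> neverCompletes = SettableFuture.create(); // stays pending
    List<ListenableFuture<String>> futures =
        Arrays.asList(Futures.immediateFuture("done"), neverCompletes);
    try {
      // the combined future is only done once every input is done (or failed),
      // so a still-pending input surfaces as a TimeoutException here
      Futures.successfulAsList(futures).get(1, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
      System.out.println("batch did not complete within the timeout");
    }
  }
}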
Use of com.google.common.util.concurrent.Futures in project intellij by bazelbuild.
The class PackageLister, method expandPackageTargets.
/**
 * Expands all-in-package-recursive wildcard targets into all-in-single-package targets by
 * traversing the file system, looking for child blaze packages.
 *
 * <p>Returns null if directory traversal failed or was cancelled.
 */
@Nullable
static Map<TargetExpression, List<TargetExpression>> expandPackageTargets(
    BuildSystemProvider provider,
    BlazeContext context,
    WorkspacePathResolver pathResolver,
    Collection<WildcardTargetPattern> wildcardPatterns) {
  List<ListenableFuture<Entry<TargetExpression, List<TargetExpression>>>> futures = Lists.newArrayList();

  for (WildcardTargetPattern pattern : wildcardPatterns) {
    if (!pattern.isRecursive() || pattern.toString().startsWith("-")) {
      continue;
    }
    File dir = pathResolver.resolveToFile(pattern.getBasePackage());
    if (!FileOperationProvider.getInstance().isDirectory(dir)) {
      continue;
    }
    futures.add(FetchExecutor.EXECUTOR.submit(() -> {
      List<TargetExpression> expandedTargets = new ArrayList<>();
      traversePackageRecursively(provider, pathResolver, dir, expandedTargets);
      return Maps.immutableEntry(pattern.originalPattern, expandedTargets);
    }));
  }
  if (futures.isEmpty()) {
    return ImmutableMap.of();
  }

  FutureResult<List<Entry<TargetExpression, List<TargetExpression>>>> result =
      FutureUtil.waitForFuture(context, Futures.allAsList(futures))
          .withProgressMessage("Expanding wildcard target patterns...")
          .timed("ExpandWildcardTargets", EventType.Other)
          .onError("Expanding wildcard target patterns failed")
          .run();
  if (!result.success()) {
    return null;
  }
  return result.result().stream().collect(Collectors.toMap(Entry::getKey, Entry::getValue, (x, y) -> x));
}
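This snippet combines results with Futures.allAsList rather than successfulAsList, and the semantics differ: allAsList is fail-fast, so if any input future fails, the combined future fails with that exception instead of yielding null placeholders, which is what lets FutureUtil.waitForFuture report the whole expansion as failed. A minimal sketch of the difference, with illustrative names and values:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutionException;

public class AllAsListSketch {
  public static void main(String[] args) throws Exception {
    List<ListenableFuture<String>> futures = Arrays.asList(
        Futures.immediateFuture("pkg/foo:all"),
        Futures.immediateFailedFuture(new RuntimeException("traversal failed")));
    try {
      List<String> all = Futures.allAsList(futures).get(); // fails as a whole
      System.out.println(all);
    } catch (ExecutionException e) {
      // allAsList propagates the first failure; successfulAsList would have
      // returned ["pkg/foo:all", null] instead
      System.out.println("combined future failed: " + e.getCause().getMessage());
    }
  }
}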
Use of com.google.common.util.concurrent.Futures in project netvirt by opendaylight.
The class InterfaceStateChangeListener, method add.
@Override
// TODO Clean up the exception handling
@SuppressWarnings("checkstyle:IllegalCatch")
public void add(InstanceIdentifier<Interface> identifier, Interface intrf) {
  try {
    if (L2vlan.class.equals(intrf.getType())) {
      LOG.info("VPN Interface add event - intfName {} from InterfaceStateChangeListener", intrf.getName());
      jobCoordinator.enqueueJob("VPNINTERFACE-" + intrf.getName(), () -> {
        List<ListenableFuture<?>> futures = new ArrayList<>(3);
        futures.add(txRunner.callWithNewReadWriteTransactionAndSubmit(CONFIGURATION, writeInvTxn -> {
          // map of prefix and vpn name used: an entry in the prefix-to-interface datastore is a
          // prerequisite for refreshing the FIB, to avoid a race condition leading to a missing
          // remote next hop in bucket actions on bgp-vpn delete
          Map<String, Set<String>> mapOfRdAndPrefixesForRefreshFib = new HashMap<>();
          ListenableFuture<?> configFuture = txRunner.callWithNewWriteOnlyTransactionAndSubmit(CONFIGURATION, writeConfigTxn -> {
            ListenableFuture<?> operFuture = txRunner.callWithNewWriteOnlyTransactionAndSubmit(OPERATIONAL, writeOperTxn -> {
              final String interfaceName = intrf.getName();
              LOG.info("Detected interface add event for interface {}", interfaceName);
              final VpnInterface vpnIf = vpnUtil.getConfiguredVpnInterface(interfaceName);
              if (vpnIf != null) {
                for (VpnInstanceNames vpnInterfaceVpnInstance : vpnIf.nonnullVpnInstanceNames().values()) {
                  String vpnName = vpnInterfaceVpnInstance.getVpnName();
                  String primaryRd = vpnUtil.getPrimaryRd(vpnName);
                  if (!vpnInterfaceManager.isVpnInstanceReady(vpnName)) {
                    LOG.info("VPN Interface add event - intfName {} onto vpnName {} running oper-driven, VpnInstance not ready, holding on", vpnIf.getName(), vpnName);
                  } else if (vpnUtil.isVpnPendingDelete(primaryRd)) {
                    LOG.error("add: Ignoring addition of vpnInterface {}, as vpnInstance {} with primaryRd {} is already marked for deletion", interfaceName, vpnName, primaryRd);
                  } else {
                    Uint64 intfDpnId = Uint64.ZERO;
                    try {
                      intfDpnId = InterfaceUtils.getDpIdFromInterface(intrf);
                    } catch (Exception e) {
                      LOG.error("Unable to retrieve dpnId for interface {}. Process vpn interface add failed", intrf.getName(), e);
                      return;
                    }
                    LOG.error("InterfaceStateChangeListener - Processing ifState {} add event with dpnId {}", intrf.getName(), intfDpnId);
                    final Uint64 dpnId = intfDpnId;
                    final int ifIndex = intrf.getIfIndex();
                    LOG.info("VPN Interface add event - intfName {} onto vpnName {} running oper-driven", vpnIf.getName(), vpnName);
                    Set<String> prefixes = new HashSet<>();
                    vpnInterfaceManager.processVpnInterfaceUp(dpnId, vpnIf, primaryRd, ifIndex, false, writeConfigTxn, writeOperTxn, writeInvTxn, intrf, vpnName, prefixes);
                    mapOfRdAndPrefixesForRefreshFib.put(primaryRd, prefixes);
                  }
                }
              }
            });
            futures.add(operFuture);
            // synchronous wait on operTxn, so the operational write completes before the config write is submitted
            operFuture.get();
          });
          Futures.addCallback(configFuture, new VpnInterfaceCallBackHandler(mapOfRdAndPrefixesForRefreshFib), MoreExecutors.directExecutor());
          futures.add(configFuture);
          // TODO: Allow immediateFailedFuture from writeCfgTxn to cancel writeInvTxn as well.
          Futures.addCallback(configFuture, new PostVpnInterfaceThreadWorker(intrf.getName(), true, "Operational"), MoreExecutors.directExecutor());
        }));
        return futures;
      });
    }
  } catch (Exception e) {
    LOG.error("Exception caught in Interface {} Operational State Up event", intrf.getName(), e);
  }
}
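Both netvirt snippets hand post-commit work to Futures.addCallback(future, callback, executor). A minimal, self-contained sketch of that three-argument form, assuming nothing from netvirt (the callback here simply prints):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

public class AddCallbackSketch {
  public static void main(String[] args) {
    ListenableFuture<String> commitFuture = Futures.immediateFuture("tx-committed");

    // directExecutor() runs the callback on whichever thread completes the future;
    // it is appropriate only for lightweight, non-blocking callbacks
    Futures.addCallback(commitFuture, new FutureCallback<String>() {
      @Override
      public void onSuccess(String result) {
        System.out.println("commit succeeded: " + result);
      }

      @Override
      public void onFailure(Throwable t) {
        System.out.println("commit failed: " + t);
      }
    }, MoreExecutors.directExecutor());
  }
}

Passing a real executor instead of directExecutor() is the safer choice whenever the callback does I/O or takes locks, since a direct callback otherwise runs on the transaction-commit thread.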
Use of com.google.common.util.concurrent.Futures in project netvirt by opendaylight.
The class VpnInterfaceManager, method updateVpnInterfacesForUnProcessAdjancencies.
public void updateVpnInterfacesForUnProcessAdjancencies(String vpnName) {
  String primaryRd = vpnUtil.getVpnRd(vpnName);
  VpnInstanceOpDataEntry vpnInstanceOpData = vpnUtil.getVpnInstanceOpData(primaryRd);
  if (vpnInstanceOpData == null || vpnInstanceOpData.getVpnToDpnList() == null) {
    return;
  }
  List<VpnToDpnList> vpnToDpnLists = new ArrayList<>(vpnInstanceOpData.getVpnToDpnList().values());
  if (vpnToDpnLists.isEmpty()) {
    return;
  }
  LOG.debug("Update the VpnInterfaces for Unprocessed Adjacencies for vpnName:{}", vpnName);
  vpnToDpnLists.forEach(vpnToDpnList -> {
    if (vpnToDpnList.getVpnInterfaces() == null) {
      return;
    }
    vpnToDpnList.nonnullVpnInterfaces().values().forEach(vpnInterface -> {
      try {
        InstanceIdentifier<VpnInterfaceOpDataEntry> existingVpnInterfaceId =
            VpnUtil.getVpnInterfaceOpDataEntryIdentifier(vpnInterface.getInterfaceName(), vpnName);
        Optional<VpnInterfaceOpDataEntry> vpnInterfaceOptional = SingleTransactionDataBroker.syncReadOptional(
            dataBroker, LogicalDatastoreType.OPERATIONAL, existingVpnInterfaceId);
        if (!vpnInterfaceOptional.isPresent()) {
          return;
        }
        List<Adjacency> configVpnAdjacencies = vpnUtil.getAdjacenciesForVpnInterfaceFromConfig(vpnInterface.getInterfaceName());
        if (configVpnAdjacencies == null) {
          LOG.debug("There is no adjacency available for vpnInterface:{}", vpnInterface);
          return;
        }
        List<Adjacency> operationVpnAdjacencies = new ArrayList<>(
            vpnInterfaceOptional.get().augmentation(AdjacenciesOp.class).nonnullAdjacency().values());
        // Due to insufficient rds, some of the extra routes won't get processed when they are added.
        // The unprocessed adjacencies will be present in the config vpn interface DS but missing in the
        // operational DS. These unprocessed adjacencies are handled below: the adjacencies missing from
        // the operational DS are filtered out and passed to addNewAdjToVpnInterface.
        configVpnAdjacencies.stream()
            .filter(adjacency -> operationVpnAdjacencies.stream()
                .noneMatch(operationalAdjacency ->
                    Objects.equals(operationalAdjacency.getIpAddress(), adjacency.getIpAddress())))
            .forEach(adjacency -> {
              LOG.debug("Processing the vpnInterface {} for the adjacency:{}", vpnInterface, adjacency);
              jobCoordinator.enqueueJob("VPNINTERFACE-" + vpnInterface.getInterfaceName(), () -> {
                // if the oper tx goes in
                if (vpnUtil.isAdjacencyEligibleToVpn(adjacency, vpnName)) {
                  List<ListenableFuture<?>> futures = new ArrayList<>();
                  futures.add(txRunner.callWithNewWriteOnlyTransactionAndSubmit(OPERATIONAL, operTx -> {
                    // set of prefixes used: an entry in the prefix-to-interface datastore is a
                    // prerequisite for refreshing the FIB, to avoid a race condition leading to
                    // a missing remote next hop in bucket actions on bgp-vpn delete
                    Set<String> prefixListForRefreshFib = new HashSet<>();
                    ListenableFuture<?> configTxFuture = txRunner.callWithNewReadWriteTransactionAndSubmit(
                        CONFIGURATION,
                        confTx -> addNewAdjToVpnInterface(existingVpnInterfaceId, primaryRd, adjacency,
                            vpnInterfaceOptional.get().getDpnId(), operTx, confTx, confTx, prefixListForRefreshFib));
                    Futures.addCallback(configTxFuture,
                        new VpnInterfaceCallBackHandler(primaryRd, prefixListForRefreshFib),
                        MoreExecutors.directExecutor());
                    futures.add(configTxFuture);
                  }));
                  return futures;
                } else {
                  return emptyList();
                }
              });
            });
      } catch (InterruptedException | ExecutionException e) {
        LOG.error("updateVpnInterfacesForUnProcessAdjancencies: Failed to read data store for vpn {} rd {}", vpnName, primaryRd, e);
      }
    });
  });
}