Use of com.google.common.util.concurrent.FutureCallback in project genius by opendaylight.
The class AlivenessMonitor, method monitorUnpause.
@Override
public Future<RpcResult<Void>> monitorUnpause(MonitorUnpauseInput input) {
LOG.debug("Monitor Unpause operation invoked for monitor id: {}", input.getMonitorId());
final SettableFuture<RpcResult<Void>> result = SettableFuture.create();
final Long monitorId = input.getMonitorId();
final ReadOnlyTransaction tx = dataBroker.newReadOnlyTransaction();
ListenableFuture<Optional<MonitoringInfo>> readInfoResult = tx.read(LogicalDatastoreType.OPERATIONAL, getMonitoringInfoId(monitorId));
Futures.addCallback(readInfoResult, new FutureCallback<Optional<MonitoringInfo>>() {
@Override
public void onFailure(Throwable error) {
tx.close();
String msg = String.format("Unable to read monitoring info associated with monitor id %d", monitorId);
LOG.error("Monitor unpause Failed. {}", msg, error);
result.set(RpcResultBuilder.<Void>failed().withError(ErrorType.APPLICATION, msg, error).build());
}
@Override
public void onSuccess(@Nonnull Optional<MonitoringInfo> optInfo) {
if (optInfo.isPresent()) {
final MonitoringInfo info = optInfo.get();
ListenableFuture<Optional<MonitorProfile>> readProfile = tx.read(LogicalDatastoreType.OPERATIONAL, getMonitorProfileId(info.getProfileId()));
Futures.addCallback(readProfile, new FutureCallback<Optional<MonitorProfile>>() {
@Override
public void onFailure(Throwable error) {
tx.close();
String msg = String.format("Unable to read Monitoring profile associated with id %d", info.getProfileId());
LOG.warn("Monitor unpause Failed. {}", msg, error);
result.set(RpcResultBuilder.<Void>failed().withError(ErrorType.APPLICATION, msg, error).build());
}
@Override
public void onSuccess(@Nonnull Optional<MonitorProfile> optProfile) {
tx.close();
if (optProfile.isPresent()) {
updateMonitorStatusTo(monitorId, MonitorStatus.Started, currentStatus -> (currentStatus == MonitorStatus.Paused || currentStatus == MonitorStatus.Stopped));
MonitorProfile profile = optProfile.get();
LOG.debug("Monitor Resume - Scheduling monitoring task with Id: {}", monitorId);
EtherTypes protocolType = profile.getProtocolType();
if (protocolType == EtherTypes.Bfd) {
LOG.debug("disabling bfd for hwvtep tunnel montior id {}", monitorId);
((HwVtepTunnelsStateHandler) alivenessProtocolHandlerRegistry.get(protocolType)).resetMonitoringTask(true);
} else {
scheduleMonitoringTask(info, profile.getMonitorInterval());
}
result.set(RpcResultBuilder.<Void>success().build());
} else {
String msg = String.format("Monitoring profile associated with id %d is not present", info.getProfileId());
LOG.warn("Monitor unpause Failed. {}", msg);
result.set(RpcResultBuilder.<Void>failed().withError(ErrorType.APPLICATION, msg).build());
}
}
}, callbackExecutorService);
} else {
tx.close();
String msg = String.format("Monitoring info associated with id %d is not present", monitorId);
LOG.warn("Monitor unpause Failed. {}", msg);
result.set(RpcResultBuilder.<Void>failed().withError(ErrorType.APPLICATION, msg).build());
}
}
}, callbackExecutorService);
return result;
}
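
The shape of monitorUnpause (two chained datastore reads whose callbacks settle a single SettableFuture) can be reduced to a minimal sketch. The sketch below uses only plain Guava types; ChainedReadSketch, readInfo, and readProfile are illustrative stand-ins for the MD-SAL transaction reads, not part of the genius code base.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import java.util.Optional;
import java.util.concurrent.Executors;

public class ChainedReadSketch {
    private final ListeningExecutorService exec =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

    // Stand-ins for the two OPERATIONAL datastore reads in monitorUnpause.
    private ListenableFuture<Optional<String>> readInfo(long monitorId) {
        return exec.submit(() -> Optional.of("profile-" + monitorId));
    }

    private ListenableFuture<Optional<String>> readProfile(String profileId) {
        return exec.submit(() -> Optional.of(profileId + "-details"));
    }

    /** Chains two async reads and settles one result future, as monitorUnpause does. */
    public ListenableFuture<String> unpause(long monitorId) {
        final SettableFuture<String> result = SettableFuture.create();
        Futures.addCallback(readInfo(monitorId), new FutureCallback<Optional<String>>() {
            @Override
            public void onSuccess(Optional<String> info) {
                if (!info.isPresent()) {
                    result.setException(new IllegalStateException("no info for " + monitorId));
                    return;
                }
                Futures.addCallback(readProfile(info.get()), new FutureCallback<Optional<String>>() {
                    @Override
                    public void onSuccess(Optional<String> profile) {
                        // Settle the outer future from the innermost callback.
                        result.set(profile.orElse("missing profile"));
                    }

                    @Override
                    public void onFailure(Throwable t) {
                        result.setException(t);
                    }
                }, exec);
            }

            @Override
            public void onFailure(Throwable t) {
                result.setException(t);
            }
        }, exec);
        return result;
    }
}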
Use of com.google.common.util.concurrent.FutureCallback in project genius by opendaylight.
The class AlivenessMonitor, method processReceivedMonitorKey.
private void processReceivedMonitorKey(final String monitorKey) {
Preconditions.checkNotNull(monitorKey, "Monitor Key required to process the state");
LOG.debug("Processing monitorKey: {} for received packet", monitorKey);
final Semaphore lock = lockMap.get(monitorKey);
LOG.debug("Acquiring lock for monitor key : {} to process monitor packet", monitorKey);
acquireLock(lock);
final ReadWriteTransaction tx = dataBroker.newReadWriteTransaction();
ListenableFuture<Optional<MonitoringState>> stateResult = tx.read(LogicalDatastoreType.OPERATIONAL, getMonitorStateId(monitorKey));
// READ Callback
Futures.addCallback(stateResult, new FutureCallback<Optional<MonitoringState>>() {
@Override
public void onSuccess(@Nonnull Optional<MonitoringState> optState) {
if (optState.isPresent()) {
final MonitoringState currentState = optState.get();
if (LOG.isTraceEnabled()) {
LOG.trace("OnPacketReceived : Monitoring state from ODS : {} ", currentState);
}
// Long responsePendingCount = currentState.getResponsePendingCount();
//
// Need to revisit the pending-count logic to support N-out-of-M scenarios.
// if (currentState.getState() != LivenessState.Up) {
//     // Reset responsePendingCount when state changes from DOWN to UP
//     responsePendingCount = INITIAL_COUNT;
// }
//
// if (responsePendingCount > INITIAL_COUNT) {
//     responsePendingCount = currentState.getResponsePendingCount() - 1;
// }
Long responsePendingCount = INITIAL_COUNT;
final boolean stateChanged = currentState.getState() == LivenessState.Down || currentState.getState() == LivenessState.Unknown;
final MonitoringState state = new MonitoringStateBuilder().setMonitorKey(monitorKey).setState(LivenessState.Up).setResponsePendingCount(responsePendingCount).build();
tx.merge(LogicalDatastoreType.OPERATIONAL, getMonitorStateId(monitorKey), state);
ListenableFuture<Void> writeResult = tx.submit();
// WRITE Callback
Futures.addCallback(writeResult, new FutureCallback<Void>() {
@Override
public void onSuccess(Void noarg) {
releaseLock(lock);
if (stateChanged) {
// send notifications
if (LOG.isTraceEnabled()) {
LOG.trace("Sending notification for monitor Id : {} with Current State: {}", currentState.getMonitorId(), LivenessState.Up);
}
publishNotification(currentState.getMonitorId(), LivenessState.Up);
} else {
if (LOG.isTraceEnabled()) {
LOG.trace("Successful in writing monitoring state {} to ODS", state);
}
}
}
@Override
public void onFailure(Throwable error) {
releaseLock(lock);
LOG.warn("Error in writing monitoring state : {} to Datastore", monitorKey, error);
if (LOG.isTraceEnabled()) {
LOG.trace("Error in writing monitoring state: {} to Datastore", state);
}
}
}, callbackExecutorService);
} else {
if (LOG.isTraceEnabled()) {
LOG.trace("Monitoring State not available for key: {} to process the Packet received", monitorKey);
}
// Complete the transaction
tx.submit();
releaseLock(lock);
}
}
@Override
public void onFailure(Throwable error) {
LOG.error("Error when reading Monitoring State for key: {} to process the Packet received", monitorKey, error);
// FIXME: Not sure if the transaction status is valid to cancel
tx.cancel();
releaseLock(lock);
}
}, callbackExecutorService);
}
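
processReceivedMonitorKey guards the read-modify-write with a per-key Semaphore and releases it on every callback path, success or failure. A minimal sketch of that discipline follows; LockedCallbackSketch and withLock are hypothetical helpers for illustration, not part of genius.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Semaphore;
import java.util.function.Supplier;

public final class LockedCallbackSketch {

    /**
     * Acquire the per-key lock, start the async operation, and release the lock on
     * every callback path, mirroring acquireLock()/releaseLock() in the snippet above.
     */
    public static <T> void withLock(Semaphore lock, Supplier<ListenableFuture<T>> operation) {
        lock.acquireUninterruptibly();
        Futures.addCallback(operation.get(), new FutureCallback<T>() {
            @Override
            public void onSuccess(T value) {
                lock.release(); // success path releases the lock
            }

            @Override
            public void onFailure(Throwable t) {
                lock.release(); // failure path must release too, or the key stays locked
            }
        }, MoreExecutors.directExecutor());
    }
}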
Use of com.google.common.util.concurrent.FutureCallback in project genius by opendaylight.
The class HwVtepTunnelsStateHandler, method update.
@Override
public void update(@Nonnull Tunnels oldTunnelInfo, @Nonnull Tunnels updatedTunnelInfo) {
List<BfdStatus> oldBfdStatus = oldTunnelInfo.getBfdStatus();
List<BfdStatus> newBfdStatus = updatedTunnelInfo.getBfdStatus();
LivenessState oldTunnelOpState = getTunnelOpState(oldBfdStatus);
final LivenessState newTunnelOpState = getTunnelOpState(newBfdStatus);
if (oldTunnelOpState == newTunnelOpState) {
LOG.debug("Tunnel state of old tunnel {} and update tunnel {} are same", oldTunnelInfo, updatedTunnelInfo);
return;
}
updatedTunnelInfo.getTunnelUuid();
String interfaceName = "<TODO>";
// TODO: find out the corresponding interface using tunnelIdentifier or
// any attributes of tunnelInfo object
final String monitorKey = getBfdMonitorKey(interfaceName);
LOG.debug("Processing monitorKey: {} for received Tunnels update DCN", monitorKey);
final Semaphore lock = alivenessMonitor.getLock(monitorKey);
LOG.debug("Acquiring lock for monitor key : {} to process monitor DCN", monitorKey);
alivenessMonitor.acquireLock(lock);
final ReadWriteTransaction tx = dataBroker.newReadWriteTransaction();
ListenableFuture<Optional<MonitoringState>> stateResult = tx.read(LogicalDatastoreType.OPERATIONAL, getMonitorStateId(monitorKey));
Futures.addCallback(stateResult, new FutureCallback<Optional<MonitoringState>>() {
@Override
public void onSuccess(@Nonnull Optional<MonitoringState> optState) {
if (optState.isPresent()) {
final MonitoringState currentState = optState.get();
if (currentState.getState() == newTunnelOpState) {
return;
}
final boolean stateChanged = true;
final MonitoringState state = new MonitoringStateBuilder().setMonitorKey(monitorKey).setState(newTunnelOpState).build();
tx.merge(LogicalDatastoreType.OPERATIONAL, getMonitorStateId(monitorKey), state);
ListenableFuture<Void> writeResult = tx.submit();
// WRITE Callback
Futures.addCallback(writeResult, new FutureCallback<Void>() {
@Override
public void onSuccess(Void arg0) {
alivenessMonitor.releaseLock(lock);
if (stateChanged) {
// send notifications
LOG.info("Sending notification for monitor Id : {} with Current State: {}", currentState.getMonitorId(), newTunnelOpState);
alivenessMonitor.publishNotification(currentState.getMonitorId(), newTunnelOpState);
} else {
if (LOG.isTraceEnabled()) {
LOG.trace("Successful in writing monitoring state {} to ODS", state);
}
}
}
@Override
public void onFailure(@Nonnull Throwable error) {
alivenessMonitor.releaseLock(lock);
LOG.warn("Error in writing monitoring state : {} to Datastore", monitorKey, error);
if (LOG.isTraceEnabled()) {
LOG.trace("Error in writing monitoring state: {} to Datastore", state);
}
}
}, MoreExecutors.directExecutor());
} else {
LOG.warn("Monitoring State not available for key: {} to process the Packet received", monitorKey);
// Complete the transaction
tx.submit();
alivenessMonitor.releaseLock(lock);
}
}
@Override
public void onFailure(@Nonnull Throwable error) {
LOG.error("Error when reading Monitoring State for key: {} to process the Packet received", monitorKey, error);
// FIXME: Not sure if the transaction status is valid to cancel
tx.cancel();
alivenessMonitor.releaseLock(lock);
}
}, MoreExecutors.directExecutor());
}
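
Note that this handler registers its callbacks with MoreExecutors.directExecutor(), while the AlivenessMonitor snippets above hand them to a dedicated callbackExecutorService. The self-contained sketch below shows the practical difference; ExecutorChoiceSketch is illustrative and belongs to neither project. With directExecutor() the callback runs on whichever thread completes the future, so it should stay short and non-blocking; a dedicated executor hands the work off, which is safer for callbacks that do real work such as the merge-and-submit above.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class ExecutorChoiceSketch {
    public static void main(String[] args) {
        SettableFuture<String> future = SettableFuture.create();
        ExecutorService pool = Executors.newSingleThreadExecutor();

        // directExecutor(): runs on the thread that completes the future (main, here).
        Futures.addCallback(future, new FutureCallback<String>() {
            @Override
            public void onSuccess(String v) {
                System.out.println("direct: " + Thread.currentThread().getName());
            }

            @Override
            public void onFailure(Throwable t) {
            }
        }, MoreExecutors.directExecutor());

        // Dedicated executor: the callback is handed off to the pool thread.
        Futures.addCallback(future, new FutureCallback<String>() {
            @Override
            public void onSuccess(String v) {
                System.out.println("pool:   " + Thread.currentThread().getName());
            }

            @Override
            public void onFailure(Throwable t) {
            }
        }, pool);

        future.set("done"); // completes on the main thread
        pool.shutdown();
    }
}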
Use of com.google.common.util.concurrent.FutureCallback in project druid by druid-io.
The class SeekableStreamIndexTaskRunner, method runInternal.
private TaskStatus runInternal(TaskToolbox toolbox) throws Exception {
startTime = DateTimes.nowUtc();
status = Status.STARTING;
setToolbox(toolbox);
authorizerMapper = toolbox.getAuthorizerMapper();
rowIngestionMeters = toolbox.getRowIngestionMetersFactory().createRowIngestionMeters();
parseExceptionHandler = new ParseExceptionHandler(rowIngestionMeters, tuningConfig.isLogParseExceptions(), tuningConfig.getMaxParseExceptions(), tuningConfig.getMaxSavedParseExceptions());
// Now we can initialize the StreamChunkParser with the given toolbox.
final StreamChunkParser parser = new StreamChunkParser<RecordType>(this.parser, inputFormat, inputRowSchema, task.getDataSchema().getTransformSpec(), toolbox.getIndexingTmpDir(), row -> row != null && task.withinMinMaxRecordTime(row), rowIngestionMeters, parseExceptionHandler);
initializeSequences();
log.debug("Found chat handler of class[%s]", toolbox.getChatHandlerProvider().getClass().getName());
toolbox.getChatHandlerProvider().register(task.getId(), this, false);
runThread = Thread.currentThread();
// Set up FireDepartmentMetrics
final FireDepartment fireDepartmentForMetrics = new FireDepartment(task.getDataSchema(), new RealtimeIOConfig(null, null), null);
this.fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
toolbox.addMonitor(TaskRealtimeMetricsMonitorBuilder.build(task, fireDepartmentForMetrics, rowIngestionMeters));
final String lookupTier = task.getContextValue(RealtimeIndexTask.CTX_KEY_LOOKUP_TIER);
final LookupNodeService lookupNodeService = lookupTier == null ? toolbox.getLookupNodeService() : new LookupNodeService(lookupTier);
final DiscoveryDruidNode discoveryDruidNode = new DiscoveryDruidNode(toolbox.getDruidNode(), NodeRole.PEON, ImmutableMap.of(toolbox.getDataNodeService().getName(), toolbox.getDataNodeService(), lookupNodeService.getName(), lookupNodeService));
Throwable caughtExceptionOuter = null;
// milliseconds waited for created segments to be handed off
long handoffWaitMs = 0L;
try (final RecordSupplier<PartitionIdType, SequenceOffsetType, RecordType> recordSupplier = task.newTaskRecordSupplier()) {
if (toolbox.getAppenderatorsManager().shouldTaskMakeNodeAnnouncements()) {
toolbox.getDataSegmentServerAnnouncer().announce();
toolbox.getDruidNodeAnnouncer().announce(discoveryDruidNode);
}
appenderator = task.newAppenderator(toolbox, fireDepartmentMetrics, rowIngestionMeters, parseExceptionHandler);
driver = task.newDriver(appenderator, toolbox, fireDepartmentMetrics);
// Start up, set up initial sequences.
final Object restoredMetadata = driver.startJob(segmentId -> {
try {
if (lockGranularityToUse == LockGranularity.SEGMENT) {
return toolbox.getTaskActionClient().submit(new SegmentLockAcquireAction(TaskLockType.EXCLUSIVE, segmentId.getInterval(), segmentId.getVersion(), segmentId.getShardSpec().getPartitionNum(), 1000L)).isOk();
} else {
final TaskLock lock = toolbox.getTaskActionClient().submit(new TimeChunkLockAcquireAction(TaskLockType.EXCLUSIVE, segmentId.getInterval(), 1000L));
if (lock == null) {
return false;
}
if (lock.isRevoked()) {
throw new ISE(StringUtils.format("Lock for interval [%s] was revoked.", segmentId.getInterval()));
}
return true;
}
} catch (IOException e) {
throw new RuntimeException(e);
}
});
if (restoredMetadata == null) {
// no persist has happened so far
// so either this is a brand new task or replacement of a failed task
Preconditions.checkState(sequences.get(0).startOffsets.entrySet().stream().allMatch(partitionOffsetEntry -> createSequenceNumber(partitionOffsetEntry.getValue()).compareTo(createSequenceNumber(ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().get(partitionOffsetEntry.getKey()))) >= 0), "Sequence sequences are not compatible with start sequences of task");
currOffsets.putAll(sequences.get(0).startOffsets);
} else {
@SuppressWarnings("unchecked") final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
final SeekableStreamEndSequenceNumbers<PartitionIdType, SequenceOffsetType> restoredNextPartitions = deserializePartitionsFromMetadata(toolbox.getJsonMapper(), restoredMetadataMap.get(METADATA_NEXT_PARTITIONS));
currOffsets.putAll(restoredNextPartitions.getPartitionSequenceNumberMap());
// Sanity checks.
if (!restoredNextPartitions.getStream().equals(ioConfig.getStartSequenceNumbers().getStream())) {
throw new ISE("Restored stream[%s] but expected stream[%s]", restoredNextPartitions.getStream(), ioConfig.getStartSequenceNumbers().getStream());
}
if (!currOffsets.keySet().equals(ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().keySet())) {
throw new ISE("Restored partitions[%s] but expected partitions[%s]", currOffsets.keySet(), ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().keySet());
}
// sequences size can be 0 only when all sequences got published and the task stopped
// before it could finish, which is super rare
if (sequences.size() == 0 || getLastSequenceMetadata().isCheckpointed()) {
this.endOffsets.putAll(sequences.size() == 0 ? currOffsets : getLastSequenceMetadata().getEndOffsets());
}
}
log.info("Initialized sequences: %s", sequences.stream().map(SequenceMetadata::toString).collect(Collectors.joining(", ")));
// Filter out partitions with END_OF_SHARD markers since these partitions have already been fully read. This
// should have been done by the supervisor already so this is defensive.
int numPreFilterPartitions = currOffsets.size();
if (currOffsets.entrySet().removeIf(x -> isEndOfShard(x.getValue()))) {
log.info("Removed [%d] partitions from assignment which have already been closed.", numPreFilterPartitions - currOffsets.size());
}
// When end offsets are exclusive, we never skip the start record.
if (!isEndOffsetExclusive()) {
for (Map.Entry<PartitionIdType, SequenceOffsetType> entry : currOffsets.entrySet()) {
final boolean isAtStart = entry.getValue().equals(ioConfig.getStartSequenceNumbers().getPartitionSequenceNumberMap().get(entry.getKey()));
if (!isAtStart || ioConfig.getStartSequenceNumbers().getExclusivePartitions().contains(entry.getKey())) {
lastReadOffsets.put(entry.getKey(), entry.getValue());
}
}
}
// Set up committer.
final Supplier<Committer> committerSupplier = () -> {
final Map<PartitionIdType, SequenceOffsetType> snapshot = ImmutableMap.copyOf(currOffsets);
lastPersistedOffsets.clear();
lastPersistedOffsets.putAll(snapshot);
return new Committer() {
@Override
public Object getMetadata() {
return ImmutableMap.of(METADATA_NEXT_PARTITIONS, new SeekableStreamEndSequenceNumbers<>(stream, snapshot));
}
@Override
public void run() {
// Do nothing.
}
};
};
// restart publishing of sequences (if any)
maybePersistAndPublishSequences(committerSupplier);
Set<StreamPartition<PartitionIdType>> assignment = assignPartitions(recordSupplier);
possiblyResetDataSourceMetadata(toolbox, recordSupplier, assignment);
seekToStartingSequence(recordSupplier, assignment);
ingestionState = IngestionState.BUILD_SEGMENTS;
// Main loop.
// Could eventually support leader/follower mode (for keeping replicas more in sync)
boolean stillReading = !assignment.isEmpty();
status = Status.READING;
Throwable caughtExceptionInner = null;
try {
while (stillReading) {
if (possiblyPause()) {
// The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
// partitions upon resuming. Don't call "seekToStartingSequence" after "assignPartitions", because there's
// no need to re-seek here. All we're going to be doing is dropping partitions.
assignment = assignPartitions(recordSupplier);
possiblyResetDataSourceMetadata(toolbox, recordSupplier, assignment);
if (assignment.isEmpty()) {
log.debug("All partitions have been fully read.");
publishOnStop.set(true);
stopRequested.set(true);
}
}
// if stop is requested or task's end sequence is set by call to setEndOffsets method with finish set to true
if (stopRequested.get() || sequences.size() == 0 || getLastSequenceMetadata().isCheckpointed()) {
status = Status.PUBLISHING;
}
if (stopRequested.get()) {
break;
}
if (backgroundThreadException != null) {
throw new RuntimeException(backgroundThreadException);
}
checkPublishAndHandoffFailure();
maybePersistAndPublishSequences(committerSupplier);
// calling getRecord() ensures that exceptions specific to kafka/kinesis like OffsetOutOfRangeException
// are handled in the subclasses.
List<OrderedPartitionableRecord<PartitionIdType, SequenceOffsetType, RecordType>> records = getRecords(recordSupplier, toolbox);
// note: getRecords() also updates assignment
stillReading = !assignment.isEmpty();
SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceToCheckpoint = null;
for (OrderedPartitionableRecord<PartitionIdType, SequenceOffsetType, RecordType> record : records) {
final boolean shouldProcess = verifyRecordInRange(record.getPartitionId(), record.getSequenceNumber());
log.trace("Got stream[%s] partition[%s] sequenceNumber[%s], shouldProcess[%s].", record.getStream(), record.getPartitionId(), record.getSequenceNumber(), shouldProcess);
if (shouldProcess) {
final List<InputRow> rows = parser.parse(record.getData(), isEndOfShard(record.getSequenceNumber()));
boolean isPersistRequired = false;
final SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceToUse = sequences.stream().filter(sequenceMetadata -> sequenceMetadata.canHandle(this, record)).findFirst().orElse(null);
if (sequenceToUse == null) {
throw new ISE("Cannot find any valid sequence for record with partition [%s] and sequenceNumber [%s]. Current sequences: %s", record.getPartitionId(), record.getSequenceNumber(), sequences);
}
for (InputRow row : rows) {
final AppenderatorDriverAddResult addResult = driver.add(row, sequenceToUse.getSequenceName(), committerSupplier, true, // do not allow incremental persists until all rows from this batch of rows are indexed
false);
if (addResult.isOk()) {
// If the number of rows in the segment exceeds the threshold after adding a row,
// move the segment out from the active segments of BaseAppenderatorDriver to make a new segment.
final boolean isPushRequired = addResult.isPushRequired(tuningConfig.getPartitionsSpec().getMaxRowsPerSegment(), tuningConfig.getPartitionsSpec().getMaxTotalRowsOr(DynamicPartitionsSpec.DEFAULT_MAX_TOTAL_ROWS));
if (isPushRequired && !sequenceToUse.isCheckpointed()) {
sequenceToCheckpoint = sequenceToUse;
}
isPersistRequired |= addResult.isPersistRequired();
} else {
// If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
throw new ISE("Could not allocate segment for row with timestamp[%s]", row.getTimestamp());
}
}
if (isPersistRequired) {
Futures.addCallback(driver.persistAsync(committerSupplier.get()), new FutureCallback<Object>() {
@Override
public void onSuccess(@Nullable Object result) {
log.debug("Persist completed with metadata: %s", result);
}
@Override
public void onFailure(Throwable t) {
log.error("Persist failed, dying");
backgroundThreadException = t;
}
});
}
// in kafka, we can easily get the next offset by adding 1, but for kinesis, there's no way
// to get the next sequence number without having to make an expensive api call. So the behavior
// here for kafka is to +1 while for kinesis we simply save the current sequence number
lastReadOffsets.put(record.getPartitionId(), record.getSequenceNumber());
currOffsets.put(record.getPartitionId(), getNextStartOffset(record.getSequenceNumber()));
}
// Use record.getSequenceNumber() in the moreToRead check, since currOffsets might not have been
// updated if we were skipping records for being beyond the end.
final boolean moreToReadAfterThisRecord = isMoreToReadAfterReadingRecord(record.getSequenceNumber(), endOffsets.get(record.getPartitionId()));
if (!moreToReadAfterThisRecord && assignment.remove(record.getStreamPartition())) {
log.info("Finished reading stream[%s], partition[%s].", record.getStream(), record.getPartitionId());
recordSupplier.assign(assignment);
stillReading = !assignment.isEmpty();
}
}
if (!stillReading) {
// We let the fireDepartmentMetrics know that all messages have been read. This way, some metrics such as
// high message gap need not be reported
fireDepartmentMetrics.markProcessingDone();
}
if (System.currentTimeMillis() > nextCheckpointTime) {
sequenceToCheckpoint = getLastSequenceMetadata();
}
if (sequenceToCheckpoint != null && stillReading) {
Preconditions.checkArgument(getLastSequenceMetadata().getSequenceName().equals(sequenceToCheckpoint.getSequenceName()), "Cannot checkpoint a sequence [%s] which is not the latest one, sequences %s", sequenceToCheckpoint, sequences);
requestPause();
final CheckPointDataSourceMetadataAction checkpointAction = new CheckPointDataSourceMetadataAction(task.getDataSource(), ioConfig.getTaskGroupId(), null, createDataSourceMetadata(new SeekableStreamStartSequenceNumbers<>(stream, sequenceToCheckpoint.getStartOffsets(), sequenceToCheckpoint.getExclusiveStartPartitions())));
if (!toolbox.getTaskActionClient().submit(checkpointAction)) {
throw new ISE("Checkpoint request with sequences [%s] failed, dying", currOffsets);
}
}
}
ingestionState = IngestionState.COMPLETED;
} catch (Exception e) {
// (1) catch all exceptions while reading from kafka
caughtExceptionInner = e;
log.error(e, "Encountered exception in run() before persisting.");
throw e;
} finally {
try {
// persist pending data
driver.persist(committerSupplier.get());
} catch (Exception e) {
if (caughtExceptionInner != null) {
caughtExceptionInner.addSuppressed(e);
} else {
throw e;
}
}
}
synchronized (statusLock) {
if (stopRequested.get() && !publishOnStop.get()) {
throw new InterruptedException("Stopping without publishing");
}
status = Status.PUBLISHING;
}
// We need to copy sequences here, because the success callback in publishAndRegisterHandoff removes items from
// the sequence list. If a publish finishes before we finish iterating through the sequence list, we can
// end up skipping some sequences.
List<SequenceMetadata<PartitionIdType, SequenceOffsetType>> sequencesSnapshot = new ArrayList<>(sequences);
for (int i = 0; i < sequencesSnapshot.size(); i++) {
final SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceMetadata = sequencesSnapshot.get(i);
if (!publishingSequences.contains(sequenceMetadata.getSequenceName())) {
final boolean isLast = i == (sequencesSnapshot.size() - 1);
if (isLast) {
// Shorten endOffsets of the last sequence to match currOffsets.
sequenceMetadata.setEndOffsets(currOffsets);
}
// Update assignments of the sequence, which should clear them. (This will be checked later, when the
// Committer is built.)
sequenceMetadata.updateAssignments(currOffsets, this::isMoreToReadAfterReadingRecord);
publishingSequences.add(sequenceMetadata.getSequenceName());
// persist already done in finally, so directly add to publishQueue
publishAndRegisterHandoff(sequenceMetadata);
}
}
if (backgroundThreadException != null) {
throw new RuntimeException(backgroundThreadException);
}
// Wait for publish futures to complete.
Futures.allAsList(publishWaitList).get();
// Wait for handoff futures to complete.
// Note that every publishing task (created by calling AppenderatorDriver.publish()) has a corresponding
// handoffFuture. handoffFuture can throw an exception if 1) the corresponding publishFuture failed or 2) it
// failed to persist sequences. It might also return null if handoff failed, but was recoverable.
// See publishAndRegisterHandoff() for details.
List<SegmentsAndCommitMetadata> handedOffList = Collections.emptyList();
if (tuningConfig.getHandoffConditionTimeout() == 0) {
handedOffList = Futures.allAsList(handOffWaitList).get();
} else {
final long start = System.nanoTime();
try {
handedOffList = Futures.allAsList(handOffWaitList).get(tuningConfig.getHandoffConditionTimeout(), TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
// Handoff timeout is not an indexing failure, but coordination failure. We simply ignore timeout exception
// here.
log.makeAlert("Timeout waiting for handoff").addData("taskId", task.getId()).addData("handoffConditionTimeout", tuningConfig.getHandoffConditionTimeout()).emit();
} finally {
handoffWaitMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
}
}
for (SegmentsAndCommitMetadata handedOff : handedOffList) {
log.info("Handoff complete for segments: %s", String.join(", ", Lists.transform(handedOff.getSegments(), DataSegment::toString)));
}
appenderator.close();
} catch (InterruptedException | RejectedExecutionException e) {
// (2) catch InterruptedException and RejectedExecutionException thrown for the whole ingestion steps including
// the final publishing.
caughtExceptionOuter = e;
try {
Futures.allAsList(publishWaitList).cancel(true);
Futures.allAsList(handOffWaitList).cancel(true);
if (appenderator != null) {
appenderator.closeNow();
}
} catch (Exception e2) {
e.addSuppressed(e2);
}
// handle the InterruptedException that gets wrapped in a RejectedExecutionException
if (e instanceof RejectedExecutionException && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
throw e;
}
// if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
if (!stopRequested.get()) {
Thread.currentThread().interrupt();
throw e;
}
} catch (Exception e) {
// (3) catch all other exceptions thrown for the whole ingestion steps including the final publishing.
caughtExceptionOuter = e;
try {
Futures.allAsList(publishWaitList).cancel(true);
Futures.allAsList(handOffWaitList).cancel(true);
if (appenderator != null) {
appenderator.closeNow();
}
} catch (Exception e2) {
e.addSuppressed(e2);
}
throw e;
} finally {
try {
if (driver != null) {
driver.close();
}
toolbox.getChatHandlerProvider().unregister(task.getId());
if (toolbox.getAppenderatorsManager().shouldTaskMakeNodeAnnouncements()) {
toolbox.getDruidNodeAnnouncer().unannounce(discoveryDruidNode);
toolbox.getDataSegmentServerAnnouncer().unannounce();
}
} catch (Throwable e) {
if (caughtExceptionOuter != null) {
caughtExceptionOuter.addSuppressed(e);
} else {
throw e;
}
}
}
toolbox.getTaskReportFileWriter().write(task.getId(), getTaskCompletionReports(null, handoffWaitMs));
return TaskStatus.success(task.getId());
}
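
One FutureCallback idiom in runInternal is worth isolating: the persistAsync callback cannot throw into the main reading loop, so onFailure stashes the error in backgroundThreadException and the loop re-throws it on its next iteration. Below is a minimal sketch of that hand-off; the field name matches the snippet above, but BackgroundFailureSketch and its methods are illustrative, not druid APIs.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class BackgroundFailureSketch {
    private volatile Throwable backgroundThreadException;

    private final ListeningExecutorService exec =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

    /** Kick off an async persist; surface any failure to the main loop via a field. */
    void persistInBackground(Runnable persist) {
        ListenableFuture<?> future = exec.submit(persist);
        Futures.addCallback(future, new FutureCallback<Object>() {
            @Override
            public void onSuccess(Object result) {
                // Nothing to do; the main loop keeps reading.
            }

            @Override
            public void onFailure(Throwable t) {
                // The callback runs off the main thread, so stash the error...
                backgroundThreadException = t;
            }
        }, MoreExecutors.directExecutor());
    }

    /** ...and have the main loop re-throw it on its next iteration, as runInternal does. */
    void checkBackgroundFailure() {
        if (backgroundThreadException != null) {
            throw new RuntimeException(backgroundThreadException);
        }
    }
}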
Use of com.google.common.util.concurrent.FutureCallback in project druid by druid-io.
The class SeekableStreamIndexTaskRunner, method publishAndRegisterHandoff.
private void publishAndRegisterHandoff(SequenceMetadata<PartitionIdType, SequenceOffsetType> sequenceMetadata) {
log.debug("Publishing segments for sequence [%s].", sequenceMetadata);
final ListenableFuture<SegmentsAndCommitMetadata> publishFuture = Futures.transform(driver.publish(sequenceMetadata.createPublisher(this, toolbox, ioConfig.isUseTransaction()), sequenceMetadata.getCommitterSupplier(this, stream, lastPersistedOffsets).get(), Collections.singletonList(sequenceMetadata.getSequenceName())), (Function<SegmentsAndCommitMetadata, SegmentsAndCommitMetadata>) publishedSegmentsAndMetadata -> {
if (publishedSegmentsAndMetadata == null) {
throw new ISE("Transaction failure publishing segments for sequence [%s]", sequenceMetadata);
} else {
return publishedSegmentsAndMetadata;
}
});
publishWaitList.add(publishFuture);
// Create a handoffFuture for every publishFuture. The created handoffFuture must fail if publishFuture fails.
final SettableFuture<SegmentsAndCommitMetadata> handoffFuture = SettableFuture.create();
handOffWaitList.add(handoffFuture);
Futures.addCallback(publishFuture, new FutureCallback<SegmentsAndCommitMetadata>() {
@Override
public void onSuccess(SegmentsAndCommitMetadata publishedSegmentsAndCommitMetadata) {
log.info("Published %s segments for sequence [%s] with metadata [%s].", publishedSegmentsAndCommitMetadata.getSegments().size(), sequenceMetadata.getSequenceName(), Preconditions.checkNotNull(publishedSegmentsAndCommitMetadata.getCommitMetadata(), "commitMetadata"));
log.infoSegments(publishedSegmentsAndCommitMetadata.getSegments(), "Published segments");
sequences.remove(sequenceMetadata);
publishingSequences.remove(sequenceMetadata.getSequenceName());
try {
persistSequences();
} catch (IOException e) {
log.error(e, "Unable to persist state, dying");
handoffFuture.setException(e);
throw new RuntimeException(e);
}
Futures.transform(driver.registerHandoff(publishedSegmentsAndCommitMetadata), new Function<SegmentsAndCommitMetadata, Void>() {
@Nullable
@Override
public Void apply(@Nullable SegmentsAndCommitMetadata handoffSegmentsAndCommitMetadata) {
if (handoffSegmentsAndCommitMetadata == null) {
log.warn("Failed to hand off %s segments", publishedSegmentsAndCommitMetadata.getSegments().size());
log.warnSegments(publishedSegmentsAndCommitMetadata.getSegments(), "Failed to hand off segments");
}
handoffFuture.set(handoffSegmentsAndCommitMetadata);
return null;
}
});
}
@Override
public void onFailure(Throwable t) {
log.error(t, "Error while publishing segments for sequenceNumber[%s]", sequenceMetadata);
handoffFuture.setException(t);
}
});
}
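
The publishFuture/handoffFuture pairing above is a general Guava pattern: a SettableFuture that is completed by a later stage, but wired so that a failure in the earlier stage fails it immediately. A minimal generic sketch, where LinkedFutureSketch and dependentFuture are illustrative names rather than druid APIs:

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

public final class LinkedFutureSketch {

    /**
     * Returns a future intended to be completed by a later stage, but guaranteed to
     * fail if the first stage fails, mirroring publishFuture -> handoffFuture above.
     */
    public static <T> SettableFuture<T> dependentFuture(ListenableFuture<T> firstStage) {
        SettableFuture<T> secondStage = SettableFuture.create();
        Futures.addCallback(firstStage, new FutureCallback<T>() {
            @Override
            public void onSuccess(T value) {
                // In the real code the handoff result is set from a registerHandoff
                // transform; here we simply pass the first-stage value through.
                secondStage.set(value);
            }

            @Override
            public void onFailure(Throwable t) {
                secondStage.setException(t); // a failed publish must fail the handoff too
            }
        }, MoreExecutors.directExecutor());
        return secondStage;
    }
}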