Use of com.google.common.util.concurrent.FutureCallback in project cassandra by apache.
The class LocalSessions, method handlePrepareMessage.
/**
* The PrepareConsistentRequest promotes the parent repair session to a consistent incremental
* session, and isolates the data to be repaired from the rest of the table's data.
*
* No response is sent to the repair coordinator until the data preparation / isolation has completed
* successfully. If the data preparation fails, a failure message is sent to the coordinator,
* cancelling the session.
*/
public void handlePrepareMessage(InetAddressAndPort from, PrepareConsistentRequest request) {
logger.trace("received {} from {}", request, from);
UUID sessionID = request.parentSession;
InetAddressAndPort coordinator = request.coordinator;
Set<InetAddressAndPort> peers = request.participants;
ActiveRepairService.ParentRepairSession parentSession;
try {
parentSession = getParentRepairSession(sessionID);
} catch (Throwable e) {
logger.error("Error retrieving ParentRepairSession for session {}, responding with failure", sessionID);
sendMessage(coordinator, Message.out(PREPARE_CONSISTENT_RSP, new PrepareConsistentResponse(sessionID, getBroadcastAddressAndPort(), false)));
return;
}
LocalSession session = createSessionUnsafe(sessionID, parentSession, peers);
putSessionUnsafe(session);
logger.info("Beginning local incremental repair session {}", session);
ExecutorService executor = executorFactory().pooled("Repair-" + sessionID, parentSession.getColumnFamilyStores().size());
KeyspaceRepairManager repairManager = parentSession.getKeyspace().getRepairManager();
RangesAtEndpoint tokenRanges = filterLocalRanges(parentSession.getKeyspace().getName(), parentSession.getRanges());
Future<List<Void>> repairPreparation = prepareSession(repairManager, sessionID, parentSession.getColumnFamilyStores(), tokenRanges, executor, () -> session.getState() != PREPARING);
repairPreparation.addCallback(new FutureCallback<List<Void>>() {
public void onSuccess(@Nullable List<Void> result) {
try {
logger.info("Prepare phase for incremental repair session {} completed", sessionID);
if (session.getState() != FAILED)
setStateAndSave(session, PREPARED);
else
logger.info("Session {} failed before anticompaction completed", sessionID);
Message<PrepareConsistentResponse> message = Message.out(PREPARE_CONSISTENT_RSP, new PrepareConsistentResponse(sessionID, getBroadcastAddressAndPort(), session.getState() != FAILED));
sendMessage(coordinator, message);
} finally {
executor.shutdown();
}
}
public void onFailure(Throwable t) {
try {
if (Throwables.anyCauseMatches(t, (throwable) -> throwable instanceof CompactionInterruptedException))
logger.info("Anticompaction interrupted for session {}: {}", sessionID, t.getMessage());
else if (Throwables.anyCauseMatches(t, (throwable) -> throwable instanceof NoSuchRepairSessionException))
logger.warn("No such repair session: {}", sessionID);
else
logger.error("Prepare phase for incremental repair session {} failed", sessionID, t);
sendMessage(coordinator, Message.out(PREPARE_CONSISTENT_RSP, new PrepareConsistentResponse(sessionID, getBroadcastAddressAndPort(), false)));
failSession(sessionID, false);
} finally {
executor.shutdown();
}
}
});
}
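The snippet above registers a FutureCallback directly on Cassandra's internal Future via addCallback. For readers unfamiliar with the idiom, the following is a minimal, self-contained sketch of the same pattern using plain Guava; the class name and executor choice are illustrative assumptions, and note that recent Guava versions require the three-argument Futures.addCallback overload with an explicit Executor.
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class PrepareCallbackSketch {
    public static void main(String[] args) {
        // Hypothetical stand-in for the repair-preparation future above.
        ListeningExecutorService pool = MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        ListenableFuture<String> preparation = pool.submit(() -> "prepared");
        // Recent Guava requires an explicit Executor for the callback.
        Futures.addCallback(preparation, new FutureCallback<String>() {
            @Override
            public void onSuccess(String result) {
                System.out.println("prepare phase completed: " + result);
                // mirrors the executor.shutdown() in the finally blocks above
                pool.shutdown();
            }
            @Override
            public void onFailure(Throwable t) {
                System.err.println("prepare phase failed: " + t);
                pool.shutdown();
            }
        }, MoreExecutors.directExecutor());
    }
}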
Use of com.google.common.util.concurrent.FutureCallback in project flink by apache.
The class CassandraRowWriteAheadSink, method sendValues.
@Override
protected boolean sendValues(Iterable<Row> values, long checkpointId, long timestamp) throws Exception {
final AtomicInteger updatesCount = new AtomicInteger(0);
final AtomicInteger updatesConfirmed = new AtomicInteger(0);
final AtomicReference<Throwable> exception = new AtomicReference<>();
FutureCallback<ResultSet> callback = new FutureCallback<ResultSet>() {
@Override
public void onSuccess(ResultSet resultSet) {
updatesConfirmed.incrementAndGet();
if (updatesCount.get() > 0) {
// updatesCount is only set (non-zero) once all updates have been sent
if (updatesCount.get() == updatesConfirmed.get()) {
synchronized (updatesConfirmed) {
updatesConfirmed.notifyAll();
}
}
}
}
@Override
public void onFailure(Throwable throwable) {
if (exception.compareAndSet(null, throwable)) {
LOG.error("Error while sending value.", throwable);
synchronized (updatesConfirmed) {
updatesConfirmed.notifyAll();
}
}
}
};
// set values for prepared statement
int updatesSent = 0;
for (Row value : values) {
for (int x = 0; x < arity; x++) {
fields[x] = value.getField(x);
}
// insert values and send to cassandra
BoundStatement s = preparedStatement.bind(fields);
s.setDefaultTimestamp(timestamp);
ResultSetFuture result = session.executeAsync(s);
updatesSent++;
if (result != null) {
// add callback to detect errors
Futures.addCallback(result, callback);
}
}
updatesCount.set(updatesSent);
synchronized (updatesConfirmed) {
while (exception.get() == null && updatesSent != updatesConfirmed.get()) {
updatesConfirmed.wait();
}
}
if (exception.get() != null) {
LOG.warn("Sending a value failed.", exception.get());
return false;
} else {
return true;
}
}
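The blocking logic above is a hand-rolled completion barrier: the sending thread waits on updatesConfirmed until every callback has fired or a failure has been recorded. A standalone sketch of that idiom follows (the class and method names are assumptions for illustration, not Flink API):
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

public class CompletionBarrier {
    private final AtomicInteger confirmed = new AtomicInteger(0);
    private final AtomicReference<Throwable> error = new AtomicReference<>();

    // Called from each async success callback.
    public void ack() {
        confirmed.incrementAndGet();
        synchronized (confirmed) {
            confirmed.notifyAll();
        }
    }

    // Called from each async failure callback; only the first error is kept.
    public void fail(Throwable t) {
        if (error.compareAndSet(null, t)) {
            synchronized (confirmed) {
                confirmed.notifyAll();
            }
        }
    }

    // Blocks the sending thread until all expected acks arrive or an error is set.
    public boolean await(int expected) throws InterruptedException {
        synchronized (confirmed) {
            while (error.get() == null && confirmed.get() != expected) {
                confirmed.wait();
            }
        }
        return error.get() == null;
    }
}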
Use of com.google.common.util.concurrent.FutureCallback in project flink by apache.
The class CassandraTupleWriteAheadSink, method sendValues.
@Override
protected boolean sendValues(Iterable<IN> values, long checkpointId, long timestamp) throws Exception {
final AtomicInteger updatesCount = new AtomicInteger(0);
final AtomicInteger updatesConfirmed = new AtomicInteger(0);
final AtomicReference<Throwable> exception = new AtomicReference<>();
FutureCallback<ResultSet> callback = new FutureCallback<ResultSet>() {
@Override
public void onSuccess(ResultSet resultSet) {
updatesConfirmed.incrementAndGet();
if (updatesCount.get() > 0) {
// updatesCount is only set (non-zero) once all updates have been sent
if (updatesCount.get() == updatesConfirmed.get()) {
synchronized (updatesConfirmed) {
updatesConfirmed.notifyAll();
}
}
}
}
@Override
public void onFailure(Throwable throwable) {
if (exception.compareAndSet(null, throwable)) {
LOG.error("Error while sending value.", throwable);
synchronized (updatesConfirmed) {
updatesConfirmed.notifyAll();
}
}
}
};
// set values for prepared statement
int updatesSent = 0;
for (IN value : values) {
for (int x = 0; x < value.getArity(); x++) {
fields[x] = value.getField(x);
}
// insert values and send to cassandra
BoundStatement s = preparedStatement.bind(fields);
s.setDefaultTimestamp(timestamp);
ResultSetFuture result = session.executeAsync(s);
updatesSent++;
if (result != null) {
// add callback to detect errors
Futures.addCallback(result, callback);
}
}
updatesCount.set(updatesSent);
synchronized (updatesConfirmed) {
while (exception.get() == null && updatesSent != updatesConfirmed.get()) {
updatesConfirmed.wait();
}
}
if (exception.get() != null) {
LOG.warn("Sending a value failed.", exception.get());
return false;
} else {
return true;
}
}
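CassandraTupleWriteAheadSink repeats the same wait/notify idiom as the Row variant. A natural question is why a CountDownLatch is not used instead; the likely reason is that the total number of writes is only known after the send loop finishes, while a latch needs its count up front. A hypothetical latch-based variant (illustrative only, not the Flink code) would look like this:
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class LatchVariantSketch {
    public static void main(String[] args) throws InterruptedException {
        // The count would have to be known before sending, unlike in sendValues above.
        int expectedWrites = 3;
        CountDownLatch latch = new CountDownLatch(expectedWrites);
        // Each onSuccess/onFailure callback would call latch.countDown();
        // simulated here with plain threads.
        for (int i = 0; i < expectedWrites; i++) {
            new Thread(latch::countDown).start();
        }
        // The sender blocks until every callback has fired (bounded wait shown here).
        boolean allConfirmed = latch.await(10, TimeUnit.SECONDS);
        System.out.println("all writes confirmed: " + allConfirmed);
    }
}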
Use of com.google.common.util.concurrent.FutureCallback in project Minigames by AddstarMC.
The class BackendCommand, method onCommand.
@Override
public boolean onCommand(final CommandSender sender, Minigame minigame, String label, String[] args) {
if (args == null || args.length != 2) {
return false;
}
BackendManager manager = Minigames.getPlugin().getBackend();
if (args[0].equalsIgnoreCase("export")) {
try {
ListenableFuture<Void> future = manager.exportTo(args[1], Minigames.getPlugin().getConfig(), new Notifier(sender));
sender.sendMessage(ChatColor.GOLD + "Exporting backend to " + args[1] + "...");
Futures.addCallback(future, new FutureCallback<Void>() {
@Override
public void onFailure(Throwable t) {
sender.sendMessage(ChatColor.RED + "An internal error occured while exporting.");
}
@Override
public void onSuccess(Void result) {
}
});
} catch (IllegalArgumentException e) {
sender.sendMessage(ChatColor.RED + e.getMessage());
}
} else if (args[0].equalsIgnoreCase("switch")) {
try {
ListenableFuture<Void> future = manager.switchBackend(args[1], Minigames.getPlugin().getConfig());
sender.sendMessage(ChatColor.GOLD + "Switching minigames backend to " + args[1] + "...");
Futures.addCallback(future, new FutureCallback<Void>() {
@Override
public void onFailure(Throwable t) {
sender.sendMessage(ChatColor.RED + "An internal error occured while switching backend.");
}
@Override
public void onSuccess(Void result) {
sender.sendMessage(ChatColor.GOLD + "The backend has been successfully switched");
sender.sendMessage(ChatColor.GOLD + "!!! This change is " + ChatColor.BOLD + "temporary" + ChatColor.GOLD + ". Please update the config !!!");
}
});
} catch (IllegalArgumentException e) {
sender.sendMessage(ChatColor.RED + e.getMessage());
}
} else {
sender.sendMessage(ChatColor.RED + "Unknown option " + args[0]);
}
return true;
}
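The two branches register near-identical callbacks that differ only in their messages. A small hypothetical helper (not part of Minigames; the class name, Consumer parameter, and directExecutor choice are assumptions) could factor that duplication out:
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.function.Consumer;

final class SenderNotifier {
    // Reports completion of an async backend operation back to the command issuer.
    static void notifyOnCompletion(ListenableFuture<Void> future, Consumer<String> sendMessage, String successMessage, String failureMessage) {
        Futures.addCallback(future, new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void result) {
                if (successMessage != null) {
                    sendMessage.accept(successMessage);
                }
            }
            @Override
            public void onFailure(Throwable t) {
                sendMessage.accept(failureMessage);
            }
        }, MoreExecutors.directExecutor());
    }
}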
Use of com.google.common.util.concurrent.FutureCallback in project druid by druid-io.
The class BatchAppenderator, method add.
@Override
public AppenderatorAddResult add(final SegmentIdWithShardSpec identifier, final InputRow row, @Nullable final Supplier<Committer> committerSupplier, final boolean allowIncrementalPersists) throws IndexSizeExceededException, SegmentNotWritableException {
throwPersistErrorIfExists();
Preconditions.checkArgument(committerSupplier == null, "Batch appenderator does not need a committer!");
Preconditions.checkArgument(allowIncrementalPersists, "Batch appenderator should always allow incremental persists!");
if (!identifier.getDataSource().equals(schema.getDataSource())) {
throw new IAE("Expected dataSource[%s] but was asked to insert row for dataSource[%s]?!", schema.getDataSource(), identifier.getDataSource());
}
final Sink sink = getOrCreateSink(identifier);
metrics.reportMessageMaxTimestamp(row.getTimestampFromEpoch());
final int sinkRowsInMemoryBeforeAdd = sink.getNumRowsInMemory();
final int sinkRowsInMemoryAfterAdd;
final long bytesInMemoryBeforeAdd = sink.getBytesInMemory();
final long bytesInMemoryAfterAdd;
final IncrementalIndexAddResult addResult;
try {
// allowIncrementalPersists is always true for batch
addResult = sink.add(row, false);
sinkRowsInMemoryAfterAdd = addResult.getRowCount();
bytesInMemoryAfterAdd = addResult.getBytesInMemory();
} catch (IndexSizeExceededException e) {
// Uh oh, we can't do anything about this! We can't persist (commit metadata would be out of sync) and we
// can't add the row (it just failed). This should never actually happen, though, because we check
// sink.canAddRow after returning from add.
log.error(e, "Sink for segment[%s] was unexpectedly full!", identifier);
throw e;
}
if (sinkRowsInMemoryAfterAdd < 0) {
throw new SegmentNotWritableException("Attempt to add row to swapped-out sink for segment[%s].", identifier);
}
if (addResult.isRowAdded()) {
rowIngestionMeters.incrementProcessed();
} else if (addResult.hasParseException()) {
parseExceptionHandler.handle(addResult.getParseException());
}
final int numAddedRows = sinkRowsInMemoryAfterAdd - sinkRowsInMemoryBeforeAdd;
rowsCurrentlyInMemory += numAddedRows;
bytesCurrentlyInMemory += (bytesInMemoryAfterAdd - bytesInMemoryBeforeAdd);
totalRows += numAddedRows;
sinksMetadata.computeIfAbsent(identifier, unused -> new SinkMetadata()).addRows(numAddedRows);
boolean persist = false;
List<String> persistReasons = new ArrayList<>();
if (!sink.canAppendRow()) {
persist = true;
persistReasons.add("No more rows can be appended to sink");
}
if (rowsCurrentlyInMemory >= tuningConfig.getMaxRowsInMemory()) {
persist = true;
persistReasons.add(StringUtils.format("rowsCurrentlyInMemory[%d] is greater than maxRowsInMemory[%d]", rowsCurrentlyInMemory, tuningConfig.getMaxRowsInMemory()));
}
if (bytesCurrentlyInMemory >= maxBytesTuningConfig) {
persist = true;
persistReasons.add(StringUtils.format("bytesCurrentlyInMemory[%d] is greater than maxBytesInMemory[%d]", bytesCurrentlyInMemory, maxBytesTuningConfig));
}
if (persist) {
// persistAll clears rowsCurrentlyInMemory, no need to update it.
log.info("Incremental persist to disk because %s.", String.join(",", persistReasons));
long bytesToBePersisted = 0L;
for (Map.Entry<SegmentIdWithShardSpec, Sink> entry : sinks.entrySet()) {
final Sink sinkEntry = entry.getValue();
if (sinkEntry != null) {
bytesToBePersisted += sinkEntry.getBytesInMemory();
if (sinkEntry.swappable()) {
// Code for batch no longer memory maps hydrants, but they still take memory...
int memoryStillInUse = calculateMemoryUsedByHydrant();
bytesCurrentlyInMemory += memoryStillInUse;
}
}
}
if (!skipBytesInMemoryOverheadCheck && bytesCurrentlyInMemory - bytesToBePersisted > maxBytesTuningConfig) {
// We are still over maxBytesTuningConfig even after persisting.
// This means that we ran out of all available memory to ingest (due to overheads created as part of ingestion)
final String alertMessage = StringUtils.format("Task has exceeded safe estimated heap usage limits, failing " + "(numSinks: [%d] numHydrantsAcrossAllSinks: [%d] totalRows: [%d])" + "(bytesCurrentlyInMemory: [%d] - bytesToBePersisted: [%d] > maxBytesTuningConfig: [%d])", sinks.size(), sinks.values().stream().mapToInt(Iterables::size).sum(), getTotalRowCount(), bytesCurrentlyInMemory, bytesToBePersisted, maxBytesTuningConfig);
final String errorMessage = StringUtils.format("%s.\nThis can occur when the overhead from too many intermediary segment persists becomes too " + "great to have enough space to process additional input rows. This check, along with metering the overhead " + "of these objects to factor into the 'maxBytesInMemory' computation, can be disabled by setting " + "'skipBytesInMemoryOverheadCheck' to 'true' (note that doing so might allow the task to naturally encounter " + "a 'java.lang.OutOfMemoryError'). Alternatively, 'maxBytesInMemory' can be increased which will cause an " + "increase in heap footprint, but will allow for more intermediary segment persists to occur before " + "reaching this condition.", alertMessage);
log.makeAlert(alertMessage).addData("dataSource", schema.getDataSource()).emit();
throw new RuntimeException(errorMessage);
}
Futures.addCallback(persistAll(null), new FutureCallback<Object>() {
@Override
public void onSuccess(@Nullable Object result) {
// do nothing
}
@Override
public void onFailure(Throwable t) {
persistError = t;
}
});
}
return new AppenderatorAddResult(identifier, sinksMetadata.get(identifier).numRowsInSegment, false);
}
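The callback on persistAll(null) shows a common hand-off between threads: the asynchronous persist failure is stored in a field and rethrown on the ingest thread by throwPersistErrorIfExists() at the top of add(). A minimal sketch of that idiom (class and method names here are assumptions, not the Druid internals) is:
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;

class PersistErrorPropagation {
    private volatile Throwable persistError;

    // Records an asynchronous persist failure for later inspection.
    void watchPersist(ListenableFuture<Object> persistFuture) {
        Futures.addCallback(persistFuture, new FutureCallback<Object>() {
            @Override
            public void onSuccess(Object result) {
                // nothing to do on success
            }
            @Override
            public void onFailure(Throwable t) {
                persistError = t;
            }
        }, MoreExecutors.directExecutor());
    }

    // Called on the ingest thread (e.g., at the start of add()) to surface the failure.
    void throwPersistErrorIfExists() {
        if (persistError != null) {
            throw new RuntimeException("persist failed", persistError);
        }
    }
}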