use of com.google_voltpatches.common.util.concurrent.ListenableFuture in project voltdb by VoltDB.
the class TableStreamer method streamMore.
/**
 * Streams more tuples from the table.
 * @param context Context
 * @param outputBuffers Allocated buffers to hold output tuples
 * @param rowCountAccumulator a single-element int array used to accumulate the streamed row count
 * @return A future for all writes to data targets, and a boolean indicating whether there is more
 * left in the table. The future may be null if nothing was serialized. If a row count accumulator
 * is supplied, it is updated with the number of rows streamed.
 * @throws SnapshotSerializationException
 */
@SuppressWarnings("rawtypes")
public Pair<ListenableFuture, Boolean> streamMore(SystemProcedureExecutionContext context,
        List<DBBPool.BBContainer> outputBuffers, int[] rowCountAccumulator) {
    ListenableFuture writeFuture = null;

    prepareBuffers(outputBuffers);

    Pair<Long, int[]> serializeResult = context.tableStreamSerializeMore(m_tableId, m_type, outputBuffers);
    if (serializeResult.getFirst() == SERIALIZATION_ERROR) {
        // Serialization failed: cancel the snapshot here
        for (DBBPool.BBContainer container : outputBuffers) {
            container.discard();
        }
        SnapshotSerializationException ex =
                new SnapshotSerializationException("Snapshot of table " + m_tableId + " failed to complete.");
        for (SnapshotTableTask task : m_tableTasks) {
            task.m_target.reportSerializationFailure(ex);
        }
        return Pair.of(null, false);
    }

    if (serializeResult.getSecond()[0] > 0) {
        if (rowCountAccumulator != null && rowCountAccumulator.length == 1) {
            rowCountAccumulator[0] += getTupleDataRowCount(outputBuffers);
        }
        writeFuture = writeBlocksToTargets(outputBuffers, serializeResult.getSecond());
    } else {
        // Nothing was serialized: return all allocated snapshot output buffers
        for (DBBPool.BBContainer container : outputBuffers) {
            container.discard();
        }
    }

    return Pair.of(writeFuture, serializeResult.getFirst() > 0);
}
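A minimal sketch (not VoltDB code) of how a caller might drive a streamMore-style API: keep pulling blocks until the boolean half of the pair goes false, waiting on each write future before the buffers would be reused. The Streamer and StreamResult types and the toy 100-row blocks are hypothetical stand-ins for the Pair<ListenableFuture, Boolean> contract above, and stock Guava is used in place of the google_voltpatches repackage (the APIs are the same).

import java.util.concurrent.ExecutionException;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class StreamDrainExample {
    // Stand-in for the Pair<ListenableFuture, Boolean> above: a write future (null when
    // nothing was serialized) plus a "more rows remain" flag.
    static final class StreamResult {
        final ListenableFuture<?> writeFuture;
        final boolean moreLeft;
        StreamResult(ListenableFuture<?> writeFuture, boolean moreLeft) {
            this.writeFuture = writeFuture;
            this.moreLeft = moreLeft;
        }
    }

    // Hypothetical streamer with the same shape of contract as TableStreamer.streamMore().
    interface Streamer {
        StreamResult streamMore(int[] rowCountAccumulator);
    }

    // Drain one table: pull blocks until the streamer reports nothing left, waiting for
    // each block's writes to finish before the output buffers would be reused.
    static int drain(Streamer streamer) throws ExecutionException, InterruptedException {
        int[] rows = new int[1]; // single-slot accumulator, as in streamMore()
        boolean more = true;
        while (more) {
            StreamResult r = streamer.streamMore(rows);
            if (r.writeFuture != null) {
                r.writeFuture.get(); // surfaces any serialization or write failure
            }
            more = r.moreLeft;
        }
        return rows[0];
    }

    public static void main(String[] args) throws Exception {
        // Toy streamer that "serializes" three blocks of 100 rows each.
        Streamer toy = new Streamer() {
            private int blocksLeft = 3;
            @Override
            public StreamResult streamMore(int[] acc) {
                if (blocksLeft == 0) {
                    return new StreamResult(null, false);
                }
                acc[0] += 100;
                blocksLeft--;
                return new StreamResult(Futures.immediateFuture(null), blocksLeft > 0);
            }
        };
        System.out.println("streamed rows: " + drain(toy));
    }
}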
use of com.google_voltpatches.common.util.concurrent.ListenableFuture in project voltdb by VoltDB.
the class SnapshotSiteProcessor method doSnapshotWork.
/*
 * No schedule means don't try to schedule snapshot work, because this is a blocking
 * task from completeSnapshotWork. This avoids creating thousands of task objects.
 */
public Future<?> doSnapshotWork(SystemProcedureExecutionContext context, boolean noSchedule) {
    ListenableFuture<?> retval = null;

    /*
     * This thread will null out the reference to m_snapshotTableTasks when
     * a snapshot is finished. If the snapshot buffer is loaned out, that means
     * it is pending I/O somewhere, so there is no work to do until it comes back.
     */
    if (m_snapshotTableTasks == null) {
        return retval;
    }

    if (m_snapshotTargets == null) {
        return null;
    }
    /*
     * Try to serialize a block from a table. If the table is finished,
     * remove its tasks from the task map and move on to the next table. If a block is
     * successfully serialized, break out of the loop and release the site thread for more
     * transaction work.
     */
    Iterator<Map.Entry<Integer, Collection<SnapshotTableTask>>> taskIter =
            m_snapshotTableTasks.asMap().entrySet().iterator();
    while (taskIter.hasNext()) {
        Map.Entry<Integer, Collection<SnapshotTableTask>> taskEntry = taskIter.next();
        final int tableId = taskEntry.getKey();
        final Collection<SnapshotTableTask> tableTasks = taskEntry.getValue();

        final List<BBContainer> outputBuffers = getOutputBuffers(tableTasks, noSchedule);
        if (outputBuffers == null) {
            // Not enough buffers available
            if (!noSchedule) {
                rescheduleSnapshotWork();
            }
            break;
        }

        // Stream more and add a listener to handle any failures
        Pair<ListenableFuture, Boolean> streamResult =
                m_streamers.get(tableId).streamMore(context, outputBuffers, null);
        if (streamResult.getFirst() != null) {
            final ListenableFuture writeFutures = streamResult.getFirst();
            writeFutures.addListener(new Runnable() {
                @Override
                public void run() {
                    try {
                        writeFutures.get();
                    } catch (Throwable t) {
                        if (m_perSiteLastSnapshotSucceded) {
                            if (t instanceof StreamSnapshotTimeoutException ||
                                    t.getCause() instanceof StreamSnapshotTimeoutException) {
                                // This error is already logged by the watchdog when it generates the exception
                            } else {
                                SNAP_LOG.error("Error while attempting to write snapshot data", t);
                            }
                            m_perSiteLastSnapshotSucceded = false;
                        }
                    }
                }
            }, CoreUtils.SAMETHREADEXECUTOR);
        }

        /*
         * The table streamer returns false when there is no more data left to pull from that
         * table. The enclosing loop ensures that the next table is then addressed.
         */
        if (!streamResult.getSecond()) {
            asyncTerminateReplicatedTableTasks(tableTasks);
            // XXX: Guava's multimap will clear the tableTasks collection when the entry is
            // removed from the containing map, so don't use the collection after removal!
            SNAP_LOG.debug("Finished snapshot tasks for table " + tableId + ": " + tableTasks);
            taskIter.remove();
        } else {
            break;
        }
    }
    /*
     * If there are no more tasks then this particular EE is finished doing snapshot work.
     * Check the set of currently snapshotting sites to find out if this is the last one.
     */
    if (m_snapshotTableTasks.isEmpty()) {
        SNAP_LOG.debug("Finished with tasks");
        // In case this is a non-blocking snapshot, do the post-snapshot tasks here.
        runPostSnapshotTasks(context);

        final ArrayList<SnapshotDataTarget> snapshotTargets = m_snapshotTargets;
        m_snapshotTargets = null;
        m_snapshotTableTasks = null;

        boolean IamLast = false;
        synchronized (ExecutionSitesCurrentlySnapshotting) {
            if (!ExecutionSitesCurrentlySnapshotting.contains(this)) {
                VoltDB.crashLocalVoltDB(
                        "Currently snapshotting site didn't find itself in set of snapshotting sites",
                        true, null);
            }
            IamLast = ExecutionSitesCurrentlySnapshotting.size() == 1;
            if (!IamLast) {
                ExecutionSitesCurrentlySnapshotting.remove(this);
            }
        }

        /*
         * If this is the last one then this EE must close all the SnapshotDataTargets.
         * Done in a separate thread so the EE can go and do other work. It will
         * sync every file descriptor and that may block for a while.
         */
        if (IamLast) {
            SNAP_LOG.debug("I AM LAST!");
            final long txnId = m_lastSnapshotTxnId;
            final ExtensibleSnapshotDigestData snapshotDataForZookeeper = m_extraSnapshotData;
            m_extraSnapshotData = null;
            final Thread terminatorThread = new Thread("Snapshot terminator") {
                @Override
                public void run() {
                    boolean snapshotSucceeded = true;
                    try {
                        /*
                         * Be absolutely sure the snapshot is finished
                         * and synced to disk before another is started
                         */
                        for (Thread t : m_snapshotTargetTerminators) {
                            if (t == this) {
                                continue;
                            }
                            try {
                                t.join();
                            } catch (InterruptedException e) {
                                return;
                            }
                        }
                        for (final SnapshotDataTarget t : snapshotTargets) {
                            try {
                                t.close();
                            } catch (IOException e) {
                                snapshotSucceeded = false;
                                throw new RuntimeException(e);
                            } catch (InterruptedException e) {
                                snapshotSucceeded = false;
                                throw new RuntimeException(e);
                            }
                        }

                        Runnable r = null;
                        while ((r = m_tasksOnSnapshotCompletion.poll()) != null) {
                            try {
                                r.run();
                            } catch (Exception e) {
                                SNAP_LOG.error("Error running snapshot completion task", e);
                            }
                        }
                    } finally {
                        try {
                            VoltDB.instance().getHostMessenger().getZK().delete(
                                    VoltZK.nodes_currently_snapshotting + "/" +
                                    VoltDB.instance().getHostMessenger().getHostId(), -1);
                        } catch (NoNodeException e) {
                            SNAP_LOG.warn("Expect the snapshot node to already exist during deletion", e);
                        } catch (Exception e) {
                            VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
                        } finally {
                            /*
                             * Remove this last site from the set here, after the terminator has run,
                             * so that new snapshots won't start until everything is on disk for the
                             * previous snapshot. This prevents a really long snapshot initiation
                             * procedure from occurring because it has to contend for filesystem
                             * resources.
                             *
                             * Do this before logSnapshotCompleteToZK() because the ZK operations are
                             * slow, and they can trigger snapshot completion interests to fire before
                             * this site removes itself from the set. The next snapshot request may
                             * come in and see this snapshot is still in progress.
                             */
                            ExecutionSitesCurrentlySnapshotting.remove(SnapshotSiteProcessor.this);
                        }
                        logSnapshotCompleteToZK(txnId, snapshotSucceeded, snapshotDataForZookeeper);
                    }
                }
            };
            m_snapshotTargetTerminators.add(terminatorThread);
            terminatorThread.start();
        }
    }
    return retval;
}
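A minimal, self-contained sketch of the failure-listener pattern used above: a Runnable is attached to the write future and re-calls get() on the completing thread to surface any failure, flipping a per-site success flag at most once. The class and field names are hypothetical; stock Guava's MoreExecutors.directExecutor() stands in for CoreUtils.SAMETHREADEXECUTOR.

import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.SettableFuture;

public class WriteListenerExample {
    private static volatile boolean lastSnapshotSucceeded = true;

    // Attach a same-thread listener that surfaces write failures, as doSnapshotWork() does.
    static void watch(final ListenableFuture<?> writeFuture) {
        writeFuture.addListener(new Runnable() {
            @Override
            public void run() {
                try {
                    writeFuture.get(); // throws if any target write failed
                } catch (Throwable t) {
                    if (lastSnapshotSucceeded) {
                        System.err.println("Error while attempting to write snapshot data: " + t);
                        lastSnapshotSucceeded = false;
                    }
                }
            }
        }, MoreExecutors.directExecutor());
    }

    public static void main(String[] args) {
        SettableFuture<Void> writeFuture = SettableFuture.create();
        watch(writeFuture);
        writeFuture.setException(new RuntimeException("disk full")); // listener fires inline
        System.out.println("lastSnapshotSucceeded = " + lastSnapshotSucceeded);
    }
}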
use of com.google_voltpatches.common.util.concurrent.ListenableFuture in project voltdb by VoltDB.
the class TableStreamer method writeBlocksToTargets.
/**
 * Finalize the output buffers and write them to the corresponding data targets.
 *
 * @return A future that can be used to wait for all targets to finish writing the buffers
 */
private ListenableFuture<?> writeBlocksToTargets(Collection<DBBPool.BBContainer> outputBuffers, int[] serialized) {
    Preconditions.checkArgument(m_tableTasks.size() == serialized.length);
    Preconditions.checkArgument(outputBuffers.size() == serialized.length);

    final List<ListenableFuture<?>> writeFutures = new ArrayList<ListenableFuture<?>>(outputBuffers.size());

    // The containers, the data targets, and the serialized byte counts should all line up
    Iterator<DBBPool.BBContainer> containerIter = outputBuffers.iterator();
    int serializedIndex = 0;
    for (SnapshotTableTask task : m_tableTasks) {
        final DBBPool.BBContainer container = containerIter.next();

        /*
         * Finalize the buffer by setting position to 0 and limit to the last used byte
         */
        final ByteBuffer buf = container.b();
        buf.limit(serialized[serializedIndex++] + task.m_target.getHeaderSize());
        buf.position(0);

        Callable<DBBPool.BBContainer> valueForTarget = Callables.returning(container);
        if (task.m_filters != null) {
            for (SnapshotDataFilter filter : task.m_filters) {
                valueForTarget = filter.filter(valueForTarget);
            }
        }

        ListenableFuture<?> writeFuture = task.m_target.write(valueForTarget, m_tableId);
        if (writeFuture != null) {
            writeFutures.add(writeFuture);
        }
    }

    // Wrap all write futures in one future
    return Futures.allAsList(writeFutures);
}
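A small stand-alone sketch of the two ListenableFuture idioms in writeBlocksToTargets(): a value is wrapped in a Callable with Callables.returning() so filters can be layered around it lazily, and the per-target write futures are rolled into one future with Futures.allAsList(). The Filter interface and the string "blocks" are hypothetical stand-ins for SnapshotDataFilter and the byte buffers, and stock Guava is used instead of the voltpatches repackage.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import com.google.common.util.concurrent.Callables;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class FilterChainExample {
    // Hypothetical analogue of SnapshotDataFilter: wraps the upstream Callable.
    interface Filter {
        Callable<String> filter(Callable<String> upstream);
    }

    public static void main(String[] args) throws Exception {
        // Start with the raw block, as Callables.returning(container) does above.
        Callable<String> value = Callables.returning("raw-block");

        // Layer filters around it; each one wraps the previous Callable and runs lazily.
        List<Filter> filters = List.of(
                upstream -> () -> upstream.call() + "+filtered",
                upstream -> () -> upstream.call() + "+compressed");
        for (Filter f : filters) {
            value = f.filter(value);
        }

        // Pretend two "targets" each write the (lazily filtered) block and return a future.
        List<ListenableFuture<?>> writes = new ArrayList<>();
        writes.add(Futures.immediateFuture(value.call()));
        writes.add(Futures.immediateFuture(value.call()));

        // One future covering every target, as writeBlocksToTargets() returns.
        ListenableFuture<?> all = Futures.allAsList(writes);
        System.out.println(all.get()); // both entries: raw-block+filtered+compressed
    }
}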
use of com.google_voltpatches.common.util.concurrent.ListenableFuture in project voltdb by VoltDB.
the class VoltTrace method dumpEvents.
private ListenableFuture dumpEvents(File path) {
    if (m_emptyQueue == null || m_traceEvents.isEmpty()) {
        return null;
    }

    // Swap the active event queue with the spare one and hand the full queue to the writer thread.
    final EvictingQueue<TraceEventBatch> writeQueue = m_traceEvents;
    m_traceEvents = m_emptyQueue;
    m_emptyQueue = null;

    final ListenableFuture future = m_writerThread.submit(new TraceFileWriter(path, writeQueue));
    // When the writer finishes, re-queue a task that returns the drained queue to the spare slot.
    future.addListener(() -> m_work.offer(() -> m_emptyQueue = writeQueue), CoreUtils.SAMETHREADEXECUTOR);
    return future;
}
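A minimal sketch of the double-buffer swap that dumpEvents() performs, under simplified assumptions: two queues alternate roles, the full one is handed to a single writer thread, and a completion listener hands the drained queue back as the spare. QueueSwapExample and its Queue<String> of events are hypothetical; the real code re-posts the restore onto its own work queue rather than assigning the field directly from the listener, which is simplified away here.

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.Executors;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class QueueSwapExample {
    private Queue<String> events = new ArrayDeque<>();   // active buffer being appended to
    private Queue<String> spare = new ArrayDeque<>();    // empty buffer waiting to take over
    private final ListeningExecutorService writer =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

    ListenableFuture<?> dump() {
        if (spare == null || events.isEmpty()) {
            return null; // nothing buffered, or a dump is already in flight
        }
        final Queue<String> writeQueue = events;
        events = spare;
        spare = null; // mark the dump as in flight

        ListenableFuture<?> future = writer.submit(() -> {
            for (String e : writeQueue) {
                System.out.println("write " + e);
            }
            writeQueue.clear();
        });
        // When the writer is done, hand the drained queue back as the spare.
        future.addListener(() -> spare = writeQueue, MoreExecutors.directExecutor());
        return future;
    }

    public static void main(String[] args) throws Exception {
        QueueSwapExample tracer = new QueueSwapExample();
        tracer.events.add("event-1");
        tracer.events.add("event-2");
        tracer.dump().get();
        tracer.writer.shutdown();
    }
}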