Use of org.voltdb.SnapshotTableTask in project voltdb by VoltDB.
The class SnapshotWritePlan, method placeReplicatedTasks:
protected void placeReplicatedTasks(Collection<SnapshotTableTask> tasks, List<Long> hsids) {
    SNAP_LOG.debug("Placing replicated tasks at sites: " + CoreUtils.hsIdCollectionToString(hsids));
    int siteIndex = 0;
    // Round-robin the placement of replicated table tasks across the provided HSIds
    for (SnapshotTableTask task : tasks) {
        ArrayList<Long> robin = new ArrayList<Long>();
        robin.add(hsids.get(siteIndex));
        placeTask(task, robin);
        // Advance the index with wrap-around. The original line read
        // "siteIndex = siteIndex++ % hsids.size();", which discards the post-increment
        // and would pin every task to the first site instead of rotating.
        siteIndex = (siteIndex + 1) % hsids.size();
    }
}
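The index-advance step is the subtle part of this method: in Java, an assignment of the form "i = i++ % n" uses the pre-increment value of i on the right-hand side and then overwrites the increment when the assignment completes, so the index never moves. The corrected "(siteIndex + 1) % hsids.size()" form above actually cycles through the sites. A minimal, self-contained sketch of the same round-robin placement, with illustrative class and method names that are not VoltDB code:

import java.util.ArrayList;
import java.util.List;

public class RoundRobinPlacementDemo {
    // Cycle through the given site ids, assigning each task slot to the next site in order.
    static List<Long> assignRoundRobin(int taskCount, List<Long> siteIds) {
        List<Long> assignments = new ArrayList<>();
        int siteIndex = 0;
        for (int i = 0; i < taskCount; i++) {
            assignments.add(siteIds.get(siteIndex));
            siteIndex = (siteIndex + 1) % siteIds.size(); // advance with wrap-around
        }
        return assignments;
    }

    public static void main(String[] args) {
        // Three tasks over two sites: prints [101, 102, 101]
        System.out.println(assignRoundRobin(3, List.of(101L, 102L)));
    }
}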
Use of org.voltdb.SnapshotTableTask in project voltdb by VoltDB.
The class NativeSnapshotWritePlan, method createDeferredSetup:
private Callable<Boolean> createDeferredSetup(final String file_path, final String pathType, final String file_nonce,
        final long txnId, final Map<Integer, Long> partitionTransactionIds,
        final SystemProcedureExecutionContext context, final ExtensibleSnapshotDigestData extraSnapshotData,
        final SiteTracker tracker, final HashinatorSnapshotData hashinatorData, final long timestamp,
        final int newPartitionCount, final Table[] tables, final SnapshotRegistry.Snapshot snapshotRecord,
        final ArrayList<SnapshotTableTask> partitionedSnapshotTasks,
        final ArrayList<SnapshotTableTask> replicatedSnapshotTasks, final boolean isTruncationSnapshot) {
    return new Callable<Boolean>() {
        private final HashMap<Integer, SnapshotDataTarget> m_createdTargets = Maps.newHashMap();

        @Override
        public Boolean call() throws Exception {
            final AtomicInteger numTables = new AtomicInteger(tables.length);
            NativeSnapshotWritePlan.createFileBasedCompletionTasks(file_path, pathType, file_nonce, txnId,
                    partitionTransactionIds, context, extraSnapshotData, hashinatorData, timestamp,
                    newPartitionCount, tables);
            for (SnapshotTableTask task : replicatedSnapshotTasks) {
                SnapshotDataTarget target = getSnapshotDataTarget(numTables, task);
                task.setTarget(target);
            }
            for (SnapshotTableTask task : partitionedSnapshotTasks) {
                SnapshotDataTarget target = getSnapshotDataTarget(numTables, task);
                task.setTarget(target);
            }
            if (isTruncationSnapshot) {
                // Only sync the DR Log on Native Snapshots
                SnapshotSiteProcessor.m_tasksOnSnapshotCompletion.offer(new Runnable() {
                    @Override
                    public void run() {
                        context.forceAllDRNodeBuffersToDisk(false);
                    }
                });
            }
            // Sync export buffer for all types of snapshot
            SnapshotSiteProcessor.m_tasksOnSnapshotCompletion.offer(new Runnable() {
                @Override
                public void run() {
                    ExportManager.sync(false);
                }
            });
            return true;
        }

        private SnapshotDataTarget getSnapshotDataTarget(AtomicInteger numTables, SnapshotTableTask task) throws IOException {
            SnapshotDataTarget target = m_createdTargets.get(task.m_table.getRelativeIndex());
            if (target == null) {
                target = createDataTargetForTable(file_path, file_nonce, task.m_table, txnId, context.getHostId(),
                        context.getCluster().getTypeName(), context.getDatabase().getTypeName(),
                        context.getNumberOfPartitions(),
                        DrRoleType.XDCR.value().equals(context.getCluster().getDrrole()), tracker, timestamp,
                        numTables, snapshotRecord);
                m_createdTargets.put(task.m_table.getRelativeIndex(), target);
            }
            return target;
        }
    };
}
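The getSnapshotDataTarget helper memoizes one SnapshotDataTarget per table, keyed by the table's relative index, so every task that snapshots the same table writes through a single shared target, and all of this creation work happens inside the returned Callable rather than during setup. A generic sketch of the same create-once, reuse-afterwards pattern, assuming only that target creation is expensive; the TargetCache type and the String "targets" here are hypothetical stand-ins, not VoltDB classes:

import java.util.HashMap;
import java.util.Map;
import java.util.function.IntFunction;

class TargetCache<T> {
    private final Map<Integer, T> createdTargets = new HashMap<>();
    private final IntFunction<T> factory;

    TargetCache(IntFunction<T> factory) {
        this.factory = factory;
    }

    // Create the target for a key on first request; hand the same instance back afterwards.
    T getOrCreate(int key) {
        return createdTargets.computeIfAbsent(key, factory::apply);
    }
}

class TargetCacheDemo {
    public static void main(String[] args) {
        TargetCache<String> cache = new TargetCache<>(idx -> "target-for-table-" + idx);
        // Two tasks for table 7 share the same target instance.
        System.out.println(cache.getOrCreate(7) == cache.getOrCreate(7)); // true
    }
}

Deferring the factory call until a task actually needs the target mirrors the intent of the deferred setup above: expensive I/O-style initialization stays out of the planning path and runs only where and when it is needed.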
Use of org.voltdb.SnapshotTableTask in project voltdb by VoltDB.
The class CSVSnapshotWritePlan, method createSetup:
@Override
public Callable<Boolean> createSetup(String file_path, String pathType, String file_nonce, long txnId,
        Map<Integer, Long> partitionTransactionIds, JSONObject jsData, SystemProcedureExecutionContext context,
        final VoltTable result, ExtensibleSnapshotDigestData extraSnapshotData, SiteTracker tracker,
        HashinatorSnapshotData hashinatorData, long timestamp) {
    assert (SnapshotSiteProcessor.ExecutionSitesCurrentlySnapshotting.isEmpty());
    /*
     * List of partitions to include if this snapshot is going to be deduped.
     * Attempts to break up the work by seeding an RNG and selecting a random
     * replica to do the work. Will not work in failure cases, but we don't use
     * dedupe when we want durability.
     */
    List<Long> sitesToInclude = CSVSnapshotWritePlan.computeDedupedLocalSites(txnId, tracker);
    // If there's no work to do on this host, just claim success and get out:
    if (sitesToInclude.isEmpty() && !tracker.isFirstHost()) {
        return null;
    }
    final SnapshotRequestConfig config = new SnapshotRequestConfig(jsData, context.getDatabase());
    final AtomicInteger numTables = new AtomicInteger(config.tables.length);
    final SnapshotRegistry.Snapshot snapshotRecord = SnapshotRegistry.startSnapshot(txnId, context.getHostId(),
            file_path, file_nonce, SnapshotFormat.CSV, config.tables);
    boolean noTargetsCreated = true;
    final ArrayList<SnapshotTableTask> partitionedSnapshotTasks = new ArrayList<SnapshotTableTask>();
    final ArrayList<SnapshotTableTask> replicatedSnapshotTasks = new ArrayList<SnapshotTableTask>();
    for (final Table table : config.tables) {
        /*
         * For a deduped csv snapshot, only produce the replicated tables on the "leader" host.
         */
        if (table.getIsreplicated() && !tracker.isFirstHost()) {
            snapshotRecord.removeTable(table.getTypeName());
            // We'll expect one less table in the global table count
            // in order to be done, too (ENG-4802)
            numTables.decrementAndGet();
            continue;
        }
        List<SnapshotDataFilter> filters = new ArrayList<SnapshotDataFilter>();
        filters.add(new CSVSnapshotFilter(CatalogUtil.getVoltTable(table), ',', null));
        final SnapshotTableTask task = new SnapshotTableTask(table,
                filters.toArray(new SnapshotDataFilter[filters.size()]), null, false);
        if (table.getIsreplicated()) {
            replicatedSnapshotTasks.add(task);
        } else {
            partitionedSnapshotTasks.add(task);
        }
        noTargetsCreated = false;
        result.addRow(context.getHostId(), CoreUtils.getHostnameOrAddress(), table.getTypeName(), "SUCCESS", "");
    }
    if (noTargetsCreated) {
        SnapshotRegistry.discardSnapshot(snapshotRecord);
    }
    // CSV snapshots do the partitioned work only on the specified sites for de-duping,
    // but since we've pre-filtered the replicated task list to only contain entries on
    // one node, we can go ahead and distribute them across all of the sites on that node.
    placePartitionedTasks(partitionedSnapshotTasks, sitesToInclude);
    placeReplicatedTasks(replicatedSnapshotTasks, tracker.getSitesForHost(context.getHostId()));
    // All IO work will be deferred and be run on the dedicated snapshot IO thread
    return createDeferredSetup(file_path, pathType, file_nonce, config.tables, txnId, partitionTransactionIds,
            context, extraSnapshotData, timestamp, numTables, snapshotRecord, partitionedSnapshotTasks,
            replicatedSnapshotTasks);
}
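The table loop above splits the catalog tables into partitioned and replicated task lists and, for a deduped CSV snapshot, keeps replicated tables only on the first host, so each replicated table is written exactly once cluster-wide while every host still handles its own partitioned work. A simplified sketch of that split decision; TableInfo and TaskSplit are hypothetical illustration types, not part of VoltDB:

import java.util.ArrayList;
import java.util.List;

record TableInfo(String name, boolean replicated) {}

class TaskSplit {
    final List<TableInfo> partitioned = new ArrayList<>();
    final List<TableInfo> replicated = new ArrayList<>();

    // Replicated tables are snapshotted only on the "leader" (first) host; every host
    // keeps its own partitioned tables.
    static TaskSplit forHost(List<TableInfo> tables, boolean isFirstHost) {
        TaskSplit split = new TaskSplit();
        for (TableInfo t : tables) {
            if (t.replicated()) {
                if (isFirstHost) {
                    split.replicated.add(t);
                }
                // on non-leader hosts the replicated table is skipped entirely
            } else {
                split.partitioned.add(t);
            }
        }
        return split;
    }

    public static void main(String[] args) {
        List<TableInfo> tables = List.of(new TableInfo("ORDERS", false), new TableInfo("CODES", true));
        TaskSplit leader = forHost(tables, true);
        TaskSplit follower = forHost(tables, false);
        System.out.println(leader.replicated.size() + " vs " + follower.replicated.size()); // prints "1 vs 0"
    }
}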