Use of io.datarouter.filesystem.snapshot.block.root.RootBlock in project datarouter by hotpads.
Class SnapshotWriter, method complete().
public Optional<RootBlock> complete() {
    LinkedBlockingDequeTool.put(messages, Message.last());
    CountDownLatchTool.await(writerThreadCompletionLatch);
    // finish value blocks
    IntStream.range(0, valueBlockEncoders.size()).forEach(column -> {
        ValueBlockEncoder valueBlockEncoder = valueBlockEncoders.get(column);
        if (valueBlockEncoder.numRecords() > 0) {
            blockWriter.submitValueBlock(column, numValueBlocksByColumn.get(column), valueBlockEncoder);
            numValueBlocksByColumn.set(column, numValueBlocksByColumn.get(column) + 1);
        }
    });
    // finish leaf blocks
    if (leafBlockEncoder.numRecords() > 0) {
        addBranchEntry(0, numKeys, lastEntry, numLeafBlocks);
        blockWriter.submitLeaf(leafBlockEncoder);
        ++numLeafBlocks;
    }
    // finish branch blocks
    IntStream.range(0, branchBlockEncoders.size()).forEach(level -> {
        BranchBlockEncoder branchEncoder = branchBlockEncoders.get(level);
        if (branchEncoder.numRecords() > 0) {
            if (level != branchBlockEncoders.size() - 1) {
                // avoid creating a root block with only one entry
                addBranchEntry(level + 1, numKeys, lastEntry, numBranchBlocksByLevel.get(level));
            }
            blockWriter.submitBranch(branchEncoder);
            branchBlockEncoders.set(level, config.branchBlockEncoderFactory.apply(level));
            numBranchBlocksByLevel.set(level, numBranchBlocksByLevel.get(level) + 1);
        }
    });
    // complete file uploads (could parallelize this?)
    blockWriter.complete();
    // write root block
    if (numKeys == 0) {
        return Optional.empty();
    }
    // TODO write to cache if config.updateCache
    RootBlock root = blockWriter.flushRootBlock(
            startTimeMs,
            numBranchBlocksByLevel,
            numValueBlocksByColumn,
            branchBlockEncoders.size(),
            numKeys,
            numLeafBlocks);
    // log completion
    logStatus();
    String logTokens = Scanner.of(root.toKeyValueStrings().entrySet())
            .map(kv -> kv.getKey() + "=" + kv.getValue())
            .collect(Collectors.joining(", "));
    logger.warn("Completed group={}, id={}, {}", snapshotKey.groupId, snapshotKey.snapshotId, logTokens);
    return Optional.of(root);
}
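
For orientation, here is a minimal caller sketch for complete(). It relies only on what the method above shows: an empty Optional means no keys were written, and toKeyValueStrings() exposes the root metadata. The "writer" variable and the logging around it are assumptions for illustration, not datarouter's actual calling code.

// Hypothetical caller sketch; "writer" is an assumed SnapshotWriter instance.
Optional<RootBlock> optRoot = writer.complete();
if (optRoot.isEmpty()) {
    // complete() returns Optional.empty() when numKeys == 0
    logger.warn("snapshot contained no keys");
} else {
    RootBlock root = optRoot.get();
    // same metadata map the writer logs above
    root.toKeyValueStrings().forEach((key, value) -> logger.info("{}={}", key, value));
}
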
Class DatarouterSnapshotHandler, method summary().
@Handler
public Mav summary(@Param(P_groupId) String groupId, @Param(P_snapshotId) String snapshotId) {
    var snapshotKey = new SnapshotKey(groupId, snapshotId);
    RootBlock rootBlock = groups.getGroup(groupId).root(BlockKey.root(snapshotKey));
    return pageFactory.startBuilder(request)
            .withTitle("Datarouter Filesystem - Snapshot Groups")
            .withRequires(DatarouterWebRequireJsV2.SORTTABLE)
            .withContent(buildSummary(rootBlock))
            .buildMav();
}
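
The handler delegates page content to buildSummary(rootBlock), whose body is not shown here. A rough idea of what such a helper could produce, using only the toKeyValueStrings() accessor seen earlier; the helper name and the plain-text row formatting are invented for illustration (the real buildSummary builds the HTML page content used above).

// Illustrative only; not the actual buildSummary implementation.
private String buildSummaryText(RootBlock rootBlock) {
    StringBuilder rows = new StringBuilder();
    rootBlock.toKeyValueStrings()
            .forEach((key, value) -> rows.append(key).append('\t').append(value).append('\n'));
    return rows.toString();
}
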
Class SnapshotGroupDeleteOps, method deleteSnapshot().
public void deleteSnapshot(SnapshotKey snapshotKey, ExecutorService exec, int numThreads) {
    deleteIdFile(snapshotKey.snapshotId);
    RootBlock rootBlock = group.root(BlockKey.root(snapshotKey));
    // TODO delete from cache
    SnapshotFileStorage snapshotFileStorage = group.makeSnapshotFileStorage(snapshotKey.snapshotId);
    new SnapshotFileDeleter(rootBlock, pathsRegistry, snapshotKey, snapshotFileStorage, exec, numThreads)
            .delete();
}
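
A minimal caller sketch for the delete path, relying only on the signature shown above. The "deleteOps" instance, the example snapshot id, the pool size, and the executor lifecycle handling are assumptions about the caller, not part of datarouter.

// Hypothetical caller; "deleteOps" and the pool size are assumptions.
ExecutorService exec = Executors.newFixedThreadPool(4);
try {
    deleteOps.deleteSnapshot(new SnapshotKey("myGroup", "exampleSnapshotId"), exec, 4);
} finally {
    exec.shutdown();
}
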
Class SnapshotGroup, method makeDecodingBlockLoader().
private DecodingBlockLoader makeDecodingBlockLoader(SnapshotKey snapshotKey) {
    SnapshotBlockStorageReader blockStorageReader = makeStorageReader(snapshotKey.snapshotId);
    byte[] rootBytes = blockStorageReader.getRootBlock();
    RootBlock rootBlock = rootBlockDecoder.decode(rootBytes);
    return decodingBlockLoaderFactory.create(rootBlock, blockStorageReader);
}
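
Decoding a root block requires a storage read, so a caller would usually want to reuse the resulting DecodingBlockLoader rather than rebuild it per lookup. A sketch of one way to memoize it per snapshot, assuming SnapshotKey implements equals/hashCode; this caching pattern is an illustration, not the actual behavior of SnapshotGroup.

// Illustrative caching wrapper; the field and method names are assumptions.
private final Map<SnapshotKey,DecodingBlockLoader> loaderCache = new ConcurrentHashMap<>();

private DecodingBlockLoader getOrMakeDecodingBlockLoader(SnapshotKey snapshotKey) {
    return loaderCache.computeIfAbsent(snapshotKey, this::makeDecodingBlockLoader);
}
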
Class FilesystemSnapshotSortingTests, method writeInputSnapshot().
private RootBlock writeInputSnapshot() {
    var timer = new PhaseTimer("writeInputSnapshot");
    SnapshotWriterConfig config = makeSnapshotWriterConfig(false);
    SnapshotWriteResult result = Scanner.iterate(0, i -> i + 1)
            .limit(NUM_ENTRIES)
            .shuffle()
            .map(FilesystemSnapshotSortingTests::makeEntry)
            .batch(1000)
            .apply(entries -> inputGroup.writeOps().write(config, entries, exec, () -> false));
    inputSnapshotKey = result.key;
    timer.add("wrote " + NumberFormatter.addCommas(result.optRoot.get().numItems()));
    logger.warn("{}", timer);
    return result.optRoot.get();
}
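
Because the test shuffles the input before writing, the returned RootBlock is a convenient place to sanity-check the write. A sketch of such a check, relying only on numItems() from the snippet above; the use of a TestNG-style Assert is an assumption about the test framework.

// Hypothetical follow-up check in the test.
RootBlock inputRoot = writeInputSnapshot();
Assert.assertEquals(inputRoot.numItems(), NUM_ENTRIES);
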