Use of org.apache.jackrabbit.oak.segment.file.tar.GCGeneration in the Apache jackrabbit-oak project: class FileStoreBackupImpl, method backup.
/**
 * Writes an offline backup of the head state reachable from {@code revisions}
 * into a file store located at {@code destination}, then reopens the backup
 * store once more to clean up segments made obsolete by the compaction pass.
 *
 * @param reader      reader for the source store's segments
 * @param revisions   revisions of the source store; its head is backed up
 * @param destination directory of the backup file store
 * @throws IOException                    on store or write failures
 * @throws InvalidFileStoreVersionException if the destination store version is incompatible
 */
@Override
public void backup(@Nonnull SegmentReader reader, @Nonnull Revisions revisions, @Nonnull File destination) throws IOException, InvalidFileStoreVersionException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    // Offline GC options: the backup store is not serving traffic while we write it.
    SegmentGCOptions offlineOptions = SegmentGCOptions.defaultGCOptions().setOffline();
    FileStoreBuilder storeBuilder = fileStoreBuilder(destination)
            .withStrictVersionCheck(true)
            .withDefaultMemoryMapping();
    if (USE_FAKE_BLOBSTORE) {
        storeBuilder.withBlobStore(new BasicReadOnlyBlobStore());
    }
    storeBuilder.withGCOptions(offlineOptions);
    FileStore backup = storeBuilder.build();
    SegmentNodeState sourceHead = reader.readHeadState(revisions);
    try {
        // Write backup records with the same GC generation as the source head.
        GCGeneration generation = sourceHead.getRecordId().getSegmentId().getGcGeneration();
        SegmentBufferWriter segmentBufferWriter = new SegmentBufferWriter(backup.getSegmentIdProvider(), backup.getReader(), "b", generation);
        SegmentWriter segmentWriter = new DefaultSegmentWriter(backup, backup.getReader(), backup.getSegmentIdProvider(), backup.getBlobStore(), new WriterCacheManager.Default(), segmentBufferWriter);
        Compactor compactor = new Compactor(backup.getReader(), segmentWriter, backup.getBlobStore(), Suppliers.ofInstance(false), GCNodeWriteMonitor.EMPTY);
        SegmentNodeState backupHead = backup.getHead();
        // Compact the source head on top of the existing backup head (incremental backup).
        SegmentNodeState compacted = compactor.compact(backupHead, sourceHead, backupHead);
        segmentWriter.flush();
        // compact() may yield null (e.g. cancelled compaction); only advance the head on success.
        if (compacted != null) {
            backup.getRevisions().setHead(backupHead.getRecordId(), compacted.getRecordId());
        }
    } finally {
        backup.close();
    }
    // Reopen the backup store and drop segments made unreachable by compaction.
    backup = fileStoreBuilder(destination)
            .withDefaultMemoryMapping()
            .withGCOptions(offlineOptions)
            .withStrictVersionCheck(true)
            .build();
    try {
        cleanup(backup);
    } finally {
        backup.close();
    }
    stopwatch.stop();
    log.info("Backup finished in {}.", stopwatch);
}
Use of org.apache.jackrabbit.oak.segment.file.tar.GCGeneration in the Apache jackrabbit-oak project: class StandbyClientSync, method run.
// Performs a single synchronization cycle with the primary instance, guarded
// so that at most one cycle runs at a time.
@Override
public void run() {
// Remember the caller's thread name so it can be restored in the outer finally.
String name = Thread.currentThread().getName();
try {
// Give each sync run a distinct, numbered thread name for diagnostics.
Thread.currentThread().setName("standby-run-" + standbyRunCounter.incrementAndGet());
// Bail out if the client has been stopped.
if (!running) {
return;
}
state = STATUS_STARTING;
// CAS guard: if another run is still active, skip this cycle entirely.
if (!active.compareAndSet(false, true)) {
return;
}
state = STATUS_RUNNING;
try {
long startTimestamp = System.currentTimeMillis();
// Record the head GC generation before syncing so a generation bump
// (compaction on the primary) can be detected afterwards.
GCGeneration genBefore = headGeneration(fileStore);
// The client is AutoCloseable; try-with-resources guarantees it is closed.
try (StandbyClient client = new StandbyClient(host, port, group, observer.getID(), secure, readTimeoutMs, spoolFolder)) {
execution.execute(client);
}
fileStore.flush();
GCGeneration genAfter = headGeneration(fileStore);
// A strictly newer head generation means compaction happened; optionally
// clean up obsolete local segments.
if (autoClean && genAfter.compareWith(genBefore) > 0) {
log.info("New head generation detected (prevHeadGen: {} newHeadGen: {}), running cleanup.", genBefore, genAfter);
cleanupAndRemove();
}
// Successful cycle: reset the failure counter and update timing stats.
this.failedRequests = 0;
this.syncStartTimestamp = startTimestamp;
this.syncEndTimestamp = System.currentTimeMillis();
// NOTE(review): lastSuccessfulRequest is kept in seconds, unlike the
// millisecond timestamps above — confirm consumers expect seconds.
this.lastSuccessfulRequest = syncEndTimestamp / 1000;
} catch (Exception e) {
// Any failure is counted and logged; the next scheduled run will retry.
this.failedRequests++;
log.error("Failed synchronizing state.", e);
} finally {
// Release the guard so the next cycle may run.
active.set(false);
}
} finally {
// Restore the original thread name.
Thread.currentThread().setName(name);
}
}
Use of org.apache.jackrabbit.oak.segment.file.tar.GCGeneration in the Apache jackrabbit-oak project: class AbstractFileStore, method populateTarBinaryReferences.
/**
 * Records a binary reference in the recovered tar entry for every BLOB_ID
 * record contained in the given segment, tagged with the segment's GC
 * generation and UUID.
 *
 * @param segment  the data segment to scan for blob-id records
 * @param recovery sink receiving the recovered binary references
 */
private static void populateTarBinaryReferences(final Segment segment, final EntryRecovery recovery) {
    final GCGeneration gcGeneration = segment.getGcGeneration();
    final UUID segmentUuid = segment.getSegmentId().asUUID();
    segment.forEachRecord((recordNumber, recordType, recordOffset) -> {
        // Only BLOB_ID records carry binary references; skip everything else.
        if (recordType != RecordType.BLOB_ID) {
            return;
        }
        recovery.recoverBinaryReference(gcGeneration, segmentUuid, SegmentBlob.readBlobId(segment, recordNumber));
    });
}
Use of org.apache.jackrabbit.oak.segment.file.tar.GCGeneration in the Apache jackrabbit-oak project: class FileStoreRestoreImpl, method restore.
/**
 * Restores the head state of a backup file store at {@code source} into the
 * file store at {@code destination} by compacting the backup head on top of
 * the destination's current head.
 *
 * @param source      directory of the backup (read-only) file store
 * @param destination directory of the file store to restore into
 * @throws IOException if {@code source} is not a valid store, on store/write
 *                     failures, or if compaction produces no result
 * @throws InvalidFileStoreVersionException if a store version is incompatible
 */
@Override
public void restore(File source, File destination) throws IOException, InvalidFileStoreVersionException {
    if (!validFileStore(source)) {
        throw new IOException("Folder " + source + " is not a valid FileStore directory");
    }
    ReadOnlyFileStore restore = fileStoreBuilder(source).buildReadOnly();
    Stopwatch watch = Stopwatch.createStarted();
    FileStore store = fileStoreBuilder(destination).withStrictVersionCheck(true).build();
    SegmentNodeState current = store.getHead();
    try {
        SegmentNodeState head = restore.getHead();
        // Write restored records with the same GC generation as the backup head.
        GCGeneration gen = head.getRecordId().getSegmentId().getGcGeneration();
        SegmentBufferWriter bufferWriter = new SegmentBufferWriter(store.getSegmentIdProvider(), store.getReader(), "r", gen);
        SegmentWriter writer = new DefaultSegmentWriter(store, store.getReader(), store.getSegmentIdProvider(), store.getBlobStore(), new WriterCacheManager.Default(), bufferWriter);
        Compactor compactor = new Compactor(store.getReader(), writer, store.getBlobStore(), Suppliers.ofInstance(false), GCNodeWriteMonitor.EMPTY);
        // Compact the backup head on top of the destination's current head.
        SegmentNodeState after = compactor.compact(current, head, current);
        writer.flush();
        // compact() may return null (e.g. cancelled compaction). The previous
        // code dereferenced the result unchecked and would NPE; fail with a
        // descriptive IOException instead, mirroring the null guard used by
        // the corresponding backup path.
        if (after == null) {
            throw new IOException("Restore failed: compaction of " + source + " produced no result");
        }
        store.getRevisions().setHead(current.getRecordId(), after.getRecordId());
    } finally {
        restore.close();
        store.close();
    }
    watch.stop();
    log.info("Restore finished in {}.", watch);
}
Use of org.apache.jackrabbit.oak.segment.file.tar.GCGeneration in the Apache jackrabbit-oak project: class AbstractFileStore, method writeSegment.
/**
 * Recovers a single raw segment into the tar entry recovery sink. Data
 * segments additionally contribute their graph edges and binary references;
 * bulk segments are recorded with a null GC generation and no extra metadata.
 *
 * @param id       UUID of the segment being recovered
 * @param data     raw segment bytes
 * @param recovery sink receiving the recovered entry and metadata
 * @throws IOException on recovery failures
 */
private void writeSegment(UUID id, byte[] data, EntryRecovery recovery) throws IOException {
    long msb = id.getMostSignificantBits();
    long lsb = id.getLeastSignificantBits();
    ByteBuffer wrapped = ByteBuffer.wrap(data);
    // The data/bulk distinction is encoded in the least significant bits;
    // compute it once and reuse it below.
    boolean isDataSegment = SegmentId.isDataSegmentId(lsb);
    GCGeneration generation;
    if (isDataSegment) {
        generation = Segment.getGcGeneration(newSegmentData(wrapped), id);
    } else {
        generation = GCGeneration.NULL;
    }
    recovery.recoverEntry(msb, lsb, data, 0, data.length, generation);
    if (isDataSegment) {
        Segment segment = new Segment(tracker, segmentReader, tracker.newSegmentId(msb, lsb), wrapped);
        populateTarGraph(segment, recovery);
        populateTarBinaryReferences(segment, recovery);
    }
}
Aggregations