Use of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project.
From the class FileStoreBackupTest, method testBackup:
@Test
public void testBackup() throws Exception {
    // try-with-resources guarantees the source store is closed even when a
    // step before the original try block throws. The previous version built
    // the SegmentNodeStore and the FileStoreBackupImpl *before* entering
    // try/finally, so a failure there leaked the open FileStore.
    try (FileStore source = newFileStore(src)) {
        SegmentNodeStore store = SegmentNodeStoreBuilders.builder(source).build();
        FileStoreBackup fsb = new FileStoreBackupImpl();

        // 1. Initial content: back up and verify destination matches source.
        init(store);
        source.flush();
        fsb.backup(source.getReader(), source.getRevisions(), destination);
        compare(source, destination);

        // 2. Incremental backup after adding more content.
        addTestContent(store);
        source.flush();
        fsb.backup(source.getReader(), source.getRevisions(), destination);
        compare(source, destination);

        // 3. Backup must remain consistent after full compaction and cleanup
        //    of the backup target.
        source.compactFull();
        fsb.cleanup(source);
        fsb.backup(source.getReader(), source.getRevisions(), destination);
        compare(source, destination);
    }
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project.
From the class BlobIdRecordTest, method longReferencesShouldHaveBlobIdType:
@Test
public void longReferencesShouldHaveBlobIdType() throws Exception {
    // Writes a blob just over MEDIUM_LIMIT into a store backed by an external
    // blob store, and expects the resulting record to be of type BLOB_ID
    // (i.e. referenced by id rather than stored as an inline value record).
    try (FileStore fileStore = newFileStore(new LongIdMappingBlobStore())) {
        SegmentWriter writer = defaultSegmentWriterBuilder("test").build(fileStore);

        byte[] payload = new byte[Segment.MEDIUM_LIMIT + 1];
        SegmentBlob blob = new SegmentBlob(
                fileStore.getBlobStore(),
                writer.writeBlob(new ArrayBasedBlob(payload)));

        assertRecordTypeEquals(blob, RecordType.BLOB_ID);
    }
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project.
From the class SegmentTarFixture, method getOak:
@Override
public Oak getOak(int clusterId) throws Exception {
    // Base segment-store configuration shared by all fixture variants.
    FileStoreBuilder builder = fileStoreBuilder(parentPath)
            .withMaxFileSize(maxFileSize)
            .withSegmentCacheSize(segmentCacheSize)
            .withMemoryMapping(memoryMapping);

    // Optionally persist segments to an Azure blob container instead of the
    // local file system.
    if (azureConnectionString != null) {
        CloudStorageAccount account = CloudStorageAccount.parse(azureConnectionString);
        CloudBlobContainer container =
                account.createCloudBlobClient().getContainerReference(azureContainerName);
        container.createIfNotExists();
        CloudBlobDirectory rootDirectory = container.getDirectoryReference(azureRootPath);
        builder.withCustomPersistence(new AzurePersistence(rootDirectory));
    }

    // Optionally route binaries through a FileDataStore-backed blob store.
    if (useBlobStore) {
        FileDataStore dataStore = new FileDataStore();
        // NOTE(review): 4092 is the fixture's minimum record length threshold —
        // presumably chosen to match Oak's customary data-store setting; confirm.
        dataStore.setMinRecordLength(4092);
        dataStore.init(parentPath.getAbsolutePath());
        builder.withBlobStore(new DataStoreBlobStore(dataStore));
    }

    return newOak(SegmentNodeStoreBuilders.builder(builder.build()).build());
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project.
From the class RepeatedRepositorySidegradeTest, method upgradeRepository:
// Builds a source SegmentNodeStore repository, upgrades it into the target
// node store, modifies the source, and upgrades a second time — exercising
// the repeated-sidegrade path. Runs only once per test class (guarded by
// the upgradeComplete flag); synchronized so concurrent test setup cannot
// run the upgrade twice.
@Before
public synchronized void upgradeRepository() throws Exception {
if (!upgradeComplete) {
// Fresh segment store directory acting as the migration source.
final File sourceDir = new File(getTestDirectory(), "jackrabbit2");
sourceDir.mkdirs();
FileStore fileStore = fileStoreBuilder(sourceDir).build();
SegmentNodeStore segmentNodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
RepositoryImpl repository = (RepositoryImpl) new Jcr(new Oak(segmentNodeStore)).createRepository();
Session session = repository.login(CREDENTIALS);
try {
createSourceContent(session);
} finally {
// Persist whatever was created, then tear down the whole stack so the
// store can be reopened below. NOTE(review): save() in a finally can
// mask an exception thrown by createSourceContent — presumably
// intentional best-effort persistence; confirm.
session.save();
session.logout();
repository.shutdown();
fileStore.close();
}
final NodeStore target = getTargetNodeStore();
// First sidegrade: full copy (includeVersions/flag false on this call).
doUpgradeRepository(sourceDir, target, false);
// Reopen the source store and apply incremental modifications.
fileStore = fileStoreBuilder(sourceDir).build();
segmentNodeStore = SegmentNodeStoreBuilders.builder(fileStore).build();
repository = (RepositoryImpl) new Jcr(new Oak(segmentNodeStore)).createRepository();
session = repository.login(CREDENTIALS);
try {
modifySourceContent(session);
} finally {
// Same save/logout/shutdown/close sequence as above.
session.save();
session.logout();
repository.shutdown();
fileStore.close();
}
// Second sidegrade over the modified source (flag true this time).
doUpgradeRepository(sourceDir, target, true);
upgradeComplete = true;
}
}
Use of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project.
From the class Compact, method compact:
// Compacts the segment store on disk, cleans up unreferenced segments, and
// rewrites journal.log so it contains only the single latest revision.
private void compact() throws IOException, InvalidFileStoreVersionException {
    // Phase 1: open the store and run compaction, then close it so the
    // cleanup pass below starts from a fresh store instance.
    try (FileStore store = newFileStore()) {
        store.compact();
    }

    System.out.println(" -> cleaning up");

    // Phase 2: reclaim unreferenced segments, then truncate the journal to a
    // single line pointing at the current head revision.
    try (FileStore store = newFileStore()) {
        store.cleanup();

        File journalLog = new File(path, "journal.log");
        String headLine;
        try (JournalReader reader = new JournalReader(journalLog)) {
            headLine = reader.next().getRevision() + " root " + System.currentTimeMillis() + "\n";
        }

        try (RandomAccessFile raf = new RandomAccessFile(journalLog, "rw")) {
            System.out.println(" -> writing new " + journalLog.getName() + ": " + headLine);
            raf.setLength(0);
            raf.writeBytes(headLine);
            raf.getChannel().force(false);
        }
    }
}
Aggregations