Usage of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project.
From the class DefaultStandbyHeadReaderTest, method shouldWaitForFlushAndReturnHeadSegmentId.
@Test
public void shouldWaitForFlushAndReturnHeadSegmentId() throws Exception {
    // Open a fresh file store; try-with-resources closes it when the test ends.
    try (FileStore fileStore = newFileStore()) {
        // Reader configured with a 10s timeout for waiting on a persisted head.
        DefaultStandbyHeadReader headReader = new DefaultStandbyHeadReader(fileStore, 10_000L);

        // First read must yield a head record id (i.e. a flush has happened).
        assertNotNull(headReader.readHeadRecordId());

        // A subsequent read must match the persisted head known to the store itself.
        String expectedHead = fileStore.getRevisions().getPersistedHead().toString();
        assertEquals(expectedHead, headReader.readHeadRecordId());
    }
}
Usage of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project.
From the class OakDirectoryTestBase, method largeFile.
@Test
public void largeFile() throws Exception {
    // Verifies that an OakDirectory-backed Lucene file larger than 2 GB can be
    // written and read back in full.
    //
    // Fix: the original called store.close() as the last statement, so any failed
    // assertion or exception above it leaked the FileStore (and its tar files /
    // memory maps). try-with-resources guarantees the store is closed on every path.
    try (FileStore store = FileStoreBuilder.fileStoreBuilder(tempFolder.getRoot())
            .withMemoryMapping(false)
            .withBlobStore(getBlackHoleBlobStore())
            .build()) {
        SegmentNodeStore nodeStore = SegmentNodeStoreBuilders.builder(store).build();
        IndexDefinition defn = new IndexDefinition(INITIAL_CONTENT, EmptyNodeState.EMPTY_NODE, "/foo");
        Directory directory = getOakDirectoryBuilder(nodeStore.getRoot().builder(), defn)
                .setReadOnly(false)
                .build();

        // 2 GB + 1 MB: deliberately past the 2^31 boundary to exercise long-sized files.
        long expectedSize = ONE_GB * 2 + ONE_MB;
        String fileName = "test";

        writeFile(directory, fileName, expectedSize);
        assertEquals(expectedSize, directory.fileLength(fileName));

        // Read the whole file back to make sure every byte is reachable.
        IndexInput input = directory.openInput(fileName, IOContext.DEFAULT);
        readInputToEnd(expectedSize, input);
    }
}
Usage of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project.
From the class DataStoreCheckTest, method setup.
@Before
public void setup() throws Exception {
    // Choose the blob store backend for this run: S3 if configured, otherwise
    // Azure if configured, otherwise a local file data store. Each branch sets
    // the shared fixture fields: setupDataStore, cfgFilePath, dsOption, and
    // (where applicable) container / dsPath.
    if (S3DataStoreUtils.isS3Configured()) {
        Properties props = S3DataStoreUtils.getS3Config();
        // cacheSize=0 disables the local cache so reads/writes hit the backend directly.
        props.setProperty("cacheSize", "0");
        container = props.getProperty(S3Constants.S3_BUCKET);
        DataStore ds = S3DataStoreUtils.getS3DataStore(
                S3DataStoreUtils.getFixtures().get(0), props,
                temporaryFolder.newFolder().getAbsolutePath());
        setupDataStore = new DataStoreBlobStore(ds);
        cfgFilePath = createTempConfig(temporaryFolder.newFile(), props);
        dsOption = "s3ds";
    } else if (AzureDataStoreUtils.isAzureConfigured()) {
        Properties props = AzureDataStoreUtils.getAzureConfig();
        props.setProperty("cacheSize", "0");
        container = props.getProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME);
        DataStore ds = AzureDataStoreUtils.getAzureDataStore(
                props, temporaryFolder.newFolder().getAbsolutePath());
        setupDataStore = new DataStoreBlobStore(ds);
        cfgFilePath = createTempConfig(temporaryFolder.newFile(), props);
        dsOption = "azureblobds";
    } else {
        // Fallback: local file data store rooted in a temp folder.
        OakFileDataStore delegate = new OakFileDataStore();
        dsPath = temporaryFolder.newFolder().getAbsolutePath();
        delegate.setPath(dsPath);
        delegate.init(null);
        setupDataStore = new DataStoreBlobStore(delegate);
        File cfgFile = temporaryFolder.newFile();
        Properties props = new Properties();
        props.put("path", dsPath);
        // 4096L autoboxes to Long; the original used the deprecated (and since
        // removed) new Long(4096) constructor.
        props.put("minRecordLength", 4096L);
        cfgFilePath = createTempConfig(cfgFile, props);
        dsOption = "fds";
    }

    File storeFile = temporaryFolder.newFolder();
    storePath = storeFile.getAbsolutePath();

    // try-with-resources ensures the FileStore is closed even if blob creation
    // or the merge below throws; the original only closed it on the happy path.
    try (FileStore fileStore = FileStoreBuilder.fileStoreBuilder(storeFile)
            .withBlobStore(setupDataStore)
            .withMaxFileSize(256)
            .withSegmentCacheSize(64)
            .build()) {
        NodeStore store = SegmentNodeStoreBuilders.builder(fileStore).build();

        /* Create nodes with blobs stored in DS */
        NodeBuilder a = store.getRoot().builder();
        int numBlobs = 10;
        blobsAdded = Sets.newHashSet();
        blobsAddedWithNodes = Maps.newHashMap();
        for (int i = 0; i < numBlobs; i++) {
            // 18342 bytes is large enough to be stored in the data store
            // (rather than inlined in a segment).
            SegmentBlob b = (SegmentBlob) store.createBlob(randomStream(i, 18342));
            // Record every chunk id making up the blob, plus the node path that
            // will reference it, so later checks can cross-reference them.
            Iterator<String> idIter = setupDataStore.resolveChunks(b.getBlobId());
            while (idIter.hasNext()) {
                String chunk = idIter.next();
                blobsAdded.add(chunk);
                blobsAddedWithNodes.put(chunk, "/c" + i + "/x");
            }
            a.child("c" + i).setProperty("x", b);
        }
        store.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        log.info("Created blobs : {}", blobsAdded);
    }
}
Usage of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project.
From the class DataStoreTestBase, method useProxy.
// Exercises standby sync through a fault-injecting network proxy: the first sync
// pass runs through NetworkErrorProxy (which can skip bytes at skipPosition or
// flip a byte at flipPosition — semantics per NetworkErrorProxy; confirm there),
// then a second, direct sync pass must fully repair the standby.
// intermediateChange additionally mutates the primary between the two passes.
private void useProxy(int skipPosition, int skipBytes, int flipPosition, boolean intermediateChange) throws Exception {
int blobSize = 5 * MB;
FileStore primary = getPrimary();
FileStore secondary = getSecondary();
NodeStore store = SegmentNodeStoreBuilders.builder(primary).build();
byte[] data = addTestContent(store, "server", blobSize);
// Flush so the standby server has a persisted head to serve.
primary.flush();
try (StandbyServerSync serverSync = new StandbyServerSync(serverPort.getPort(), primary, MB)) {
serverSync.start();
File spoolFolder = folder.newFolder();
// Pass 1: client connects via the error-injecting proxy; depending on the
// skip/flip parameters this sync may be corrupted.
try (NetworkErrorProxy ignored = new NetworkErrorProxy(proxyPort.getPort(), getServerHost(), serverPort.getPort(), flipPosition, skipPosition, skipBytes);
StandbyClientSync clientSync = new StandbyClientSync(getServerHost(), proxyPort.getPort(), secondary, false, getClientTimeout(), false, spoolFolder)) {
clientSync.run();
}
// storesShouldBeDifferent() presumably reports whether the injected fault
// was severe enough to prevent a full sync — TODO confirm against subclasses.
if (storesShouldBeDifferent()) {
assertFalse("stores are equal", primary.getHead().equals(secondary.getHead()));
}
if (intermediateChange) {
// Overwrite the test content with a smaller blob before the repair pass;
// note blobSize/data are reassigned, so the final checks use these values.
blobSize = 2 * MB;
data = addTestContent(store, "server", blobSize);
primary.flush();
}
// Pass 2: direct connection (no proxy), reusing the same spool folder;
// this sync must bring the standby fully up to date.
try (StandbyClientSync clientSync = new StandbyClientSync(getServerHost(), serverPort.getPort(), secondary, false, getClientTimeout(), false, spoolFolder)) {
clientSync.run();
}
}
assertEquals(primary.getHead(), secondary.getHead());
// Approximate store size stays small because the blob lives in the data store,
// not in the segments themselves.
assertTrue(primary.getStats().getApproximateSize() < MB);
assertTrue(secondary.getStats().getApproximateSize() < MB);
// Verify the synced blob on the standby: correct type, length, and content.
PropertyState ps = secondary.getHead().getChildNode("root").getChildNode("server").getProperty("testBlob");
assertNotNull(ps);
assertEquals(Type.BINARY.tag(), ps.getType().tag());
Blob b = ps.getValue(Type.BINARY);
assertEquals(blobSize, b.length());
byte[] testData = new byte[blobSize];
try (InputStream blobInputStream = b.getNewStream()) {
ByteStreams.readFully(blobInputStream, testData);
assertArrayEquals(data, testData);
}
}
Usage of org.apache.jackrabbit.oak.segment.file.FileStore in the Apache jackrabbit-oak project.
From the class DataStoreTestBase, method testResilientSync.
@Test
public void testResilientSync() throws Exception {
    // A first sync attempt against a primary with no persisted head must fail
    // gracefully; a second attempt, after the TarMK flush thread has had time
    // to run, must succeed.
    final int blobSize = 5 * MB;
    FileStore primary = getPrimary();
    FileStore secondary = getSecondary();
    NodeStore store = SegmentNodeStoreBuilders.builder(primary).build();
    byte[] expected = addTestContent(store, "server", blobSize);
    File spoolDir = folder.newFolder();

    // Attempt 1: expected to be unsuccessful.
    try (StandbyServerSync server = new StandbyServerSync(serverPort.getPort(), primary, MB);
            StandbyClientSync client = new StandbyClientSync(getServerHost(), serverPort.getPort(), secondary, false, 4_000, false, spoolDir)) {
        server.start();
        // no persisted head on primary
        // sync shouldn't be successful, but shouldn't throw exception either,
        // timeout too low for TarMK flush thread to kick-in
        client.run();
        assertNotEquals(primary.getHead(), secondary.getHead());
    }

    // Attempt 2: expected to succeed.
    try (StandbyServerSync server = new StandbyServerSync(serverPort.getPort(), primary, MB);
            StandbyClientSync client = new StandbyClientSync(getServerHost(), serverPort.getPort(), secondary, false, 4_000, false, spoolDir)) {
        server.start();
        // this time persisted head will be available on primary
        // waited at least 4s + 4s > 5s (TarMK flush thread run frequency)
        client.run();
        assertEquals(primary.getHead(), secondary.getHead());
    }

    // Segment stores stay small because the blob itself lives in the data store.
    assertTrue(primary.getStats().getApproximateSize() < MB);
    assertTrue(secondary.getStats().getApproximateSize() < MB);

    // Confirm the blob arrived intact on the standby: type, length, and content.
    PropertyState blobProperty = secondary.getHead().getChildNode("root").getChildNode("server").getProperty("testBlob");
    assertNotNull(blobProperty);
    assertEquals(Type.BINARY.tag(), blobProperty.getType().tag());
    Blob blob = blobProperty.getValue(Type.BINARY);
    assertEquals(blobSize, blob.length());
    byte[] actual = new byte[blobSize];
    try (InputStream in = blob.getNewStream()) {
        ByteStreams.readFully(in, actual);
        assertArrayEquals(expected, actual);
    }
}
Aggregations