Use of org.apache.jackrabbit.oak.spi.state.NodeStore in the project jackrabbit-oak by Apache.
The class MigrationFactory, method createSidegrade:
/**
 * Builds a {@link RepositorySidegrade} that copies content from the configured
 * source node store into a freshly created target store.
 *
 * @return the configured sidegrade instance
 * @throws IOException if a backing store cannot be created
 * @throws CliArgumentException if the store configuration is invalid
 */
public RepositorySidegrade createSidegrade() throws IOException, CliArgumentException {
    // The source blob store is shared with the target (see createTarget),
    // so it is created first and passed along.
    final BlobStore srcBlobStore = datastores.getSrcBlobStore().create(closer);
    final NodeStore srcStore = stores.getSrcStore().create(srcBlobStore, closer);
    return createSidegrade(srcStore, createTarget(closer, srcBlobStore));
}
Use of org.apache.jackrabbit.oak.spi.state.NodeStore in the project jackrabbit-oak by Apache.
The class MigrationFactory, method createUpgrade:
/**
 * Builds a {@link RepositoryUpgrade} that migrates the configured source
 * repository (Jackrabbit 2 style) into a freshly created target node store.
 *
 * @return the configured upgrade instance
 * @throws IOException if a backing store cannot be created
 * @throws RepositoryException if the source repository cannot be opened
 * @throws CliArgumentException if the store configuration is invalid
 */
public RepositoryUpgrade createUpgrade() throws IOException, RepositoryException, CliArgumentException {
    // The blob store backing the target is derived from the source
    // repository's own data store.
    final RepositoryContext src = stores.getSrcStore().create(closer);
    final BlobStore srcBlobStore = new DataStoreBlobStore(src.getDataStore());
    return createUpgrade(src, createTarget(closer, srcBlobStore));
}
Use of org.apache.jackrabbit.oak.spi.state.NodeStore in the project jackrabbit-oak by Apache.
The class MigrationFactory, method createTarget:
/**
 * Creates the destination node store, resolving its blob store first.
 *
 * @param closer       registered with all created resources for later cleanup
 * @param srcBlobStore source blob store, which the destination may reuse
 * @return the destination node store
 * @throws IOException if a backing store cannot be created
 */
protected NodeStore createTarget(Closer closer, BlobStore srcBlobStore) throws IOException {
    // The destination blob store factory may simply hand back srcBlobStore
    // when both sides share the same data store.
    final BlobStore dstBlobStore = datastores.getDstBlobStore(srcBlobStore).create(closer);
    return stores.getDstStore().create(dstBlobStore, closer);
}
Use of org.apache.jackrabbit.oak.spi.state.NodeStore in the project jackrabbit-oak by Apache.
The class LuceneIndexAggregationTest2, method createRepository:
/**
 * Creates the test content repository with Lucene index support and an
 * extended set of node types loaded from {@code test_nodetypes.cnd}.
 *
 * @return the configured content repository
 */
@Override
protected ContentRepository createRepository() {
    LuceneIndexProvider provider = new LuceneIndexProvider();
    return new Oak().with(new InitialContent() {

        @Override
        public void initialize(@Nonnull NodeBuilder builder) {
            super.initialize(builder);
            // Register additional node types for wider testing.
            // try-with-resources replaces the original manual null-check /
            // close-in-finally; a failure to close now surfaces through the
            // catch block instead of being silently logged.
            try (InputStream stream =
                    LuceneIndexAggregationTest2.class.getResourceAsStream("test_nodetypes.cnd")) {
                NodeState base = builder.getNodeState();
                NodeStore store = new MemoryNodeStore(base);
                // A system root is required so the registration can write the
                // node-type definitions; the editor hook validates namespaces
                // and types on commit.
                Root root = RootFactory.createSystemRoot(store,
                        new EditorHook(new CompositeEditorProvider(
                                new NamespaceEditorProvider(), new TypeEditorProvider())),
                        null, null, null, null);
                NodeTypeRegistry.register(root, stream, "testing node types");
                // Replay the changes made by the registration back onto the
                // caller's builder.
                NodeState target = store.getRoot();
                target.compareAgainstBaseState(base, new ApplyDiff(builder));
            } catch (Exception e) {
                LOG.error("Error while registering required node types. Failing here", e);
                fail("Error while registering required node types");
            } finally {
                printNodeTypes(builder);
            }
        }
    }).with(new OpenSecurityProvider())
            .with(((QueryIndexProvider) provider.with(getNodeAggregator())))
            .with((Observer) provider)
            .with(new LuceneIndexEditorProvider())
            .createContentRepository();
}
Use of org.apache.jackrabbit.oak.spi.state.NodeStore in the project jackrabbit-oak by Apache.
The class DataStoreCheckTest, method setup:
/**
 * Test fixture: configures a blob store backend (S3 or Azure when credentials
 * are available, otherwise a local file data store), then builds a segment
 * store and populates it with blobs whose chunk ids are recorded in
 * {@code blobsAdded} for later verification.
 *
 * @throws Exception on any setup failure
 */
@Before
public void setup() throws Exception {
    if (S3DataStoreUtils.isS3Configured()) {
        Properties props = S3DataStoreUtils.getS3Config();
        // cacheSize=0 disables the local cache so chunks live only in the backend.
        props.setProperty("cacheSize", "0");
        container = props.getProperty(S3Constants.S3_BUCKET);
        DataStore ds = S3DataStoreUtils.getS3DataStore(S3DataStoreUtils.getFixtures().get(0), props, temporaryFolder.newFolder().getAbsolutePath());
        setupDataStore = new DataStoreBlobStore(ds);
        cfgFilePath = createTempConfig(temporaryFolder.newFile(), props);
        dsOption = "s3ds";
    } else if (AzureDataStoreUtils.isAzureConfigured()) {
        Properties props = AzureDataStoreUtils.getAzureConfig();
        props.setProperty("cacheSize", "0");
        container = props.getProperty(AzureConstants.AZURE_BLOB_CONTAINER_NAME);
        DataStore ds = AzureDataStoreUtils.getAzureDataStore(props, temporaryFolder.newFolder().getAbsolutePath());
        setupDataStore = new DataStoreBlobStore(ds);
        cfgFilePath = createTempConfig(temporaryFolder.newFile(), props);
        dsOption = "azureblobds";
    } else {
        OakFileDataStore delegate = new OakFileDataStore();
        dsPath = temporaryFolder.newFolder().getAbsolutePath();
        delegate.setPath(dsPath);
        delegate.init(null);
        setupDataStore = new DataStoreBlobStore(delegate);
        File cfgFile = temporaryFolder.newFile();
        Properties props = new Properties();
        props.put("path", dsPath);
        // Long.valueOf avoids the deprecated Long(long) constructor.
        props.put("minRecordLength", Long.valueOf(4096L));
        cfgFilePath = createTempConfig(cfgFile, props);
        dsOption = "fds";
    }
    File storeFile = temporaryFolder.newFolder();
    storePath = storeFile.getAbsolutePath();
    FileStore fileStore = FileStoreBuilder.fileStoreBuilder(storeFile).withBlobStore(setupDataStore).withMaxFileSize(256).withSegmentCacheSize(64).build();
    // try/finally guarantees the FileStore (and its repository lock) is
    // released even when blob creation or the merge fails; the original
    // closed it only on the success path.
    try {
        NodeStore store = SegmentNodeStoreBuilders.builder(fileStore).build();
        /* Create nodes with blobs stored in DS */
        NodeBuilder a = store.getRoot().builder();
        int numBlobs = 10;
        blobsAdded = Sets.newHashSet();
        for (int i = 0; i < numBlobs; i++) {
            SegmentBlob b = (SegmentBlob) store.createBlob(randomStream(i, 18342));
            // Record every chunk id the data store resolves for this blob so
            // the tests can later check them against the backend contents.
            Iterator<String> idIter = setupDataStore.resolveChunks(b.getBlobId());
            while (idIter.hasNext()) {
                String chunk = idIter.next();
                blobsAdded.add(chunk);
            }
            a.child("c" + i).setProperty("x", b);
        }
        store.merge(a, EmptyHook.INSTANCE, CommitInfo.EMPTY);
        log.info("Created blobs : {}", blobsAdded);
    } finally {
        fileStore.close();
    }
}
Aggregations