Use of com.google.common.base.Stopwatch in project jackrabbit-oak by apache.
The class MongoDocumentStore, method findAndModify.
@SuppressWarnings("unchecked")
@CheckForNull
private <T extends Document> T findAndModify(Collection<T> collection, UpdateOp updateOp, boolean upsert, boolean checkConditions) {
    DBCollection dbCollection = getDBCollection(collection);
    // make sure we don't modify the original updateOp
    updateOp = updateOp.copy();
    DBObject update = createUpdate(updateOp, false);
    Lock lock = null;
    if (collection == Collection.NODES) {
        lock = nodeLocks.acquire(updateOp.getId());
    }
    final Stopwatch watch = startWatch();
    boolean newEntry = false;
    try {
        // get modCount of cached document
        Long modCount = null;
        T cachedDoc = null;
        if (collection == Collection.NODES) {
            cachedDoc = (T) nodesCache.getIfPresent(updateOp.getId());
            if (cachedDoc != null) {
                modCount = cachedDoc.getModCount();
            }
        }
        // if we have a matching modCount
        if (modCount != null) {
            QueryBuilder query = createQueryForUpdate(updateOp.getId(), updateOp.getConditions());
            query.and(Document.MOD_COUNT).is(modCount);
            WriteResult result = dbCollection.update(query.get(), update);
            if (result.getN() > 0) {
                // success, update cached document
                if (collection == Collection.NODES) {
                    NodeDocument newDoc = (NodeDocument) applyChanges(collection, cachedDoc, updateOp);
                    nodesCache.put(newDoc);
                }
                // return previously cached document
                return cachedDoc;
            }
        }
        // conditional update failed or not possible
        // perform operation and get complete document
        QueryBuilder query = createQueryForUpdate(updateOp.getId(), updateOp.getConditions());
        DBObject oldNode = dbCollection.findAndModify(query.get(), null /*fields*/,
                null /*sort*/, false /*remove*/, update, false /*returnNew*/,
                upsert);
        if (oldNode == null) {
            newEntry = true;
        }
        if (checkConditions && oldNode == null) {
            return null;
        }
        T oldDoc = convertFromDBObject(collection, oldNode);
        if (oldDoc != null) {
            if (collection == Collection.NODES) {
                NodeDocument newDoc = (NodeDocument) applyChanges(collection, oldDoc, updateOp);
                nodesCache.put(newDoc);
                updateLocalChanges(newDoc);
            }
            oldDoc.seal();
        } else if (upsert) {
            if (collection == Collection.NODES) {
                NodeDocument doc = (NodeDocument) collection.newDocument(this);
                UpdateUtils.applyChanges(doc, updateOp);
                nodesCache.putIfAbsent(doc);
                updateLocalChanges(doc);
            }
        } else {
            // updateOp without conditions and not an upsert
            // this means the document does not exist
        }
        return oldDoc;
    } catch (Exception e) {
        throw handleException(e, collection, updateOp.getId());
    } finally {
        if (lock != null) {
            lock.unlock();
        }
        stats.doneFindAndModify(watch.elapsed(TimeUnit.NANOSECONDS), collection, updateOp.getId(), newEntry, true, 0);
    }
}
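The startWatch() helper used above is a private convenience method of MongoDocumentStore whose body is not shown here; given the watch.elapsed(TimeUnit.NANOSECONDS) call in the finally block, it presumably just wraps Guava's factory method. A minimal sketch, assuming that wiring:

// Hypothetical sketch of the helper, not necessarily the actual Oak implementation:
private Stopwatch startWatch() {
    // Stopwatch.createStarted() returns a Stopwatch that is already running,
    // so callers can read elapsed(...) in a finally block without further setup
    return Stopwatch.createStarted();
}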
Use of com.google.common.base.Stopwatch in project jackrabbit-oak by apache.
The class MongoDocumentStore, method remove.
@Override
public <T extends Document> void remove(Collection<T> collection, List<String> keys) {
    log("remove", keys);
    DBCollection dbCollection = getDBCollection(collection);
    Stopwatch watch = startWatch();
    try {
        for (List<String> keyBatch : Lists.partition(keys, IN_CLAUSE_BATCH_SIZE)) {
            DBObject query = QueryBuilder.start(Document.ID).in(keyBatch).get();
            try {
                dbCollection.remove(query);
            } catch (Exception e) {
                throw DocumentStoreException.convert(e, "Remove failed for " + keyBatch);
            } finally {
                if (collection == Collection.NODES) {
                    for (String key : keyBatch) {
                        invalidateCache(collection, key);
                    }
                }
            }
        }
    } finally {
        stats.doneRemove(watch.elapsed(TimeUnit.NANOSECONDS), collection, keys.size());
    }
}
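Stripped of the MongoDB specifics, remove() combines two reusable patterns: Lists.partition(...) keeps each $in clause bounded, and a single Stopwatch around the whole loop makes the recorded time cover all batches. A self-contained sketch under those assumptions (BatchRemover, removeBatch(), recordNanos(), and the batch size are placeholders, not Oak API):

import com.google.common.base.Stopwatch;
import com.google.common.collect.Lists;
import java.util.List;
import java.util.concurrent.TimeUnit;

public class BatchRemover {

    private static final int BATCH_SIZE = 256; // assumed value, not Oak's constant

    public void removeAll(List<String> keys) {
        Stopwatch watch = Stopwatch.createStarted();
        try {
            // one backend call per bounded batch instead of one huge $in clause
            for (List<String> batch : Lists.partition(keys, BATCH_SIZE)) {
                removeBatch(batch);
            }
        } finally {
            // elapsed() may be read while the watch is still running
            recordNanos(watch.elapsed(TimeUnit.NANOSECONDS), keys.size());
        }
    }

    private void removeBatch(List<String> batch) { /* placeholder for the store call */ }

    private void recordNanos(long nanos, int count) { /* placeholder for stats recording */ }
}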
Use of com.google.common.base.Stopwatch in project jackrabbit-oak by apache.
The class RepositoryUpgrade, method copy.
/**
 * Copies the full content from the source to the target repository.
 * <p>
 * The source repository <strong>must not be modified</strong> while
 * the copy operation is running to avoid an inconsistent copy.
 * <p>
 * Note that both the source and the target repository must be closed
 * during the copy operation as this method requires exclusive access
 * to the repositories.
 *
 * @param initializer optional extra repository initializer to use
 * @throws RepositoryException if the copy operation fails
 */
public void copy(RepositoryInitializer initializer) throws RepositoryException {
    if (checkLongNames) {
        assertNoLongNames();
    }
    RepositoryConfig config = source.getRepositoryConfig();
    logger.info("Copying repository content from {} to Oak", config.getHomeDir());
    try {
        NodeBuilder targetBuilder = target.getRoot().builder();
        if (VersionHistoryUtil.getVersionStorage(targetBuilder).exists() && !versionCopyConfiguration.skipOrphanedVersionsCopy()) {
            logger.warn("The version storage on destination already exists. Orphaned version histories will be skipped.");
            versionCopyConfiguration.setCopyOrphanedVersions(null);
        }
        final Root upgradeRoot = new UpgradeRoot(targetBuilder);
        String workspaceName = source.getRepositoryConfig().getDefaultWorkspaceName();
        SecurityProviderImpl security = new SecurityProviderImpl(mapSecurityConfig(config.getSecurityConfig()));
        if (skipInitialization) {
            logger.info("Skipping the repository initialization");
        } else {
            // init target repository first
            logger.info("Initializing initial repository content from {}", config.getHomeDir());
            new InitialContent().initialize(targetBuilder);
            if (initializer != null) {
                initializer.initialize(targetBuilder);
            }
            logger.debug("InitialContent completed from {}", config.getHomeDir());
            for (SecurityConfiguration sc : security.getConfigurations()) {
                RepositoryInitializer ri = sc.getRepositoryInitializer();
                ri.initialize(targetBuilder);
                logger.debug("Repository initializer '{}' completed", ri.getClass().getName());
            }
            for (SecurityConfiguration sc : security.getConfigurations()) {
                WorkspaceInitializer wi = sc.getWorkspaceInitializer();
                wi.initialize(targetBuilder, workspaceName);
                logger.debug("Workspace initializer '{}' completed", wi.getClass().getName());
            }
        }
        HashBiMap<String, String> uriToPrefix = HashBiMap.create();
        logger.info("Copying registered namespaces");
        copyNamespaces(targetBuilder, uriToPrefix);
        logger.debug("Namespace registration completed.");
        if (skipInitialization) {
            logger.info("Skipping registering node types and privileges");
        } else {
            logger.info("Copying registered node types");
            NodeTypeManager ntMgr = new ReadWriteNodeTypeManager() {

                @Override
                protected Tree getTypes() {
                    return upgradeRoot.getTree(NODE_TYPES_PATH);
                }

                @Nonnull
                @Override
                protected Root getWriteRoot() {
                    return upgradeRoot;
                }
            };
            copyNodeTypes(ntMgr, new ValueFactoryImpl(upgradeRoot, NamePathMapper.DEFAULT));
            logger.debug("Node type registration completed.");
            // migrate privileges
            logger.info("Copying registered privileges");
            PrivilegeConfiguration privilegeConfiguration = security.getConfiguration(PrivilegeConfiguration.class);
            copyCustomPrivileges(privilegeConfiguration.getPrivilegeManager(upgradeRoot, NamePathMapper.DEFAULT));
            logger.debug("Privilege registration completed.");
            // Triggers compilation of type information, which we need for
            // the type predicates used by the bulk copy operations below.
            new TypeEditorProvider(false).getRootEditor(targetBuilder.getBaseState(), targetBuilder.getNodeState(), targetBuilder, null);
        }
        final NodeState reportingSourceRoot = ReportingNodeState.wrap(
                JackrabbitNodeState.createRootNodeState(source, workspaceName, targetBuilder.getNodeState(), uriToPrefix, copyBinariesByReference, skipOnError),
                new LoggingReporter(logger, "Migrating", LOG_NODE_COPY, -1));
        final NodeState sourceRoot;
        if (filterLongNames) {
            sourceRoot = NameFilteringNodeState.wrap(reportingSourceRoot);
        } else {
            sourceRoot = reportingSourceRoot;
        }
        final Stopwatch watch = Stopwatch.createStarted();
        logger.info("Copying workspace content");
        copyWorkspace(sourceRoot, targetBuilder, workspaceName);
        // on TarMK this call triggers the actual copy
        targetBuilder.getNodeState();
        logger.info("Upgrading workspace content completed in {}s ({})", watch.elapsed(TimeUnit.SECONDS), watch);
        if (!versionCopyConfiguration.skipOrphanedVersionsCopy()) {
            logger.info("Copying version storage");
            watch.reset().start();
            copyVersionStorage(targetBuilder, getVersionStorage(sourceRoot), getVersionStorage(targetBuilder), versionCopyConfiguration);
            // on TarMK this call triggers the actual copy
            targetBuilder.getNodeState();
            logger.info("Version storage copied in {}s ({})", watch.elapsed(TimeUnit.SECONDS), watch);
        } else {
            logger.info("Skipping the version storage as copyOrphanedVersions is set to false");
        }
        watch.reset().start();
        logger.info("Applying default commit hooks");
        // TODO: default hooks?
        List<CommitHook> hooks = newArrayList();
        UserConfiguration userConf = security.getConfiguration(UserConfiguration.class);
        String groupsPath = userConf.getParameters().getConfigValue(UserConstants.PARAM_GROUP_PATH, UserConstants.DEFAULT_GROUP_PATH);
        String usersPath = userConf.getParameters().getConfigValue(UserConstants.PARAM_USER_PATH, UserConstants.DEFAULT_USER_PATH);
        // hooks specific to the upgrade, need to run first
        hooks.add(new EditorHook(new CompositeEditorProvider(
                new RestrictionEditorProvider(),
                new GroupEditorProvider(groupsPath),
                // copy referenced version histories
                new VersionableEditor.Provider(sourceRoot, workspaceName, versionCopyConfiguration),
                new SameNameSiblingsEditor.Provider(),
                AuthorizableFolderEditor.provider(groupsPath, usersPath))));
        // this editor works on the VersionableEditor output, so it can't be
        // a part of the same EditorHook
        hooks.add(new EditorHook(new VersionablePropertiesEditor.Provider()));
        // security-related hooks
        for (SecurityConfiguration sc : security.getConfigurations()) {
            hooks.addAll(sc.getCommitHooks(workspaceName));
        }
        if (customCommitHooks != null) {
            hooks.addAll(customCommitHooks);
        }
        // type validation, reference and indexing hooks
        hooks.add(new EditorHook(new CompositeEditorProvider(createTypeEditorProvider(), createIndexEditorProvider())));
        target.merge(targetBuilder, new LoggingCompositeHook(hooks, source, overrideEarlyShutdown()), CommitInfo.EMPTY);
        logger.info("Processing commit hooks completed in {}s ({})", watch.elapsed(TimeUnit.SECONDS), watch);
        logger.debug("Repository upgrade completed.");
    } catch (Exception e) {
        throw new RepositoryException("Failed to copy content", e);
    }
}
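Note how copy() reuses a single Stopwatch across its phases: reset() zeroes the accumulated time and start() begins a fresh measurement, so each log line reports only its own phase. The pattern in isolation (the phase methods and output here are placeholders, not Oak code):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

void timedPhases() {
    Stopwatch watch = Stopwatch.createStarted();
    copyWorkspacePhase(); // placeholder for the first phase
    System.out.printf("workspace copied in %ds (%s)%n", watch.elapsed(TimeUnit.SECONDS), watch);

    // reset() zeroes the elapsed time and start() begins a new measurement;
    // both return the Stopwatch itself, so the calls chain
    watch.reset().start();
    copyVersionStoragePhase(); // placeholder for the second phase
    System.out.printf("version storage copied in %ds (%s)%n", watch.elapsed(TimeUnit.SECONDS), watch);
}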
Use of com.google.common.base.Stopwatch in project jackrabbit-oak by apache.
The class LuceneIndexMBeanImpl, method checkAndReportConsistencyOfAllIndexes.
@Override
public String[] checkAndReportConsistencyOfAllIndexes(boolean fullCheck) throws IOException {
    Stopwatch watch = Stopwatch.createStarted();
    List<String> results = new ArrayList<>();
    NodeState root = nodeStore.getRoot();
    for (String indexPath : indexPathService.getIndexPaths()) {
        NodeState idxState = NodeStateUtils.getNode(root, indexPath);
        if (LuceneIndexConstants.TYPE_LUCENE.equals(idxState.getString(IndexConstants.TYPE_PROPERTY_NAME))) {
            Result result = getConsistencyCheckResult(indexPath, fullCheck);
            String msg = "OK";
            if (!result.clean) {
                msg = "NOT OK";
            }
            results.add(String.format("%s : %s", indexPath, msg));
        }
    }
    log.info("Checked index consistency in {}. Check result {}", watch, results);
    return Iterables.toArray(results, String.class);
}
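Unlike the Mongo examples, this method never calls elapsed(): the Stopwatch itself is passed to the logger, and Stopwatch.toString() renders the elapsed time in a human-friendly unit (for example "38.12 ms" or "2.345 s"). That makes it convenient for display-only timing, as in this sketch (runChecks() is a placeholder):

import com.google.common.base.Stopwatch;

Stopwatch watch = Stopwatch.createStarted();
runChecks(); // placeholder for the consistency-check loop above
// toString() picks a readable unit automatically, no TimeUnit needed
log.info("Checked index consistency in {}", watch);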
Use of com.google.common.base.Stopwatch in project jackrabbit-oak by apache.
The class FileStoreBackupImpl, method backup.
@Override
public void backup(@Nonnull SegmentReader reader, @Nonnull Revisions revisions, @Nonnull File destination) throws IOException, InvalidFileStoreVersionException {
    Stopwatch watch = Stopwatch.createStarted();
    SegmentGCOptions gcOptions = SegmentGCOptions.defaultGCOptions().setOffline();
    FileStoreBuilder builder = fileStoreBuilder(destination).withDefaultMemoryMapping();
    if (USE_FAKE_BLOBSTORE) {
        builder.withBlobStore(new BasicReadOnlyBlobStore());
    }
    builder.withGCOptions(gcOptions);
    FileStore backup = builder.build();
    SegmentNodeState current = reader.readHeadState(revisions);
    try {
        int gen = current.getRecordId().getSegmentId().getGcGeneration();
        SegmentBufferWriter bufferWriter = new SegmentBufferWriter(backup.getSegmentIdProvider(), backup.getReader(), "b", gen);
        SegmentWriter writer = new SegmentWriter(backup, backup.getReader(), backup.getSegmentIdProvider(), backup.getBlobStore(), new WriterCacheManager.Default(), bufferWriter);
        Compactor compactor = new Compactor(backup.getReader(), writer, backup.getBlobStore(), Suppliers.ofInstance(false), gcOptions);
        compactor.setContentEqualityCheck(true);
        SegmentNodeState head = backup.getHead();
        SegmentNodeState after = compactor.compact(head, current, head);
        if (after != null) {
            backup.getRevisions().setHead(head.getRecordId(), after.getRecordId());
        }
    } finally {
        backup.close();
    }
    backup = fileStoreBuilder(destination).withDefaultMemoryMapping().withGCOptions(gcOptions).build();
    try {
        cleanup(backup);
    } finally {
        backup.close();
    }
    watch.stop();
    log.info("Backup finished in {}.", watch);
}
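Here the watch is stopped explicitly before logging. After stop(), the elapsed time is frozen, so the logged value covers exactly the backup and cleanup work regardless of when the log line is written; the earlier examples instead read elapsed() from a still-running watch. A compact sketch (runBackupAndCleanup() stands in for the body above):

import com.google.common.base.Stopwatch;

Stopwatch watch = Stopwatch.createStarted();
runBackupAndCleanup(); // placeholder for the compaction and cleanup steps
watch.stop(); // freezes the elapsed time; isRunning() is now false
log.info("Backup finished in {}.", watch);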