Use of org.projectnessie.versioned.ReferenceConflictException in project nessie by projectnessie.
The class AbstractDatabaseAdapter, method checkForKeyCollisions.
/**
 * For merge/transplant, verifies that the given commits do not touch any of the given keys.
 *
 * @param commitsChronological list of commit-log entries, in the (chronological) order of the
 *     commit operations
 */
protected void checkForKeyCollisions(
    OP_CONTEXT ctx, Hash refHead, Set<Key> keysTouchedOnTarget, List<CommitLogEntry> commitsChronological)
    throws ReferenceConflictException, ReferenceNotFoundException {
  Set<Key> keyCollisions = new HashSet<>();
  for (int i = commitsChronological.size() - 1; i >= 0; i--) {
    CommitLogEntry sourceCommit = commitsChronological.get(i);
    Stream.concat(
            sourceCommit.getPuts().stream().map(KeyWithBytes::getKey),
            sourceCommit.getDeletes().stream())
        .filter(keysTouchedOnTarget::contains)
        .forEach(keyCollisions::add);
  }
  if (!keyCollisions.isEmpty()) {
    removeKeyCollisionsForNamespaces(
        ctx, refHead, commitsChronological.get(commitsChronological.size() - 1).getHash(), keyCollisions);
    if (!keyCollisions.isEmpty()) {
      throw new ReferenceConflictException(
          String.format(
              "The following keys have been changed in conflict: %s",
              keyCollisions.stream()
                  .map(k -> String.format("'%s'", k.toString()))
                  .collect(Collectors.joining(", "))));
    }
  }
}
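For readers who want the collision check in isolation, here is a minimal, self-contained sketch of the same idea in plain Java, using strings instead of Nessie's Key and CommitLogEntry types (the KeyCollisionSketch class and its string-based model are purely illustrative, not part of Nessie):

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

final class KeyCollisionSketch {

  // Returns every key touched by a source commit that was also touched on the target.
  static Set<String> keyCollisions(Set<String> keysTouchedOnTarget, List<Set<String>> sourceCommits) {
    Set<String> collisions = new HashSet<>();
    for (Set<String> commitKeys : sourceCommits) {
      commitKeys.stream().filter(keysTouchedOnTarget::contains).forEach(collisions::add);
    }
    return collisions;
  }

  public static void main(String[] args) {
    Set<String> keysTouchedOnTarget = Set.of("db.table_a", "db.table_b");
    List<Set<String>> sourceCommits = List.of(Set.of("db.table_b", "db.table_c"), Set.of("db.table_d"));
    Set<String> collisions = keyCollisions(keysTouchedOnTarget, sourceCommits);
    if (!collisions.isEmpty()) {
      // Same message format as above, but with plain strings instead of Key objects.
      System.out.println(
          String.format(
              "The following keys have been changed in conflict: %s",
              collisions.stream().map(k -> String.format("'%s'", k)).collect(Collectors.joining(", "))));
    }
  }
}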
Use of org.projectnessie.versioned.ReferenceConflictException in project nessie by projectnessie.
The class NonTransactionalDatabaseAdapter, method transplant.
@SuppressWarnings("RedundantThrows")
@Override
public Hash transplant(
    BranchName targetBranch, Optional<Hash> expectedHead, List<Hash> sequenceToTransplant,
    Function<ByteString, ByteString> updateCommitMetadata)
    throws ReferenceNotFoundException, ReferenceConflictException {
  try {
    return casOpLoop(
        "transplant", targetBranch, CasOpVariant.COMMIT,
        (ctx, pointer, branchCommits, newKeyLists) -> {
          Hash targetHead = branchHead(pointer, targetBranch);
          long timeInMicros = commitTimeInMicros();
          targetHead = transplantAttempt(ctx, timeInMicros, targetBranch, expectedHead, targetHead,
              sequenceToTransplant, branchCommits, newKeyLists, updateCommitMetadata);
          GlobalStateLogEntry newGlobalHead =
              writeGlobalCommit(ctx, timeInMicros, pointer, Collections.emptyList());
          RefLogEntry newRefLog = writeRefLogEntry(ctx, pointer, targetBranch.getName(),
              RefLogEntry.RefType.Branch, targetHead, RefLogEntry.Operation.TRANSPLANT,
              timeInMicros, sequenceToTransplant);
          // Return hash of last commit (targetHead) added to 'targetBranch' (via the casOpLoop)
          return updateGlobalStatePointer(targetBranch, pointer, targetHead, newGlobalHead, newRefLog);
        },
        () -> transplantConflictMessage("Retry-failure", targetBranch, expectedHead, sequenceToTransplant));
  } catch (ReferenceNotFoundException | ReferenceConflictException | RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
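For context, a caller typically reaches this method through the database-adapter interface. The following sketch is hedged: it assumes the org.projectnessie.versioned.persist.adapter.DatabaseAdapter interface declares the same transplant signature shown above, that an adapter instance is already configured, and that a branch named "main" exists; commit metadata is kept unchanged via Function.identity():

import com.google.protobuf.ByteString;
import java.util.Collections;
import java.util.Optional;
import java.util.function.Function;
import org.projectnessie.versioned.BranchName;
import org.projectnessie.versioned.Hash;
import org.projectnessie.versioned.ReferenceConflictException;
import org.projectnessie.versioned.ReferenceNotFoundException;
import org.projectnessie.versioned.persist.adapter.DatabaseAdapter;

class TransplantCaller {

  // Transplant a single commit onto 'main', insisting that 'main' still points at expectedTargetHead.
  static Hash transplantOne(DatabaseAdapter adapter, Hash commitToTransplant, Hash expectedTargetHead)
      throws ReferenceNotFoundException, ReferenceConflictException {
    Function<ByteString, ByteString> keepMetadata = Function.identity();
    return adapter.transplant(
        BranchName.of("main"),
        Optional.of(expectedTargetHead), // CAS-style check: conflict if 'main' moved in the meantime
        Collections.singletonList(commitToTransplant),
        keepMetadata);
  }
}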
Use of org.projectnessie.versioned.ReferenceConflictException in project nessie by projectnessie.
The class NonTransactionalDatabaseAdapter, method compactGlobalLog.
protected Map<String, String> compactGlobalLog(GlobalLogCompactionParams globalLogCompactionParams) {
  if (!globalLogCompactionParams.isEnabled()) {
    return ImmutableMap.of("compacted", "false", "reason", "not enabled");
  }
  // Not using casOpLoop() here, as this is simpler than adapting casOpLoop().
  try (TryLoopState tryState =
      newTryLoopState(
          "compact-global-log",
          ts ->
              repoDescUpdateConflictMessage(
                  String.format(
                      "%s after %d retries, %d ms",
                      "Retry-Failure", ts.getRetries(), ts.getDuration(TimeUnit.MILLISECONDS))),
          this::tryLoopStateCompletion,
          config)) {
    CompactionStats stats = new CompactionStats();
    while (true) {
      NonTransactionalOperationContext ctx = NON_TRANSACTIONAL_OPERATION_CONTEXT;
      GlobalStatePointer pointer = fetchGlobalPointer(ctx);
      // Collect the old global-log IDs, to delete them after compaction.
      List<Hash> oldLogIds = new ArrayList<>();
      // Map with all global contents.
      Map<String, ByteString> globalContents = new HashMap<>();
      // Content IDs, most recently updated contents first.
      List<String> contentIdsByRecency = new ArrayList<>();
      // Read the global log - from the most recent global-log entry to the oldest.
      try (Stream<GlobalStateLogEntry> globalLog = globalLogFetcher(ctx, pointer)) {
        globalLog.forEach(
            e -> {
              if (stats.read < globalLogCompactionParams.getNoCompactionWhenCompactedWithin()
                  && stats.puts > stats.read) {
                // Do not compact.
                throw COMPACTION_NOT_NECESSARY_WITHIN;
              }
              stats.read++;
              oldLogIds.add(Hash.of(e.getId()));
              for (ContentIdWithBytes put : e.getPutsList()) {
                stats.puts++;
                String cid = put.getContentId().getId();
                if (globalContents.putIfAbsent(cid, put.getValue()) == null) {
                  stats.uniquePuts++;
                  contentIdsByRecency.add(cid);
                }
              }
            });
        if (stats.read < globalLogCompactionParams.getNoCompactionUpToLength()) {
          // Single bulk read, so do not compact at all.
          throw COMPACTION_NOT_NECESSARY_LENGTH;
        }
      } catch (RuntimeException e) {
        if (e == COMPACTION_NOT_NECESSARY_WITHIN) {
          tryState.success(null);
          return ImmutableMap.of(
              "compacted",
              "false",
              "reason",
              String.format(
                  "compacted entry within %d most recent log entries",
                  globalLogCompactionParams.getNoCompactionWhenCompactedWithin()));
        }
        if (e == COMPACTION_NOT_NECESSARY_LENGTH) {
          tryState.success(null);
          return ImmutableMap.of(
              "compacted",
              "false",
              "reason",
              String.format(
                  "less than %d entries", globalLogCompactionParams.getNoCompactionUpToLength()));
        }
        throw e;
      }
      // Collect the IDs of the written global-log entries, to delete them if the CAS
      // operation fails.
      List<ByteString> newLogIds = new ArrayList<>();
      // Reverse the order of content IDs, most recently updated contents LAST.
      // Do this to have the active contents closer to the HEAD of the global log.
      Collections.reverse(contentIdsByRecency);
      // Maintain the list of global-log-entry parent IDs, but in reverse order compared to
      // GlobalLogEntry, for easier management here.
      List<ByteString> globalParentsReverse = new ArrayList<>(config.getParentsPerGlobalCommit());
      globalParentsReverse.add(NO_ANCESTOR.asBytes());
      GlobalStateLogEntry.Builder currentEntry =
          newGlobalLogEntryBuilder(commitTimeInMicros()).addParents(globalParentsReverse.get(0));
      for (String cid : contentIdsByRecency) {
        if (currentEntry.buildPartial().getSerializedSize() >= config.getGlobalLogEntrySize()) {
          compactGlobalLogWriteEntry(ctx, stats, globalParentsReverse, currentEntry, newLogIds);
          // Prepare a new entry.
          currentEntry = newGlobalLogEntryBuilder(commitTimeInMicros());
          for (int i = globalParentsReverse.size() - 1; i >= 0; i--) {
            currentEntry.addParents(globalParentsReverse.get(i));
          }
        }
        ByteString value = globalContents.get(cid);
        currentEntry.addPuts(
            ContentIdWithBytes.newBuilder()
                .setContentId(AdapterTypes.ContentId.newBuilder().setId(cid))
                .setTypeUnused(0)
                .setValue(value)
                .build());
      }
      compactGlobalLogWriteEntry(ctx, stats, globalParentsReverse, currentEntry, newLogIds);
      GlobalStatePointer newPointer =
          GlobalStatePointer.newBuilder()
              .addAllNamedReferences(pointer.getNamedReferencesList())
              .addAllRefLogParentsInclHead(pointer.getRefLogParentsInclHeadList())
              .setRefLogId(pointer.getRefLogId())
              .setGlobalId(currentEntry.getId())
              .addGlobalParentsInclHead(currentEntry.getId())
              .addAllGlobalParentsInclHead(currentEntry.getParentsList())
              .build();
      stats.addToTotal();
      // CAS the global pointer.
      if (globalPointerCas(ctx, pointer, newPointer)) {
        tryState.success(null);
        cleanUpGlobalLog(ctx, oldLogIds);
        return stats.asMap(tryState);
      }
      // Note: if it turns out that there are too many CAS retries happening, the overall
      // mechanism can be updated as follows. Since the approach below is much more complex
      // and harder to test, it's not part of the initial implementation.
      //
      // 1. Read the whole global log as currently, but outside the actual CAS loop.
      //    Save the current HEAD of the global log.
      // 2. CAS loop:
      //    2.1. Construct and write the new global log.
      //    2.2. Try the CAS; if it succeeds, fine.
      //    2.3. If the CAS failed:
      //         2.3.1. Clean up the optimistically written new global log.
      //         2.3.2. Read the global log from its new HEAD up to the current HEAD from step 1.
      //                Only add the most recent values for the content IDs in the incrementally
      //                read global log.
      //         2.3.3. Remember the "new HEAD" as the "current HEAD".
      //         2.3.4. Continue with step 2.1.
      cleanUpGlobalLog(ctx, newLogIds.stream().map(Hash::of).collect(Collectors.toList()));
      stats.onRetry();
      tryState.retry();
    }
  } catch (ReferenceConflictException e) {
    throw new RuntimeException(e);
  }
}
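The COMPACTION_NOT_NECESSARY_WITHIN and COMPACTION_NOT_NECESSARY_LENGTH constants above are pre-allocated RuntimeExceptions used purely as control flow to abort the Stream.forEach over the global log early; the catch block then compares them by identity to distinguish them from real failures. A minimal, self-contained sketch of that pattern (the SentinelAbortSketch class and STOP constant are illustrative, not part of Nessie):

import java.util.stream.IntStream;

final class SentinelAbortSketch {

  // Pre-allocated sentinel, compared by identity in the catch block below.
  private static final RuntimeException STOP = new RuntimeException("stop");

  public static void main(String[] args) {
    int[] seen = {0};
    try {
      IntStream.range(0, 1_000_000).forEach(i -> {
        seen[0]++;
        if (i == 41) {
          // Stream.forEach has no 'break'; throwing the sentinel aborts the traversal.
          throw STOP;
        }
      });
    } catch (RuntimeException e) {
      if (e != STOP) {
        throw e; // not our sentinel: a real failure, rethrow it
      }
    }
    System.out.println("stopped after " + seen[0] + " elements");
  }
}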
Use of org.projectnessie.versioned.ReferenceConflictException in project nessie by projectnessie.
The class NonTransactionalDatabaseAdapter, method merge.
@Override
public Hash merge(
    Hash from, BranchName toBranch, Optional<Hash> expectedHead,
    Function<ByteString, ByteString> updateCommitMetadata)
    throws ReferenceNotFoundException, ReferenceConflictException {
  // Creates a new commit-tree that is decoupled from other commit-trees.
  try {
    return casOpLoop(
        "merge", toBranch, CasOpVariant.COMMIT,
        (ctx, pointer, branchCommits, newKeyLists) -> {
          Hash toHead = branchHead(pointer, toBranch);
          long timeInMicros = commitTimeInMicros();
          toHead = mergeAttempt(ctx, timeInMicros, from, toBranch, expectedHead, toHead,
              branchCommits, newKeyLists, updateCommitMetadata);
          GlobalStateLogEntry newGlobalHead =
              writeGlobalCommit(ctx, timeInMicros, pointer, Collections.emptyList());
          RefLogEntry newRefLog = writeRefLogEntry(ctx, pointer, toBranch.getName(),
              RefLogEntry.RefType.Branch, toHead, RefLogEntry.Operation.MERGE,
              timeInMicros, Collections.singletonList(from));
          // Return hash of last commit (toHead) added to 'toBranch' (via the casOpLoop)
          return updateGlobalStatePointer(toBranch, pointer, toHead, newGlobalHead, newRefLog);
        },
        () -> mergeConflictMessage("Retry-failure", from, toBranch, expectedHead));
  } catch (ReferenceNotFoundException | ReferenceConflictException | RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
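A hedged caller sketch, mirroring the transplant example above: it assumes the same DatabaseAdapter interface and a branch named "main", and reports key conflicts (such as those raised by checkForKeyCollisions) instead of propagating them:

import java.util.Optional;
import java.util.function.Function;
import org.projectnessie.versioned.BranchName;
import org.projectnessie.versioned.Hash;
import org.projectnessie.versioned.ReferenceConflictException;
import org.projectnessie.versioned.ReferenceNotFoundException;
import org.projectnessie.versioned.persist.adapter.DatabaseAdapter;

class MergeCaller {

  // Merge 'sourceCommit' into 'main', accepting whatever HEAD 'main' currently has.
  static Optional<Hash> tryMerge(DatabaseAdapter adapter, Hash sourceCommit)
      throws ReferenceNotFoundException {
    try {
      return Optional.of(
          adapter.merge(sourceCommit, BranchName.of("main"), Optional.empty(), Function.identity()));
    } catch (ReferenceConflictException conflict) {
      // Thrown e.g. when the source commits touch keys that also changed on 'main'.
      System.err.println("Merge conflict: " + conflict.getMessage());
      return Optional.empty();
    }
  }
}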
Use of org.projectnessie.versioned.ReferenceConflictException in project nessie by projectnessie.
The class NonTransactionalDatabaseAdapter, method assign.
@Override
public void assign(NamedRef assignee, Optional<Hash> expectedHead, Hash assignTo)
    throws ReferenceNotFoundException, ReferenceConflictException {
  try {
    casOpLoop(
        "assignRef", assignee, CasOpVariant.REF_UPDATE,
        (ctx, pointer, branchCommits, newKeyLists) -> {
          Hash beforeAssign = branchHead(pointer, assignee);
          verifyExpectedHash(beforeAssign, assignee, expectedHead);
          validateHashExists(ctx, assignTo);
          GlobalStateLogEntry newGlobalHead = noopGlobalLogEntry(ctx, pointer);
          RefLogEntry.RefType refType =
              assignee instanceof TagName ? RefLogEntry.RefType.Tag : RefLogEntry.RefType.Branch;
          RefLogEntry newRefLog = writeRefLogEntry(ctx, pointer, assignee.getName(), refType,
              assignTo, RefLogEntry.Operation.ASSIGN_REFERENCE, commitTimeInMicros(),
              Collections.singletonList(beforeAssign));
          return updateGlobalStatePointer(assignee, pointer, assignTo, newGlobalHead, newRefLog);
        },
        () -> assignConflictMessage("Retry-Failure", assignee, expectedHead, assignTo));
  } catch (ReferenceNotFoundException | ReferenceConflictException | RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
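A hedged caller sketch for assign, again assuming the DatabaseAdapter interface and a tag named "release"; the expected-head argument makes the reassignment fail with a ReferenceConflictException if someone else moved the tag in the meantime:

import java.util.Optional;
import org.projectnessie.versioned.Hash;
import org.projectnessie.versioned.ReferenceConflictException;
import org.projectnessie.versioned.ReferenceNotFoundException;
import org.projectnessie.versioned.TagName;
import org.projectnessie.versioned.persist.adapter.DatabaseAdapter;

class AssignCaller {

  // Re-point the 'release' tag to newTarget, but only if it still points at lastKnownHead.
  static void retagRelease(DatabaseAdapter adapter, Hash lastKnownHead, Hash newTarget)
      throws ReferenceNotFoundException, ReferenceConflictException {
    adapter.assign(TagName.of("release"), Optional.of(lastKnownHead), newTarget);
  }
}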