Use of org.projectnessie.versioned.ReferenceConflictException in the nessie project by projectnessie: class AbstractCommitScenarios, method commitRenameTable.
/**
 * Exercises a "table rename" operation end-to-end.
 *
 * <p>A rename is modeled as a delete of the old key plus a put of the new key that reuses the
 * same content-id.
 *
 * <p>Parameterized so that the padding commits do/do not cross the number of commits between
 * "full key lists", and so that the content does/does not use global state.
 */
@ParameterizedTest
@MethodSource("commitRenameTableParams")
void commitRenameTable(RenameTable param) throws Exception {
  BranchName branch = BranchName.of("main");
  Key dummyKey = Key.of("dummy");
  Key oldKey = Key.of("hello", "table");
  Key newKey = Key.of("new", "name");
  ContentId contentId = ContentId.of("id-42");

  // Writes a no-op commit; used to pad the commit log around the interesting commits.
  IntFunction<Hash> dummyCommit =
      num -> {
        try {
          return databaseAdapter.commit(
              ImmutableCommitAttempt.builder()
                  .commitToBranch(branch)
                  .commitMetaSerialized(ByteString.copyFromUtf8("dummy commit meta " + num))
                  .addUnchanged(dummyKey)
                  .build());
        } catch (ReferenceNotFoundException | ReferenceConflictException e) {
          throw new RuntimeException(e);
        }
      };

  List<Hash> beforeInitial =
      IntStream.range(0, param.setupCommits).mapToObj(dummyCommit).collect(Collectors.toList());

  BaseContent initialContent;
  BaseContent renamedContent;
  if (param.globalState) {
    initialContent = WithGlobalStateContent.newWithGlobal("0", "initial commit content");
    renamedContent =
        WithGlobalStateContent.withGlobal("0", "rename commit content", initialContent.getId());
  } else {
    initialContent = OnRefOnly.newOnRef("initial commit content");
    renamedContent = OnRefOnly.onRef("rename commit content", initialContent.getId());
  }
  byte payload = SimpleStoreWorker.INSTANCE.getPayload(initialContent);

  // Commit #1: put the content under the old key.
  ImmutableCommitAttempt.Builder commit =
      ImmutableCommitAttempt.builder()
          .commitToBranch(branch)
          .commitMetaSerialized(ByteString.copyFromUtf8("initial commit meta"))
          .addPuts(
              KeyWithBytes.of(
                  oldKey,
                  contentId,
                  payload,
                  SimpleStoreWorker.INSTANCE.toStoreOnReferenceState(initialContent)));
  if (param.globalState) {
    commit
        .putGlobal(contentId, SimpleStoreWorker.INSTANCE.toStoreGlobalState(initialContent))
        .putExpectedStates(contentId, Optional.empty());
  }
  Hash hashInitial = databaseAdapter.commit(commit.build());

  List<Hash> beforeRename =
      IntStream.range(0, param.afterInitialCommits)
          .mapToObj(dummyCommit)
          .collect(Collectors.toList());

  // Commit #2: the rename - delete the old key, put the same content-id under the new key.
  commit =
      ImmutableCommitAttempt.builder()
          .commitToBranch(branch)
          .commitMetaSerialized(ByteString.copyFromUtf8("rename table"))
          .addDeletes(oldKey)
          .addPuts(
              KeyWithBytes.of(
                  newKey,
                  contentId,
                  payload,
                  SimpleStoreWorker.INSTANCE.toStoreOnReferenceState(renamedContent)));
  if (param.globalState) {
    commit
        .putGlobal(contentId, SimpleStoreWorker.INSTANCE.toStoreGlobalState(renamedContent))
        .putExpectedStates(
            contentId,
            Optional.of(SimpleStoreWorker.INSTANCE.toStoreGlobalState(initialContent)));
  }
  Hash hashRename = databaseAdapter.commit(commit.build());

  List<Hash> beforeDelete =
      IntStream.range(0, param.afterRenameCommits)
          .mapToObj(dummyCommit)
          .collect(Collectors.toList());

  // Commit #3: drop the table entirely.
  commit =
      ImmutableCommitAttempt.builder()
          .commitToBranch(branch)
          .commitMetaSerialized(ByteString.copyFromUtf8("delete table"))
          .addDeletes(newKey);
  if (param.globalState) {
    commit
        .putGlobal(contentId, ByteString.copyFromUtf8("0"))
        .putExpectedStates(contentId, Optional.of(ByteString.copyFromUtf8("0")));
  }
  Hash hashDelete = databaseAdapter.commit(commit.build());

  List<Hash> afterDelete =
      IntStream.range(0, param.afterDeleteCommits)
          .mapToObj(dummyCommit)
          .collect(Collectors.toList());

  int expectedCommitCount = 1;

  // Commits before the initial put must expose no keys at all.
  expectedCommitCount =
      renameCommitVerify(
          beforeInitial.stream(), expectedCommitCount, keys -> assertThat(keys).isEmpty());

  // Between the initial put and the rename, only the old key must be visible.
  expectedCommitCount =
      renameCommitVerify(
          Stream.concat(Stream.of(hashInitial), beforeRename.stream()),
          expectedCommitCount,
          keys ->
              assertThat(keys)
                  .containsExactly(KeyListEntry.of(oldKey, contentId, payload, hashInitial)));

  // Between the rename and the delete, only the new key must be visible.
  expectedCommitCount =
      renameCommitVerify(
          Stream.concat(Stream.of(hashRename), beforeDelete.stream()),
          expectedCommitCount,
          keys ->
              assertThat(keys)
                  .containsExactly(KeyListEntry.of(newKey, contentId, payload, hashRename)));

  // From the delete onwards, no keys must be visible again.
  expectedCommitCount =
      renameCommitVerify(
          Stream.concat(Stream.of(hashDelete), afterDelete.stream()),
          expectedCommitCount,
          keys -> assertThat(keys).isEmpty());

  // Sanity check: every commit created above was verified exactly once.
  assertThat(expectedCommitCount - 1)
      .isEqualTo(
          param.setupCommits
              + 1
              + param.afterInitialCommits
              + 1
              + param.afterRenameCommits
              + 1
              + param.afterDeleteCommits);
}
Use of org.projectnessie.versioned.ReferenceConflictException in the nessie project by projectnessie: class TxDatabaseAdapter, method updateRepositoryDescription.
/**
 * Applies {@code updater} to the stored repository description and persists the result.
 *
 * <p>Runs inside {@code opLoop}; a failed INSERT/UPDATE (zero affected rows) returns
 * {@code null} from the attempt, which eventually surfaces as the "Retry-failure" conflict
 * message supplied below. If {@code updater} returns {@code null}, nothing is written.
 */
@Override
public void updateRepositoryDescription(Function<RepoDescription, RepoDescription> updater) throws ReferenceConflictException {
  try {
    opLoop(
        "updateRepositoryDescription",
        null,
        true,
        (conn, x) -> {
          byte[] existingBytes = fetchRepositoryDescriptionInternal(conn);
          RepoDescription existing = protoToRepoDescription(existingBytes);
          RepoDescription updated = updater.apply(existing);
          if (updated == null) {
            // Updater opted out - nothing to persist.
            return NO_ANCESTOR;
          }
          byte[] serialized = toProto(updated).toByteArray();
          if (existingBytes == null) {
            // No description row yet for this repository - INSERT it.
            try (PreparedStatement ps =
                conn.conn().prepareStatement(SqlStatements.INSERT_REPO_DESCRIPTION)) {
              ps.setString(1, config.getRepositoryId());
              ps.setBytes(2, serialized);
              if (ps.executeUpdate() == 0) {
                // Zero rows affected: another writer won - let opLoop handle it.
                return null;
              }
            }
          } else {
            // Compare-and-swap UPDATE, guarded by the bytes read above.
            try (PreparedStatement ps =
                conn.conn().prepareStatement(SqlStatements.UPDATE_REPO_DESCRIPTION)) {
              ps.setBytes(1, serialized);
              ps.setString(2, config.getRepositoryId());
              ps.setBytes(3, existingBytes);
              if (ps.executeUpdate() == 0) {
                return null;
              }
            }
          }
          return NO_ANCESTOR;
        },
        () -> repoDescUpdateConflictMessage("Conflict"),
        () -> repoDescUpdateConflictMessage("Retry-failure"));
  } catch (ReferenceConflictException | RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Use of org.projectnessie.versioned.ReferenceConflictException in the nessie project by projectnessie: class TxDatabaseAdapter, method merge.
/**
 * Merges the commits reachable from {@code from} into {@code toBranch}, optionally verifying
 * that the branch's current HEAD equals {@code expectedHead}.
 *
 * <p>Creates a new commit-tree that is decoupled from other commit-trees.
 */
@Override
public Hash merge(Hash from, BranchName toBranch, Optional<Hash> expectedHead, Function<ByteString, ByteString> updateCommitMetadata) throws ReferenceNotFoundException, ReferenceConflictException {
  try {
    return opLoop(
        "merge",
        toBranch,
        false,
        (conn, currentHead) -> {
          long micros = commitTimeInMicros();
          // Build the merged commit chain on top of the branch's current HEAD.
          Hash mergedHead =
              mergeAttempt(
                  conn,
                  micros,
                  from,
                  toBranch,
                  expectedHead,
                  currentHead,
                  h -> {},
                  h -> {},
                  updateCommitMetadata);
          // Move the named reference forward, then record the merge in the ref-log.
          Hash result = tryMoveNamedReference(conn, toBranch, currentHead, mergedHead);
          commitRefLog(
              conn,
              micros,
              mergedHead,
              toBranch,
              RefLogEntry.Operation.MERGE,
              Collections.singletonList(from));
          return result;
        },
        () -> mergeConflictMessage("Conflict", from, toBranch, expectedHead),
        () -> mergeConflictMessage("Retry-failure", from, toBranch, expectedHead));
  } catch (ReferenceNotFoundException | ReferenceConflictException | RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Use of org.projectnessie.versioned.ReferenceConflictException in the nessie project by projectnessie: class AbstractDatabaseAdapter, method findCommonAncestor.
/**
 * Finds the common ancestor of two commit-log entries.
 *
 * <p>If no common ancestor is found, throws a {@link ReferenceConflictException}; otherwise
 * returns the hash of the common ancestor.
 *
 * @param ctx technical operation context
 * @param from hash of the commit being merged in
 * @param toBranch target branch, used only for the exception message
 * @param toHead current HEAD of {@code toBranch}
 * @return hash of the common ancestor of {@code from} and {@code toHead}
 * @throws ReferenceConflictException if {@code from} and {@code toHead} share no common ancestor
 */
protected Hash findCommonAncestor(OP_CONTEXT ctx, Hash from, NamedRef toBranch, Hash toHead) throws ReferenceConflictException {
  // TODO this implementation requires guardrails:
  //  max number of "to"-commits to fetch, max number of "from"-commits to fetch,
  //  both impact the cost (CPU, memory, I/O) of a merge operation.
  CommonAncestorState commonAncestorState = new CommonAncestorState(ctx, toHead, false);
  Hash commonAncestorHash = findCommonAncestor(ctx, from, commonAncestorState, (dist, hash) -> hash);
  if (commonAncestorHash == null) {
    throw new ReferenceConflictException(
        String.format(
            "No common ancestor found for merge of '%s' into branch '%s'",
            from, toBranch.getName()));
  }
  return commonAncestorHash;
}
Use of org.projectnessie.versioned.ReferenceConflictException in the nessie project by projectnessie: class AbstractDatabaseAdapter, method transplantAttempt.
/**
 * Logic implementation of a transplant-attempt.
 *
 * <p>Re-applies the given contiguous sequence of commits onto the current HEAD of
 * {@code targetBranch} and returns the resulting new HEAD.
 *
 * @param ctx technical operation context
 * @param timeInMicros operation timestamp in microseconds, forwarded to the commit-copy step
 * @param targetBranch target reference with expected HEAD
 * @param expectedHead if present, {@code targetBranch}'s current HEAD must be equal to this value
 * @param targetHead current HEAD of {@code targetBranch}
 * @param sequenceToTransplant sequential list of commits to transplant from {@code source}
 * @param branchCommits consumer for the individual commits to merge
 * @param newKeyLists consumer for optimistically written {@link KeyListEntity}s
 * @param rewriteMetadata function to rewrite the commit-metadata for copied commits
 * @return hash of the last commit-log-entry written to {@code targetBranch}
 * @throws IllegalArgumentException if {@code sequenceToTransplant} is empty or not contiguous
 * @throws ReferenceNotFoundException propagated from the commit-log/hash-resolution helpers
 * @throws ReferenceConflictException propagated from the key-collision check
 */
protected Hash transplantAttempt(OP_CONTEXT ctx, long timeInMicros, BranchName targetBranch, Optional<Hash> expectedHead, Hash targetHead, List<Hash> sequenceToTransplant, Consumer<Hash> branchCommits, Consumer<Hash> newKeyLists, Function<ByteString, ByteString> rewriteMetadata) throws ReferenceNotFoundException, ReferenceConflictException {
if (sequenceToTransplant.isEmpty()) {
throw new IllegalArgumentException("No hashes to transplant given.");
}
// 1. ensure 'expectedHash' is a parent of HEAD-of-'targetBranch' & collect keys
// hashOnRef verifies reachability and collects the visited commit-log entries.
List<CommitLogEntry> targetEntriesReverseChronological = new ArrayList<>();
hashOnRef(ctx, targetHead, targetBranch, expectedHead, targetEntriesReverseChronological::add);
// Exclude the expected-hash on the target-branch from key-collisions check
// (changes up to and including the expected HEAD are the agreed base, not conflicts).
if (!targetEntriesReverseChronological.isEmpty() && expectedHead.isPresent() && targetEntriesReverseChronological.get(0).getHash().equals(expectedHead.get())) {
targetEntriesReverseChronological.remove(0);
}
Collections.reverse(targetEntriesReverseChronological);
// 2. Collect modified keys.
Set<Key> keysTouchedOnTarget = collectModifiedKeys(targetEntriesReverseChronological);
// 4. ensure 'sequenceToTransplant' is sequential
// Walk the commit log backwards from the last hash; each visited entry must match the
// corresponding (reverse-ordered) element of 'sequenceToTransplant'. 'index' is a single-element
// array so the lambda can mutate the counter.
int[] index = new int[] { sequenceToTransplant.size() - 1 };
Hash lastHash = sequenceToTransplant.get(sequenceToTransplant.size() - 1);
List<CommitLogEntry> commitsToTransplantChronological = takeUntilExcludeLast(readCommitLogStream(ctx, lastHash), e -> {
int i = index[0]--;
if (i == -1) {
// All requested commits consumed - stop reading the log here.
return true;
}
if (!e.getHash().equals(sequenceToTransplant.get(i))) {
throw new IllegalArgumentException("Sequence of hashes is not contiguous.");
}
return false;
}).collect(Collectors.toList());
// 5. check for key-collisions
checkForKeyCollisions(ctx, targetHead, keysTouchedOnTarget, commitsToTransplantChronological);
// (no need to verify the global states during a transplant)
// 6. re-apply commits in 'sequenceToTransplant' onto 'targetBranch'
targetHead = copyCommits(ctx, timeInMicros, targetHead, commitsToTransplantChronological, newKeyLists, rewriteMetadata);
// 7. Write commits
// Report each rewritten commit hash to the caller, then persist them in one batch.
commitsToTransplantChronological.stream().map(CommitLogEntry::getHash).forEach(branchCommits);
writeMultipleCommits(ctx, commitsToTransplantChronological);
return targetHead;
}
Aggregations