use of org.projectnessie.versioned.ReferenceNotFoundException in project nessie by projectnessie.
the class AbstractCommitScenarios method commitRenameTable.
/**
* Test whether a "table rename operation" works as expected.
*
* <p>A "table rename" is effectively a remove-operation plus a put-operation with a different key
* but the same content-id.
*
* <p>Parameterized to force operations to cross/not-cross the number of commits between "full key
* lists" and whether global-state shall be used.
*/
@ParameterizedTest
@MethodSource("commitRenameTableParams")
void commitRenameTable(RenameTable param) throws Exception {
  BranchName branch = BranchName.of("main");
  Key dummyKey = Key.of("dummy");
  Key oldKey = Key.of("hello", "table");
  Key newKey = Key.of("new", "name");
  ContentId contentId = ContentId.of("id-42");
  IntFunction<Hash> performDummyCommit = i -> {
    try {
      return databaseAdapter.commit(
          ImmutableCommitAttempt.builder()
              .commitToBranch(branch)
              .commitMetaSerialized(ByteString.copyFromUtf8("dummy commit meta " + i))
              .addUnchanged(dummyKey)
              .build());
    } catch (ReferenceNotFoundException | ReferenceConflictException e) {
      throw new RuntimeException(e);
    }
  };
  List<Hash> beforeInitial =
      IntStream.range(0, param.setupCommits).mapToObj(performDummyCommit).collect(Collectors.toList());
  ImmutableCommitAttempt.Builder commit;
  BaseContent initialContent;
  BaseContent renamContent;
  if (param.globalState) {
    initialContent = WithGlobalStateContent.newWithGlobal("0", "initial commit content");
    renamContent = WithGlobalStateContent.withGlobal("0", "rename commit content", initialContent.getId());
  } else {
    initialContent = OnRefOnly.newOnRef("initial commit content");
    renamContent = OnRefOnly.onRef("rename commit content", initialContent.getId());
  }
  byte payload = SimpleStoreWorker.INSTANCE.getPayload(initialContent);
  commit = ImmutableCommitAttempt.builder()
      .commitToBranch(branch)
      .commitMetaSerialized(ByteString.copyFromUtf8("initial commit meta"))
      .addPuts(KeyWithBytes.of(oldKey, contentId, payload,
          SimpleStoreWorker.INSTANCE.toStoreOnReferenceState(initialContent)));
  if (param.globalState) {
    commit.putGlobal(contentId, SimpleStoreWorker.INSTANCE.toStoreGlobalState(initialContent))
        .putExpectedStates(contentId, Optional.empty());
  }
  Hash hashInitial = databaseAdapter.commit(commit.build());
  List<Hash> beforeRename =
      IntStream.range(0, param.afterInitialCommits).mapToObj(performDummyCommit).collect(Collectors.toList());
  commit = ImmutableCommitAttempt.builder()
      .commitToBranch(branch)
      .commitMetaSerialized(ByteString.copyFromUtf8("rename table"))
      .addDeletes(oldKey)
      .addPuts(KeyWithBytes.of(newKey, contentId, payload,
          SimpleStoreWorker.INSTANCE.toStoreOnReferenceState(renamContent)));
  if (param.globalState) {
    commit.putGlobal(contentId, SimpleStoreWorker.INSTANCE.toStoreGlobalState(renamContent))
        .putExpectedStates(contentId, Optional.of(SimpleStoreWorker.INSTANCE.toStoreGlobalState(initialContent)));
  }
  Hash hashRename = databaseAdapter.commit(commit.build());
  List<Hash> beforeDelete =
      IntStream.range(0, param.afterRenameCommits).mapToObj(performDummyCommit).collect(Collectors.toList());
  commit = ImmutableCommitAttempt.builder()
      .commitToBranch(branch)
      .commitMetaSerialized(ByteString.copyFromUtf8("delete table"))
      .addDeletes(newKey);
  if (param.globalState) {
    commit.putGlobal(contentId, ByteString.copyFromUtf8("0"))
        .putExpectedStates(contentId, Optional.of(ByteString.copyFromUtf8("0")));
  }
  Hash hashDelete = databaseAdapter.commit(commit.build());
  List<Hash> afterDelete =
      IntStream.range(0, param.afterDeleteCommits).mapToObj(performDummyCommit).collect(Collectors.toList());
  int expectedCommitCount = 1;
  // Verify that the commits before the initial put return _no_ keys
  expectedCommitCount = renameCommitVerify(
      beforeInitial.stream(), expectedCommitCount, keys -> assertThat(keys).isEmpty());
  // Verify that the commits since the initial put and before the rename-operation return the _old_ key
  expectedCommitCount = renameCommitVerify(
      Stream.concat(Stream.of(hashInitial), beforeRename.stream()),
      expectedCommitCount,
      keys -> assertThat(keys).containsExactly(KeyListEntry.of(oldKey, contentId, payload, hashInitial)));
  // Verify that the commits since the rename-operation and before the delete-operation return the _new_ key
  expectedCommitCount = renameCommitVerify(
      Stream.concat(Stream.of(hashRename), beforeDelete.stream()),
      expectedCommitCount,
      keys -> assertThat(keys).containsExactly(KeyListEntry.of(newKey, contentId, payload, hashRename)));
  // Verify that the commits since the delete-operation return _no_ keys
  expectedCommitCount = renameCommitVerify(
      Stream.concat(Stream.of(hashDelete), afterDelete.stream()),
      expectedCommitCount,
      keys -> assertThat(keys).isEmpty());
  assertThat(expectedCommitCount - 1)
      .isEqualTo(param.setupCommits + 1 + param.afterInitialCommits + 1 + param.afterRenameCommits + 1
          + param.afterDeleteCommits);
}
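
The renameCommitVerify helper is not part of this snippet. Based on how it is called above, a plausible shape is sketched below (an assumption, not the actual implementation): for each commit hash it runs the given assertion against the visible keys and checks that the commit log has grown by one entry, returning the incremented count.

// Assumed sketch of the renameCommitVerify helper used above (not shown in this snippet).
private int renameCommitVerify(
    Stream<Hash> hashes, int expectedCommitCount, Consumer<Stream<KeyListEntry>> keysAssert)
    throws Exception {
  for (Hash hash : hashes.collect(Collectors.toList())) {
    // Assert on the keys visible at this commit...
    try (Stream<KeyListEntry> keys = databaseAdapter.keys(hash, KeyFilterPredicate.ALLOW_ALL)) {
      keysAssert.accept(keys);
    }
    // ...and on the number of commit-log entries reachable from it.
    try (Stream<CommitLogEntry> log = databaseAdapter.commitLog(hash)) {
      assertThat(log).hasSize(expectedCommitCount++);
    }
  }
  return expectedCommitCount;
}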
use of org.projectnessie.versioned.ReferenceNotFoundException in project nessie by projectnessie.
the class AbstractManyCommits method verify.
private void verify(int i, int numCommits, BranchName branch, Hash commit, ContentId contentId) {
  Key key = Key.of("many", "commits", Integer.toString(numCommits));
  try {
    commit = databaseAdapter.hashOnReference(branch, Optional.of(commit));
  } catch (ReferenceNotFoundException e) {
    throw new RuntimeException(e);
  }
  try {
    Map<Key, ContentAndState<ByteString>> values =
        databaseAdapter.values(commit, Collections.singletonList(key), KeyFilterPredicate.ALLOW_ALL);
    WithGlobalStateContent expected = WithGlobalStateContent.withGlobal(
        "state for #" + (numCommits - 1) + " of " + numCommits,
        "value for #" + i + " of " + numCommits,
        contentId.getId());
    ByteString expectValue = SimpleStoreWorker.INSTANCE.toStoreOnReferenceState(expected);
    ByteString expectState = SimpleStoreWorker.INSTANCE.toStoreGlobalState(expected);
    ContentAndState<ByteString> expect = ContentAndState.of(expectValue, expectState);
    assertThat(values).containsExactly(Maps.immutableEntry(key, expect));
  } catch (ReferenceNotFoundException e) {
    throw new RuntimeException(e);
  }
  try (Stream<KeyListEntry> keys = databaseAdapter.keys(commit, KeyFilterPredicate.ALLOW_ALL)) {
    assertThat(keys.map(KeyListEntry::getKey)).containsExactly(key);
  } catch (ReferenceNotFoundException e) {
    throw new RuntimeException(e);
  }
}
use of org.projectnessie.versioned.ReferenceNotFoundException in project nessie by projectnessie.
the class TxDatabaseAdapter method create.
@SuppressWarnings("RedundantThrows")
@Override
public Hash create(NamedRef ref, Hash target) throws ReferenceAlreadyExistsException, ReferenceNotFoundException {
  try {
    return opLoop(
        "createRef",
        ref,
        true,
        (conn, nullHead) -> {
          if (checkNamedRefExistence(conn, ref.getName())) {
            throw referenceAlreadyExists(ref);
          }
          Hash hash = target;
          if (hash == null) {
            // Special case: Don't validate, if the 'target' parameter is null.
            // This is mostly used for tests that re-create the default-branch.
            hash = NO_ANCESTOR;
          }
          validateHashExists(conn, hash);
          insertNewReference(conn, ref, hash);
          commitRefLog(conn, commitTimeInMicros(), hash, ref,
              RefLogEntry.Operation.CREATE_REFERENCE, Collections.emptyList());
          return hash;
        },
        () -> createConflictMessage("Conflict", ref, target),
        () -> createConflictMessage("Retry-Failure", ref, target));
  } catch (ReferenceAlreadyExistsException | ReferenceNotFoundException | RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
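
create delegates its conflict handling and retries to opLoop. The sketch below is a simplified, hypothetical illustration of that optimistic commit/retry pattern, meant only to show why the lambda receives a connection plus the current HEAD and why separate "Conflict" and "Retry-Failure" message suppliers are passed. The names openConnection, fetchHead, isRetryable, MAX_RETRIES and the LoopOp functional interface are placeholders; the real TxDatabaseAdapter.opLoop additionally manages the SQL transaction, bounded backoff and timeouts.

// Hypothetical sketch of the optimistic retry pattern behind opLoop (placeholder names,
// not the actual TxDatabaseAdapter implementation).
Hash opLoopSketch(NamedRef ref, boolean createRef, LoopOp op,
    Supplier<String> conflictMsg, Supplier<String> retryFailureMsg) throws Exception {
  for (int attempt = 0; attempt < MAX_RETRIES; attempt++) {
    try (Connection conn = openConnection()) {
      // For existing references, resolve the current HEAD so the lambda can validate/advance it;
      // for createRef the HEAD is null because the reference does not exist yet.
      Hash head = createRef ? null : fetchHead(conn, ref);
      Hash result = op.apply(conn, head);
      conn.commit(); // publish atomically; fails when a concurrent writer got there first
      return result;
    } catch (SQLException e) {
      if (!isRetryable(e)) {
        // simplified: a non-retryable failure surfaces as a conflict
        throw new ReferenceConflictException(conflictMsg.get());
      }
      // lost the race against another writer: loop and retry
    }
  }
  throw new ReferenceRetryFailureException(retryFailureMsg.get());
}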
use of org.projectnessie.versioned.ReferenceNotFoundException in project nessie by projectnessie.
the class TxDatabaseAdapter method merge.
@Override
public Hash merge(
    Hash from,
    BranchName toBranch,
    Optional<Hash> expectedHead,
    Function<ByteString, ByteString> updateCommitMetadata)
    throws ReferenceNotFoundException, ReferenceConflictException {
  // creates a new commit-tree that is decoupled from other commit-trees.
  try {
    return opLoop(
        "merge",
        toBranch,
        false,
        (conn, currentHead) -> {
          long timeInMicros = commitTimeInMicros();
          Hash toHead = mergeAttempt(
              conn, timeInMicros, from, toBranch, expectedHead, currentHead,
              h -> {}, h -> {}, updateCommitMetadata);
          Hash resultHash = tryMoveNamedReference(conn, toBranch, currentHead, toHead);
          commitRefLog(conn, timeInMicros, toHead, toBranch,
              RefLogEntry.Operation.MERGE, Collections.singletonList(from));
          return resultHash;
        },
        () -> mergeConflictMessage("Conflict", from, toBranch, expectedHead),
        () -> mergeConflictMessage("Retry-failure", from, toBranch, expectedHead));
  } catch (ReferenceNotFoundException | ReferenceConflictException | RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
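
For orientation, a call site of merge might look like the sketch below; the branch names and the mainHead hash are made up for illustration, and Function.identity() simply keeps the serialized commit metadata unchanged.

// Illustrative call (hypothetical names): merge the HEAD of "feature" into "main",
// requiring that "main" still points at mainHead, without rewriting commit metadata.
Hash featureHead = databaseAdapter.hashOnReference(BranchName.of("feature"), Optional.empty());
Hash newMainHead =
    databaseAdapter.merge(featureHead, BranchName.of("main"), Optional.of(mainHead), Function.identity());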
use of org.projectnessie.versioned.ReferenceNotFoundException in project nessie by projectnessie.
the class AbstractDatabaseAdapter method transplantAttempt.
/**
* Logic implementation of a transplant-attempt.
*
* @param ctx technical operation context
* @param targetBranch target reference with expected HEAD
* @param expectedHead if present, {@code targetBranch}'s current HEAD must be equal to this value
* @param targetHead current HEAD of {@code targetBranch}
* @param sequenceToTransplant sequential list of commits to transplant from {@code source}
* @param branchCommits consumer for the individual commits to merge
* @param newKeyLists consumer for optimistically written {@link KeyListEntity}s
* @param rewriteMetadata function to rewrite the commit-metadata for copied commits
* @return hash of the last commit-log-entry written to {@code targetBranch}
*/
protected Hash transplantAttempt(
    OP_CONTEXT ctx,
    long timeInMicros,
    BranchName targetBranch,
    Optional<Hash> expectedHead,
    Hash targetHead,
    List<Hash> sequenceToTransplant,
    Consumer<Hash> branchCommits,
    Consumer<Hash> newKeyLists,
    Function<ByteString, ByteString> rewriteMetadata)
    throws ReferenceNotFoundException, ReferenceConflictException {
  if (sequenceToTransplant.isEmpty()) {
    throw new IllegalArgumentException("No hashes to transplant given.");
  }

  // 1. ensure 'expectedHash' is a parent of HEAD-of-'targetBranch' & collect keys
  List<CommitLogEntry> targetEntriesReverseChronological = new ArrayList<>();
  hashOnRef(ctx, targetHead, targetBranch, expectedHead, targetEntriesReverseChronological::add);

  // Exclude the expected-hash on the target-branch from key-collisions check
  if (!targetEntriesReverseChronological.isEmpty()
      && expectedHead.isPresent()
      && targetEntriesReverseChronological.get(0).getHash().equals(expectedHead.get())) {
    targetEntriesReverseChronological.remove(0);
  }
  Collections.reverse(targetEntriesReverseChronological);

  // 2. Collect modified keys.
  Set<Key> keysTouchedOnTarget = collectModifiedKeys(targetEntriesReverseChronological);

  // 4. ensure 'sequenceToTransplant' is sequential
  int[] index = new int[] {sequenceToTransplant.size() - 1};
  Hash lastHash = sequenceToTransplant.get(sequenceToTransplant.size() - 1);
  List<CommitLogEntry> commitsToTransplantChronological =
      takeUntilExcludeLast(
              readCommitLogStream(ctx, lastHash),
              e -> {
                int i = index[0]--;
                if (i == -1) {
                  return true;
                }
                if (!e.getHash().equals(sequenceToTransplant.get(i))) {
                  throw new IllegalArgumentException("Sequence of hashes is not contiguous.");
                }
                return false;
              })
          .collect(Collectors.toList());

  // 5. check for key-collisions
  checkForKeyCollisions(ctx, targetHead, keysTouchedOnTarget, commitsToTransplantChronological);

  // (no need to verify the global states during a transplant)

  // 6. re-apply commits in 'sequenceToTransplant' onto 'targetBranch'
  targetHead =
      copyCommits(ctx, timeInMicros, targetHead, commitsToTransplantChronological, newKeyLists, rewriteMetadata);

  // 7. Write commits
  commitsToTransplantChronological.stream().map(CommitLogEntry::getHash).forEach(branchCommits);
  writeMultipleCommits(ctx, commitsToTransplantChronological);

  return targetHead;
}
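
takeUntilExcludeLast is a small stream helper of AbstractDatabaseAdapter that is not shown here. Judging from its use above, it passes elements through until the predicate matches and drops the matching element; the sketch below captures that assumed behavior (eagerly, for simplicity, whereas the real helper stays lazy and stream-based).

// Assumed behavior of a takeUntilExcludeLast-style helper: emit elements until the
// predicate matches, excluding the element that matched. Eager here for simplicity.
static <T> Stream<T> takeUntilExcludeLastSketch(Stream<T> source, Predicate<T> stop) {
  List<T> taken = new ArrayList<>();
  Iterator<T> it = source.iterator();
  while (it.hasNext()) {
    T element = it.next();
    if (stop.test(element)) {
      break; // the matching element is not included
    }
    taken.add(element);
  }
  return taken.stream();
}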