Use of org.projectnessie.versioned.Key in the project nessie (by projectnessie): class AbstractManyCommits, method verify.
private void verify(int i, int numCommits, BranchName branch, Hash commit, ContentId contentId) {
Key key = Key.of("many", "commits", Integer.toString(numCommits));
try {
commit = databaseAdapter.hashOnReference(branch, Optional.of(commit));
} catch (ReferenceNotFoundException e) {
throw new RuntimeException(e);
}
try {
Map<Key, ContentAndState<ByteString>> values = databaseAdapter.values(commit, Collections.singletonList(key), KeyFilterPredicate.ALLOW_ALL);
WithGlobalStateContent expected = WithGlobalStateContent.withGlobal("state for #" + (numCommits - 1) + " of " + numCommits, "value for #" + i + " of " + numCommits, contentId.getId());
ByteString expectValue = SimpleStoreWorker.INSTANCE.toStoreOnReferenceState(expected);
ByteString expectState = SimpleStoreWorker.INSTANCE.toStoreGlobalState(expected);
ContentAndState<ByteString> expect = ContentAndState.of(expectValue, expectState);
assertThat(values).containsExactly(Maps.immutableEntry(key, expect));
} catch (ReferenceNotFoundException e) {
throw new RuntimeException(e);
}
try (Stream<KeyListEntry> keys = databaseAdapter.keys(commit, KeyFilterPredicate.ALLOW_ALL)) {
assertThat(keys.map(KeyListEntry::getKey)).containsExactly(key);
} catch (ReferenceNotFoundException e) {
throw new RuntimeException(e);
}
}
Use of org.projectnessie.versioned.Key in the project nessie (by projectnessie): class TestSerialization, method stableOrderOfRepoDescProps.
/**
 * Verifies that repo-description properties are serialized in a stable, key-sorted order
 * regardless of the (random) insertion order.
 */
@RepeatedTest(50)
public void stableOrderOfRepoDescProps() {
  ImmutableRepoDescription.Builder repoDescription = RepoDescription.builder().repoVersion(42);

  Map<String, String> props = new HashMap<>();
  for (int i = 0; i < 20; i++) {
    String key = randomString(50);
    String value = randomString(50);
    props.put(key, value);
    repoDescription.putProperties(key, value);
  }

  AdapterTypes.RepoProps repoProps = toProto(repoDescription.build());

  // Expected wire representation: one Entry per property, sorted by key (natural String order).
  List<AdapterTypes.Entry> expected =
      props.keySet().stream()
          .sorted()
          .map(k -> AdapterTypes.Entry.newBuilder().setKey(k).setValue(props.get(k)).build())
          .collect(Collectors.toList());

  assertThat(repoProps.getPropertiesList()).containsExactlyElementsOf(expected);
}
Use of org.projectnessie.versioned.Key in the project nessie (by projectnessie): class AbstractDatabaseAdapter, method checkForModifiedKeysBetweenExpectedAndCurrentCommit.
/**
 * If the current HEAD of the target branch for a commit/transplant/merge is not equal to the
 * expected/reference HEAD, verify that there is no conflict: none of the keys touched by the
 * operation may appear in the commits {@code expectedHead (excluding) .. currentHead (including)}.
 */
protected void checkForModifiedKeysBetweenExpectedAndCurrentCommit(OP_CONTEXT ctx, CommitAttempt commitAttempt, Hash branchHead, List<String> mismatches) throws ReferenceNotFoundException {
  Optional<Hash> maybeExpectedHead = commitAttempt.getExpectedHead();
  if (!maybeExpectedHead.isPresent()) {
    // No expected HEAD supplied -> nothing to compare against.
    return;
  }
  Hash expectedHead = maybeExpectedHead.get();
  if (expectedHead.equals(branchHead)) {
    // Branch has not moved since the client read it -> no possible conflict.
    return;
  }

  // Collect every key the attempted commit touches: deletes, unchanged-checks and puts.
  Set<Key> operationKeys = new HashSet<>(commitAttempt.getDeletes());
  operationKeys.addAll(commitAttempt.getUnchanged());
  for (KeyWithBytes put : commitAttempt.getPuts()) {
    operationKeys.add(put.getKey());
  }

  boolean expectedHeadSeen =
      checkConflictingKeysForCommit(ctx, branchHead, expectedHead, operationKeys, mismatches::add);

  // If the expected HEAD was never encountered while walking back from the current HEAD,
  // the client referenced a hash that is not an ancestor on this branch.
  if (!expectedHeadSeen && !expectedHead.equals(NO_ANCESTOR)) {
    throw hashNotFound(commitAttempt.getCommitToBranch(), expectedHead);
  }
}
Use of org.projectnessie.versioned.Key in the project nessie (by projectnessie): class AbstractDatabaseAdapter, method transplantAttempt.
/**
 * Logic implementation of a transplant-attempt.
 *
 * @param ctx technical operation context
 * @param timeInMicros timestamp (microseconds) applied to the copied commits
 * @param targetBranch target reference with expected HEAD
 * @param expectedHead if present, {@code targetBranch}'s current HEAD must be equal to this value
 * @param targetHead current HEAD of {@code targetBranch}
 * @param sequenceToTransplant sequential list of commits to transplant from {@code source}
 * @param branchCommits consumer for the individual commits to merge
 * @param newKeyLists consumer for optimistically written {@link KeyListEntity}s
 * @param rewriteMetadata function to rewrite the commit-metadata for copied commits
 * @return hash of the last commit-log-entry written to {@code targetBranch}
 * @throws ReferenceNotFoundException if a referenced hash or branch does not exist
 * @throws ReferenceConflictException if a transplanted key conflicts with a change on the target
 */
protected Hash transplantAttempt(OP_CONTEXT ctx, long timeInMicros, BranchName targetBranch, Optional<Hash> expectedHead, Hash targetHead, List<Hash> sequenceToTransplant, Consumer<Hash> branchCommits, Consumer<Hash> newKeyLists, Function<ByteString, ByteString> rewriteMetadata) throws ReferenceNotFoundException, ReferenceConflictException {
if (sequenceToTransplant.isEmpty()) {
throw new IllegalArgumentException("No hashes to transplant given.");
}
// 1. ensure 'expectedHash' is a parent of HEAD-of-'targetBranch' & collect keys
// hashOnRef walks the commit log and feeds each visited entry (newest first) into the list.
List<CommitLogEntry> targetEntriesReverseChronological = new ArrayList<>();
hashOnRef(ctx, targetHead, targetBranch, expectedHead, targetEntriesReverseChronological::add);
// Exclude the expected-hash on the target-branch from key-collisions check
if (!targetEntriesReverseChronological.isEmpty() && expectedHead.isPresent() && targetEntriesReverseChronological.get(0).getHash().equals(expectedHead.get())) {
targetEntriesReverseChronological.remove(0);
}
// Flip to chronological order for the key collection below.
Collections.reverse(targetEntriesReverseChronological);
// 2. Collect modified keys.
Set<Key> keysTouchedOnTarget = collectModifiedKeys(targetEntriesReverseChronological);
// (step 3 — the global-state verification done for merges — is intentionally skipped for
// transplants, see the note before step 6 below)
// 4. ensure 'sequenceToTransplant' is sequential
// Walk the commit log backwards from the last hash to transplant and verify each visited
// entry matches 'sequenceToTransplant' back-to-front; 'index' counts down alongside.
int[] index = new int[] { sequenceToTransplant.size() - 1 };
Hash lastHash = sequenceToTransplant.get(sequenceToTransplant.size() - 1);
List<CommitLogEntry> commitsToTransplantChronological = takeUntilExcludeLast(readCommitLogStream(ctx, lastHash), e -> {
int i = index[0]--;
if (i == -1) {
// Consumed exactly the requested sequence; stop the stream here.
return true;
}
if (!e.getHash().equals(sequenceToTransplant.get(i))) {
throw new IllegalArgumentException("Sequence of hashes is not contiguous.");
}
return false;
}).collect(Collectors.toList());
// 5. check for key-collisions
checkForKeyCollisions(ctx, targetHead, keysTouchedOnTarget, commitsToTransplantChronological);
// (no need to verify the global states during a transplant)
// 6. re-apply commits in 'sequenceToTransplant' onto 'targetBranch'
// NOTE(review): copyCommits presumably rewrites the entries in-place in the list and returns
// the new HEAD — confirm against its implementation.
targetHead = copyCommits(ctx, timeInMicros, targetHead, commitsToTransplantChronological, newKeyLists, rewriteMetadata);
// 7. Write commits
commitsToTransplantChronological.stream().map(CommitLogEntry::getHash).forEach(branchCommits);
writeMultipleCommits(ctx, commitsToTransplantChronological);
return targetHead;
}
Use of org.projectnessie.versioned.Key in the project nessie (by projectnessie): class AbstractDatabaseAdapter, method checkForKeyCollisions.
/**
 * For merge/transplant, verifies that the given commits do not touch any of the given keys.
 *
 * @param commitsChronological list of commit-log-entries, in order of commit-operations,
 *     chronological order
 * @throws ReferenceConflictException if any remaining key collision is detected
 */
protected void checkForKeyCollisions(OP_CONTEXT ctx, Hash refHead, Set<Key> keysTouchedOnTarget, List<CommitLogEntry> commitsChronological) throws ReferenceConflictException, ReferenceNotFoundException {
  // Keys touched by the source commits that were also touched on the target branch.
  Set<Key> keyCollisions = new HashSet<>();
  for (CommitLogEntry sourceCommit : commitsChronological) {
    for (KeyWithBytes put : sourceCommit.getPuts()) {
      if (keysTouchedOnTarget.contains(put.getKey())) {
        keyCollisions.add(put.getKey());
      }
    }
    for (Key deleted : sourceCommit.getDeletes()) {
      if (keysTouchedOnTarget.contains(deleted)) {
        keyCollisions.add(deleted);
      }
    }
  }

  if (keyCollisions.isEmpty()) {
    return;
  }

  // Presumably filters out collisions that are acceptable for namespace keys; it mutates
  // 'keyCollisions' in place, hence the re-check below — TODO confirm against implementation.
  removeKeyCollisionsForNamespaces(
      ctx, refHead, commitsChronological.get(commitsChronological.size() - 1).getHash(), keyCollisions);

  if (!keyCollisions.isEmpty()) {
    String conflictingKeys =
        keyCollisions.stream()
            .map(k -> String.format("'%s'", k.toString()))
            .collect(Collectors.joining(", "));
    throw new ReferenceConflictException(
        String.format("The following keys have been changed in conflict: %s", conflictingKeys));
  }
}
Aggregations