Use of org.projectnessie.versioned.persist.adapter.KeyListEntity in project nessie by projectnessie.
The class AbstractDatabaseAdapter, method transplantAttempt.
/**
 * Logic implementation of a transplant-attempt.
 *
 * @param ctx technical operation context
 * @param timeInMicros current time in microseconds since the epoch, used as the commit timestamp
 *     for the re-written commits
 * @param targetBranch target reference with expected HEAD
 * @param expectedHead if present, {@code targetBranch}'s current HEAD must be equal to this value
 * @param targetHead current HEAD of {@code targetBranch}
 * @param sequenceToTransplant sequential list of commit hashes to transplant, oldest first
 * @param branchCommits consumer for the individual commits to transplant
 * @param newKeyLists consumer for optimistically written {@link KeyListEntity}s
 * @param rewriteMetadata function to rewrite the commit-metadata for copied commits
 * @return hash of the last commit-log-entry written to {@code targetBranch}
 */
protected Hash transplantAttempt(
    OP_CONTEXT ctx,
    long timeInMicros,
    BranchName targetBranch,
    Optional<Hash> expectedHead,
    Hash targetHead,
    List<Hash> sequenceToTransplant,
    Consumer<Hash> branchCommits,
    Consumer<Hash> newKeyLists,
    Function<ByteString, ByteString> rewriteMetadata)
    throws ReferenceNotFoundException, ReferenceConflictException {
  if (sequenceToTransplant.isEmpty()) {
    throw new IllegalArgumentException("No hashes to transplant given.");
  }

  // 1. ensure 'expectedHead' is a parent of HEAD-of-'targetBranch' & collect keys
  List<CommitLogEntry> targetEntriesReverseChronological = new ArrayList<>();
  hashOnRef(ctx, targetHead, targetBranch, expectedHead, targetEntriesReverseChronological::add);

  // Exclude the expected-hash on the target-branch from the key-collisions check
  if (!targetEntriesReverseChronological.isEmpty()
      && expectedHead.isPresent()
      && targetEntriesReverseChronological.get(0).getHash().equals(expectedHead.get())) {
    targetEntriesReverseChronological.remove(0);
  }
  Collections.reverse(targetEntriesReverseChronological);

  // 2. Collect modified keys.
  Set<Key> keysTouchedOnTarget = collectModifiedKeys(targetEntriesReverseChronological);

  // 4. ensure 'sequenceToTransplant' is sequential
  int[] index = new int[] {sequenceToTransplant.size() - 1};
  Hash lastHash = sequenceToTransplant.get(sequenceToTransplant.size() - 1);
  List<CommitLogEntry> commitsToTransplantChronological =
      takeUntilExcludeLast(
              readCommitLogStream(ctx, lastHash),
              e -> {
                int i = index[0]--;
                if (i == -1) {
                  return true;
                }
                if (!e.getHash().equals(sequenceToTransplant.get(i))) {
                  throw new IllegalArgumentException("Sequence of hashes is not contiguous.");
                }
                return false;
              })
          .collect(Collectors.toList());

  // 5. check for key-collisions
  checkForKeyCollisions(ctx, targetHead, keysTouchedOnTarget, commitsToTransplantChronological);

  // (no need to verify the global states during a transplant)

  // 6. re-apply commits in 'sequenceToTransplant' onto 'targetBranch'
  targetHead =
      copyCommits(ctx, timeInMicros, targetHead, commitsToTransplantChronological, newKeyLists, rewriteMetadata);

  // 7. Write commits
  commitsToTransplantChronological.stream().map(CommitLogEntry::getHash).forEach(branchCommits);
  writeMultipleCommits(ctx, commitsToTransplantChronological);

  return targetHead;
}
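Both transplantAttempt above and mergeAttempt below slice the commit log with the takeUntilExcludeLast helper, which yields commit-log entries up to, but excluding, the first entry matching the predicate. A minimal sketch of such a stream-slicing helper (an illustration of the contract, not the project's actual implementation) could look like this:

import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

final class StreamHelpers {
  private StreamHelpers() {}

  // Yields elements of 'source' up to, but NOT including, the first element for
  // which 'predicate' returns true. The predicate may also throw to abort the
  // traversal, as the contiguity check in transplantAttempt does.
  static <T> Stream<T> takeUntilExcludeLast(Stream<T> source, Predicate<? super T> predicate) {
    Spliterator<T> src = source.spliterator();
    Spliterator<T> sliced =
        new Spliterators.AbstractSpliterator<T>(src.estimateSize(), 0) {
          private boolean done;

          @Override
          public boolean tryAdvance(Consumer<? super T> action) {
            if (done) {
              return false;
            }
            boolean[] accepted = new boolean[1];
            boolean advanced =
                src.tryAdvance(
                    e -> {
                      if (predicate.test(e)) {
                        done = true; // the matching element itself is excluded
                      } else {
                        action.accept(e);
                        accepted[0] = true;
                      }
                    });
            return advanced && accepted[0];
          }
        };
    return StreamSupport.stream(sliced, false).onClose(source::close);
  }
}

For example, takeUntilExcludeLast(Stream.of(1, 2, 3, 4), i -> i == 3) yields 1 and 2, which is exactly the shape needed to cut a commit log at a known ancestor.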
Use of org.projectnessie.versioned.persist.adapter.KeyListEntity in project nessie by projectnessie.
The class RocksDatabaseAdapter, method doWriteKeyListEntities.
@Override
protected void doWriteKeyListEntities(
    NonTransactionalOperationContext ctx, List<KeyListEntity> newKeyListEntities) {
  Lock lock = dbInstance.getLock().writeLock();
  lock.lock();
  try {
    for (KeyListEntity keyListEntity : newKeyListEntities) {
      byte[] key = dbKey(keyListEntity.getId());
      db.put(dbInstance.getCfKeyList(), key, toProto(keyListEntity.getKeys()).toByteArray());
    }
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  } finally {
    lock.unlock();
  }
}
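Writes under dbInstance.getLock().writeLock() pair with reads under the lock's read half. As a hedged sketch of what the matching read path could look like in the same adapter (doFetchKeyLists is the SPI method exercised by the test below; the protoToKeyList deserialization helper name is an assumption, mirroring the toProto call on the write path):

// Hedged sketch of the read-side counterpart; 'protoToKeyList' is an assumed
// helper name, mirroring the 'toProto' serialization call on the write path.
protected Stream<KeyListEntity> doFetchKeyLists(
    NonTransactionalOperationContext ctx, List<Hash> keyListsIds) {
  Lock lock = dbInstance.getLock().readLock();
  lock.lock();
  try {
    List<KeyListEntity> result = new ArrayList<>(keyListsIds.size());
    for (Hash id : keyListsIds) {
      byte[] bytes = db.get(dbInstance.getCfKeyList(), dbKey(id));
      // Missing entries are kept as 'null' placeholders, matching what the
      // clean-up test below asserts after doCleanUpCommitCas has run.
      result.add(bytes != null ? KeyListEntity.of(id, protoToKeyList(bytes)) : null);
    }
    return result.stream();
  } catch (RocksDBException e) {
    throw new RuntimeException(e);
  } finally {
    lock.unlock();
  }
}

Materializing the results into a list before returning keeps the critical section short: the read lock is released before the caller consumes the stream.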
Use of org.projectnessie.versioned.persist.adapter.KeyListEntity in project nessie by projectnessie.
The class ITDynamoDatabaseAdapter, method cleanUpCasBatch.
@ParameterizedTest
@MethodSource("cleanUpCasBatch")
public void cleanUpCasBatch(int numCommits, int numKeyLists) throws Exception {
  Hash globalId = randomHash();
  Set<Hash> branchCommits = new HashSet<>();
  Set<Hash> newKeyLists = new HashSet<>();
  Hash refLogId = randomHash();

  GlobalStateLogEntry globalStateLogEntry =
      GlobalStateLogEntry.newBuilder().setId(globalId.asBytes()).build();
  RefLogEntry refLogEntry = RefLogEntry.newBuilder().setRefLogId(refLogId.asBytes()).build();

  NonTransactionalOperationContext ctx = NON_TRANSACTIONAL_OPERATION_CONTEXT;

  implDatabaseAdapter().doWriteRefLog(ctx, refLogEntry);
  implDatabaseAdapter().doWriteGlobalCommit(ctx, globalStateLogEntry);

  for (int i = 0; i < numCommits; i++) {
    Hash commitId = randomHash();
    CommitLogEntry commit =
        CommitLogEntry.of(
            0L,
            commitId,
            0,
            Collections.emptyList(),
            ByteString.EMPTY,
            Collections.emptyList(),
            Collections.emptyList(),
            0,
            KeyList.of(Collections.emptyList()),
            Collections.emptyList());
    implDatabaseAdapter().doWriteIndividualCommit(ctx, commit);
    branchCommits.add(commitId);
  }

  for (int i = 0; i < numKeyLists; i++) {
    Hash keyListId = randomHash();
    KeyListEntity keyListEntity = KeyListEntity.of(keyListId, KeyList.of(Collections.emptyList()));
    implDatabaseAdapter().doWriteKeyListEntities(ctx, Collections.singletonList(keyListEntity));
    newKeyLists.add(keyListId);
  }

  // Verify that everything written above is visible...
  assertThat(implDatabaseAdapter().doFetchFromGlobalLog(ctx, globalId)).isNotNull();
  assertThat(implDatabaseAdapter().doFetchFromRefLog(ctx, refLogId)).isNotNull();
  assertThat(branchCommits)
      .map(id -> implDatabaseAdapter().doFetchFromCommitLog(ctx, id))
      .allMatch(Objects::nonNull);
  assertThat(newKeyLists)
      .map(
          id ->
              implDatabaseAdapter()
                  .doFetchKeyLists(ctx, Collections.singletonList(id))
                  .collect(Collectors.toList()))
      .allMatch(l -> l.size() == 1)
      .extracting(l -> l.get(0))
      .allMatch(Objects::nonNull);

  implDatabaseAdapter().doCleanUpCommitCas(ctx, globalId, branchCommits, newKeyLists, refLogId);

  // ...and that the CAS clean-up removed all of it again.
  assertThat(implDatabaseAdapter().doFetchFromGlobalLog(ctx, globalId)).isNull();
  assertThat(implDatabaseAdapter().doFetchFromRefLog(ctx, refLogId)).isNull();
  assertThat(branchCommits)
      .map(id -> implDatabaseAdapter().doFetchFromCommitLog(ctx, id))
      .allMatch(Objects::isNull);
  assertThat(newKeyLists)
      .map(
          id ->
              implDatabaseAdapter()
                  .doFetchKeyLists(ctx, Collections.singletonList(id))
                  .collect(Collectors.toList()))
      .allMatch(l -> l.size() == 1)
      .extracting(l -> l.get(0))
      .allMatch(Objects::isNull);
}
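JUnit 5 resolves @MethodSource("cleanUpCasBatch") to a static factory method of the same name that supplies the (numCommits, numKeyLists) combinations. The concrete values below are made up for illustration; the real test class may use different batch sizes:

import java.util.stream.Stream;
import org.junit.jupiter.params.provider.Arguments;

// Hypothetical parameter combinations for the parameterized test above.
static Stream<Arguments> cleanUpCasBatch() {
  return Stream.of(
      Arguments.of(1, 1),    // smallest batch
      Arguments.of(10, 0),   // commits only
      Arguments.of(0, 10),   // key-lists only
      Arguments.of(50, 25)); // larger mixed batch
}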
Use of org.projectnessie.versioned.persist.adapter.KeyListEntity in project nessie by projectnessie.
The class AbstractDatabaseAdapter, method mergeAttempt.
/**
 * Logic implementation of a merge-attempt.
 *
 * @param ctx technical operation context
 * @param timeInMicros current time in microseconds since the epoch, used as the commit timestamp
 *     for the re-written commits
 * @param from merge-from commit
 * @param toBranch merge-into reference with expected hash of HEAD
 * @param expectedHead if present, {@code toBranch}'s current HEAD must be equal to this value
 * @param toHead current HEAD of {@code toBranch}
 * @param branchCommits consumer for the individual commits to merge
 * @param newKeyLists consumer for optimistically written {@link KeyListEntity}s
 * @param rewriteMetadata function to rewrite the commit-metadata for copied commits
 * @return hash of the last commit-log-entry written to {@code toBranch}
 */
protected Hash mergeAttempt(
    OP_CONTEXT ctx,
    long timeInMicros,
    Hash from,
    BranchName toBranch,
    Optional<Hash> expectedHead,
    Hash toHead,
    Consumer<Hash> branchCommits,
    Consumer<Hash> newKeyLists,
    Function<ByteString, ByteString> rewriteMetadata)
    throws ReferenceNotFoundException, ReferenceConflictException {
  validateHashExists(ctx, from);

  // 1. ensure 'expectedHead' is a parent of HEAD-of-'toBranch'
  hashOnRef(ctx, toBranch, expectedHead, toHead);

  // 2. find the nearest common-ancestor of 'from' and 'toHead'
  Hash commonAncestor = findCommonAncestor(ctx, from, toBranch, toHead);

  // 3. Collect commit-log-entries
  List<CommitLogEntry> toEntriesReverseChronological =
      takeUntilExcludeLast(readCommitLogStream(ctx, toHead), e -> e.getHash().equals(commonAncestor))
          .collect(Collectors.toList());
  Collections.reverse(toEntriesReverseChronological);

  List<CommitLogEntry> commitsToMergeChronological =
      takeUntilExcludeLast(readCommitLogStream(ctx, from), e -> e.getHash().equals(commonAncestor))
          .collect(Collectors.toList());
  if (commitsToMergeChronological.isEmpty()) {
    // Nothing to merge, shortcut
    throw new IllegalArgumentException(
        String.format(
            "No hashes to merge from '%s' onto '%s' @ '%s'.",
            from.asString(), toBranch.getName(), toHead));
  }

  // 4. Collect modified keys.
  Set<Key> keysTouchedOnTarget = collectModifiedKeys(toEntriesReverseChronological);

  // 5. check for key-collisions
  checkForKeyCollisions(ctx, toHead, keysTouchedOnTarget, commitsToMergeChronological);

  // (no need to verify the global states during a merge)

  // 6. re-apply the commits to merge onto 'toBranch'
  toHead =
      copyCommits(ctx, timeInMicros, toHead, commitsToMergeChronological, newKeyLists, rewriteMetadata);

  // 7. Write commits
  commitsToMergeChronological.stream().map(CommitLogEntry::getHash).forEach(branchCommits);
  writeMultipleCommits(ctx, commitsToMergeChronological);

  return toHead;
}
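Both mergeAttempt and transplantAttempt delegate conflict detection to checkForKeyCollisions. The sketch below illustrates the idea under the assumption that CommitLogEntry exposes its puts (as KeyWithBytes) and deletes (as Key); it is not the adapter's actual implementation:

// Illustration of the key-collision check shared by merge and transplant:
// fail with a conflict if any key modified by the commits to apply was also
// modified on the target branch since the common ancestor.
protected void checkForKeyCollisions(
    OP_CONTEXT ctx,
    Hash toHead,
    Set<Key> keysTouchedOnTarget,
    List<CommitLogEntry> commitsToApply)
    throws ReferenceConflictException {
  Set<Key> collisions = new HashSet<>();
  for (CommitLogEntry entry : commitsToApply) {
    // Assumed accessors: getPuts() / getDeletes() on CommitLogEntry.
    for (KeyWithBytes put : entry.getPuts()) {
      if (keysTouchedOnTarget.contains(put.getKey())) {
        collisions.add(put.getKey());
      }
    }
    for (Key deleted : entry.getDeletes()) {
      if (keysTouchedOnTarget.contains(deleted)) {
        collisions.add(deleted);
      }
    }
  }
  if (!collisions.isEmpty()) {
    throw new ReferenceConflictException(
        String.format("The following keys have been changed in conflict: %s", collisions));
  }
}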