Use of org.projectnessie.versioned.Hash in project nessie by projectnessie:
class AbstractMergeTransplant, method transplant.
@ParameterizedTest
@ValueSource(ints = {3, 10, DEFAULT_KEY_LIST_DISTANCE, DEFAULT_KEY_LIST_DISTANCE + 1, 100})
void transplant(int numCommits) throws Exception {
  AtomicInteger unifier = new AtomicInteger();
  // Appends a unique, increasing suffix to each transplanted commit's metadata so the
  // log verification below can match commits to their transplant order.
  Function<ByteString, ByteString> metadataUpdater =
      commitMeta ->
          ByteString.copyFromUtf8(
              commitMeta.toStringUtf8() + " transplanted " + unifier.getAndIncrement());

  // Exercise transplants of increasing prefixes of the commit list.
  Hash[] commits =
      mergeTransplant(
          numCommits,
          (target, expectedHead, branch, commitHashes, i) ->
              databaseAdapter.transplant(
                  target,
                  expectedHead,
                  Arrays.asList(commitHashes).subList(0, i + 1),
                  metadataUpdater));

  BranchName conflict = BranchName.of("conflict");

  // No conflict when transplanting the commits against the current HEAD of the
  // conflict-branch.
  Hash noConflictHead = databaseAdapter.hashOnReference(conflict, Optional.empty());
  Hash transplanted =
      databaseAdapter.transplant(
          conflict, Optional.of(noConflictHead), Arrays.asList(commits), metadataUpdater);
  verifyTransplantedCommitLog(transplanted, commits, unifier.get());

  // Again, no conflict (same as above, just without an expected HEAD).
  transplanted =
      databaseAdapter.transplant(
          conflict, Optional.empty(), Arrays.asList(commits), metadataUpdater);
  verifyTransplantedCommitLog(transplanted, commits, unifier.get());

  // An empty list of hashes to transplant must be rejected.
  assertThatThrownBy(
          () ->
              databaseAdapter.transplant(
                  conflict, Optional.empty(), Collections.emptyList(), Function.identity()))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage("No hashes to transplant given.");
}

/**
 * Verifies that the commit log starting at {@code head} contains the transplanted commits in
 * reverse order, each carrying the {@code " transplanted <n>"} metadata suffix produced by the
 * metadata-updater, counting down from {@code offset - 1}.
 */
private void verifyTransplantedCommitLog(Hash head, Hash[] commits, int offset) throws Exception {
  try (Stream<CommitLogEntry> log = databaseAdapter.commitLog(head).limit(commits.length)) {
    AtomicInteger testOffset = new AtomicInteger(offset);
    assertThat(log.map(CommitLogEntry::getMetadata).map(ByteString::toStringUtf8))
        .containsExactlyElementsOf(
            IntStream.range(0, commits.length)
                .map(i -> commits.length - i - 1)
                .mapToObj(i -> "commit " + i + " transplanted " + testOffset.decrementAndGet())
                .collect(Collectors.toList()));
  }
}
Use of org.projectnessie.versioned.Hash in project nessie by projectnessie:
class TxDatabaseAdapter, method doFetchKeyLists.
@Override
protected Stream<KeyListEntity> doFetchKeyLists(ConnectionWrapper c, List<Hash> keyListsIds) {
  try (Traced ignore = trace("doFetchKeyLists.stream")) {
    // Build a SELECT with one placeholder per requested key-list id.
    String sql = sqlForManyPlaceholders(SqlStatements.SELECT_KEY_LIST_MANY, keyListsIds.size());
    return JdbcSelectSpliterator.buildStream(
        c.conn(),
        sql,
        preparedStatement -> {
          // Placeholder 1 is the repository id; the remaining ones are the ids.
          preparedStatement.setString(1, config.getRepositoryId());
          int idx = 2;
          for (Hash keyListId : keyListsIds) {
            preparedStatement.setString(idx, keyListId.asString());
            idx++;
          }
        },
        resultSet ->
            KeyListEntity.of(
                Hash.of(resultSet.getString(1)), protoToKeyList(resultSet.getBytes(2))));
  }
}
Use of org.projectnessie.versioned.Hash in project nessie by projectnessie:
class TxDatabaseAdapter, method create.
@SuppressWarnings("RedundantThrows")
@Override
public Hash create(NamedRef ref, Hash target)
    throws ReferenceAlreadyExistsException, ReferenceNotFoundException {
  try {
    return opLoop(
        "createRef",
        ref,
        true,
        (conn, nullHead) -> {
          // A reference with the same name must not already exist.
          if (checkNamedRefExistence(conn, ref.getName())) {
            throw referenceAlreadyExists(ref);
          }

          // Special case: a null 'target' points the new reference at NO_ANCESTOR.
          // This is mostly used for tests that re-create the default-branch.
          Hash newHead = target == null ? NO_ANCESTOR : target;

          validateHashExists(conn, newHead);

          insertNewReference(conn, ref, newHead);

          commitRefLog(
              conn,
              commitTimeInMicros(),
              newHead,
              ref,
              RefLogEntry.Operation.CREATE_REFERENCE,
              Collections.emptyList());

          return newHead;
        },
        () -> createConflictMessage("Conflict", ref, target),
        () -> createConflictMessage("Retry-Failure", ref, target));
  } catch (ReferenceAlreadyExistsException | ReferenceNotFoundException | RuntimeException e) {
    throw e;
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Use of org.projectnessie.versioned.Hash in project nessie by projectnessie:
class TxDatabaseAdapter, method namedRef.
@Override
public ReferenceInfo<ByteString> namedRef(String ref, GetNamedRefsParams params)
    throws ReferenceNotFoundException {
  Preconditions.checkNotNull(params, "Parameter for GetNamedRefsParams must not be null");
  try (ConnectionWrapper conn = borrowConnection()) {
    // Resolve the single named reference first; throws if the name is unknown.
    ReferenceInfo<ByteString> info = fetchNamedRef(conn, ref);
    // The default-branch HEAD is passed on to the filter/enhance step below.
    Hash defaultBranchHead = namedRefsDefaultBranchHead(conn, params);
    return namedRefsFilterAndEnhance(conn, params, defaultBranchHead, Stream.of(info))
        .findFirst()
        .orElseThrow(() -> referenceNotFound(ref));
  }
}
Use of org.projectnessie.versioned.Hash in project nessie by projectnessie:
class TxDatabaseAdapter, method getRefLogHead.
/**
 * Fetches the current ref-log HEAD and its parent entries for the configured repository.
 *
 * @param conn connection to run the query on
 * @return the ref-log HEAD, or {@code null} if no ref-log row exists for the repository
 * @throws SQLException on database access errors
 */
protected RefLogHead getRefLogHead(ConnectionWrapper conn) throws SQLException {
  try (Traced ignore = trace("getRefLogHead");
      PreparedStatement psSelect = conn.conn().prepareStatement(SqlStatements.SELECT_REF_LOG_HEAD)) {
    psSelect.setString(1, config.getRepositoryId());
    // Manage the ResultSet with try-with-resources instead of relying on the
    // statement's close to cascade.
    try (ResultSet resultSet = psSelect.executeQuery()) {
      if (!resultSet.next()) {
        // No ref-log row for this repository (yet).
        return null;
      }
      Hash head = Hash.of(resultSet.getString(1));
      ImmutableRefLogHead.Builder refLogHead = RefLogHead.builder().refLogHead(head);
      // Column 2 holds the serialized parent list; may be absent.
      byte[] parentsBytes = resultSet.getBytes(2);
      if (parentsBytes != null) {
        try {
          RefLogParents refLogParents = RefLogParents.parseFrom(parentsBytes);
          refLogParents
              .getRefLogParentsInclHeadList()
              .forEach(b -> refLogHead.addRefLogParentsInclHead(Hash.of(b)));
        } catch (InvalidProtocolBufferException e) {
          // Persisted parent list is corrupt; surface as unchecked, preserving the cause.
          throw new RuntimeException(e);
        }
      }
      return refLogHead.build();
    }
  }
}
Aggregations