Use of org.projectnessie.model.CommitMeta in project nessie by projectnessie:
class AbstractRestNamespace, method testNamespaceMergeWithConflict.
@Test
public void testNamespaceMergeWithConflict() throws BaseNessieClientServerException {
  Branch baseBranch = createBranch("merge-base");
  Branch workBranch = createBranch("merge-branch");
  Namespace namespace = Namespace.parse("a.b.c");

  // Create the namespace "a.b.c" on the base branch and refresh its head.
  getApi().createNamespace().namespace(namespace).refName(baseBranch.getName()).create();
  baseBranch = (Branch) getApi().getReference().refName(baseBranch.getName()).get();

  // On the other branch, commit a table under the very same key "a.b.c".
  IcebergTable table = IcebergTable.of("merge-table1", 42, 42, 42, 42);
  workBranch =
      getApi()
          .commitMultipleOperations()
          .branchName(workBranch.getName())
          .hash(workBranch.getHash())
          .commitMeta(CommitMeta.fromMessage("test-merge-branch1"))
          .operation(Put.of(ContentKey.of("a", "b", "c"), table))
          .commit();

  Branch mergeTarget = baseBranch;
  Branch mergeSource = workBranch;
  // Merging must fail, because the namespace and the table collide on the same key.
  assertThatThrownBy(
          () -> getApi().mergeRefIntoBranch().branch(mergeTarget).fromRef(mergeSource).merge())
      .isInstanceOf(NessieReferenceConflictException.class)
      .hasMessage("The following keys have been changed in conflict: 'a.b.c'");

  // The failed merge must not have added any commit ("test-merge-branch1" must be absent).
  LogResponse log =
      getApi().getCommitLog().refName(baseBranch.getName()).untilHash(baseBranch.getHash()).get();
  assertThat(log.getLogEntries().stream().map(LogEntry::getCommitMeta).map(CommitMeta::getMessage))
      .containsExactly("create namespace a.b.c");

  // The base branch still holds exactly the namespace entry, and the namespace is retrievable.
  List<Entry> entries = getApi().getEntries().refName(baseBranch.getName()).get().getEntries();
  assertThat(entries.stream().map(Entry::getName))
      .containsExactly(ContentKey.of(namespace.getElements()));
  assertThat(getApi().getNamespace().refName(baseBranch.getName()).namespace(namespace).get())
      .isNotNull();
}
Use of org.projectnessie.model.CommitMeta in project nessie by projectnessie:
class AbstractRestRefLog, method testReflog.
@Test
public void testReflog() throws BaseNessieClientServerException {
// Exercises the reflog end-to-end: performs 12 reference operations (create/assign/merge/
// transplant/delete on branches and a tag), then verifies the reflog entries, pagination,
// hash-range queries, error handling for an unknown reflog id, source hashes, and filtering.
String tagName = "tag1_test_reflog";
String branch1 = "branch1_test_reflog";
String branch2 = "branch2_test_reflog";
String branch3 = "branch3_test_reflog";
String root = "ref_name_test_reflog";
// Expected (refName, operation) tuples, appended in execution order; reversed later because
// the reflog returns newest entries first.
List<Tuple> expectedEntries = new ArrayList<>(12);
// reflog 1: creating the default branch0
Branch branch0 = createBranch(root);
expectedEntries.add(Tuple.tuple(root, "CREATE_REFERENCE"));
// reflog 2: create tag1
Reference createdTag = getApi().createReference().sourceRefName(branch0.getName()).reference(Tag.of(tagName, branch0.getHash())).create();
expectedEntries.add(Tuple.tuple(tagName, "CREATE_REFERENCE"));
// reflog 3: create branch1
Reference createdBranch1 = getApi().createReference().sourceRefName(branch0.getName()).reference(Branch.of(branch1, branch0.getHash())).create();
expectedEntries.add(Tuple.tuple(branch1, "CREATE_REFERENCE"));
// reflog 4: create branch2
Reference createdBranch2 = getApi().createReference().sourceRefName(branch0.getName()).reference(Branch.of(branch2, branch0.getHash())).create();
expectedEntries.add(Tuple.tuple(branch2, "CREATE_REFERENCE"));
// reflog 5: create branch3
Branch createdBranch3 = (Branch) getApi().createReference().sourceRefName(branch0.getName()).reference(Branch.of(branch3, branch0.getHash())).create();
expectedEntries.add(Tuple.tuple(branch3, "CREATE_REFERENCE"));
// reflog 6: commit on default branch0
IcebergTable meta = IcebergTable.of("meep", 42, 42, 42, 42);
branch0 = getApi().commitMultipleOperations().branchName(branch0.getName()).hash(branch0.getHash()).commitMeta(CommitMeta.builder().message("dummy commit log").properties(ImmutableMap.of("prop1", "val1", "prop2", "val2")).build()).operation(Operation.Put.of(ContentKey.of("meep"), meta)).commit();
expectedEntries.add(Tuple.tuple(root, "COMMIT"));
// reflog 7: assign tag
getApi().assignTag().tagName(tagName).hash(createdTag.getHash()).assignTo(branch0).assign();
expectedEntries.add(Tuple.tuple(tagName, "ASSIGN_REFERENCE"));
// reflog 8: assign ref
getApi().assignBranch().branchName(branch1).hash(createdBranch1.getHash()).assignTo(branch0).assign();
expectedEntries.add(Tuple.tuple(branch1, "ASSIGN_REFERENCE"));
// reflog 9: merge
getApi().mergeRefIntoBranch().branchName(branch2).hash(createdBranch2.getHash()).fromRefName(branch1).fromHash(branch0.getHash()).merge();
expectedEntries.add(Tuple.tuple(branch2, "MERGE"));
// reflog 10: transplant
getApi().transplantCommitsIntoBranch().hashesToTransplant(ImmutableList.of(Objects.requireNonNull(branch0.getHash()))).fromRefName(branch1).branch(createdBranch3).transplant();
expectedEntries.add(Tuple.tuple(branch3, "TRANSPLANT"));
// reflog 11: delete branch
getApi().deleteBranch().branchName(branch1).hash(branch0.getHash()).delete();
expectedEntries.add(Tuple.tuple(branch1, "DELETE_REFERENCE"));
// reflog 12: delete tag
getApi().deleteTag().tagName(tagName).hash(branch0.getHash()).delete();
expectedEntries.add(Tuple.tuple(tagName, "DELETE_REFERENCE"));
// In the reflog output new entry will be the head. Hence, reverse the expected list
Collections.reverse(expectedEntries);
RefLogResponse refLogResponse = getApi().getRefLog().get();
// verify reflog entries: the 12 newest entries must match the operations above, newest first
assertThat(refLogResponse.getLogEntries().subList(0, 12)).extracting(RefLogResponse.RefLogResponseEntry::getRefName, RefLogResponse.RefLogResponseEntry::getOperation).isEqualTo(expectedEntries);
// verify pagination (limit and token)
RefLogResponse refLogResponse1 = getApi().getRefLog().maxRecords(2).get();
assertThat(refLogResponse1.getLogEntries()).isEqualTo(refLogResponse.getLogEntries().subList(0, 2));
assertThat(refLogResponse1.isHasMore()).isTrue();
RefLogResponse refLogResponse2 = getApi().getRefLog().pageToken(refLogResponse1.getToken()).get();
// should start from the token.
assertThat(refLogResponse2.getLogEntries().get(0).getRefLogId()).isEqualTo(refLogResponse1.getToken());
assertThat(refLogResponse2.getLogEntries().subList(0, 10)).isEqualTo(refLogResponse.getLogEntries().subList(2, 12));
// verify startHash and endHash: fromHash alone pages from that entry onwards ...
RefLogResponse refLogResponse3 = getApi().getRefLog().fromHash(refLogResponse.getLogEntries().get(10).getRefLogId()).get();
assertThat(refLogResponse3.getLogEntries().subList(0, 2)).isEqualTo(refLogResponse.getLogEntries().subList(10, 12));
// ... and fromHash + untilHash returns the inclusive range [3, 5]
RefLogResponse refLogResponse4 = getApi().getRefLog().fromHash(refLogResponse.getLogEntries().get(3).getRefLogId()).untilHash(refLogResponse.getLogEntries().get(5).getRefLogId()).get();
assertThat(refLogResponse4.getLogEntries()).isEqualTo(refLogResponse.getLogEntries().subList(3, 6));
// use invalid reflog id f1234d75178d892a133a410355a5a990cf75d2f33eba25d575943d4df632f3a4
// computed using Hash.of(
// UnsafeByteOperations.unsafeWrap(newHasher().putString("invalid",
// StandardCharsets.UTF_8).hash().asBytes()));
assertThatThrownBy(() -> getApi().getRefLog().fromHash("f1234d75178d892a133a410355a5a990cf75d2f33eba25d575943d4df632f3a4").get()).isInstanceOf(NessieRefLogNotFoundException.class).hasMessageContaining("RefLog entry for 'f1234d75178d892a133a410355a5a990cf75d2f33eba25d575943d4df632f3a4' does not exist");
// verify source hashes for assign reference (index 4 = reflog 8, newest-first ordering)
assertThat(refLogResponse.getLogEntries().get(4).getSourceHashes()).isEqualTo(Collections.singletonList(createdBranch1.getHash()));
// verify source hashes for merge (index 3 = reflog 9)
assertThat(refLogResponse.getLogEntries().get(3).getSourceHashes()).isEqualTo(Collections.singletonList(branch0.getHash()));
// verify source hashes for transplant (index 2 = reflog 10)
assertThat(refLogResponse.getLogEntries().get(2).getSourceHashes()).isEqualTo(Collections.singletonList(branch0.getHash()));
// test filter with stream: only reflog 7 (assign tag) matches operation + refName
List<RefLogResponse.RefLogResponseEntry> filteredResult = StreamingUtil.getReflogStream(getApi(), builder -> builder.filter("reflog.operation == 'ASSIGN_REFERENCE' " + "&& reflog.refName == 'tag1_test_reflog'"), OptionalInt.empty()).collect(Collectors.toList());
assertThat(filteredResult.size()).isEqualTo(1);
assertThat(filteredResult.get(0)).extracting(RefLogResponse.RefLogResponseEntry::getRefName, RefLogResponse.RefLogResponseEntry::getOperation).isEqualTo(expectedEntries.get(5).toList());
}
Use of org.projectnessie.model.CommitMeta in project nessie by projectnessie:
class AbstractRestCommitLog, method commitLogPaging.
@Test
public void commitLogPaging() throws BaseNessieClientServerException {
  Branch branch = createBranch("commitLogPaging");
  final int commits = 95;
  final int pageSizeHint = 10;

  // Create 95 commits, each putting a distinct table snapshot under the same key.
  String head = branch.getHash();
  List<String> allMessages = new ArrayList<>();
  for (int i = 0; i < commits; i++) {
    String message = "message-for-" + i;
    allMessages.add(message);
    IcebergTable tableMeta = IcebergTable.of("some-file-" + i, 42, 42, 42, 42);
    String newHead =
        getApi()
            .commitMultipleOperations()
            .branchName(branch.getName())
            .hash(head)
            .commitMeta(CommitMeta.fromMessage(message))
            .operation(Put.of(ContentKey.of("table"), tableMeta))
            .commit()
            .getHash();
    assertNotEquals(head, newHead);
    head = newHead;
  }
  // The commit log is returned newest-first, so flip the expected order.
  Collections.reverse(allMessages);

  // Page through the log explicitly and check every page.
  verifyPaging(branch.getName(), commits, pageSizeHint, allMessages, null);

  // The streaming helper must yield the same complete log.
  List<CommitMeta> completeLog =
      StreamingUtil.getCommitLogStream(
              getApi(),
              c -> c.refName(branch.getName()).fetch(FetchOption.MINIMAL),
              OptionalInt.of(pageSizeHint))
          .map(LogEntry::getCommitMeta)
          .collect(Collectors.toList());
  assertEquals(
      completeLog.stream().map(CommitMeta::getMessage).collect(Collectors.toList()), allMessages);
}
Use of org.projectnessie.model.CommitMeta in project nessie by projectnessie:
class AbstractRestCommitLog, method commitLogPagingAndFilteringByAuthor.
@Test
public void commitLogPagingAndFilteringByAuthor() throws BaseNessieClientServerException {
  Branch branch = createBranch("commitLogPagingAndFiltering");
  final int numAuthors = 3;
  final int commits = 45;
  final int pageSizeHint = 10;
  final int expectedTotalSize = numAuthors * commits;

  createCommits(branch, numAuthors, commits, branch.getHash());

  // The unfiltered log must contain every commit from every author.
  LogResponse log = getApi().getCommitLog().refName(branch.getName()).get();
  assertThat(log).isNotNull();
  assertThat(log.getLogEntries()).hasSize(expectedTotalSize);

  // Page through the log filtered to a single author and check every page.
  String author = "author-1";
  List<String> messagesOfAuthorOne =
      log.getLogEntries().stream()
          .map(LogEntry::getCommitMeta)
          .filter(meta -> author.equals(meta.getAuthor()))
          .map(CommitMeta::getMessage)
          .collect(Collectors.toList());
  verifyPaging(branch.getName(), commits, pageSizeHint, messagesOfAuthorOne, author);

  // The streaming helper without a filter must yield the full, unfiltered log.
  List<String> allMessages =
      log.getLogEntries().stream()
          .map(LogEntry::getCommitMeta)
          .map(CommitMeta::getMessage)
          .collect(Collectors.toList());
  List<CommitMeta> completeLog =
      StreamingUtil.getCommitLogStream(
              getApi(),
              c -> c.refName(branch.getName()).fetch(FetchOption.MINIMAL),
              OptionalInt.of(pageSizeHint))
          .map(LogEntry::getCommitMeta)
          .collect(Collectors.toList());
  assertThat(completeLog.stream().map(CommitMeta::getMessage)).containsExactlyElementsOf(allMessages);
}
Use of org.projectnessie.model.CommitMeta in project nessie by projectnessie:
class AbstractRestCommitLog, method verifyPaging.
/**
 * Pages through the commit log of {@code branchName} and asserts that each page holds exactly
 * the expected slice of {@code commitMessages}.
 *
 * @param branchName name of the branch whose commit log is paged through
 * @param commits total number of commits expected to match (the page loop bound)
 * @param pageSizeHint requested page size; full pages must contain exactly this many entries
 * @param commitMessages expected commit messages, newest first
 * @param filterByAuthor if non-null, restricts the log to this author via a CEL filter
 * @throws NessieNotFoundException if the branch does not exist
 */
void verifyPaging(String branchName, int commits, int pageSizeHint, List<String> commitMessages, String filterByAuthor) throws NessieNotFoundException {
  // The filter does not change between pages; build it once instead of on every iteration.
  String filter =
      filterByAuthor == null ? null : String.format("commit.author=='%s'", filterByAuthor);
  String pageToken = null;
  for (int pos = 0; pos < commits; pos += pageSizeHint) {
    LogResponse response =
        getApi()
            .getCommitLog()
            .refName(branchName)
            .maxRecords(pageSizeHint)
            .pageToken(pageToken)
            .filter(filter)
            .get();
    List<String> pageMessages =
        response.getLogEntries().stream()
            .map(LogEntry::getCommitMeta)
            .map(CommitMeta::getMessage)
            .collect(Collectors.toList());
    if (pos + pageSizeHint <= commits) {
      // Full page: more results must follow and the token must point at the next page.
      assertTrue(response.isHasMore());
      assertNotNull(response.getToken());
      assertEquals(commitMessages.subList(pos, pos + pageSizeHint), pageMessages);
      pageToken = response.getToken();
    } else {
      // Final, possibly partial page: no further results and no continuation token.
      assertFalse(response.isHasMore());
      assertNull(response.getToken());
      assertEquals(commitMessages.subList(pos, commitMessages.size()), pageMessages);
      break;
    }
  }
}
Aggregations