Example use of org.projectnessie.model.Content in the project nessie (by projectnessie):
class TestStoreWorker, method testDeserialization.
@ParameterizedTest
@MethodSource("provideDeserialization")
void testDeserialization(Map.Entry<ByteString, Content> entry) {
  // Each entry pairs a serialized representation with the model object it must decode to.
  ByteString serialized = entry.getKey();
  Content expected = entry.getValue();
  Content deserialized = worker.valueFromStore(serialized, Optional.empty());
  Assertions.assertEquals(expected, deserialized);
}
Example use of org.projectnessie.model.Content in the project nessie (by projectnessie):
class GenerateContent, method execute.
@Override
public void execute() throws BaseNessieClientServerException {
  // Validate the optional total runtime up front: when present it must be strictly positive.
  if (runtimeDuration != null) {
    if (runtimeDuration.isZero() || runtimeDuration.isNegative()) {
      // Fixed message: was "absent to greater than zero".
      throw new ParameterException(spec.commandLine(), "Duration must be absent or greater than zero.");
    }
  }
  // Spread the total runtime evenly over all commits; Duration.ZERO means no pause between commits.
  Duration perCommitDuration = Optional.ofNullable(runtimeDuration).orElse(Duration.ZERO).dividedBy(numCommits);
  ThreadLocalRandom random = ThreadLocalRandom.current();
  // Timestamp embedded in generated key/branch names so repeated runs do not collide.
  String runStartTime = DateTimeFormatter.ofPattern("yyyy-MM-dd-HH-mm-ss").format(LocalDateTime.now());
  List<ContentKey> tableNames = IntStream.range(0, numTables).mapToObj(i -> ContentKey.of(String.format("create-contents-%s", runStartTime), "contents", Integer.toString(i))).collect(Collectors.toList());
  try (NessieApiV1 api = createNessieApiInstance()) {
    Branch defaultBranch;
    if (defaultBranchName == null) {
      // Use the server's default branch.
      defaultBranch = api.getDefaultBranch();
    } else {
      // Use the specified default branch.
      try {
        defaultBranch = (Branch) api.getReference().refName(defaultBranchName).get();
      } catch (NessieReferenceNotFoundException e) {
        // Create the branch if it does not exist, forked off the server's default branch head.
        defaultBranch = api.getDefaultBranch();
        defaultBranch = (Branch) api.createReference().reference(Branch.of(defaultBranchName, defaultBranch.getHash())).sourceRefName(defaultBranch.getName()).create();
      }
    }
    // Build the pool of branches to commit against; the default branch counts towards branchCount.
    List<String> branches = new ArrayList<>();
    branches.add(defaultBranch.getName());
    while (branches.size() < branchCount) {
      // Create a new branch at the default branch's current head.
      String newBranchName = "branch-" + runStartTime + "_" + (branches.size() - 1);
      Branch branch = Branch.of(newBranchName, defaultBranch.getHash());
      spec.commandLine().getOut().printf("Creating branch '%s' from '%s' at %s%n", branch.getName(), defaultBranch.getName(), branch.getHash());
      api.createReference().reference(branch).sourceRefName(defaultBranch.getName()).create();
      branches.add(newBranchName);
    }
    spec.commandLine().getOut().printf("Starting contents generation, %d commits...%n", numCommits);
    for (int i = 0; i < numCommits; i++) {
      // Choose a random branch to commit to and re-resolve its current head for the commit.
      String branchName = branches.get(random.nextInt(branches.size()));
      Branch commitToBranch = (Branch) api.getReference().refName(branchName).get();
      ContentKey tableName = tableNames.get(random.nextInt(tableNames.size()));
      // Existing content (null on the first commit for a key) drives "Update" vs "Create" semantics.
      Content tableContents = api.getContent().refName(branchName).key(tableName).get().get(tableName);
      Content newContents = createContents(tableContents, random);
      spec.commandLine().getOut().printf("Committing content-key '%s' to branch '%s' at %s%n", tableName, commitToBranch.getName(), commitToBranch.getHash());
      CommitMultipleOperationsBuilder commit = api.commitMultipleOperations().branch(commitToBranch).commitMeta(CommitMeta.builder().message(String.format("%s table %s on %s, commit #%d of %d", tableContents != null ? "Update" : "Create", tableName, branchName, i, numCommits)).author(System.getProperty("user.name")).authorTime(Instant.now()).build());
      if (newContents instanceof IcebergTable || newContents instanceof IcebergView) {
        // Iceberg content passes the expected (previous) content for conflict detection.
        commit.operation(Put.of(tableName, newContents, tableContents));
      } else {
        commit.operation(Put.of(tableName, newContents));
      }
      Branch newHead = commit.commit();
      if (random.nextDouble() < newTagProbability) {
        // Occasionally pin the new commit with a tag.
        Tag tag = Tag.of("new-tag-" + random.nextLong(), newHead.getHash());
        spec.commandLine().getOut().printf("Creating tag '%s' from '%s' at %s%n", tag.getName(), branchName, tag.getHash());
        api.createReference().reference(tag).sourceRefName(branchName).create();
      }
      try {
        // Pace the commits so the whole run approximates runtimeDuration.
        TimeUnit.NANOSECONDS.sleep(perCommitDuration.toNanos());
      } catch (InterruptedException e) {
        // Restore the interrupt flag and stop generating further commits.
        Thread.currentThread().interrupt();
        break;
      }
    }
  }
  spec.commandLine().getOut().printf("Done creating contents.%n");
}
Example use of org.projectnessie.model.Content in the project nessie (by projectnessie):
class ReadContent, method execute.
@Override
public void execute() throws NessieNotFoundException {
  // Fetch and print the content stored under the configured key on the configured reference.
  try (NessieApiV1 api = createNessieApiInstance()) {
    ContentKey requestedKey = ContentKey.of(key);
    spec.commandLine().getOut().printf("Reading content for key '%s'\n\n", requestedKey);
    Map<ContentKey, Content> result = api.getContent().refName(ref).key(requestedKey).get();
    for (Map.Entry<ContentKey, Content> keyAndContent : result.entrySet()) {
      spec.commandLine().getOut().printf("Key: %s\n", keyAndContent.getKey());
      if (isVerbose()) {
        // Verbose mode additionally prints each element of the content key.
        List<String> elements = keyAndContent.getKey().getElements();
        for (int idx = 0; idx < elements.size(); idx++) {
          spec.commandLine().getOut().printf(" key[%d]: %s\n", idx, elements.get(idx));
        }
      }
      spec.commandLine().getOut().printf("Value: %s\n", keyAndContent.getValue());
    }
    spec.commandLine().getOut().printf("\nDone reading content for key '%s'\n\n", requestedKey);
  }
}
Example use of org.projectnessie.model.Content in the project nessie (by projectnessie):
class AbstractRestContents, method multiget.
@Test
public void multiget() throws BaseNessieClientServerException {
  // Commit two tables under distinct keys on a fresh branch.
  Branch branch = createBranch("foo");
  ContentKey keyA = ContentKey.of("a");
  ContentKey keyB = ContentKey.of("b");
  IcebergTable tableA = IcebergTable.of("path1", 42, 42, 42, 42);
  IcebergTable tableB = IcebergTable.of("path2", 42, 42, 42, 42);
  getApi().commitMultipleOperations().branch(branch).operation(Put.of(keyA, tableA)).commitMeta(CommitMeta.fromMessage("commit 1")).commit();
  getApi().commitMultipleOperations().branch(branch).operation(Put.of(keyB, tableB)).commitMeta(CommitMeta.fromMessage("commit 2")).commit();
  // A multi-get over both existing keys plus an unknown one returns only the existing entries.
  ContentKey missing = ContentKey.of("noexist");
  Map<ContentKey, Content> response = getApi().getContent().key(keyA).key(keyB).key(missing).refName("foo").get();
  assertThat(response).containsEntry(keyA, tableA).containsEntry(keyB, tableB).doesNotContainKey(missing);
}
Example use of org.projectnessie.model.Content in the project nessie (by projectnessie):
class AbstractRestDiff, method testDiff.
@ParameterizedTest
@MethodSource("diffRefModes")
// Exercises the diff API between two branches, with the "from"/"to" references supplied in
// the various ReferenceMode representations provided by diffRefModes.
public void testDiff(ReferenceMode refModeFrom, ReferenceMode refModeTo) throws BaseNessieClientServerException {
int commitsPerBranch = 10;
// Two fresh branches; only toRef receives commits initially.
Reference fromRef = getApi().createReference().reference(Branch.of("testDiffFromRef", null)).create();
Reference toRef = getApi().createReference().reference(Branch.of("testDiffToRef", null)).create();
String toRefHash = createCommits(toRef, 1, commitsPerBranch, toRef.getHash());
// Re-point toRef at the head produced by the commits above.
toRef = Branch.of(toRef.getName(), toRefHash);
List<DiffEntry> diffOnRefHeadResponse = getApi().getDiff().fromRef(refModeFrom.transform(fromRef)).toRef(refModeTo.transform(toRef)).get().getDiffs();
// we only committed to toRef, the "from" diff should be null
assertThat(diffOnRefHeadResponse).hasSize(commitsPerBranch).allSatisfy(diff -> {
assertThat(diff.getKey()).isNotNull();
assertThat(diff.getFrom()).isNull();
assertThat(diff.getTo()).isNotNull();
});
// Some combinations with explicit fromHashOnRef/toHashOnRef
// Pinning both sides to their current heads must yield the same diff as the head-based request.
assertThat(getApi().getDiff().fromRefName(fromRef.getName()).fromHashOnRef(fromRef.getHash()).toRefName(toRef.getName()).toHashOnRef(toRef.getHash()).get().getDiffs()).isEqualTo(diffOnRefHeadResponse);
// result
// Diffing a reference against itself (toRef re-pointed at fromRef's hash) must be empty.
// Skipped for NAME_ONLY, where the hash override cannot be expressed.
if (refModeTo != ReferenceMode.NAME_ONLY) {
Branch toRefAtFrom = Branch.of(toRef.getName(), fromRef.getHash());
assertThat(getApi().getDiff().fromRef(refModeFrom.transform(fromRef)).toRef(refModeTo.transform(toRefAtFrom)).get().getDiffs()).isEmpty();
}
// after committing to fromRef, "from/to" diffs should both have data
fromRef = Branch.of(fromRef.getName(), createCommits(fromRef, 1, commitsPerBranch, fromRef.getHash()));
assertThat(getApi().getDiff().fromRef(refModeFrom.transform(fromRef)).toRef(refModeTo.transform(toRef)).get().getDiffs()).hasSize(commitsPerBranch).allSatisfy(diff -> {
assertThat(diff.getKey()).isNotNull();
assertThat(diff.getFrom()).isNotNull();
assertThat(diff.getTo()).isNotNull();
// we only have a diff on the ID
assertThat(diff.getFrom().getId()).isNotEqualTo(diff.getTo().getId());
// Both sides must unwrap to IcebergTable with identical metadata apart from the content id.
Optional<IcebergTable> fromTable = diff.getFrom().unwrap(IcebergTable.class);
assertThat(fromTable).isPresent();
Optional<IcebergTable> toTable = diff.getTo().unwrap(IcebergTable.class);
assertThat(toTable).isPresent();
assertThat(fromTable.get().getMetadataLocation()).isEqualTo(toTable.get().getMetadataLocation());
assertThat(fromTable.get().getSchemaId()).isEqualTo(toTable.get().getSchemaId());
assertThat(fromTable.get().getSnapshotId()).isEqualTo(toTable.get().getSnapshotId());
assertThat(fromTable.get().getSortOrderId()).isEqualTo(toTable.get().getSortOrderId());
assertThat(fromTable.get().getSpecId()).isEqualTo(toTable.get().getSpecId());
});
// Keys follow the "table<i>" naming convention used by createCommits.
List<ContentKey> keys = IntStream.rangeClosed(0, commitsPerBranch).mapToObj(i -> ContentKey.of("table" + i)).collect(Collectors.toList());
// request all keys and delete the tables for them on toRef
Map<ContentKey, Content> map = getApi().getContent().refName(toRef.getName()).keys(keys).get();
for (Map.Entry<ContentKey, Content> entry : map.entrySet()) {
// NOTE(review): every delete commit passes the original toRefHash as the expected hash even
// though toRef advances each iteration — presumably the server accepts the stale expected
// hash for unrelated keys; confirm this is intentional.
toRef = getApi().commitMultipleOperations().branchName(toRef.getName()).hash(toRefHash).commitMeta(CommitMeta.fromMessage("delete")).operation(Delete.of(entry.getKey())).commit();
}
// now that we deleted all tables on toRef, the diff for "to" should be null
assertThat(getApi().getDiff().fromRef(refModeFrom.transform(fromRef)).toRef(refModeTo.transform(toRef)).get().getDiffs()).hasSize(commitsPerBranch).allSatisfy(diff -> {
assertThat(diff.getKey()).isNotNull();
assertThat(diff.getFrom()).isNotNull();
assertThat(diff.getTo()).isNull();
});
}
Aggregations