use of org.projectnessie.model.Operation in project iceberg by apache.
the class NessieCatalog method renameTable.
@Override
public void renameTable(TableIdentifier from, TableIdentifier toOriginal) {
reference.checkMutable();
TableIdentifier to = NessieUtil.removeCatalogName(toOriginal, name());
IcebergTable existingFromTable = table(from);
if (existingFromTable == null) {
throw new NoSuchTableException("table %s doesn't exist", from.name());
}
IcebergTable existingToTable = table(to);
if (existingToTable != null) {
throw new AlreadyExistsException("table %s already exists", to.name());
}
CommitMultipleOperationsBuilder operations = api.commitMultipleOperations()
    .commitMeta(NessieUtil.buildCommitMetadata(
        String.format("Iceberg rename table from '%s' to '%s'", from, to), catalogOptions))
    .operation(Operation.Put.of(NessieUtil.toKey(to), existingFromTable, existingFromTable))
    .operation(Operation.Delete.of(NessieUtil.toKey(from)));
try {
Tasks.foreach(operations)
    .retry(5)
    .stopRetryOn(NessieNotFoundException.class)
    .throwFailureWhenFinished()
    .onFailure((o, exception) -> refresh())
    .run(ops -> {
      Branch branch = ops.branch(reference.getAsBranch()).commit();
      reference.updateReference(branch);
    }, BaseNessieClientServerException.class);
} catch (NessieNotFoundException e) {
  // The NotFoundException refers to the ref, not the table: a concurrent deletion of the table
  // would surface as a conflict instead. This is analogous to a git merge conflict where a table
  // has been changed by one user and removed by another.
  throw new RuntimeException("Failed to rename table as ref is no longer valid.", e);
} catch (BaseNessieClientServerException e) {
  throw new CommitFailedException(e, "Failed to rename table: the current reference is not up to date.");
} catch (HttpClientException ex) {
  // Intentionally just "throw through" Nessie's HttpClientException here and do not "special case"
  // just the "timeout" variant to propagate all kinds of network errors (e.g. connection reset).
  // Network code implementation details and all kinds of network devices can induce unexpected
  // behavior. So better be safe than sorry.
  throw new CommitStateUnknownException(ex);
}
}
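For orientation, a minimal caller-side sketch of the rename path above. This is an assumption-laden illustration: the catalog properties, endpoint, and table identifiers are made up, not taken from the project.

import com.google.common.collect.ImmutableMap;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.nessie.NessieCatalog;

// Hypothetical usage sketch: the endpoint, ref, warehouse and identifiers are assumed values.
NessieCatalog catalog = new NessieCatalog();
catalog.initialize("nessie", ImmutableMap.of(
    "uri", "http://localhost:19120/api/v1", // assumed Nessie REST endpoint
    "ref", "main",
    "warehouse", "/tmp/warehouse"));
// As shown above, the rename is committed as one atomic Nessie commit:
// a Put of the table under the new key plus a Delete of the old key.
catalog.renameTable(
    TableIdentifier.of("db", "src_table"),
    TableIdentifier.of("db", "dst_table"));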
use of org.projectnessie.model.Operation in project iceberg by apache.
the class NessieCatalog method dropTable.
@Override
public boolean dropTable(TableIdentifier identifier, boolean purge) {
reference.checkMutable();
IcebergTable existingTable = table(identifier);
if (existingTable == null) {
return false;
}
if (purge) {
LOG.info("Purging data for table {} was set to true but is ignored", identifier.toString());
}
CommitMultipleOperationsBuilder commitBuilderBase = api.commitMultipleOperations()
    .commitMeta(NessieUtil.buildCommitMetadata(
        String.format("Iceberg delete table %s", identifier), catalogOptions))
    .operation(Operation.Delete.of(NessieUtil.toKey(identifier)));
// Try to drop the table; on failure, refresh the reference and retry (up to 5 times).
boolean threw = true;
try {
Tasks.foreach(commitBuilderBase)
    .retry(5)
    .stopRetryOn(NessieNotFoundException.class)
    .throwFailureWhenFinished()
    .onFailure((o, exception) -> refresh())
    .run(commitBuilder -> {
      Branch branch = commitBuilder.branch(reference.getAsBranch()).commit();
      reference.updateReference(branch);
    }, BaseNessieClientServerException.class);
threw = false;
} catch (NessieConflictException e) {
LOG.error("Cannot drop table: failed after retry (update ref and retry)", e);
} catch (NessieNotFoundException e) {
LOG.error("Cannot drop table: ref is no longer valid.", e);
} catch (BaseNessieClientServerException e) {
LOG.error("Cannot drop table: unknown error", e);
}
return !threw;
}
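A companion sketch for the drop path, reusing the hypothetical catalog from the rename example above: the boolean result folds "table absent" and "commit failed after retries" into false, and purge is logged but ignored.

// Hypothetical usage sketch: `catalog` is the assumed NessieCatalog from the previous example.
boolean dropped = catalog.dropTable(
    TableIdentifier.of("db", "dst_table"), true /* purge is requested but ignored */);
if (!dropped) {
  // Either the table did not exist, or the Nessie commit failed even after retries.
}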
use of org.projectnessie.model.Operation in project nessie by projectnessie.
the class AbstractRestRefLog method testReflog.
@Test
public void testReflog() throws BaseNessieClientServerException {
String tagName = "tag1_test_reflog";
String branch1 = "branch1_test_reflog";
String branch2 = "branch2_test_reflog";
String branch3 = "branch3_test_reflog";
String root = "ref_name_test_reflog";
List<Tuple> expectedEntries = new ArrayList<>(12);
// reflog 1: creating the default branch0
Branch branch0 = createBranch(root);
expectedEntries.add(Tuple.tuple(root, "CREATE_REFERENCE"));
// reflog 2: create tag1
Reference createdTag = getApi().createReference()
    .sourceRefName(branch0.getName())
    .reference(Tag.of(tagName, branch0.getHash()))
    .create();
expectedEntries.add(Tuple.tuple(tagName, "CREATE_REFERENCE"));
// reflog 3: create branch1
Reference createdBranch1 = getApi().createReference()
    .sourceRefName(branch0.getName())
    .reference(Branch.of(branch1, branch0.getHash()))
    .create();
expectedEntries.add(Tuple.tuple(branch1, "CREATE_REFERENCE"));
// reflog 4: create branch2
Reference createdBranch2 = getApi().createReference()
    .sourceRefName(branch0.getName())
    .reference(Branch.of(branch2, branch0.getHash()))
    .create();
expectedEntries.add(Tuple.tuple(branch2, "CREATE_REFERENCE"));
// reflog 5: create branch3
Branch createdBranch3 = (Branch) getApi().createReference()
    .sourceRefName(branch0.getName())
    .reference(Branch.of(branch3, branch0.getHash()))
    .create();
expectedEntries.add(Tuple.tuple(branch3, "CREATE_REFERENCE"));
// reflog 6: commit on default branch0
IcebergTable meta = IcebergTable.of("meep", 42, 42, 42, 42);
branch0 = getApi().commitMultipleOperations()
    .branchName(branch0.getName())
    .hash(branch0.getHash())
    .commitMeta(CommitMeta.builder()
        .message("dummy commit log")
        .properties(ImmutableMap.of("prop1", "val1", "prop2", "val2"))
        .build())
    .operation(Operation.Put.of(ContentKey.of("meep"), meta))
    .commit();
expectedEntries.add(Tuple.tuple(root, "COMMIT"));
// reflog 7: assign tag
getApi().assignTag().tagName(tagName).hash(createdTag.getHash()).assignTo(branch0).assign();
expectedEntries.add(Tuple.tuple(tagName, "ASSIGN_REFERENCE"));
// reflog 8: assign ref
getApi().assignBranch().branchName(branch1).hash(createdBranch1.getHash()).assignTo(branch0).assign();
expectedEntries.add(Tuple.tuple(branch1, "ASSIGN_REFERENCE"));
// reflog 9: merge
getApi().mergeRefIntoBranch()
    .branchName(branch2)
    .hash(createdBranch2.getHash())
    .fromRefName(branch1)
    .fromHash(branch0.getHash())
    .merge();
expectedEntries.add(Tuple.tuple(branch2, "MERGE"));
// reflog 10: transplant
getApi().transplantCommitsIntoBranch()
    .hashesToTransplant(ImmutableList.of(Objects.requireNonNull(branch0.getHash())))
    .fromRefName(branch1)
    .branch(createdBranch3)
    .transplant();
expectedEntries.add(Tuple.tuple(branch3, "TRANSPLANT"));
// reflog 11: delete branch
getApi().deleteBranch().branchName(branch1).hash(branch0.getHash()).delete();
expectedEntries.add(Tuple.tuple(branch1, "DELETE_REFERENCE"));
// reflog 12: delete tag
getApi().deleteTag().tagName(tagName).hash(branch0.getHash()).delete();
expectedEntries.add(Tuple.tuple(tagName, "DELETE_REFERENCE"));
// In the reflog output the newest entry is at the head, hence reverse the expected list.
Collections.reverse(expectedEntries);
RefLogResponse refLogResponse = getApi().getRefLog().get();
// verify reflog entries
assertThat(refLogResponse.getLogEntries().subList(0, 12))
    .extracting(RefLogResponse.RefLogResponseEntry::getRefName, RefLogResponse.RefLogResponseEntry::getOperation)
    .isEqualTo(expectedEntries);
// verify pagination (limit and token)
RefLogResponse refLogResponse1 = getApi().getRefLog().maxRecords(2).get();
assertThat(refLogResponse1.getLogEntries()).isEqualTo(refLogResponse.getLogEntries().subList(0, 2));
assertThat(refLogResponse1.isHasMore()).isTrue();
RefLogResponse refLogResponse2 = getApi().getRefLog().pageToken(refLogResponse1.getToken()).get();
// should start from the token.
assertThat(refLogResponse2.getLogEntries().get(0).getRefLogId()).isEqualTo(refLogResponse1.getToken());
assertThat(refLogResponse2.getLogEntries().subList(0, 10)).isEqualTo(refLogResponse.getLogEntries().subList(2, 12));
// verify fromHash and untilHash
RefLogResponse refLogResponse3 = getApi().getRefLog()
    .fromHash(refLogResponse.getLogEntries().get(10).getRefLogId())
    .get();
assertThat(refLogResponse3.getLogEntries().subList(0, 2)).isEqualTo(refLogResponse.getLogEntries().subList(10, 12));
RefLogResponse refLogResponse4 = getApi().getRefLog()
    .fromHash(refLogResponse.getLogEntries().get(3).getRefLogId())
    .untilHash(refLogResponse.getLogEntries().get(5).getRefLogId())
    .get();
assertThat(refLogResponse4.getLogEntries()).isEqualTo(refLogResponse.getLogEntries().subList(3, 6));
// use invalid reflog id f1234d75178d892a133a410355a5a990cf75d2f33eba25d575943d4df632f3a4
// computed using Hash.of(
// UnsafeByteOperations.unsafeWrap(newHasher().putString("invalid",
// StandardCharsets.UTF_8).hash().asBytes()));
assertThatThrownBy(() -> getApi().getRefLog()
        .fromHash("f1234d75178d892a133a410355a5a990cf75d2f33eba25d575943d4df632f3a4")
        .get())
    .isInstanceOf(NessieRefLogNotFoundException.class)
    .hasMessageContaining("RefLog entry for 'f1234d75178d892a133a410355a5a990cf75d2f33eba25d575943d4df632f3a4' does not exist");
// verify source hashes for assign reference
assertThat(refLogResponse.getLogEntries().get(4).getSourceHashes()).isEqualTo(Collections.singletonList(createdBranch1.getHash()));
// verify source hashes for merge
assertThat(refLogResponse.getLogEntries().get(3).getSourceHashes()).isEqualTo(Collections.singletonList(branch0.getHash()));
// verify source hashes for transplant
assertThat(refLogResponse.getLogEntries().get(2).getSourceHashes()).isEqualTo(Collections.singletonList(branch0.getHash()));
// test filter with stream
List<RefLogResponse.RefLogResponseEntry> filteredResult = StreamingUtil.getReflogStream(
        getApi(),
        builder -> builder.filter("reflog.operation == 'ASSIGN_REFERENCE' && reflog.refName == 'tag1_test_reflog'"),
        OptionalInt.empty())
    .collect(Collectors.toList());
assertThat(filteredResult.size()).isEqualTo(1);
assertThat(filteredResult.get(0))
    .extracting(RefLogResponse.RefLogResponseEntry::getRefName, RefLogResponse.RefLogResponseEntry::getOperation)
    .isEqualTo(expectedEntries.get(5).toList());
}
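The pagination assertions above translate directly into a client-side paging loop. A sketch, assuming a NessieApiV1 handle named api; every builder method used here already appears in the test above.

// Walk the reflog two entries per page, following the continuation token.
RefLogResponse page = api.getRefLog().maxRecords(2).get();
while (true) {
  for (RefLogResponse.RefLogResponseEntry entry : page.getLogEntries()) {
    System.out.printf("%s %s%n", entry.getRefName(), entry.getOperation());
  }
  if (!page.isHasMore()) {
    break; // no further pages
  }
  page = api.getRefLog().maxRecords(2).pageToken(page.getToken()).get();
}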
use of org.projectnessie.model.Operation in project nessie by projectnessie.
the class TreeApiImpl method filterCommitLog.
/**
 * Filters the {@link Stream} of commits with the given CEL filter expression.
 *
 * @param logEntries The commit log entries that the filter will be applied to
 * @param filter The CEL expression to filter by
 * @return A potentially filtered {@link Stream} of commits
 */
private Stream<LogEntry> filterCommitLog(Stream<LogEntry> logEntries, String filter) {
if (Strings.isNullOrEmpty(filter)) {
return logEntries;
}
final Script script;
try {
script = SCRIPT_HOST.buildScript(filter)
    .withContainer(CONTAINER)
    .withDeclarations(COMMIT_LOG_DECLARATIONS)
    .withTypes(COMMIT_LOG_TYPES)
    .build();
} catch (ScriptException e) {
throw new IllegalArgumentException(e);
}
return logEntries.filter(logEntry -> {
try {
List<Operation> operations = logEntry.getOperations();
if (operations == null) {
operations = Collections.emptyList();
}
// ContentKey has some @JsonIgnore attributes, which would otherwise not be accessible.
List<Object> operationsForCel = operations.stream().map(CELUtil::forCel).collect(Collectors.toList());
return script.execute(Boolean.class, ImmutableMap.of(VAR_COMMIT, logEntry.getCommitMeta(), VAR_OPERATIONS, operationsForCel));
} catch (ScriptException e) {
throw new RuntimeException(e);
}
});
}
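From the client side, the variables bound above (commit for the CommitMeta, operations for the per-commit operations) are what a filter expression can reference. A hedged sketch; the api handle, branch name, and the exact CEL field names are assumptions for illustration.

// Sketch, assuming a NessieApiV1 handle `api`; `commit.author` and `op.key`
// are assumed CEL fields exposed via the declarations referenced above.
LogResponse log = api.getCommitLog()
    .refName("main")
    .filter("commit.author == 'alice' && operations.exists(op, op.key == 'db.table1')")
    .get();
log.getLogEntries().forEach(e -> System.out.println(e.getCommitMeta().getMessage()));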
use of org.projectnessie.model.Operation in project nessie by projectnessie.
the class IdentifyContentsPerExecutor method handleCommitForExpiredContents.
private static void handleCommitForExpiredContents(
    Reference reference,
    LogResponse.LogEntry logEntry,
    Map<String, ContentBloomFilter> liveContentsBloomFilterMap,
    IdentifiedResult result) {
if (logEntry.getOperations() != null) {
logEntry.getOperations().stream()
    .filter(operation -> operation instanceof Operation.Put)
    .forEach(operation -> {
Content content = ((Operation.Put) operation).getContent();
ContentBloomFilter bloomFilter = liveContentsBloomFilterMap.get(content.getId());
// Live contents are never considered expired; a bloom-filter miss is authoritative.
if (bloomFilter == null || !bloomFilter.mightContain(content)) {
result.addContent(reference.getName(), content);
}
});
}
}
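The expiry test above leans on the bloom filter's one-sided error: mightContain can return false positives but never false negatives, so a false answer proves the content is not live (and a missing filter means no live contents were recorded for that content id at all). An illustrative sketch of that contract using Guava's BloomFilter, which is a stand-in assumption; the project's ContentBloomFilter is its own wrapper.

import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;
import java.nio.charset.StandardCharsets;

// Guava stands in for ContentBloomFilter here; the content ids are made up.
BloomFilter<String> live = BloomFilter.create(
    Funnels.stringFunnel(StandardCharsets.UTF_8), 10_000, 0.01);
live.put("live-content-id");

// No false negatives: everything added is always reported as possibly present.
assert live.mightContain("live-content-id");
// A `false` answer is authoritative, so such content can safely be expired.
boolean safeToExpire = !live.mightContain("unknown-content-id");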