Example use of org.projectnessie.model.Reference in the nessie project by projectnessie.
From class TreeApiImplWithAuthorization, method getAllReferences:
@Override
public ReferencesResponse getAllReferences(ReferencesParams params) {
  ImmutableReferencesResponse.Builder resp = ReferencesResponse.builder();
  BatchAccessChecker check = startAccessCheck();
  // Register a view-permission check for every reference returned by the delegate.
  // NOTE: the original used Stream.peek(...) to register checks as a side effect of a
  // collect; peek is documented for debugging only and its invocation is not guaranteed
  // for every element, so register the checks with an explicit forEach instead.
  List<Reference> refs = super.getAllReferences(params).getReferences();
  refs.forEach(ref -> check.canViewReference(RefUtil.toNamedRef(ref)));
  // Run all checks in one batch; the keys of the failed checks identify refs to hide.
  Set<NamedRef> notAllowed =
      check.check().keySet().stream()
          .map(Check::ref)
          .filter(Objects::nonNull)
          .collect(Collectors.toSet());
  // Return only the references the caller is allowed to view.
  refs.stream()
      .filter(ref -> !notAllowed.contains(RefUtil.toNamedRef(ref)))
      .forEach(resp::addReferences);
  return resp.build();
}
Example use of org.projectnessie.model.Reference in the nessie project by projectnessie.
From class AbstractContentGeneratorTest, method makeCommit:
protected Branch makeCommit(NessieApiV1 api, String contentId) throws NessieConflictException, NessieNotFoundException {
  // Branch off the default branch under a unique name, then commit a single
  // Iceberg table (keyed by CONTENT_KEY) onto the new branch.
  Branch defaultBranch = api.getDefaultBranch();
  String newBranchName = "test-" + UUID.randomUUID();
  Reference created =
      api.createReference()
          .sourceRefName(defaultBranch.getName())
          .reference(Branch.of(newBranchName, defaultBranch.getHash()))
          .create();
  IcebergTable table = IcebergTable.of("testMeta", 123, 456, 789, 321, contentId);
  return api.commitMultipleOperations()
      .branchName(created.getName())
      .hash(created.getHash())
      .commitMeta(CommitMeta.fromMessage(COMMIT_MSG))
      .operation(Operation.Put.of(CONTENT_KEY, table))
      .commit();
}
Example use of org.projectnessie.model.Reference in the nessie project by projectnessie.
From class ITDeltaLog, method testCommitRetry:
@Test
void testCommitRetry() throws Exception {
  // Resolve the three CSV fixtures and the Delta table location on disk.
  String salaries1Csv = ITDeltaLog.class.getResource("/salaries1.csv").getPath();
  String salaries2Csv = ITDeltaLog.class.getResource("/salaries2.csv").getPath();
  String salaries3Csv = ITDeltaLog.class.getResource("/salaries3.csv").getPath();
  String tableLocation = new File(tempPath, "salaries").getAbsolutePath();
  spark.sql(String.format("CREATE TABLE IF NOT EXISTS test_commit_retry (Season STRING, Team STRING, Salary STRING, " + "Player STRING) USING delta LOCATION '%s'", tableLocation));

  // Phase 1: initial load on the default branch.
  Dataset<Row> firstBatch = spark.read().option("header", true).csv(salaries1Csv);
  firstBatch.write().format("delta").mode("overwrite").save(tableLocation);
  Dataset<Row> countAfterFirst = spark.sql("SELECT COUNT(*) FROM test_commit_retry");
  Assertions.assertEquals(15L, countAfterFirst.collectAsList().get(0).getLong(0));

  // Phase 2: create a dev branch off main, point the catalog at it, append batch two.
  Reference main = api.getReference().refName("main").get();
  Reference dev =
      api.createReference()
          .sourceRefName(main.getName())
          .reference(Branch.of("testCommitRetry", main.getHash()))
          .create();
  spark.sparkContext().conf().set("spark.sql.catalog.spark_catalog.ref", dev.getName());
  Dataset<Row> secondBatch = spark.read().option("header", true).csv(salaries2Csv);
  secondBatch.write().format("delta").mode("append").save(tableLocation);
  Dataset<Row> countAfterSecond = spark.sql("SELECT COUNT(*) FROM test_commit_retry");
  Assertions.assertEquals(30L, countAfterSecond.collectAsList().get(0).getLong(0));

  // Phase 3: merge the dev branch back into main, switch the catalog back,
  // and append the final batch on main.
  Reference mergeTarget = api.getReference().refName("main").get();
  Reference mergeSource = api.getReference().refName("testCommitRetry").get();
  api.mergeRefIntoBranch().branch((Branch) mergeTarget).fromRef(mergeSource).merge();
  spark.sparkContext().conf().set("spark.sql.catalog.spark_catalog.ref", "main");
  Dataset<Row> thirdBatch = spark.read().option("header", true).csv(salaries3Csv);
  thirdBatch.write().format("delta").mode("append").save(tableLocation);
  Dataset<Row> countAfterThird = spark.sql("SELECT COUNT(*) FROM test_commit_retry");
  Assertions.assertEquals(50L, countAfterThird.collectAsList().get(0).getLong(0));
}
Aggregations