Use of co.cask.cdap.api.Predicate in project cdap by caskdata.
The class DatasetInstanceService, method list.
/**
* Lists all dataset instances in a namespace. If perimeter security and authorization are enabled, only returns the
* dataset instances that the current user has access to.
*
* @param namespace the namespace to list datasets for
* @return the dataset instances in the provided namespace
* @throws NotFoundException if the namespace was not found
* @throws IOException if there is a problem in making an HTTP request to check if the namespace exists
*/
Collection<DatasetSpecification> list(final NamespaceId namespace) throws Exception {
Principal principal = authenticationContext.getPrincipal();
ensureNamespaceExists(namespace);
Collection<DatasetSpecification> datasets = instanceManager.getAll(namespace);
final Predicate<EntityId> filter = authorizationEnforcer.createFilter(principal);
return Lists.newArrayList(Iterables.filter(datasets, new com.google.common.base.Predicate<DatasetSpecification>() {
@Override
public boolean apply(DatasetSpecification spec) {
return filter.apply(namespace.dataset(spec.getName()));
}
}));
}
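The pattern above adapts a co.cask.cdap.api.Predicate<EntityId> to Guava's Predicate so it can drive Iterables.filter. Below is a minimal, self-contained sketch of that adapter; the class name, the visibleDatasetNames helper, and the idea of filtering plain dataset names are illustrative assumptions, not CDAP code.

import co.cask.cdap.api.Predicate;
import co.cask.cdap.proto.id.EntityId;
import co.cask.cdap.proto.id.NamespaceId;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import java.util.Collection;
import java.util.List;

// Hypothetical helper: keeps only the dataset names the supplied visibility filter accepts.
public final class VisibilityFilterSketch {
  public static List<String> visibleDatasetNames(final NamespaceId namespace,
                                                 Collection<String> datasetNames,
                                                 final Predicate<EntityId> filter) {
    return Lists.newArrayList(Iterables.filter(datasetNames, new com.google.common.base.Predicate<String>() {
      @Override
      public boolean apply(String name) {
        // Delegate to the CDAP predicate for the fully-qualified dataset entity id.
        return filter.apply(namespace.dataset(name));
      }
    }));
  }
}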
Use of co.cask.cdap.api.Predicate in project cdap by caskdata.
The class AuthorizationBootstrapperTest, method test.
@Test
public void test() throws Exception {
final Principal systemUser = new Principal(UserGroupInformation.getCurrentUser().getShortUserName(), Principal.PrincipalType.USER);
// initial state: no privileges for system or admin users
Predicate<EntityId> systemUserFilter = authorizationEnforcer.createFilter(systemUser);
Predicate<EntityId> adminUserFilter = authorizationEnforcer.createFilter(ADMIN_USER);
Assert.assertFalse(systemUserFilter.apply(instanceId));
Assert.assertFalse(systemUserFilter.apply(NamespaceId.SYSTEM));
Assert.assertFalse(adminUserFilter.apply(NamespaceId.DEFAULT));
// privileges should be granted after running bootstrap
authorizationBootstrapper.run();
Tasks.waitFor(true, new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
Predicate<EntityId> systemUserFilter = authorizationEnforcer.createFilter(systemUser);
Predicate<EntityId> adminUserFilter = authorizationEnforcer.createFilter(ADMIN_USER);
return systemUserFilter.apply(instanceId) && systemUserFilter.apply(NamespaceId.SYSTEM) && adminUserFilter.apply(NamespaceId.DEFAULT);
}
}, 10, TimeUnit.SECONDS);
txManager.startAndWait();
datasetService.startAndWait();
waitForService(Constants.Service.DATASET_MANAGER);
defaultNamespaceEnsurer.startAndWait();
systemArtifactLoader.startAndWait();
waitForService(defaultNamespaceEnsurer);
waitForService(systemArtifactLoader);
// ensure that the default namespace was created, and that the system user has privileges to access it
Tasks.waitFor(true, new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
try {
return namespaceQueryAdmin.exists(NamespaceId.DEFAULT);
} catch (Exception e) {
return false;
}
}
}, 10, TimeUnit.SECONDS);
Assert.assertTrue(defaultNamespaceEnsurer.isRunning());
// ensure that the system artifact was deployed, and that the system user has privileges to access it
// this will throw an ArtifactNotFoundException if the artifact was not deployed, and UnauthorizedException if
// the user does not have required privileges
Tasks.waitFor(true, new Callable<Boolean>() {
@Override
public Boolean call() throws Exception {
try {
artifactRepository.getArtifact(SYSTEM_ARTIFACT.toId());
return true;
} catch (Exception e) {
return false;
}
}
}, 20, TimeUnit.SECONDS);
Assert.assertTrue(systemArtifactLoader.isRunning());
// ensure that system datasets can be created by the system user
Dataset systemDataset = DatasetsUtil.getOrCreateDataset(dsFramework, NamespaceId.SYSTEM.dataset("system-dataset"), Table.class.getName(), DatasetProperties.EMPTY, Collections.<String, String>emptyMap());
Assert.assertNotNull(systemDataset);
// as part of bootstrapping, admin users were also granted admin privileges on the CDAP instance, so they can
// create namespaces
SecurityRequestContext.setUserId(ADMIN_USER.getName());
namespaceAdmin.create(new NamespaceMeta.Builder().setName("success").build());
SecurityRequestContext.setUserId("bob");
try {
namespaceAdmin.create(new NamespaceMeta.Builder().setName("failure").build());
Assert.fail("Bob should not have been able to create a namespace since he is not an admin user");
} catch (UnauthorizedException expected) {
// expected
}
}
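The privilege checks in this test only pass once the bootstrapper's grants have propagated, which is why the check re-creates the filter inside Tasks.waitFor. A hedged sketch of that polling pattern is below; the class and method names are illustrative, the filterFactory parameter stands in for authorizationEnforcer.createFilter(principal), and the Tasks import path is an assumption.

import co.cask.cdap.api.Predicate;
import co.cask.cdap.common.utils.Tasks;
import co.cask.cdap.proto.id.EntityId;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

// Hypothetical helper: polls until the freshly created filter accepts the entity, or the timeout elapses.
public final class VisibilityWaitSketch {
  public static void waitUntilVisible(final Callable<Predicate<EntityId>> filterFactory,
                                      final EntityId entityId, long timeoutSeconds) throws Exception {
    Tasks.waitFor(true, new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        // Grants propagate asynchronously, so rebuild the filter on every attempt.
        return filterFactory.call().apply(entityId);
      }
    }, timeoutSeconds, TimeUnit.SECONDS);
  }
}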
Use of co.cask.cdap.api.Predicate in project cdap by caskdata.
The class PartitionedFileSetTest, method testPartitionConsumingWithFilterAndLimit.
@Test
public void testPartitionConsumingWithFilterAndLimit() throws Exception {
final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
final TransactionAware txAwareDataset = (TransactionAware) dataset;
final Set<PartitionKey> partitionKeys1 = Sets.newHashSet();
for (int i = 0; i < 10; i++) {
partitionKeys1.add(generateUniqueKey());
}
final Set<PartitionKey> partitionKeys2 = Sets.newHashSet();
for (int i = 0; i < 15; i++) {
partitionKeys2.add(generateUniqueKey());
}
final SimplePartitionConsumer partitionConsumer = new SimplePartitionConsumer(dataset);
// (consumption only happens at transaction borders)
for (final PartitionKey partitionKey : partitionKeys1) {
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
dataset.getPartitionOutput(partitionKey).addPartition();
}
});
}
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// Initial consumption results in the partitions corresponding to partitionKeys1 being consumed, because only
// those partitions have been added to the dataset at this point
List<Partition> consumedPartitions = Lists.newArrayList();
// with limit = 1, the returned iterator is only size 1, even though there are more unconsumed partitions
Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(1));
Assert.assertEquals(1, consumedPartitions.size());
// ask for 5 more
Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(5));
Assert.assertEquals(6, consumedPartitions.size());
// ask for 5 more, but there are only 4 more unconsumed partitions (size of partitionKeys1 is 10).
Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(5));
Assert.assertEquals(10, consumedPartitions.size());
Set<PartitionKey> retrievedKeys = Sets.newHashSet();
for (Partition consumedPartition : consumedPartitions) {
retrievedKeys.add(consumedPartition.getPartitionKey());
}
Assert.assertEquals(partitionKeys1, retrievedKeys);
}
});
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
for (PartitionKey partitionKey : partitionKeys2) {
dataset.getPartitionOutput(partitionKey).addPartition();
}
}
});
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// using the same PartitionConsumer (which remembers the PartitionConsumerState) to consume additional
// partitions results in only the newly added partitions (corresponding to partitionKeys2) being returned
List<Partition> consumedPartitions = Lists.newArrayList();
Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(1));
// even though we set limit to 1 in the previous call to consumePartitions, we get all the elements of
// partitionKeys2, because they were all added in the same transaction
Set<PartitionKey> retrievedKeys = Sets.newHashSet();
for (Partition consumedPartition : consumedPartitions) {
retrievedKeys.add(consumedPartition.getPartitionKey());
}
Assert.assertEquals(partitionKeys2, retrievedKeys);
}
});
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// consuming the partitions again, without adding any new partitions returns an empty iterator
Assert.assertTrue(partitionConsumer.consumePartitions().isEmpty());
}
});
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// creating a new PartitionConsumer resets the consumption state.
// test combination of filter and limit
SimplePartitionConsumer newPartitionConsumer = new SimplePartitionConsumer(dataset);
List<Partition> consumedPartitions = Lists.newArrayList();
// the partitionFilter will match partitionKeys [1, 7), of which there are 6
final PartitionFilter partitionFilter = PartitionFilter.builder().addRangeCondition("i", 1, 7).build();
final Predicate<PartitionDetail> predicate = new Predicate<PartitionDetail>() {
@Override
public boolean apply(PartitionDetail partitionDetail) {
return partitionFilter.match(partitionDetail.getPartitionKey());
}
};
// applying the filter (which narrows the set down to 6 elements) together with a limit of 4 results in 4 consumed partitions
Iterables.addAll(consumedPartitions, newPartitionConsumer.consumePartitions(4, predicate));
Assert.assertEquals(4, consumedPartitions.size());
// applying a limit of 3 with the same filter returns the 2 remaining elements that match the filter
Iterables.addAll(consumedPartitions, newPartitionConsumer.consumePartitions(3, predicate));
Assert.assertEquals(6, consumedPartitions.size());
// assert that the returned partitions have partition keys whose i values range over [1, 7)
Set<Integer> expectedIFields = new HashSet<>();
for (int i = 1; i < 7; i++) {
expectedIFields.add(i);
}
Set<Integer> actualIFields = new HashSet<>();
for (Partition consumedPartition : consumedPartitions) {
actualIFields.add((Integer) consumedPartition.getPartitionKey().getField("i"));
}
Assert.assertEquals(expectedIFields, actualIFields);
}
});
}
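Both filter-and-limit tests wrap a PartitionFilter in a co.cask.cdap.api.Predicate<PartitionDetail> via an anonymous class. A minimal named version of that adapter is sketched below; the class name is illustrative and the import paths are assumptions based on the snippet.

import co.cask.cdap.api.Predicate;
import co.cask.cdap.api.dataset.lib.PartitionDetail;
import co.cask.cdap.api.dataset.lib.PartitionFilter;

// Hypothetical reusable adapter from a PartitionFilter to a Predicate over PartitionDetail.
public final class PartitionFilterPredicate implements Predicate<PartitionDetail> {
  private final PartitionFilter filter;

  public PartitionFilterPredicate(PartitionFilter filter) {
    this.filter = filter;
  }

  @Override
  public boolean apply(PartitionDetail partitionDetail) {
    // A partition is accepted when its key satisfies every condition of the filter.
    return filter.match(partitionDetail.getPartitionKey());
  }
}

It would be used as in the test, for example: new PartitionFilterPredicate(PartitionFilter.builder().addRangeCondition("i", 1, 7).build()).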
Use of co.cask.cdap.api.Predicate in project cdap by caskdata.
The class ArtifactRepository, method getArtifactDetails.
/**
* Gets all artifact details that match artifacts in the given range.
*
* @param range the range to match artifacts in
* @param limit the maximum number of results to return
* @param order the sort order of the results
* @return a list of all artifacts that match the given range; if none exist, an empty list is returned
*/
public List<ArtifactDetail> getArtifactDetails(final ArtifactRange range, int limit, ArtifactSortOrder order) throws Exception {
List<ArtifactDetail> artifacts = artifactStore.getArtifacts(range, limit, order);
// No authorization for system artifacts
if (NamespaceId.SYSTEM.getNamespace().equals(range.getNamespace())) {
return artifacts;
}
Principal principal = authenticationContext.getPrincipal();
final Predicate<EntityId> filter = authorizationEnforcer.createFilter(principal);
return Lists.newArrayList(Iterables.filter(artifacts, new com.google.common.base.Predicate<ArtifactDetail>() {
@Override
public boolean apply(ArtifactDetail artifactDetail) {
ArtifactId artifactId = artifactDetail.getDescriptor().getArtifactId();
return filter.apply(new NamespaceId(range.getNamespace()).artifact(artifactId.getName(), artifactId.getVersion().getVersion()));
}
}));
}
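The filtering step above builds the artifact's entity id from the range's namespace plus the artifact's name and version before consulting the authorization filter. A small hedged sketch of that per-artifact check follows; the class and method names are illustrative.

import co.cask.cdap.api.Predicate;
import co.cask.cdap.proto.id.EntityId;
import co.cask.cdap.proto.id.NamespaceId;

// Hypothetical helper: true when the authorization filter accepts the artifact entity id.
public final class ArtifactVisibilitySketch {
  public static boolean isVisible(Predicate<EntityId> filter, String namespace,
                                  String artifactName, String artifactVersion) {
    // Form the artifact id in the given namespace and ask the filter whether it is visible.
    return filter.apply(new NamespaceId(namespace).artifact(artifactName, artifactVersion));
  }
}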
Use of co.cask.cdap.api.Predicate in project cdap by caskdata.
The class PartitionConsumerTest, method testPartitionConsumingWithFilterAndLimit.
@Test
public void testPartitionConsumingWithFilterAndLimit() throws Exception {
final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
final TransactionAware txAwareDataset = (TransactionAware) dataset;
final Set<PartitionKey> partitionKeys1 = new HashSet<>();
for (int i = 0; i < 10; i++) {
partitionKeys1.add(generateUniqueKey());
}
final Set<PartitionKey> partitionKeys2 = new HashSet<>();
for (int i = 0; i < 15; i++) {
partitionKeys2.add(generateUniqueKey());
}
final PartitionConsumer partitionConsumer = new ConcurrentPartitionConsumer(dataset, new InMemoryStatePersistor());
// (consumption only happens at transaction borders)
for (final PartitionKey partitionKey : partitionKeys1) {
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
dataset.getPartitionOutput(partitionKey).addPartition();
}
});
}
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// Initial consumption results in the partitions corresponding to partitionKeys1 being consumed, because only
// those partitions have been added to the dataset at this point
List<Partition> consumedPartitions = new ArrayList<>();
// with limit = 1, the returned iterator is only size 1, even though there are more unconsumed partitions
Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(1).getPartitions());
Assert.assertEquals(1, consumedPartitions.size());
// ask for 5 more
Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(5).getPartitions());
Assert.assertEquals(6, consumedPartitions.size());
// ask for 5 more, but there are only 4 more unconsumed partitions (size of partitionKeys1 is 10).
Iterables.addAll(consumedPartitions, partitionConsumer.consumePartitions(5).getPartitions());
Assert.assertEquals(10, consumedPartitions.size());
Assert.assertEquals(partitionKeys1, toKeys(consumedPartitions));
}
});
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
for (PartitionKey partitionKey : partitionKeys2) {
dataset.getPartitionOutput(partitionKey).addPartition();
}
}
});
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// using the same PartitionConsumer (which remembers the PartitionConsumerState) to consume additional
// partitions results in only the newly added partitions (corresponding to partitionKeys2) being returned
Assert.assertEquals(partitionKeys2, toKeys(partitionConsumer.consumePartitions().getPartitions()));
}
});
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// consuming the partitions again, without adding any new partitions returns an empty iterator
Assert.assertTrue(partitionConsumer.consumePartitions().getPartitions().isEmpty());
}
});
dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
@Override
public void apply() throws Exception {
// creating a new PartitionConsumer resets the consumption state.
// test combination of filter and limit
// the partitionFilter will match partitionKeys [1, 7), of which there are 6
final PartitionFilter partitionFilter = PartitionFilter.builder().addRangeCondition("i", 1, 7).build();
final Predicate<PartitionDetail> predicate = new Predicate<PartitionDetail>() {
@Override
public boolean apply(PartitionDetail partitionDetail) {
return partitionFilter.match(partitionDetail.getPartitionKey());
}
};
ConsumerConfiguration configuration = ConsumerConfiguration.builder().setPartitionPredicate(predicate).build();
PartitionConsumer newPartitionConsumer = new ConcurrentPartitionConsumer(dataset, new InMemoryStatePersistor(), configuration);
List<Partition> consumedPartitions = new ArrayList<>();
// applying the filter (which narrows the set down to 6 elements) together with a limit of 4 results in 4 consumed partitions
Iterables.addAll(consumedPartitions, newPartitionConsumer.consumePartitions(4).getPartitions());
Assert.assertEquals(4, consumedPartitions.size());
// applying a limit of 3 with the same filter returns the 2 remaining elements that match the filter
Iterables.addAll(consumedPartitions, newPartitionConsumer.consumePartitions(3).getPartitions());
Assert.assertEquals(6, consumedPartitions.size());
// assert that the returned partitions have partition keys whose i values range over [1, 7)
Set<Integer> expectedIFields = new HashSet<>();
for (int i = 1; i < 7; i++) {
expectedIFields.add(i);
}
Set<Integer> actualIFields = new HashSet<>();
for (Partition consumedPartition : consumedPartitions) {
actualIFields.add((Integer) consumedPartition.getPartitionKey().getField("i"));
}
Assert.assertEquals(expectedIFields, actualIFields);
}
});
}
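In this test, ConsumerConfiguration is what carries the partition predicate into the ConcurrentPartitionConsumer. The sketch below consolidates that setup into one factory method; the class name and the StatePersistor parameter type are illustrative, and the import paths are assumptions, but the builder and constructor calls mirror the snippet above.

import co.cask.cdap.api.Predicate;
import co.cask.cdap.api.dataset.lib.PartitionDetail;
import co.cask.cdap.api.dataset.lib.PartitionFilter;
import co.cask.cdap.api.dataset.lib.PartitionedFileSet;
import co.cask.cdap.api.dataset.lib.partitioned.ConcurrentPartitionConsumer;
import co.cask.cdap.api.dataset.lib.partitioned.ConsumerConfiguration;
import co.cask.cdap.api.dataset.lib.partitioned.PartitionConsumer;
import co.cask.cdap.api.dataset.lib.partitioned.StatePersistor;

// Hypothetical factory: builds a consumer whose working set only contains partitions matching the filter.
public final class FilteredConsumerSketch {
  public static PartitionConsumer create(PartitionedFileSet dataset, StatePersistor statePersistor,
                                         final PartitionFilter partitionFilter) {
    Predicate<PartitionDetail> predicate = new Predicate<PartitionDetail>() {
      @Override
      public boolean apply(PartitionDetail partitionDetail) {
        return partitionFilter.match(partitionDetail.getPartitionKey());
      }
    };
    // Only partitions whose keys match the filter enter the consumer's working set.
    ConsumerConfiguration configuration = ConsumerConfiguration.builder()
      .setPartitionPredicate(predicate)
      .build();
    return new ConcurrentPartitionConsumer(dataset, statePersistor, configuration);
  }
}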