Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.
The class PartitionedFileSetTest, method testUpdateMetadata.
@Test
public void testUpdateMetadata() throws Exception {
  final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
  dsFrameworkUtil.newTransactionExecutor((TransactionAware) dataset).execute(new TransactionExecutor.Subroutine() {

    @Override
    public void apply() throws Exception {
      PartitionOutput partitionOutput = dataset.getPartitionOutput(PARTITION_KEY);
      ImmutableMap<String, String> originalEntries = ImmutableMap.of("key1", "value1");
      partitionOutput.setMetadata(originalEntries);
      partitionOutput.addPartition();
      ImmutableMap<String, String> updatedMetadata = ImmutableMap.of("key2", "value2");
      dataset.addMetadata(PARTITION_KEY, updatedMetadata);
      PartitionDetail partitionDetail = dataset.getPartition(PARTITION_KEY);
      Assert.assertNotNull(partitionDetail);
      HashMap<String, String> combinedEntries = Maps.newHashMap();
      combinedEntries.putAll(originalEntries);
      combinedEntries.putAll(updatedMetadata);
      Assert.assertEquals(combinedEntries, partitionDetail.getMetadata().asMap());
      // adding an entry for a key that already exists will throw an exception
      try {
        dataset.addMetadata(PARTITION_KEY, "key2", "value3");
        Assert.fail("Expected not to be able to update an existing metadata entry");
      } catch (DataSetException expected) {
      }
      PartitionKey nonexistentPartitionKey = PartitionKey.builder()
        .addIntField("i", 42)
        .addLongField("l", 17L)
        .addStringField("s", "nonexistent")
        .build();
      try {
        // adding an entry for a nonexistent partition will throw an exception
        dataset.addMetadata(nonexistentPartitionKey, "key2", "value3");
        Assert.fail("Expected not to be able to add metadata for a nonexistent partition");
      } catch (DataSetException expected) {
      }
    }
  });
}
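As a usage note on the pattern above: addMetadata fails with a DataSetException both when a metadata key already exists and when the partition itself does not exist. A caller that wants to avoid the exception can check first. The sketch below is hypothetical (the helper PartitionMetadataHelper is not part of CDAP); it relies only on the calls shown in the test: getPartition, getMetadata().asMap(), and addMetadata.

import co.cask.cdap.api.dataset.DataSetException;
import co.cask.cdap.api.dataset.lib.PartitionDetail;
import co.cask.cdap.api.dataset.lib.PartitionKey;
import co.cask.cdap.api.dataset.lib.PartitionedFileSet;

// Hypothetical helper, not from the CDAP sources: adds a metadata entry only when the
// partition exists and the key is not already present, so the DataSetException paths
// exercised in the test above are avoided (or tolerated if hit concurrently).
public final class PartitionMetadataHelper {

  private PartitionMetadataHelper() { }

  public static boolean addMetadataIfAbsent(PartitionedFileSet pfs, PartitionKey key,
                                            String metaKey, String metaValue) {
    PartitionDetail detail = pfs.getPartition(key);
    if (detail == null || detail.getMetadata().asMap().containsKey(metaKey)) {
      return false;
    }
    try {
      pfs.addMetadata(key, metaKey, metaValue);
      return true;
    } catch (DataSetException e) {
      // another transaction may have added the same key in the meantime
      return false;
    }
  }
}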
Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.
The class TimePartitionedFileSetTest, method testPartitionMetadata.
@Test
public void testPartitionMetadata() throws Exception {
  final TimePartitionedFileSet tpfs = dsFrameworkUtil.getInstance(TPFS_INSTANCE);
  TransactionAware txAware = (TransactionAware) tpfs;
  dsFrameworkUtil.newInMemoryTransactionExecutor(txAware).execute(new TransactionExecutor.Subroutine() {

    @Override
    public void apply() throws Exception {
      // make sure the dataset has no partitions
      validateTimePartitions(tpfs, 0L, MAX, Collections.<Long, String>emptyMap());
      Date date = DATE_FORMAT.parse("6/4/12 10:00 am");
      long time = date.getTime();
      // keep track of all the metadata added
      Map<String, String> allMetadata = Maps.newHashMap();
      Map<String, String> metadata = ImmutableMap.of("key1", "value1", "key2", "value3", "key100", "value4");
      tpfs.addPartition(time, "file", metadata);
      allMetadata.putAll(metadata);
      TimePartitionDetail partitionByTime = tpfs.getPartitionByTime(time);
      Assert.assertNotNull(partitionByTime);
      Assert.assertEquals(metadata, partitionByTime.getMetadata().asMap());
      tpfs.addMetadata(time, "key3", "value4");
      allMetadata.put("key3", "value4");
      try {
        // attempting to update an existing key throws a DataSetException
        tpfs.addMetadata(time, "key3", "value5");
        Assert.fail("Expected not to be able to update an existing metadata entry");
      } catch (DataSetException expected) {
      }
      Map<String, String> newMetadata = ImmutableMap.of("key4", "value4", "key5", "value5");
      tpfs.addMetadata(time, newMetadata);
      allMetadata.putAll(newMetadata);
      partitionByTime = tpfs.getPartitionByTime(time);
      Assert.assertNotNull(partitionByTime);
      Assert.assertEquals(allMetadata, partitionByTime.getMetadata().asMap());
    }
  });
}
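For context on addPartition(time, path, metadata): a program would typically register an output it has just written together with its metadata in one step, then read it back by time. The sketch below is a hypothetical helper, not CDAP code; it assumes the relative path and metadata are supplied by the caller and uses only the TimePartitionedFileSet methods that appear in the test.

import java.util.Map;

import co.cask.cdap.api.dataset.lib.TimePartitionDetail;
import co.cask.cdap.api.dataset.lib.TimePartitionedFileSet;

// Hypothetical helper, not from the CDAP sources: registers a time partition with metadata
// and returns the detail that a reader would see via getPartitionByTime.
public final class TimePartitionRegistrar {

  private TimePartitionRegistrar() { }

  public static TimePartitionDetail register(TimePartitionedFileSet tpfs, long partitionTime,
                                             String relativePath, Map<String, String> metadata) {
    // in CDAP, adding a partition for a time that already has one fails with a DataSetException
    tpfs.addPartition(partitionTime, relativePath, metadata);
    return tpfs.getPartitionByTime(partitionTime);
  }
}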
Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.
The class TimePartitionedFileSetTest, method testOutputPartitionPath.
/**
* Tests that the output file path is set correctly, based on the output partition time.
*/
@Test
public void testOutputPartitionPath() throws Exception {
  // test specifying output time
  Date date = DATE_FORMAT.parse("1/1/15 8:42 pm");
  Map<String, String> args = Maps.newHashMap();
  TimePartitionedFileSetArguments.setOutputPartitionTime(args, date.getTime());
  TimeZone timeZone = Calendar.getInstance().getTimeZone();
  TimePartitionedFileSetArguments.setOutputPathFormat(args, "yyyy-MM-dd/HH_mm", timeZone.getID());
  TimePartitionedFileSet ds = dsFrameworkUtil.getInstance(TPFS_INSTANCE, args);
  String outputPath = ds.getEmbeddedFileSet().getOutputLocation().toURI().getPath();
  Assert.assertTrue(outputPath.endsWith("2015-01-01/20_42"));
  Map<String, String> outputConfig = ds.getOutputFormatConfiguration();
  Assert.assertTrue(outputConfig.get(FileOutputFormat.OUTDIR).endsWith("2015-01-01/20_42"));
  // test specifying output time and partition key -> time should prevail
  PartitionKey key = PartitionKey.builder()
    .addIntField("year", 2014)
    .addIntField("month", 1)
    .addIntField("day", 1)
    .addIntField("hour", 20)
    .addIntField("minute", 54)
    .build();
  TimePartitionedFileSet ds1 = dsFrameworkUtil.getInstance(TPFS_INSTANCE, args);
  TimePartitionedFileSetArguments.setOutputPartitionKey(args, key);
  outputConfig = ds1.getOutputFormatConfiguration();
  Assert.assertTrue(outputConfig.get(FileOutputFormat.OUTDIR).endsWith("2015-01-01/20_42"));
  args.clear();
  TimePartitionedFileSetArguments.setOutputPartitionKey(args, key);
  TimePartitionedFileSet ds2 = dsFrameworkUtil.getInstance(TPFS_INSTANCE, args);
  outputConfig = ds2.getOutputFormatConfiguration();
  Assert.assertTrue(outputConfig.get(FileOutputFormat.OUTDIR).endsWith("54"));
  args.clear();
  TimePartitionedFileSet ds3 = dsFrameworkUtil.getInstance(TPFS_INSTANCE, args);
  try {
    ds3.getOutputFormatConfiguration();
    Assert.fail("getOutputFormatConfiguration should have failed with neither output time nor partition key");
  } catch (DataSetException e) {
    // expected
  }
}
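The configuration side of this test, building the runtime arguments that make the output path resolve under a date-based directory, can be packaged as shown below. This is a minimal sketch, not CDAP code: the class name TpfsOutputArgs and the choice of path format are assumptions, and, as the test shows, omitting both the output partition time and the output partition key makes getOutputFormatConfiguration() fail with a DataSetException.

import java.util.HashMap;
import java.util.Map;
import java.util.TimeZone;

import co.cask.cdap.api.dataset.lib.TimePartitionedFileSetArguments;

// Hypothetical helper, not from the CDAP sources: builds dataset arguments so that the TPFS
// output location resolves under a "yyyy-MM-dd/HH_mm" directory for the given partition time.
public final class TpfsOutputArgs {

  private TpfsOutputArgs() { }

  public static Map<String, String> forPartitionTime(long partitionTime) {
    Map<String, String> args = new HashMap<>();
    TimePartitionedFileSetArguments.setOutputPartitionTime(args, partitionTime);
    TimePartitionedFileSetArguments.setOutputPathFormat(args, "yyyy-MM-dd/HH_mm",
                                                        TimeZone.getDefault().getID());
    return args;
  }
}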
Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.
The class ApplicationVerificationStage, method verifyData.
protected void verifyData(ApplicationId appId, ApplicationSpecification specification,
                          @Nullable KerberosPrincipalId specifiedOwnerPrincipal)
  throws DatasetManagementException, UnauthorizedException {
  // NOTE: no special restrictions on dataset module names, etc.
  VerifyResult result;
  for (DatasetCreationSpec dataSetCreateSpec : specification.getDatasets().values()) {
    result = getVerifier(DatasetCreationSpec.class).verify(appId, dataSetCreateSpec);
    if (!result.isSuccess()) {
      throw new RuntimeException(result.getMessage());
    }
    String dsName = dataSetCreateSpec.getInstanceName();
    DatasetId datasetInstanceId = appId.getParent().dataset(dsName);
    DatasetSpecification existingSpec = dsFramework.getDatasetSpec(datasetInstanceId);
    if (existingSpec != null && !existingSpec.getType().equals(dataSetCreateSpec.getTypeName())) {
      // the new app is trying to deploy a dataset with the same instance name
      // but a different type than the existing one
      throw new DataSetException(String.format("Cannot Deploy Dataset : %s with Type : %s : Dataset with different Type Already Exists", dsName, dataSetCreateSpec.getTypeName()));
    }
    // if the dataset already exists, verify that its owner is the same
    if (existingSpec != null) {
      verifyOwner(datasetInstanceId, specifiedOwnerPrincipal);
    }
  }
  for (StreamSpecification spec : specification.getStreams().values()) {
    result = getVerifier(StreamSpecification.class).verify(appId, spec);
    if (!result.isSuccess()) {
      throw new RuntimeException(result.getMessage());
    }
    // if the stream already exists, verify that its owner is the same
    if (store.getStream(appId.getNamespaceId(), spec.getName()) != null) {
      verifyOwner(appId.getParent().stream(spec.getName()), specifiedOwnerPrincipal);
    }
  }
}
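The dataset part of this verification reduces to one compatibility rule: an existing dataset instance may be reused only if its type matches the requested type. Below is a hedged sketch of that rule in isolation; the helper class and its standalone form are assumptions (in the real stage the check is inlined and dsFramework is a field), and the import paths follow the ones used in the CDAP codebase.

import co.cask.cdap.api.dataset.DatasetManagementException;
import co.cask.cdap.api.dataset.DatasetSpecification;
import co.cask.cdap.data2.dataset2.DatasetFramework;
import co.cask.cdap.proto.id.DatasetId;
import co.cask.cdap.security.spi.authorization.UnauthorizedException;

// Hypothetical helper, not from ApplicationVerificationStage: returns true when a dataset
// with the given id either does not exist yet or already has the requested type,
// i.e. when deployment would pass the type check shown above.
public final class DatasetTypeCheck {

  private DatasetTypeCheck() { }

  public static boolean isTypeCompatible(DatasetFramework dsFramework, DatasetId datasetInstanceId,
                                         String requestedType)
    throws DatasetManagementException, UnauthorizedException {
    DatasetSpecification existingSpec = dsFramework.getDatasetSpec(datasetInstanceId);
    return existingSpec == null || existingSpec.getType().equals(requestedType);
  }
}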
Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.
The class DefaultDatasetRuntimeContext, method onMethodEntry.
@Override
public void onMethodEntry(boolean constructor, @Nullable Class<? extends Annotation> annotation) {
  CallStack callStack = this.callStack.get();
  AccessInfo accessInfo = UNKNOWN_ACCESS_INFO;
  if (annotation == null && constructor) {
    annotation = constructorDefaultAnnotation;
  }
  if (annotation != null) {
    accessInfo = ANNOTATION_TO_ACCESS_INFO.get(annotation);
    if (accessInfo == null) {
      // shouldn't happen
      throw new DataSetException("Unsupported annotation " + annotation + " on dataset " + datasetId);
    }
  }
  // even if the access type is unknown, we won't allow a principal with no privilege at all
  try {
    enforcer.enforce(datasetId, principal, accessInfo.getActions());
  } catch (Exception e) {
    throw new DataSetException("The principal " + principal + " is not authorized to access " + datasetId + " for operation types " + accessInfo.getActions(), e);
  }
  recordAccess(callStack.enter(accessInfo.getAccessType()), accessInfo.getAccessType());
}
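The annotation lookup at the top of onMethodEntry is what ties dataset methods to the privileges being enforced. A minimal sketch of how a dataset class might be annotated so that this check applies is shown below; the class AuditedTable and its methods are hypothetical, while @ReadOnly and @WriteOnly are the CDAP annotations from co.cask.cdap.api.annotation.

import co.cask.cdap.api.annotation.ReadOnly;
import co.cask.cdap.api.annotation.WriteOnly;

// Hypothetical dataset methods: the annotations determine which access info
// DefaultDatasetRuntimeContext maps to on method entry; an unauthorized principal
// then surfaces as the DataSetException thrown above.
public class AuditedTable {

  @ReadOnly
  public byte[] read(byte[] key) {
    // read path: requires read privileges on the dataset
    return new byte[0];
  }

  @WriteOnly
  public void write(byte[] key, byte[] value) {
    // write path: requires write privileges on the dataset
  }
}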