use of io.cdap.cdap.api.dataset.DataSetException in project cdap by cdapio.
the class HBaseMetricsTable method increment.
@Override
public void increment(NavigableMap<byte[], NavigableMap<byte[], Long>> updates) {
  List<Put> puts = Lists.newArrayList();
  for (Map.Entry<byte[], NavigableMap<byte[], Long>> row : updates.entrySet()) {
    byte[] distributedKey = createDistributedRowKey(row.getKey());
    Put increment = getIncrementalPut(distributedKey, row.getValue());
    puts.add(increment);
  }
  try {
    mutator.mutate(puts);
    mutator.flush();
  } catch (IOException e) {
    // currently there is no other way to extract this from the HBase exception than a string match
    if (e.getMessage() != null && e.getMessage().contains("isn't 64 bits wide")) {
      throw new NumberFormatException("Attempted to increment a value that is not convertible to long.");
    }
    throw new DataSetException("Increment failed on table " + tableId, e);
  }
}
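A caller-side sketch of how this method might be driven and how the two failure modes surface; the metricsTable variable is hypothetical, and the usual Guava (Maps) and HBase (Bytes) imports are assumed:
// build a row -> (column -> delta) map keyed by HBase's byte[] comparator, then increment in bulk
NavigableMap<byte[], NavigableMap<byte[], Long>> updates = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
NavigableMap<byte[], Long> columns = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("count"), 1L);
updates.put(Bytes.toBytes("row-1"), columns);
try {
  metricsTable.increment(updates);
} catch (NumberFormatException e) {
  // an existing cell does not hold an 8-byte long, so it cannot be incremented
} catch (DataSetException e) {
  // the bulk mutation failed; e.getCause() carries the original IOException
}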
use of io.cdap.cdap.api.dataset.DataSetException in project cdap by cdapio.
the class HBaseMetricsTable method putBytes.
@Override
public void putBytes(SortedMap<byte[], ? extends SortedMap<byte[], byte[]>> updates) {
  List<Put> puts = Lists.newArrayList();
  for (Map.Entry<byte[], ? extends SortedMap<byte[], byte[]>> row : updates.entrySet()) {
    byte[] distributedKey = createDistributedRowKey(row.getKey());
    PutBuilder put = tableUtil.buildPut(distributedKey);
    for (Map.Entry<byte[], byte[]> column : row.getValue().entrySet()) {
      put.add(columnFamily, column.getKey(), column.getValue());
    }
    puts.add(put.build());
  }
  try {
    mutator.mutate(puts);
    mutator.flush();
  } catch (IOException e) {
    throw new DataSetException("Put failed on table " + tableId, e);
  }
}
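A similar hypothetical caller for putBytes(); again the metricsTable variable and the Guava/HBase imports are assumptions, not part of the snippet above:
SortedMap<byte[], SortedMap<byte[], byte[]>> updates = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
SortedMap<byte[], byte[]> columns = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("col"), Bytes.toBytes("value"));
updates.put(Bytes.toBytes("row-1"), columns);
try {
  metricsTable.putBytes(updates);
} catch (DataSetException e) {
  // the bulk Put failed; e.getCause() carries the underlying IOException
}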
use of io.cdap.cdap.api.dataset.DataSetException in project cdap by cdapio.
the class ObjectMappedTableDataset method write.
@WriteOnly
@Override
public void write(byte[] key, T object) {
  Put put = new Put(key);
  try {
    putWriter.write(object, put);
    table.put(put);
  } catch (IOException e) {
    // should never happen
    throw new DataSetException("Failed to encode object to be written: " + e.getMessage(), e);
  }
}
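A sketch of how application code might invoke this write path; the Purchase class, the table variable (an ObjectMappedTable&lt;Purchase&gt;, e.g. obtained from a DatasetContext), and the key encoding are illustrative assumptions:
// "Purchase" and the dataset name are hypothetical
byte[] key = "customer-1".getBytes(StandardCharsets.UTF_8);
try {
  table.write(key, new Purchase("customer-1", 100));
} catch (DataSetException e) {
  // the object could not be encoded and written to the underlying table
}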
use of io.cdap.cdap.api.dataset.DataSetException in project cdap by caskdata.
the class PartitionedFileSetTest method testUpdateMetadata.
@Test
public void testUpdateMetadata() throws Exception {
  final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
  dsFrameworkUtil.newTransactionExecutor((TransactionAware) dataset).execute(new TransactionExecutor.Subroutine() {

    @Override
    public void apply() throws Exception {
      PartitionOutput partitionOutput = dataset.getPartitionOutput(PARTITION_KEY);
      ImmutableMap<String, String> originalEntries = ImmutableMap.of("key1", "value1", "key2", "value2");
      partitionOutput.setMetadata(originalEntries);
      partitionOutput.addPartition();
      ImmutableMap<String, String> updatedMetadata = ImmutableMap.of("key3", "value3");
      dataset.addMetadata(PARTITION_KEY, updatedMetadata);
      PartitionDetail partitionDetail = dataset.getPartition(PARTITION_KEY);
      Assert.assertNotNull(partitionDetail);
      HashMap<String, String> combinedEntries = Maps.newHashMap();
      combinedEntries.putAll(originalEntries);
      combinedEntries.putAll(updatedMetadata);
      Assert.assertEquals(combinedEntries, partitionDetail.getMetadata().asMap());
      // with the setMetadata API, adding an entry for a key that already exists overwrites the previous value
      dataset.setMetadata(PARTITION_KEY, Collections.singletonMap("key3", "value4"));
      partitionDetail = dataset.getPartition(PARTITION_KEY);
      Assert.assertNotNull(partitionDetail);
      Assert.assertEquals(ImmutableMap.of("key1", "value1", "key2", "value2", "key3", "value4"),
                          partitionDetail.getMetadata().asMap());
      // with addMetadata, adding an entry for a key that already exists will throw an exception
      try {
        dataset.addMetadata(PARTITION_KEY, "key2", "value3");
        Assert.fail("Expected not to be able to update an existing metadata entry");
      } catch (DataSetException expected) {
      }
      // it is possible to remove multiple metadata entries at once; if a key doesn't exist, no error is thrown
      dataset.removeMetadata(PARTITION_KEY, ImmutableSet.of("key2", "key3", "key4"));
      // key2 and key3 were removed
      partitionDetail = dataset.getPartition(PARTITION_KEY);
      Assert.assertNotNull(partitionDetail);
      Assert.assertEquals(ImmutableMap.of("key1", "value1"), partitionDetail.getMetadata().asMap());
      try {
        // adding metadata for a nonexistent partition will throw an exception
        PartitionKey nonexistentPartitionKey = PartitionKey.builder()
          .addIntField("i", 42).addLongField("l", 17L).addStringField("s", "nonexistent").build();
        dataset.addMetadata(nonexistentPartitionKey, "key2", "value3");
        Assert.fail("Expected not to be able to add metadata for a nonexistent partition");
      } catch (DataSetException expected) {
      }
    }
  });
}
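The metadata semantics exercised by the test can be condensed as follows; pfs stands for an existing PartitionedFileSet, PARTITION_KEY for an already created partition, and the calls are assumed to run inside a transaction as in the test:
pfs.addMetadata(PARTITION_KEY, "owner", "alice");                           // ok: the key is new
pfs.setMetadata(PARTITION_KEY, Collections.singletonMap("owner", "bob"));   // ok: overwrites the existing value
try {
  pfs.addMetadata(PARTITION_KEY, "owner", "carol");                         // addMetadata refuses to overwrite
  Assert.fail("addMetadata should not overwrite an existing key");
} catch (DataSetException expected) {
}
pfs.removeMetadata(PARTITION_KEY, ImmutableSet.of("owner", "no-such-key")); // missing keys are silently ignored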
use of io.cdap.cdap.api.dataset.DataSetException in project cdap by caskdata.
the class DefaultDatasetRuntimeContext method onMethodEntry.
@Override
public void onMethodEntry(boolean constructor, @Nullable Class<? extends Annotation> annotation) {
  CallStack callStack = this.callStack.get();
  AccessInfo accessInfo = UNKNOWN_ACCESS_INFO;
  if (annotation == null && constructor) {
    annotation = constructorDefaultAnnotation;
  }
  if (annotation != null) {
    accessInfo = ANNOTATION_TO_ACCESS_INFO.get(annotation);
    if (accessInfo == null) {
      // shouldn't happen
      throw new DataSetException("Unsupported annotation " + annotation + " on dataset " + datasetId);
    }
  }
  // enforce the required permissions; even for an unknown access type we won't allow no privilege at all
  try {
    enforcer.enforce(datasetId, principal, accessInfo.getPermissions());
  } catch (Exception e) {
    throw new DataSetException("The principal " + principal + " is not authorized to access " + datasetId
                                 + " for operation types " + accessInfo.getPermissions(), e);
  }
  recordAccess(callStack.enter(accessInfo.getAccessType()), accessInfo.getAccessType());
}
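For context, here is a minimal sketch, not taken from the CDAP codebase, of the convention this hook relies on: dataset methods carry @ReadOnly, @WriteOnly, or @ReadWrite annotations, and onMethodEntry() maps that annotation to the permissions it asks the enforcer to check. The KeyValueDataset class below is hypothetical:
public class KeyValueDataset {
  private final Map<String, String> store = new HashMap<>();

  @ReadOnly
  public String read(String key) {
    // the entry hook sees @ReadOnly and enforces read permission before this body runs
    return store.get(key);
  }

  @WriteOnly
  public void write(String key, String value) {
    // @WriteOnly maps to write permission
    store.put(key, value);
  }

  @ReadWrite
  public String readThenWrite(String key, String value) {
    // @ReadWrite requires both read and write permission
    return store.put(key, value);
  }
}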