Example 31 with DataSetException

use of io.cdap.cdap.api.dataset.DataSetException in project cdap by cdapio.

the class HBaseMetricsTable method increment.

@Override
public void increment(NavigableMap<byte[], NavigableMap<byte[], Long>> updates) {
    List<Put> puts = Lists.newArrayList();
    for (Map.Entry<byte[], NavigableMap<byte[], Long>> row : updates.entrySet()) {
        byte[] distributedKey = createDistributedRowKey(row.getKey());
        Put increment = getIncrementalPut(distributedKey, row.getValue());
        puts.add(increment);
    }
    try {
        mutator.mutate(puts);
        mutator.flush();
    } catch (IOException e) {
        // currently there is no other way to extract this from the HBase exception than string matching
        if (e.getMessage() != null && e.getMessage().contains("isn't 64 bits wide")) {
            throw new NumberFormatException("Attempted to increment a value that is not convertible to long.");
        }
        throw new DataSetException("Increment failed on table " + tableId, e);
    }
}
Also used : NavigableMap(java.util.NavigableMap) DataSetException(io.cdap.cdap.api.dataset.DataSetException) IOException(java.io.IOException) Map(java.util.Map) NavigableMap(java.util.NavigableMap) SortedMap(java.util.SortedMap) Put(org.apache.hadoop.hbase.client.Put)
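
A minimal caller-side sketch, not from the CDAP sources, showing how the nested update map might be built and how both failure modes of increment() could be handled. The package path of HBaseMetricsTable and the row/column names are assumptions for illustration.

import java.util.NavigableMap;
import java.util.TreeMap;

import io.cdap.cdap.api.dataset.DataSetException;
import io.cdap.cdap.data2.dataset2.lib.table.hbase.HBaseMetricsTable;
import org.apache.hadoop.hbase.util.Bytes;

class IncrementSketch {
    // `table` is assumed to be an initialized HBaseMetricsTable
    void bumpCounter(HBaseMetricsTable table) {
        NavigableMap<byte[], NavigableMap<byte[], Long>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        NavigableMap<byte[], Long> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        // increment the "count" column of one metrics row by 1
        columns.put(Bytes.toBytes("count"), 1L);
        updates.put(Bytes.toBytes("metric.row"), columns);
        try {
            table.increment(updates);
        } catch (NumberFormatException e) {
            // an existing cell held a value that is not an 8-byte long
        } catch (DataSetException e) {
            // the underlying HBase mutation or flush failed
        }
    }
}

Note that NumberFormatException is not a DataSetException, so callers who care about the distinction need both catch blocks.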

Example 32 with DataSetException

use of io.cdap.cdap.api.dataset.DataSetException in project cdap by cdapio.

the class HBaseMetricsTable method putBytes.

@Override
public void putBytes(SortedMap<byte[], ? extends SortedMap<byte[], byte[]>> updates) {
    List<Put> puts = Lists.newArrayList();
    for (Map.Entry<byte[], ? extends SortedMap<byte[], byte[]>> row : updates.entrySet()) {
        byte[] distributedKey = createDistributedRowKey(row.getKey());
        PutBuilder put = tableUtil.buildPut(distributedKey);
        for (Map.Entry<byte[], byte[]> column : row.getValue().entrySet()) {
            put.add(columnFamily, column.getKey(), column.getValue());
        }
        puts.add(put.build());
    }
    try {
        mutator.mutate(puts);
        mutator.flush();
    } catch (IOException e) {
        throw new DataSetException("Put failed on table " + tableId, e);
    }
}
Also used : PutBuilder(io.cdap.cdap.data2.util.hbase.PutBuilder) DataSetException(io.cdap.cdap.api.dataset.DataSetException) IOException(java.io.IOException) Map(java.util.Map) NavigableMap(java.util.NavigableMap) SortedMap(java.util.SortedMap) Put(org.apache.hadoop.hbase.client.Put)
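
For comparison, a hypothetical caller of putBytes writing raw byte values; the table instance, row, and column names are again illustrative assumptions.

import java.util.SortedMap;
import java.util.TreeMap;

import io.cdap.cdap.api.dataset.DataSetException;
import io.cdap.cdap.data2.dataset2.lib.table.hbase.HBaseMetricsTable;
import org.apache.hadoop.hbase.util.Bytes;

class PutBytesSketch {
    void writeRaw(HBaseMetricsTable table) {
        SortedMap<byte[], SortedMap<byte[], byte[]>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        SortedMap<byte[], byte[]> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        columns.put(Bytes.toBytes("tags"), Bytes.toBytes("host=worker-1"));
        updates.put(Bytes.toBytes("metric.row"), columns);
        try {
            table.putBytes(updates);
        } catch (DataSetException e) {
            // the buffered Puts could not be flushed to HBase
        }
    }
}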

Example 33 with DataSetException

use of io.cdap.cdap.api.dataset.DataSetException in project cdap by cdapio.

the class ObjectMappedTableDataset method write.

@WriteOnly
@Override
public void write(byte[] key, T object) {
    Put put = new Put(key);
    try {
        putWriter.write(object, put);
        table.put(put);
    } catch (IOException e) {
        // should never happen
        throw new DataSetException("Failed to encode object to be written: " + e.getMessage(), e);
    }
}
Also used : DataSetException(io.cdap.cdap.api.dataset.DataSetException) IOException(java.io.IOException) Put(io.cdap.cdap.api.dataset.table.Put) WriteOnly(io.cdap.cdap.api.annotation.WriteOnly)
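
A hypothetical usage sketch through the public ObjectMappedTable API, which ObjectMappedTableDataset implements; the Purchase record type and the key are invented for illustration.

import io.cdap.cdap.api.common.Bytes;
import io.cdap.cdap.api.dataset.DataSetException;
import io.cdap.cdap.api.dataset.lib.ObjectMappedTable;

class PurchaseWriter {
    // simple POJO whose fields the table maps to columns
    static class Purchase {
        String item;
        long price;

        Purchase(String item, long price) {
            this.item = item;
            this.price = price;
        }
    }

    void record(ObjectMappedTable<Purchase> purchases) {
        try {
            purchases.write(Bytes.toBytes("customer-42"), new Purchase("donut", 100L));
        } catch (DataSetException e) {
            // the object could not be encoded; with a schema-compatible type
            // this should never happen, as the comment in write() notes
        }
    }
}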

Example 34 with DataSetException

use of io.cdap.cdap.api.dataset.DataSetException in project cdap by caskdata.

the class PartitionedFileSetTest method testUpdateMetadata.

@Test
public void testUpdateMetadata() throws Exception {
    final PartitionedFileSet dataset = dsFrameworkUtil.getInstance(pfsInstance);
    dsFrameworkUtil.newTransactionExecutor((TransactionAware) dataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            PartitionOutput partitionOutput = dataset.getPartitionOutput(PARTITION_KEY);
            ImmutableMap<String, String> originalEntries = ImmutableMap.of("key1", "value1", "key2", "value2");
            partitionOutput.setMetadata(originalEntries);
            partitionOutput.addPartition();
            ImmutableMap<String, String> updatedMetadata = ImmutableMap.of("key3", "value3");
            dataset.addMetadata(PARTITION_KEY, updatedMetadata);
            PartitionDetail partitionDetail = dataset.getPartition(PARTITION_KEY);
            Assert.assertNotNull(partitionDetail);
            HashMap<String, String> combinedEntries = Maps.newHashMap();
            combinedEntries.putAll(originalEntries);
            combinedEntries.putAll(updatedMetadata);
            Assert.assertEquals(combinedEntries, partitionDetail.getMetadata().asMap());
            // using the setMetadata API, adding an entry for a key that already exists will overwrite the previous value
            dataset.setMetadata(PARTITION_KEY, Collections.singletonMap("key3", "value4"));
            partitionDetail = dataset.getPartition(PARTITION_KEY);
            Assert.assertNotNull(partitionDetail);
            Assert.assertEquals(ImmutableMap.of("key1", "value1", "key2", "value2", "key3", "value4"), partitionDetail.getMetadata().asMap());
            // adding an entry for a key that already exists will throw an exception
            try {
                dataset.addMetadata(PARTITION_KEY, "key2", "value3");
                Assert.fail("Expected not to be able to update an existing metadata entry");
            } catch (DataSetException expected) {
            }
            // possible to remove multiple metadata entries; if a key doesn't exist, no error is thrown
            dataset.removeMetadata(PARTITION_KEY, ImmutableSet.of("key2", "key3", "key4"));
            // key2 and key3 were removed
            partitionDetail = dataset.getPartition(PARTITION_KEY);
            Assert.assertNotNull(partitionDetail);
            Assert.assertEquals(ImmutableMap.of("key1", "value1"), partitionDetail.getMetadata().asMap());
            try {
                // adding metadata for a nonexistent partition will throw an exception
                PartitionKey nonexistentPartitionKey = PartitionKey.builder().addIntField("i", 42).addLongField("l", 17L).addStringField("s", "nonexistent").build();
                dataset.addMetadata(nonexistentPartitionKey, "key2", "value3");
                Assert.fail("Expected not to be able to add metadata for a nonexistent partition");
            } catch (DataSetException expected) {
            }
        }
    });
}
Also used : DataSetException(io.cdap.cdap.api.dataset.DataSetException) PartitionOutput(io.cdap.cdap.api.dataset.lib.PartitionOutput) HashMap(java.util.HashMap) TransactionAware(org.apache.tephra.TransactionAware) PartitionKey(io.cdap.cdap.api.dataset.lib.PartitionKey) PartitionedFileSet(io.cdap.cdap.api.dataset.lib.PartitionedFileSet) TransactionExecutor(org.apache.tephra.TransactionExecutor) PartitionDetail(io.cdap.cdap.api.dataset.lib.PartitionDetail) DataSetException(io.cdap.cdap.api.dataset.DataSetException) PartitionNotFoundException(io.cdap.cdap.api.dataset.PartitionNotFoundException) PartitionAlreadyExistsException(io.cdap.cdap.api.dataset.lib.PartitionAlreadyExistsException) IOException(java.io.IOException) ImmutableMap(com.google.common.collect.ImmutableMap) Test(org.junit.Test)
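
The semantics the test exercises can be condensed into a few calls. A sketch, assuming pfs is a PartitionedFileSet and key an existing partition key:

import java.util.Collections;

import com.google.common.collect.ImmutableSet;
import io.cdap.cdap.api.dataset.DataSetException;
import io.cdap.cdap.api.dataset.lib.PartitionKey;
import io.cdap.cdap.api.dataset.lib.PartitionedFileSet;

class MetadataSemanticsSketch {
    void demo(PartitionedFileSet pfs, PartitionKey key) {
        // addMetadata: only adds new keys; re-adding an existing key throws
        try {
            pfs.addMetadata(key, "k", "v1");
            pfs.addMetadata(key, "k", "v2");    // throws DataSetException
        } catch (DataSetException expected) {
        }
        // setMetadata: silently overwrites existing keys
        pfs.setMetadata(key, Collections.singletonMap("k", "v2"));
        // removeMetadata: missing keys are ignored, no error
        pfs.removeMetadata(key, ImmutableSet.of("k", "no-such-key"));
    }
}

Of the three operations, only addMetadata treats an existing key as an error.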

Example 35 with DataSetException

use of io.cdap.cdap.api.dataset.DataSetException in project cdap by caskdata.

the class DefaultDatasetRuntimeContext method onMethodEntry.

@Override
public void onMethodEntry(boolean constructor, @Nullable Class<? extends Annotation> annotation) {
    CallStack callStack = this.callStack.get();
    AccessInfo accessInfo = UNKNOWN_ACCESS_INFO;
    if (annotation == null && constructor) {
        annotation = constructorDefaultAnnotation;
    }
    if (annotation != null) {
        accessInfo = ANNOTATION_TO_ACCESS_INFO.get(annotation);
        if (accessInfo == null) {
            // shouldn't happen
            throw new DataSetException("Unsupported annotation " + annotation + " on dataset " + datasetId);
        }
    }
    // enforce the required permissions; access with no privilege at all is not allowed
    try {
        enforcer.enforce(datasetId, principal, accessInfo.getPermissions());
    } catch (Exception e) {
        throw new DataSetException("The principal " + principal + " is not authorized to access " + datasetId + " for operation types " + accessInfo.getPermissions(), e);
    }
    recordAccess(callStack.enter(accessInfo.getAccessType()), accessInfo.getAccessType());
}
Also used : DataSetException(io.cdap.cdap.api.dataset.DataSetException)
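
From a caller's perspective, the enforcement in onMethodEntry surfaces as a DataSetException on the first dataset operation that is entered without the required permission. A hypothetical sketch; that put() is a @WriteOnly entry point wired through this context is an assumption here:

import io.cdap.cdap.api.dataset.DataSetException;
import io.cdap.cdap.api.dataset.table.Put;
import io.cdap.cdap.api.dataset.table.Table;

class AuthorizationSketch {
    void tryWrite(Table table) {
        try {
            // entering the write method triggers onMethodEntry, which maps the
            // method's annotation to the required permissions and enforces them
            table.put(new Put("row").add("col", "value"));
        } catch (DataSetException e) {
            // "The principal <principal> is not authorized to access <dataset> ..."
        }
    }
}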

Aggregations

DataSetException (io.cdap.cdap.api.dataset.DataSetException): 74
IOException (java.io.IOException): 54
ReadOnly (io.cdap.cdap.api.annotation.ReadOnly): 14
Map (java.util.Map): 12
TransactionFailureException (org.apache.tephra.TransactionFailureException): 12
Location (org.apache.twill.filesystem.Location): 12
PartitionKey (io.cdap.cdap.api.dataset.lib.PartitionKey): 10
Result (io.cdap.cdap.api.dataset.table.Result): 10
NavigableMap (java.util.NavigableMap): 10
Test (org.junit.Test): 10
PartitionAlreadyExistsException (io.cdap.cdap.api.dataset.lib.PartitionAlreadyExistsException): 8
TimePartitionedFileSet (io.cdap.cdap.api.dataset.lib.TimePartitionedFileSet): 8
Put (org.apache.hadoop.hbase.client.Put): 8
ImmutableMap (com.google.common.collect.ImmutableMap): 6
WriteOnly (io.cdap.cdap.api.annotation.WriteOnly): 6
DatasetManagementException (io.cdap.cdap.api.dataset.DatasetManagementException): 6
PartitionedFileSet (io.cdap.cdap.api.dataset.lib.PartitionedFileSet): 6
Put (io.cdap.cdap.api.dataset.table.Put): 6
Row (io.cdap.cdap.api.dataset.table.Row): 6
UnauthorizedException (io.cdap.cdap.security.spi.authorization.UnauthorizedException): 6