
Example 1 with DataSetException

Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.

From the class HBaseMetricsTable, method put().

@Override
public void put(SortedMap<byte[], ? extends SortedMap<byte[], Long>> updates) {
    List<Put> puts = Lists.newArrayList();
    for (Map.Entry<byte[], ? extends SortedMap<byte[], Long>> row : updates.entrySet()) {
        PutBuilder put = tableUtil.buildPut(row.getKey());
        for (Map.Entry<byte[], Long> column : row.getValue().entrySet()) {
            put.add(columnFamily, column.getKey(), Bytes.toBytes(column.getValue()));
        }
        puts.add(put.build());
    }
    try {
        hTable.put(puts);
        hTable.flushCommits();
    } catch (IOException e) {
        throw new DataSetException("Put failed on table " + tableId, e);
    }
}
Also used: PutBuilder (co.cask.cdap.data2.util.hbase.PutBuilder), DataSetException (co.cask.cdap.api.dataset.DataSetException), IOException (java.io.IOException), Map (java.util.Map), NavigableMap (java.util.NavigableMap), SortedMap (java.util.SortedMap), Put (org.apache.hadoop.hbase.client.Put)
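
The nested SortedMap that put() expects is somewhat awkward to build by hand. Below is a minimal caller-side sketch, not part of the CDAP sources: the HBaseMetricsTable import path is an assumption based on the usual CDAP package layout, and the HBase Bytes utility is used here only for encoding and for the byte-wise comparator.

import co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseMetricsTable;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.SortedMap;
import java.util.TreeMap;

public final class MetricsPutSketch {

    // Writes a single long value; put() wraps any IOException from HBase into an
    // unchecked DataSetException whose message names the failing table.
    public static void putSingleValue(HBaseMetricsTable table, String row, String column, long value) {
        // byte[] has no natural ordering, so both maps need an explicit byte-wise comparator.
        SortedMap<byte[], SortedMap<byte[], Long>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        SortedMap<byte[], Long> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        columns.put(Bytes.toBytes(column), value);
        updates.put(Bytes.toBytes(row), columns);
        table.put(updates);
    }
}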

Example 2 with DataSetException

Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.

From the class HBaseMetricsTable, method putBytes().

@Override
public void putBytes(SortedMap<byte[], ? extends SortedMap<byte[], byte[]>> updates) {
    List<Put> puts = Lists.newArrayList();
    for (Map.Entry<byte[], ? extends SortedMap<byte[], byte[]>> row : updates.entrySet()) {
        PutBuilder put = tableUtil.buildPut(row.getKey());
        for (Map.Entry<byte[], byte[]> column : row.getValue().entrySet()) {
            put.add(columnFamily, column.getKey(), column.getValue());
        }
        puts.add(put.build());
    }
    try {
        hTable.put(puts);
        hTable.flushCommits();
    } catch (IOException e) {
        throw new DataSetException("Put failed on table " + tableId, e);
    }
}
Also used: PutBuilder (co.cask.cdap.data2.util.hbase.PutBuilder), DataSetException (co.cask.cdap.api.dataset.DataSetException), IOException (java.io.IOException), Map (java.util.Map), NavigableMap (java.util.NavigableMap), SortedMap (java.util.SortedMap), Put (org.apache.hadoop.hbase.client.Put)
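
putBytes() follows the same shape as put() but takes already-encoded byte[] values. A minimal sketch under the same assumptions as the sketch after Example 1:

import co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseMetricsTable;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.SortedMap;
import java.util.TreeMap;

public final class MetricsPutBytesSketch {

    // Stores an arbitrary, already-encoded value; like put(), putBytes() wraps I/O
    // failures in an unchecked DataSetException that names the table.
    public static void putRawValue(HBaseMetricsTable table, byte[] row, byte[] column, byte[] value) {
        SortedMap<byte[], SortedMap<byte[], byte[]>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        SortedMap<byte[], byte[]> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        columns.put(column, value);
        updates.put(row, columns);
        table.putBytes(updates);
    }
}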

Example 3 with DataSetException

Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.

From the class HBaseMetricsTable, method get().

@Override
@Nullable
public byte[] get(byte[] row, byte[] column) {
    try {
        Get get = tableUtil.buildGet(row).addColumn(columnFamily, column).setMaxVersions(1).build();
        Result getResult = hTable.get(get);
        if (!getResult.isEmpty()) {
            return getResult.getValue(columnFamily, column);
        }
        return null;
    } catch (IOException e) {
        throw new DataSetException("Get failed on table " + tableId, e);
    }
}
Also used: DataSetException (co.cask.cdap.api.dataset.DataSetException), Get (org.apache.hadoop.hbase.client.Get), IOException (java.io.IOException), Result (org.apache.hadoop.hbase.client.Result), Nullable (javax.annotation.Nullable)
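
Since get() is annotated @Nullable, callers have to handle the missing-cell case explicitly. A minimal sketch, again with an assumed HBaseMetricsTable import path and the HBase Bytes utility for decoding:

import co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseMetricsTable;
import org.apache.hadoop.hbase.util.Bytes;

public final class MetricsGetSketch {

    // Returns the stored counter, or defaultValue if the cell does not exist.
    // get() returns null for a missing cell and throws DataSetException on an I/O failure.
    public static long getOrDefault(HBaseMetricsTable table, byte[] row, byte[] column, long defaultValue) {
        byte[] value = table.get(row, column);
        return value == null ? defaultValue : Bytes.toLong(value);
    }
}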

Example 4 with DataSetException

Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.

From the class HBaseMetricsTable, method increment().

@Override
public void increment(byte[] row, Map<byte[], Long> increments) {
    Put increment = getIncrementalPut(row, increments);
    try {
        hTable.put(increment);
        hTable.flushCommits();
    } catch (IOException e) {
        // currently there is no other way to detect this from the HBase exception than a string match
        if (e.getMessage() != null && e.getMessage().contains("isn't 64 bits wide")) {
            throw new NumberFormatException("Attempted to increment a value that is not convertible to long," + " row: " + Bytes.toStringBinary(row));
        }
        throw new DataSetException("Increment failed on table " + tableId, e);
    }
}
Also used: DataSetException (co.cask.cdap.api.dataset.DataSetException), IOException (java.io.IOException), Put (org.apache.hadoop.hbase.client.Put)
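
Callers of increment() have two failure modes to tell apart: a NumberFormatException when the existing cell is not an 8-byte encoded long, and a DataSetException for any other HBase failure. A minimal sketch of that distinction (the HBaseMetricsTable import path is an assumption):

import co.cask.cdap.data2.dataset2.lib.table.hbase.HBaseMetricsTable;

import java.util.Collections;

public final class MetricsIncrementSketch {

    // Returns true if the increment was applied, false if the existing cell is not a long.
    public static boolean tryIncrement(HBaseMetricsTable table, byte[] row, byte[] column, long delta) {
        try {
            table.increment(row, Collections.singletonMap(column, delta));
            return true;
        } catch (NumberFormatException e) {
            // The cell holds a value that is not 8 bytes wide and cannot be incremented.
            return false;
        }
        // A DataSetException (unchecked) from any other I/O failure is left to propagate.
    }
}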

Example 5 with DataSetException

Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.

From the class TimePartitionedFileSetTest, method testAddGetPartitions().

@Test
public void testAddGetPartitions() throws Exception {
    final TimePartitionedFileSet fileSet = dsFrameworkUtil.getInstance(TPFS_INSTANCE);
    TransactionAware txAwareDataset = (TransactionAware) fileSet;
    dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {

        @Override
        public void apply() throws Exception {
            // this is an arbitrary date to use as the test time
            long time = DATE_FORMAT.parse("12/10/14 5:10 am").getTime();
            long time2 = time + HOUR;
            String firstPath = "first/partition";
            String secondPath = "second/partition";
            // make sure the file set has no partitions initially
            validateTimePartition(fileSet, time, null);
            validateTimePartitions(fileSet, 0L, MAX, Collections.<Long, String>emptyMap());
            // add a partition, verify getPartition() works
            fileSet.addPartition(time, firstPath);
            validateTimePartition(fileSet, time, firstPath);
            Map<Long, String> expectNone = Collections.emptyMap();
            Map<Long, String> expectFirst = ImmutableMap.of(time, firstPath);
            Map<Long, String> expectSecond = ImmutableMap.of(time2, secondPath);
            Map<Long, String> expectBoth = ImmutableMap.of(time, firstPath, time2, secondPath);
            // verify various ways to list partitions with various ranges
            validateTimePartitions(fileSet, time + MINUTE, MAX, expectNone);
            validateTimePartitions(fileSet, 0L, time, expectNone);
            validateTimePartitions(fileSet, 0L, MAX, expectFirst);
            validateTimePartitions(fileSet, 0L, time + MINUTE, expectFirst);
            validateTimePartitions(fileSet, 0L, time + MINUTE, expectFirst);
            validateTimePartitions(fileSet, 0L, time + HOUR, expectFirst);
            validateTimePartitions(fileSet, time - HOUR, time + HOUR, expectFirst);
            // add and verify another partition
            fileSet.addPartition(time2, secondPath);
            validateTimePartition(fileSet, time2, secondPath);
            // verify various ways to list partitions with various ranges
            validateTimePartitions(fileSet, 0L, MAX, expectBoth);
            validateTimePartitions(fileSet, time, time + 30 * MINUTE, expectFirst);
            validateTimePartitions(fileSet, time + 30 * MINUTE, time2, expectNone);
            validateTimePartitions(fileSet, time + 30 * MINUTE, time2 + 30 * MINUTE, expectSecond);
            validateTimePartitions(fileSet, time - 30 * MINUTE, time2 + 30 * MINUTE, expectBoth);
            // try to add another partition with the same key
            try {
                fileSet.addPartition(time2, "third/partition");
                Assert.fail("Should have thrown Exception for duplicate partition");
            } catch (DataSetException e) {
            //expected
            }
            // remove first partition and validate
            fileSet.dropPartition(time);
            validateTimePartition(fileSet, time, null);
            // verify various ways to list partitions with various ranges
            validateTimePartitions(fileSet, 0L, MAX, expectSecond);
            validateTimePartitions(fileSet, time, time + 30 * MINUTE, expectNone);
            validateTimePartitions(fileSet, time + 30 * MINUTE, time2, expectNone);
            validateTimePartitions(fileSet, time + 30 * MINUTE, time2 + 30 * MINUTE, expectSecond);
            validateTimePartitions(fileSet, time - 30 * MINUTE, time2 + 30 * MINUTE, expectSecond);
            // try to drop the first partition again; it no longer exists, so no exception is expected
            try {
                fileSet.dropPartition(time);
            } catch (DataSetException e) {
                Assert.fail("Should not have have thrown Exception for removing non-existent partition");
            }
        }
    });
}
Also used: DataSetException (co.cask.cdap.api.dataset.DataSetException), TransactionAware (org.apache.tephra.TransactionAware), TransactionExecutor (org.apache.tephra.TransactionExecutor), TimePartitionedFileSet (co.cask.cdap.api.dataset.lib.TimePartitionedFileSet), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), TransactionFailureException (org.apache.tephra.TransactionFailureException), DatasetManagementException (co.cask.cdap.api.dataset.DatasetManagementException), IOException (java.io.IOException), Test (org.junit.Test)
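
The test shows that addPartition() throws DataSetException for a duplicate partition key, while dropPartition() of a non-existent partition completes without error. Below is a minimal add-if-absent sketch built on that behaviour; treating every DataSetException as "partition already exists" is a simplification for illustration, since other dataset failures surface as the same exception type.

import co.cask.cdap.api.dataset.DataSetException;
import co.cask.cdap.api.dataset.lib.TimePartitionedFileSet;

public final class TimePartitionSketch {

    // Returns true if the partition was added, false if a partition for this time already existed
    // (simplified: any DataSetException is interpreted as a duplicate key here).
    public static boolean addPartitionIfAbsent(TimePartitionedFileSet fileSet, long time, String path) {
        try {
            fileSet.addPartition(time, path);
            return true;
        } catch (DataSetException e) {
            return false;
        }
    }
}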

Aggregations

DataSetException (co.cask.cdap.api.dataset.DataSetException): 35 usages
IOException (java.io.IOException): 26 usages
Map (java.util.Map): 8 usages
ReadOnly (co.cask.cdap.api.annotation.ReadOnly): 6 usages
PartitionKey (co.cask.cdap.api.dataset.lib.PartitionKey): 5 usages
Result (co.cask.cdap.api.dataset.table.Result): 5 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 5 usages
TransactionFailureException (org.apache.tephra.TransactionFailureException): 5 usages
WriteOnly (co.cask.cdap.api.annotation.WriteOnly): 4 usages
TimePartitionedFileSet (co.cask.cdap.api.dataset.lib.TimePartitionedFileSet): 4 usages
Put (co.cask.cdap.api.dataset.table.Put): 4 usages
HashMap (java.util.HashMap): 4 usages
NavigableMap (java.util.NavigableMap): 4 usages
Location (org.apache.twill.filesystem.Location): 4 usages
Test (org.junit.Test): 4 usages
PartitionNotFoundException (co.cask.cdap.api.dataset.PartitionNotFoundException): 3 usages
Row (co.cask.cdap.api.dataset.table.Row): 3 usages
DatasetId (co.cask.cdap.proto.id.DatasetId): 3 usages
Put (org.apache.hadoop.hbase.client.Put): 3 usages
TransactionAware (org.apache.tephra.TransactionAware): 3 usages
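
The aggregation reflects the common pattern across these examples: checked IOExceptions from the storage layer are wrapped into the unchecked DataSetException with the dataset or table name in the message, so dataset clients are not forced to declare I/O exceptions. A condensed, hypothetical illustration of that pattern follows; ExampleTable and doWrite() are made up for the sketch, and only DataSetException is real CDAP API.

import co.cask.cdap.api.dataset.DataSetException;

import java.io.IOException;

public final class ExampleTable {

    private final String tableName;

    public ExampleTable(String tableName) {
        this.tableName = tableName;
    }

    public void write(byte[] row, byte[] value) {
        try {
            doWrite(row, value);
        } catch (IOException e) {
            // Wrap the checked IOException into the unchecked DataSetException and
            // include the table name so the failure can be traced to a dataset.
            throw new DataSetException("Put failed on table " + tableName, e);
        }
    }

    private void doWrite(byte[] row, byte[] value) throws IOException {
        // Placeholder for the actual storage call (e.g. an HTable put in HBaseMetricsTable).
    }
}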