Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.
From the class HBaseMetricsTable, method put.
@Override
public void put(SortedMap<byte[], ? extends SortedMap<byte[], Long>> updates) {
  // build one HBase Put per row, with each long value encoded as an 8-byte array
  List<Put> puts = Lists.newArrayList();
  for (Map.Entry<byte[], ? extends SortedMap<byte[], Long>> row : updates.entrySet()) {
    PutBuilder put = tableUtil.buildPut(row.getKey());
    for (Map.Entry<byte[], Long> column : row.getValue().entrySet()) {
      put.add(columnFamily, column.getKey(), Bytes.toBytes(column.getValue()));
    }
    puts.add(put.build());
  }
  try {
    // write the whole batch and flush the client-side write buffer
    hTable.put(puts);
    hTable.flushCommits();
  } catch (IOException e) {
    throw new DataSetException("Put failed on table " + tableId, e);
  }
}
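A caller might assemble the nested row-to-columns map like this. This is a minimal sketch, not taken from the CDAP source: the metricsTable variable stands in for an HBaseMetricsTable instance, the row and column names are made up, and Bytes here is HBase's org.apache.hadoop.hbase.util.Bytes.

import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;

// one inner map of column -> long value per row key; byte[] keys need an explicit comparator
SortedMap<byte[], SortedMap<byte[], Long>> updates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
SortedMap<byte[], Long> columns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
columns.put(Bytes.toBytes("count"), 42L);
updates.put(Bytes.toBytes("metric.row.key"), columns);
metricsTable.put(updates); // becomes one HBase Put per row, written as a single batch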
Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.
From the class HBaseMetricsTable, method putBytes.
@Override
public void putBytes(SortedMap<byte[], ? extends SortedMap<byte[], byte[]>> updates) {
  // same shape as put(), but values are written as raw byte[] without long encoding
  List<Put> puts = Lists.newArrayList();
  for (Map.Entry<byte[], ? extends SortedMap<byte[], byte[]>> row : updates.entrySet()) {
    PutBuilder put = tableUtil.buildPut(row.getKey());
    for (Map.Entry<byte[], byte[]> column : row.getValue().entrySet()) {
      put.add(columnFamily, column.getKey(), column.getValue());
    }
    puts.add(put.build());
  }
  try {
    hTable.put(puts);
    hTable.flushCommits();
  } catch (IOException e) {
    throw new DataSetException("Put failed on table " + tableId, e);
  }
}
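The raw-bytes variant follows the same calling pattern, with values stored as-is instead of being encoded from Long. A minimal sketch, reusing the assumed names and imports from the sketch above:

SortedMap<byte[], SortedMap<byte[], byte[]>> rawUpdates = new TreeMap<>(Bytes.BYTES_COMPARATOR);
SortedMap<byte[], byte[]> rawColumns = new TreeMap<>(Bytes.BYTES_COMPARATOR);
rawColumns.put(Bytes.toBytes("tags"), Bytes.toBytes("host=worker-1"));
rawUpdates.put(Bytes.toBytes("metric.row.key"), rawColumns);
metricsTable.putBytes(rawUpdates); // values are written without any long encoding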
Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.
From the class HBaseMetricsTable, method get.
@Override
@Nullable
public byte[] get(byte[] row, byte[] column) {
  try {
    // single-column read of the latest version only
    Get get = tableUtil.buildGet(row).addColumn(columnFamily, column).setMaxVersions(1).build();
    Result getResult = hTable.get(get);
    if (!getResult.isEmpty()) {
      return getResult.getValue(columnFamily, column);
    }
    // null when the row or column does not exist
    return null;
  } catch (IOException e) {
    throw new DataSetException("Get failed on table " + tableId, e);
  }
}
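Since get() is annotated @Nullable, callers have to handle the missing-cell case themselves. A minimal sketch with the same assumed metricsTable variable and made-up keys:

byte[] raw = metricsTable.get(Bytes.toBytes("metric.row.key"), Bytes.toBytes("count"));
long count = (raw == null) ? 0L : Bytes.toLong(raw); // null means the row or column is absent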
Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.
From the class HBaseMetricsTable, method increment.
@Override
public void increment(byte[] row, Map<byte[], Long> increments) {
  Put increment = getIncrementalPut(row, increments);
  try {
    hTable.put(increment);
    hTable.flushCommits();
  } catch (IOException e) {
    // currently there is no other way to extract this from the HBase exception than a string match
    if (e.getMessage() != null && e.getMessage().contains("isn't 64 bits wide")) {
      throw new NumberFormatException("Attempted to increment a value that is not convertible to long," + " row: " + Bytes.toStringBinary(row));
    }
    throw new DataSetException("Increment failed on table " + tableId, e);
  }
}
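A minimal sketch of calling increment(), again with the assumed metricsTable variable, made-up column names, and the imports from the first sketch. The NumberFormatException above signals that an existing cell does not hold an 8-byte encoded long:

SortedMap<byte[], Long> deltas = new TreeMap<>(Bytes.BYTES_COMPARATOR);
deltas.put(Bytes.toBytes("count"), 1L);
deltas.put(Bytes.toBytes("sum"), 25L);
metricsTable.increment(Bytes.toBytes("metric.row.key"), deltas); // adds each delta to its column in the row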
Use of co.cask.cdap.api.dataset.DataSetException in project cdap by caskdata.
From the class TimePartitionedFileSetTest, method testAddGetPartitions.
@Test
public void testAddGetPartitions() throws Exception {
  final TimePartitionedFileSet fileSet = dsFrameworkUtil.getInstance(TPFS_INSTANCE);
  TransactionAware txAwareDataset = (TransactionAware) fileSet;
  dsFrameworkUtil.newInMemoryTransactionExecutor(txAwareDataset).execute(new TransactionExecutor.Subroutine() {
    @Override
    public void apply() throws Exception {
      // this is an arbitrary date to use as the test time
      long time = DATE_FORMAT.parse("12/10/14 5:10 am").getTime();
      long time2 = time + HOUR;
      String firstPath = "first/partition";
      String secondPath = "second/partition";
      // make sure the file set has no partitions initially
      validateTimePartition(fileSet, time, null);
      validateTimePartitions(fileSet, 0L, MAX, Collections.<Long, String>emptyMap());
      // add a partition, verify getPartition() works
      fileSet.addPartition(time, firstPath);
      validateTimePartition(fileSet, time, firstPath);
      Map<Long, String> expectNone = Collections.emptyMap();
      Map<Long, String> expectFirst = ImmutableMap.of(time, firstPath);
      Map<Long, String> expectSecond = ImmutableMap.of(time2, secondPath);
      Map<Long, String> expectBoth = ImmutableMap.of(time, firstPath, time2, secondPath);
      // verify various ways to list partitions with various ranges
      validateTimePartitions(fileSet, time + MINUTE, MAX, expectNone);
      validateTimePartitions(fileSet, 0L, time, expectNone);
      validateTimePartitions(fileSet, 0L, MAX, expectFirst);
      validateTimePartitions(fileSet, 0L, time + MINUTE, expectFirst);
      validateTimePartitions(fileSet, 0L, time + MINUTE, expectFirst);
      validateTimePartitions(fileSet, 0L, time + HOUR, expectFirst);
      validateTimePartitions(fileSet, time - HOUR, time + HOUR, expectFirst);
      // add and verify another partition
      fileSet.addPartition(time2, secondPath);
      validateTimePartition(fileSet, time2, secondPath);
      // verify various ways to list partitions with various ranges
      validateTimePartitions(fileSet, 0L, MAX, expectBoth);
      validateTimePartitions(fileSet, time, time + 30 * MINUTE, expectFirst);
      validateTimePartitions(fileSet, time + 30 * MINUTE, time2, expectNone);
      validateTimePartitions(fileSet, time + 30 * MINUTE, time2 + 30 * MINUTE, expectSecond);
      validateTimePartitions(fileSet, time - 30 * MINUTE, time2 + 30 * MINUTE, expectBoth);
      // adding another partition with the same key must fail
      try {
        fileSet.addPartition(time2, "third/partition");
        Assert.fail("Should have thrown Exception for duplicate partition");
      } catch (DataSetException e) {
        // expected
      }
      // remove first partition and validate
      fileSet.dropPartition(time);
      validateTimePartition(fileSet, time, null);
      // verify various ways to list partitions with various ranges
      validateTimePartitions(fileSet, 0L, MAX, expectSecond);
      validateTimePartitions(fileSet, time, time + 30 * MINUTE, expectNone);
      validateTimePartitions(fileSet, time + 30 * MINUTE, time2, expectNone);
      validateTimePartitions(fileSet, time + 30 * MINUTE, time2 + 30 * MINUTE, expectSecond);
      validateTimePartitions(fileSet, time - 30 * MINUTE, time2 + 30 * MINUTE, expectSecond);
      // dropping the same (now non-existent) partition again must not throw
      try {
        fileSet.dropPartition(time);
      } catch (DataSetException e) {
        Assert.fail("Should not have thrown Exception for removing non-existent partition");
      }
    }
  });
}
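The partition contract this test exercises, condensed into a minimal sketch; tpfs stands in for a TimePartitionedFileSet instance, the key and paths are illustrative, and the code is assumed to run inside a transaction, as in the test:

long key = DATE_FORMAT.parse("12/10/14 5:10 am").getTime();
tpfs.addPartition(key, "first/partition");   // registers the path under the time key
try {
  tpfs.addPartition(key, "other/partition"); // same key again
} catch (DataSetException e) {
  // expected: a time key maps to at most one path
}
tpfs.dropPartition(key);                     // removes the partition
tpfs.dropPartition(key);                     // dropping a non-existent key must not throw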