Use of co.cask.cdap.api.dataset.table.Get in project cdap by caskdata.
From the class KeyValueTable, method readAll:
/**
 * Performs a batched read of the stored values for the given keys.
 *
 * @param keys the keys to look up
 * @return a map, ordered by key bytes, containing an entry for every key
 *         that has a stored value; keys with no stored value are omitted
 */
@ReadOnly
public Map<byte[], byte[]> readAll(byte[][] keys) {
  // Build one Get per key so the underlying table can service them as a batch.
  List<Get> batch = new ArrayList<>(keys.length);
  for (int i = 0; i < keys.length; i++) {
    batch.add(new Get(keys[i]).add(KEY_COLUMN));
  }
  // byte[] has no value-based equals, so a comparator-backed TreeMap is used.
  Map<byte[], byte[]> result = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (Row row : table.get(batch)) {
    byte[] value = row.get(KEY_COLUMN);
    if (value != null) {
      result.put(row.getRow(), value);
    }
  }
  return result;
}
Use of co.cask.cdap.api.dataset.table.Get in project cdap by caskdata.
From the class TableTest, method testMetrics:
/**
 * Exercises each Table operation and verifies the cumulative read/write metric
 * counts reported through the dataset's MetricsCollector.
 *
 * @param readless whether the table is created with readless-increment support;
 *        when true, increment() is expected to report only a write, otherwise
 *        a read and a write
 */
private void testMetrics(boolean readless) throws Exception {
final String tableName = "survive";
DatasetProperties props = TableProperties.builder().setReadlessIncrementSupport(readless).build();
DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName, props);
admin.create();
Table table = getTable(CONTEXT1, tableName, props);
// Accumulate reported metrics by name so per-operation deltas can be verified below.
final Map<String, Long> metrics = Maps.newHashMap();
((MeteredDataset) table).setMetricsCollector(new MetricsCollector() {
@Override
public void increment(String metricName, long value) {
// sum successive increments for the same metric name
Long old = metrics.get(metricName);
metrics.put(metricName, old == null ? value : old + value);
}
@Override
public void gauge(String metricName, long value) {
// gauge overwrites rather than accumulates
metrics.put(metricName, value);
}
});
// Note that we don't need to finish tx for metrics to be reported
Transaction tx0 = txClient.startShort();
((TransactionAware) table).startTx(tx0);
int writes = 0;
int reads = 0;
// put: one write, no read
table.put(new Put(R1, C1, V1));
verifyDatasetMetrics(metrics, ++writes, reads);
// compareAndSwap: one read plus one write when the expected value matches
table.compareAndSwap(R1, C1, V1, V2);
verifyDatasetMetrics(metrics, ++writes, ++reads);
// note: will not write anything as expected value will not match
table.compareAndSwap(R1, C1, V1, V2);
verifyDatasetMetrics(metrics, writes, ++reads);
// increment: readless mode reports only a write; otherwise read-modify-write
table.increment(new Increment(R2, C2, 1L));
if (readless) {
verifyDatasetMetrics(metrics, ++writes, reads);
} else {
verifyDatasetMetrics(metrics, ++writes, ++reads);
}
// incrementAndGet must read the current value regardless of readless support
table.incrementAndGet(new Increment(R2, C2, 1L));
verifyDatasetMetrics(metrics, ++writes, ++reads);
// NOTE(review): V1 is passed in a column-qualifier position here (and in the
// Delete below) — presumably harmless since no column named V1 exists; confirm intent.
table.get(new Get(R1, C1, V1));
verifyDatasetMetrics(metrics, writes, ++reads);
// each row returned by the scanner counts as one read
Scanner scanner = table.scan(new Scan(null, null));
while (scanner.next() != null) {
verifyDatasetMetrics(metrics, writes, ++reads);
}
// delete: one write, no read
table.delete(new Delete(R1, C1, V1));
verifyDatasetMetrics(metrics, ++writes, reads);
// drop table
admin.drop();
}
Use of co.cask.cdap.api.dataset.table.Get in project cdap by caskdata.
From the class TableTest, method testBatchWritableKeyIsIgnored:
/**
 * Verifies that the key argument passed to Table.write(key, put) is ignored:
 * every write must land on the row named by the Put itself, so row "a" ends up
 * with all three columns and row "q" stays empty.
 */
@Test
public void testBatchWritableKeyIsIgnored() throws Exception {
  String tableName = "batchWritableTable";
  getTableAdmin(CONTEXT1, tableName).create();
  try {
    // Write three puts in one transaction with keys null, 'q' and 'a';
    // each Put targets row "a".
    Transaction tx = txClient.startShort();
    Table table = getTable(CONTEXT1, tableName);
    TransactionAware txTable = (TransactionAware) table;
    txTable.startTx(tx);
    table.write(null, new Put("a").add("x", "x"));
    table.write(new byte[] { 'q' }, new Put("a").add("y", "y"));
    table.write(new byte[] { 'a' }, new Put("a").add("z", "z"));
    Assert.assertTrue(txClient.canCommit(tx, txTable.getTxChanges()));
    txTable.commitTx();
    Assert.assertTrue(txClient.commit(tx));
    // In a fresh transaction, confirm row "q" was never written and row "a"
    // received all three columns.
    tx = txClient.startShort();
    txTable.startTx(tx);
    Assert.assertTrue(table.get(new Get("q")).isEmpty());
    Row row = table.get(new Get("a"));
    Assert.assertEquals(3, row.getColumns().size());
    Assert.assertEquals("x", row.getString("x"));
    Assert.assertEquals("y", row.getString("y"));
    Assert.assertEquals("z", row.getString("z"));
    txTable.commitTx();
    txClient.abort(tx);
  } finally {
    getTableAdmin(CONTEXT1, tableName).drop();
  }
}
Use of co.cask.cdap.api.dataset.table.Get in project cdap by caskdata.
From the class TableTest, method testReadOwnWrite:
/**
 * Verifies read-own-write semantics: after commitTx() flushes the table's
 * buffered writes, a read issued within the same (still open) transaction
 * must see the flushed value.
 */
@Test
public void testReadOwnWrite() throws Exception {
  final String tableName = "readOwnWrite";
  DatasetAdmin admin = getTableAdmin(CONTEXT1, tableName);
  admin.create();
  Table table = getTable(CONTEXT1, tableName);
  Transaction tx = txClient.startShort();
  try {
    TransactionAware txTable = (TransactionAware) table;
    txTable.startTx(tx);
    // Buffer a write, then flush it by committing the table's local changes.
    table.put(new Put(R1, C1, V1));
    txTable.commitTx();
    // The flushed write must now be visible to a read in the same transaction.
    Assert.assertArrayEquals(V1, table.get(new Get(R1, C1)).get(C1));
  } finally {
    txClient.commit(tx);
  }
  // drop table
  admin.drop();
}
Use of co.cask.cdap.api.dataset.table.Get in project cdap by caskdata.
From the class TableTest, method testMultiGetWithEmpty:
/**
 * Verifies that a multi-get correctly handles Gets that request zero columns:
 * such Gets must come back as empty rows without disturbing the results of
 * the neighboring, non-empty Gets in the same batch.
 */
@Test
public void testMultiGetWithEmpty() throws Exception {
  DatasetAdmin admin = getTableAdmin(CONTEXT1, MY_TABLE);
  admin.create();
  try {
    Transaction tx = txClient.startShort();
    Table table = getTable(CONTEXT1, MY_TABLE);
    ((TransactionAware) table).startTx(tx);
    table.put(R1, C1, V1);
    table.put(R1, C2, V2);
    table.put(R1, C3, V3);
    table.put(R1, C4, V4);
    // The Gets at indexes 1 and 3 request zero columns; this exercises the
    // batch-get logic when empty Gets are interleaved with normal ones.
    List<Get> batch = new ArrayList<>();
    batch.add(new Get(R1, C1));
    batch.add(new Get(R1, ImmutableList.<byte[]>of()));
    batch.add(new Get(R1, C2, C3));
    batch.add(new Get(R1, ImmutableList.<byte[]>of()));
    batch.add(new Get(R1, C4));
    List<Row> results = table.get(batch);
    // The non-empty Gets return exactly the columns they asked for.
    Assert.assertEquals(1, results.get(0).getColumns().size());
    Assert.assertArrayEquals(V1, results.get(0).get(C1));
    Assert.assertEquals(2, results.get(2).getColumns().size());
    Assert.assertArrayEquals(V2, results.get(2).get(C2));
    Assert.assertArrayEquals(V3, results.get(2).get(C3));
    Assert.assertEquals(1, results.get(4).getColumns().size());
    Assert.assertArrayEquals(V4, results.get(4).get(C4));
    // The zero-column Gets come back as empty rows.
    Assert.assertEquals(0, results.get(1).getColumns().size());
    Assert.assertEquals(0, results.get(3).getColumns().size());
    txClient.abort(tx);
  } finally {
    admin.drop();
  }
}
Aggregations