Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
In the class TestThriftHBaseServiceHandler, the method testAttribute:
@Test
public void testAttribute() throws Exception {
  byte[] rowName = Bytes.toBytes("testAttribute");
  byte[] attributeKey = Bytes.toBytes("attribute1");
  byte[] attributeValue = Bytes.toBytes("value1");
  Map<ByteBuffer, ByteBuffer> attributes = new HashMap<>();
  attributes.put(wrap(attributeKey), wrap(attributeValue));

  TGet tGet = new TGet(wrap(rowName));
  tGet.setAttributes(attributes);
  Get get = getFromThrift(tGet);
  assertArrayEquals(get.getAttribute("attribute1"), attributeValue);

  List<TColumnValue> columnValues = new ArrayList<>(1);
  columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
  TPut tPut = new TPut(wrap(rowName), columnValues);
  tPut.setAttributes(attributes);
  Put put = putFromThrift(tPut);
  assertArrayEquals(put.getAttribute("attribute1"), attributeValue);

  TScan tScan = new TScan();
  tScan.setAttributes(attributes);
  Scan scan = scanFromThrift(tScan);
  assertArrayEquals(scan.getAttribute("attribute1"), attributeValue);

  List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
  incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
  TIncrement tIncrement = new TIncrement(wrap(rowName), incrementColumns);
  tIncrement.setAttributes(attributes);
  Increment increment = incrementFromThrift(tIncrement);
  assertArrayEquals(increment.getAttribute("attribute1"), attributeValue);

  TDelete tDelete = new TDelete(wrap(rowName));
  tDelete.setAttributes(attributes);
  Delete delete = deleteFromThrift(tDelete);
  assertArrayEquals(delete.getAttribute("attribute1"), attributeValue);
}
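The attribute round-trip above relies on the OperationWithAttributes API that Increment (like Get, Put, Scan, and Delete) inherits. A minimal hedged sketch of using that API directly on an Increment; the row, family, and qualifier names here are illustrative only:

// Set and read back an operation attribute directly on an Increment
Increment inc = new Increment(Bytes.toBytes("row1"));
inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 1L);
inc.setAttribute("attribute1", Bytes.toBytes("value1"));
byte[] readBack = inc.getAttribute("attribute1"); // same bytes that were set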
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
In the class TestVisibilityLabels, the method testLabelsWithIncrement:
@Test
public void testLabelsWithIncrement() throws Throwable {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = TEST_UTIL.createTable(tableName, fam)) {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] val = Bytes.toBytes(1L);
    Put put = new Put(row1);
    put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, val);
    put.setCellVisibility(new CellVisibility(SECRET + " & " + CONFIDENTIAL));
    table.put(put);

    Get get = new Get(row1);
    get.setAuthorizations(new Authorizations(SECRET));
    Result result = table.get(get);
    assertTrue(result.isEmpty());

    table.incrementColumnValue(row1, fam, qual, 2L);
    result = table.get(get);
    assertTrue(result.isEmpty());

    Increment increment = new Increment(row1);
    increment.addColumn(fam, qual, 2L);
    increment.setCellVisibility(new CellVisibility(SECRET));
    table.increment(increment);
    result = table.get(get);
    assertTrue(!result.isEmpty());
  }
}
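Visibility expressions are not limited to a single label: they can combine labels with & (and), | (or), ! (not), and parentheses. A hedged sketch reusing the labels and columns from the test above:

// Cells produced by this increment are visible to users holding SECRET or CONFIDENTIAL
Increment increment = new Increment(row1);
increment.addColumn(fam, qual, 2L);
increment.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")"));
table.increment(increment);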
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
In the class ThriftHBaseServiceHandler, the method increment:
@Override
public void increment(TIncrement tincrement) throws IOError, TException {
  if (tincrement.getRow().length == 0 || tincrement.getTable().length == 0) {
    throw new TException("Must supply a table and a row key; can't increment");
  }
  if (conf.getBoolean(COALESCE_INC_KEY, false)) {
    this.coalescer.queueIncrement(tincrement);
    return;
  }
  Table table = null;
  try {
    table = getTable(tincrement.getTable());
    Increment inc = ThriftUtilities.incrementFromThrift(tincrement);
    table.increment(inc);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw getIOError(e);
  } finally {
    closeTable(table);
  }
}
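Whether increments are applied immediately or queued for coalescing is controlled by the boolean configuration read through COALESCE_INC_KEY. A hedged sketch of turning coalescing on; the handler above only shows the constant name, so the literal key string below is an assumption for illustration:

// Assumed key string behind COALESCE_INC_KEY; queued increments are merged by the coalescer
Configuration conf = HBaseConfiguration.create();
conf.setBoolean("hbase.regionserver.thrift.coalesceIncrement", true);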
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
In the class TestHRegion, the method testMutateRowInParallel:
@Test
public void testMutateRowInParallel() throws Exception {
  final int numReaderThreads = 100;
  final CountDownLatch latch = new CountDownLatch(numReaderThreads);

  final byte[] row = Bytes.toBytes("row");
  final byte[] q1 = Bytes.toBytes("q1");
  final byte[] q2 = Bytes.toBytes("q2");
  final byte[] q3 = Bytes.toBytes("q3");
  final byte[] q4 = Bytes.toBytes("q4");
  final String v1 = "v1";
  final String v2 = "v2";

  // We need to ensure the timestamp of the delete operation is more than the previous one
  final AtomicLong deleteTimestamp = new AtomicLong();

  region = initHRegion(tableName, method, CONF, fam1);

  // Initial values
  region.batchMutate(new Mutation[] { new Put(row)
    .addColumn(fam1, q1, Bytes.toBytes(v1))
    .addColumn(fam1, q2, deleteTimestamp.getAndIncrement(), Bytes.toBytes(v2))
    .addColumn(fam1, q3, Bytes.toBytes(1L))
    .addColumn(fam1, q4, Bytes.toBytes("a")) });

  final AtomicReference<AssertionError> assertionError = new AtomicReference<>();

  // Writer thread
  Thread writerThread = new Thread(() -> {
    try {
      while (true) {
        // If all the reader threads finish, then stop the writer thread
        if (latch.await(0, TimeUnit.MILLISECONDS)) {
          return;
        }

        // Execute the mutations. This should be done atomically
        region.mutateRow(new RowMutations(row).add(Arrays.asList(
          new Put(row).addColumn(fam1, q1, Bytes.toBytes(v2)),
          new Delete(row).addColumns(fam1, q2, deleteTimestamp.getAndIncrement()),
          new Increment(row).addColumn(fam1, q3, 1L),
          new Append(row).addColumn(fam1, q4, Bytes.toBytes("b")))));

        // We need to ensure the timestamps of the Increment/Append operations are more than the
        // previous ones
        Result result = region.get(new Get(row).addColumn(fam1, q3).addColumn(fam1, q4));
        long tsIncrement = result.getColumnLatestCell(fam1, q3).getTimestamp();
        long tsAppend = result.getColumnLatestCell(fam1, q4).getTimestamp();

        // Put the initial values back
        region.batchMutate(new Mutation[] { new Put(row)
          .addColumn(fam1, q1, Bytes.toBytes(v1))
          .addColumn(fam1, q2, deleteTimestamp.getAndIncrement(), Bytes.toBytes(v2))
          .addColumn(fam1, q3, tsIncrement + 1, Bytes.toBytes(1L))
          .addColumn(fam1, q4, tsAppend + 1, Bytes.toBytes("a")) });
      }
    } catch (Exception e) {
      assertionError.set(new AssertionError(e));
    }
  });
  writerThread.start();

  // Reader threads
  for (int i = 0; i < numReaderThreads; i++) {
    new Thread(() -> {
      try {
        for (int j = 0; j < 10000; j++) {
          // Verify the values
          Result result = region.get(new Get(row));

          // The values should be equal to either the initial values or the values after
          // executing the mutations
          String q1Value = Bytes.toString(result.getValue(fam1, q1));
          if (v1.equals(q1Value)) {
            assertEquals(v2, Bytes.toString(result.getValue(fam1, q2)));
            assertEquals(1L, Bytes.toLong(result.getValue(fam1, q3)));
            assertEquals("a", Bytes.toString(result.getValue(fam1, q4)));
          } else if (v2.equals(q1Value)) {
            assertNull(Bytes.toString(result.getValue(fam1, q2)));
            assertEquals(2L, Bytes.toLong(result.getValue(fam1, q3)));
            assertEquals("ab", Bytes.toString(result.getValue(fam1, q4)));
          } else {
            fail("the qualifier " + Bytes.toString(q1) + " should be " + v1 + " or " + v2
              + ", but " + q1Value);
          }
        }
      } catch (Exception e) {
        assertionError.set(new AssertionError(e));
      } catch (AssertionError e) {
        assertionError.set(e);
      }
      latch.countDown();
    }).start();
  }

  writerThread.join();

  if (assertionError.get() != null) {
    throw assertionError.get();
  }
}
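The same atomic single-row pattern is available to clients through Table.mutateRow. A hedged client-side sketch, assuming an HBase version that accepts Increment and Append inside RowMutations; the column names simply reuse the test's for illustration:

// Apply a Put, Delete, Increment and Append to one row as a single atomic operation
RowMutations mutations = new RowMutations(row);
mutations.add(Arrays.asList(
  new Put(row).addColumn(fam1, q1, Bytes.toBytes("v2")),
  new Delete(row).addColumns(fam1, q2),
  new Increment(row).addColumn(fam1, q3, 1L),
  new Append(row).addColumn(fam1, q4, Bytes.toBytes("b"))));
table.mutateRow(mutations);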
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
In the class TestHRegion, the method testCheckAndMutate_wrongMutationType:
@Test
@Deprecated
public void testCheckAndMutate_wrongMutationType() throws Throwable {
  // Setting up region
  this.region = initHRegion(tableName, method, CONF, fam1);

  try {
    region.checkAndMutate(row, fam1, qual1, CompareOperator.EQUAL, new BinaryComparator(value1),
      new Increment(row).addColumn(fam1, qual1, 1));
    fail("should throw DoNotRetryIOException");
  } catch (DoNotRetryIOException e) {
    assertEquals("Unsupported mutate type: INCREMENT", e.getMessage());
  }

  try {
    region.checkAndMutate(row, new SingleColumnValueFilter(fam1, qual1, CompareOperator.EQUAL, value1),
      new Increment(row).addColumn(fam1, qual1, 1));
    fail("should throw DoNotRetryIOException");
  } catch (DoNotRetryIOException e) {
    assertEquals("Unsupported mutate type: INCREMENT", e.getMessage());
  }
}
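The deprecated checkAndMutate overloads exercised above reject Increment outright. A hedged sketch of the newer builder-style API, which in releases that provide org.apache.hadoop.hbase.client.CheckAndMutate does allow a conditional increment; treat the exact availability as version-dependent:

// Conditionally increment only if the current value matches
CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
  .ifEquals(fam1, qual1, value1)
  .build(new Increment(row).addColumn(fam1, qual1, 1L));
CheckAndMutateResult result = region.checkAndMutate(checkAndMutate);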