use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
Example from class MultiThreadedUpdater, method mutate().
/**
 * Applies a single mutation to the given table, dispatching on the concrete
 * {@link Mutation} subtype: Increments and Appends are applied directly, while
 * Puts and Deletes go through checkAndMutate against the expected value {@code v}
 * at {@code row}/{@code cf}/{@code q}. On success the elapsed time is added to
 * {@code totalOpTimeMs}; on IOException the key is recorded in {@code failedKeySet}
 * and the failure is logged with region debug information.
 *
 * @param table   the table to mutate
 * @param m       the mutation to apply (Increment, Append, Put, or Delete)
 * @param keyBase key used for failure bookkeeping and the data generator hook
 * @param row     row for the checkAndMutate precondition (Put/Delete only)
 * @param cf      column family for the precondition
 * @param q       qualifier for the precondition
 * @param v       expected value for the precondition
 */
public void mutate(Table table, Mutation m, long keyBase, byte[] row, byte[] cf, byte[] q, byte[] v) {
  long start = EnvironmentEdgeManager.currentTime();
  try {
    // Give the data generator a chance to transform or replace the mutation.
    m = dataGenerator.beforeMutate(keyBase, m);
    if (m instanceof Increment) {
      table.increment((Increment) m);
    } else if (m instanceof Append) {
      table.append((Append) m);
    } else if (m instanceof Put) {
      table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenPut((Put) m);
    } else if (m instanceof Delete) {
      table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenDelete((Delete) m);
    } else {
      throw new IllegalArgumentException("unsupported mutation " + m.getClass().getSimpleName());
    }
    totalOpTimeMs.addAndGet(EnvironmentEdgeManager.currentTime() - start);
  } catch (IOException e) {
    failedKeySet.add(keyBase);
    // RetriesExhaustedWithDetailsException already aggregates per-action causes;
    // for anything else, stringify the full stack trace.
    // (Removed dead code: a StringWriter/PrintWriter stack dump whose output
    // was discarded — exceptionInfo always came from stringifyException.)
    String exceptionInfo;
    if (e instanceof RetriesExhaustedWithDetailsException) {
      exceptionInfo = ((RetriesExhaustedWithDetailsException) e).getExhaustiveDescription();
    } else {
      exceptionInfo = StringUtils.stringifyException(e);
    }
    LOG.error("Failed to mutate: " + keyBase + " after "
      + (EnvironmentEdgeManager.currentTime() - start) + "ms; region information: "
      + getRegionDebugInfoSafe(table, m.getRow()) + "; errors: " + exceptionInfo);
  }
}
use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
Example from class TestAtomicOperation, method testIncrementWithNonExistingFamily().
/**
 * An Increment touching both an existing family (fam1) and a family the region
 * does not have (fam2) must fail atomically: the operation throws
 * NoSuchColumnFamilyException and neither column is written.
 */
@Test
public void testIncrementWithNonExistingFamily() throws IOException {
  // Region is created with fam1 only; fam2 is intentionally missing.
  initHRegion(tableName, name.getMethodName(), fam1);
  final Increment inc = new Increment(row);
  inc.addColumn(fam1, qual1, 1);
  inc.addColumn(fam2, qual2, 1);
  inc.setDurability(Durability.ASYNC_WAL);
  try {
    region.increment(inc, HConstants.NO_NONCE, HConstants.NO_NONCE);
    // Bug fix: the original silently passed when no exception was thrown at all.
    fail("Increment operation should fail with NoSuchColumnFamilyException.");
  } catch (NoSuchColumnFamilyException e) {
    // Expected path — verify atomicity: neither column must have been written.
    final Get g = new Get(row);
    final Result result = region.get(g);
    assertEquals(null, result.getValue(fam1, qual1));
    assertEquals(null, result.getValue(fam2, qual2));
  } catch (Exception e) {
    fail("Increment operation should fail with NoSuchColumnFamilyException.");
  }
}
use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
Example from class TestDurability, method testIncrement().
/**
 * Verifies that every increment against a region appends exactly one WAL entry —
 * regardless of how many columns it touches or whether the delta is zero — and
 * that the returned Result reflects the running per-column totals.
 */
@Test
public void testIncrement() throws Exception {
  byte[] incRow = Bytes.toBytes("row1");
  byte[] colA = Bytes.toBytes("col1");
  byte[] colB = Bytes.toBytes("col2");
  byte[] colC = Bytes.toBytes("col3");
  // Set up a fresh region backed by its own WAL.
  WALFactory wals = new WALFactory(CONF,
    ServerName.valueOf("TestIncrement", 16010, EnvironmentEdgeManager.currentTime()).toString());
  HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
  WAL wal = region.getWAL();

  // Zero-delta increment on colA: value stays 0, still one WAL append.
  Increment inc = new Increment(incRow);
  inc.addColumn(FAMILY, colA, 0);
  Result result = region.increment(inc);
  assertEquals(1, result.size());
  assertEquals(0, Bytes.toLong(result.getValue(FAMILY, colA)));
  verifyWALCount(wals, wal, 1);

  // Increment colA by 1: total becomes 1, one more WAL append.
  inc = new Increment(incRow);
  inc.addColumn(FAMILY, colA, 1);
  result = region.increment(inc);
  assertEquals(1, result.size());
  assertEquals(1, Bytes.toLong(result.getValue(FAMILY, colA)));
  verifyWALCount(wals, wal, 2);

  // Another zero-delta on colA: total unchanged, one more WAL append.
  inc = new Increment(incRow);
  inc.addColumn(FAMILY, colA, 0);
  result = region.increment(inc);
  assertEquals(1, result.size());
  assertEquals(1, Bytes.toLong(result.getValue(FAMILY, colA)));
  verifyWALCount(wals, wal, 3);

  // Zero deltas on three columns at once: still a single WAL append.
  inc = new Increment(incRow);
  inc.addColumn(FAMILY, colA, 0);
  inc.addColumn(FAMILY, colB, 0);
  inc.addColumn(FAMILY, colC, 0);
  result = region.increment(inc);
  assertEquals(3, result.size());
  assertEquals(1, Bytes.toLong(result.getValue(FAMILY, colA)));
  assertEquals(0, Bytes.toLong(result.getValue(FAMILY, colB)));
  assertEquals(0, Bytes.toLong(result.getValue(FAMILY, colC)));
  verifyWALCount(wals, wal, 4);

  // Non-zero deltas on all three columns: totals accumulate, one WAL append.
  inc = new Increment(incRow);
  inc.addColumn(FAMILY, colA, 5);
  inc.addColumn(FAMILY, colB, 4);
  inc.addColumn(FAMILY, colC, 3);
  result = region.increment(inc);
  assertEquals(3, result.size());
  assertEquals(6, Bytes.toLong(result.getValue(FAMILY, colA)));
  assertEquals(4, Bytes.toLong(result.getValue(FAMILY, colB)));
  assertEquals(3, Bytes.toLong(result.getValue(FAMILY, colC)));
  verifyWALCount(wals, wal, 5);
}
use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
Example from class TestDurability, method testIncrementWithReturnResultsSetToFalse().
/**
 * When {@code setReturnResults(false)} is set on an Increment, the region
 * should not return the incremented values; the Result must be empty.
 */
@Test
public void testIncrementWithReturnResultsSetToFalse() throws Exception {
  byte[] incRow = Bytes.toBytes("row1");
  byte[] col = Bytes.toBytes("col1");
  // Set up a fresh region backed by its own WAL.
  WALFactory wals = new WALFactory(CONF,
    ServerName.valueOf("testIncrementWithReturnResultsSetToFalse", 16010,
      EnvironmentEdgeManager.currentTime()).toString());
  HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
  Increment inc = new Increment(incRow);
  inc.setReturnResults(false);
  inc.addColumn(FAMILY, col, 1);
  Result result = region.increment(inc);
  assertTrue(result.isEmpty());
}
use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
Example from class ThriftUtilities, method incrementFromThrift().
/**
 * From a {@link TIncrement} create an {@link Increment}.
 * @param tincrement the Thrift version of an increment
 * @return an increment that the {@link TIncrement} represented, or {@code null}
 *         if the Thrift column does not split into a family/qualifier pair
 */
public static Increment incrementFromThrift(TIncrement tincrement) {
  Increment inc = new Increment(tincrement.getRow());
  byte[][] familyAndQualifier = CellUtil.parseColumn(tincrement.getColumn());
  if (familyAndQualifier.length != 2) {
    // Malformed column spec: preserve the existing null-return contract.
    return null;
  }
  // Note: getAmmount() is the (misspelled) Thrift-generated accessor name.
  inc.addColumn(familyAndQualifier[0], familyAndQualifier[1], tincrement.getAmmount());
  return inc;
}
Aggregations