Search in sources :

Example 21 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class MultiThreadedUpdater, the method mutate.

/**
 * Applies a single mutation to the given table, timing the operation and recording failures.
 * <p>
 * {@link Increment} and {@link Append} are applied directly; {@link Put} and {@link Delete}
 * are applied via checkAndMutate, conditioned on column {@code cf:q} of {@code row} equaling
 * {@code v}. Any other mutation type is a programming error.
 *
 * @param table   the table to mutate
 * @param m       the mutation to apply (may be replaced by {@code dataGenerator.beforeMutate})
 * @param keyBase key used for failure bookkeeping
 * @param row     row for the checkAndMutate condition
 * @param cf      column family for the condition
 * @param q       qualifier for the condition
 * @param v       expected value for the condition
 * @throws IllegalArgumentException if {@code m} is not an Increment, Append, Put, or Delete
 */
public void mutate(Table table, Mutation m, long keyBase, byte[] row, byte[] cf, byte[] q, byte[] v) {
    long start = EnvironmentEdgeManager.currentTime();
    try {
        // Give the data generator a chance to transform/replace the mutation first.
        m = dataGenerator.beforeMutate(keyBase, m);
        if (m instanceof Increment) {
            table.increment((Increment) m);
        } else if (m instanceof Append) {
            table.append((Append) m);
        } else if (m instanceof Put) {
            table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenPut((Put) m);
        } else if (m instanceof Delete) {
            table.checkAndMutate(row, cf).qualifier(q).ifEquals(v).thenDelete((Delete) m);
        } else {
            throw new IllegalArgumentException("unsupported mutation " + m.getClass().getSimpleName());
        }
        // Only successful operations contribute to the aggregate op time.
        totalOpTimeMs.addAndGet(EnvironmentEdgeManager.currentTime() - start);
    } catch (IOException e) {
        failedKeySet.add(keyBase);
        String exceptionInfo;
        if (e instanceof RetriesExhaustedWithDetailsException) {
            // Aggregated multi-action failure: the exhaustive description lists per-action causes.
            exceptionInfo = ((RetriesExhaustedWithDetailsException) e).getExhaustiveDescription();
        } else {
            // stringifyException already captures the full stack trace as a String.
            exceptionInfo = StringUtils.stringifyException(e);
        }
        LOG.error("Failed to mutate: " + keyBase + " after " + (EnvironmentEdgeManager.currentTime() - start) + "ms; region information: " + getRegionDebugInfoSafe(table, m.getRow()) + "; errors: " + exceptionInfo);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Append(org.apache.hadoop.hbase.client.Append) RetriesExhaustedWithDetailsException(org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException) StringWriter(java.io.StringWriter) Increment(org.apache.hadoop.hbase.client.Increment) IOException(java.io.IOException) Put(org.apache.hadoop.hbase.client.Put) PrintWriter(java.io.PrintWriter)

Example 22 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class TestAtomicOperation, the method testIncrementWithNonExistingFamily.

/**
 * Verifies that an increment touching a column family that does not exist in the region fails
 * with {@link NoSuchColumnFamilyException}, and that the increment is atomically rolled back:
 * neither the valid nor the invalid column must be written.
 */
@Test
public void testIncrementWithNonExistingFamily() throws IOException {
    // Region is created with fam1 only; fam2 is intentionally missing.
    initHRegion(tableName, name.getMethodName(), fam1);
    final Increment inc = new Increment(row);
    inc.addColumn(fam1, qual1, 1);
    inc.addColumn(fam2, qual2, 1);
    inc.setDurability(Durability.ASYNC_WAL);
    try {
        region.increment(inc, HConstants.NO_NONCE, HConstants.NO_NONCE);
        // Without this fail(), the test would pass silently if no exception were thrown.
        fail("Increment operation should fail with NoSuchColumnFamilyException.");
    } catch (NoSuchColumnFamilyException e) {
        // Expected. Confirm atomicity: no partial write of the valid fam1 column either.
        final Get g = new Get(row);
        final Result result = region.get(g);
        assertEquals(null, result.getValue(fam1, qual1));
        assertEquals(null, result.getValue(fam2, qual2));
    }
    // Any other exception type propagates and fails the test with its real cause,
    // rather than being masked by a broad catch (Exception).
}
Also used : Increment(org.apache.hadoop.hbase.client.Increment) Get(org.apache.hadoop.hbase.client.Get) IOException(java.io.IOException) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)

Example 23 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class TestDurability, the method testIncrement.

@Test
public void testIncrement() throws Exception {
    byte[] rowKey = Bytes.toBytes("row1");
    byte[] colA = Bytes.toBytes("col1");
    byte[] colB = Bytes.toBytes("col2");
    byte[] colC = Bytes.toBytes("col3");
    // Set up a region backed by a fresh WAL so the WAL append count starts at zero.
    WALFactory walFactory = new WALFactory(CONF, ServerName.valueOf("TestIncrement", 16010, EnvironmentEdgeManager.currentTime()).toString());
    HRegion testRegion = createHRegion(walFactory, Durability.USE_DEFAULT);
    WAL regionWal = testRegion.getWAL();

    // Increment colA by 0: still one WAL append.
    Increment increment = new Increment(rowKey);
    increment.addColumn(FAMILY, colA, 0);
    Result result = testRegion.increment(increment);
    assertEquals(1, result.size());
    assertEquals(0, Bytes.toLong(result.getValue(FAMILY, colA)));
    verifyWALCount(walFactory, regionWal, 1);

    // Increment colA by 1: one more WAL append, value becomes 1.
    increment = new Increment(rowKey);
    increment.addColumn(FAMILY, colA, 1);
    result = testRegion.increment(increment);
    assertEquals(1, result.size());
    assertEquals(1, Bytes.toLong(result.getValue(FAMILY, colA)));
    verifyWALCount(walFactory, regionWal, 2);

    // Increment colA by 0 again: value unchanged, but a WAL append still happens.
    increment = new Increment(rowKey);
    increment.addColumn(FAMILY, colA, 0);
    result = testRegion.increment(increment);
    assertEquals(1, result.size());
    assertEquals(1, Bytes.toLong(result.getValue(FAMILY, colA)));
    verifyWALCount(walFactory, regionWal, 3);

    // Increment all three columns by 0 in a single mutation: exactly one WAL append.
    increment = new Increment(rowKey);
    increment.addColumn(FAMILY, colA, 0);
    increment.addColumn(FAMILY, colB, 0);
    increment.addColumn(FAMILY, colC, 0);
    result = testRegion.increment(increment);
    assertEquals(3, result.size());
    assertEquals(1, Bytes.toLong(result.getValue(FAMILY, colA)));
    assertEquals(0, Bytes.toLong(result.getValue(FAMILY, colB)));
    assertEquals(0, Bytes.toLong(result.getValue(FAMILY, colC)));
    verifyWALCount(walFactory, regionWal, 4);

    // Increment colA by 5, colB by 4, colC by 3: one WAL append, values accumulate.
    increment = new Increment(rowKey);
    increment.addColumn(FAMILY, colA, 5);
    increment.addColumn(FAMILY, colB, 4);
    increment.addColumn(FAMILY, colC, 3);
    result = testRegion.increment(increment);
    assertEquals(3, result.size());
    assertEquals(6, Bytes.toLong(result.getValue(FAMILY, colA)));
    assertEquals(4, Bytes.toLong(result.getValue(FAMILY, colB)));
    assertEquals(3, Bytes.toLong(result.getValue(FAMILY, colC)));
    verifyWALCount(walFactory, regionWal, 5);
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion) WAL(org.apache.hadoop.hbase.wal.WAL) Increment(org.apache.hadoop.hbase.client.Increment) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)

Example 24 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class TestDurability, the method testIncrementWithReturnResultsSetToFalse.

/**
 * Test that when returnResults is set to false on an increment, the operation does not return
 * the updated cell values: the returned {@link Result} is empty.
 */
@Test
public void testIncrementWithReturnResultsSetToFalse() throws Exception {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] col1 = Bytes.toBytes("col1");
    // Setting up region
    WALFactory wals = new WALFactory(CONF, ServerName.valueOf("testIncrementWithReturnResultsSetToFalse", 16010, EnvironmentEdgeManager.currentTime()).toString());
    HRegion region = createHRegion(wals, Durability.USE_DEFAULT);
    Increment inc1 = new Increment(row1);
    // Suppress the result payload; the increment itself is still applied.
    inc1.setReturnResults(false);
    inc1.addColumn(FAMILY, col1, 1);
    Result res = region.increment(inc1);
    // With returnResults=false the Result carries no cells.
    assertTrue(res.isEmpty());
}
Also used : HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Increment(org.apache.hadoop.hbase.client.Increment) WALFactory(org.apache.hadoop.hbase.wal.WALFactory) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)

Example 25 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class ThriftUtilities, the method incrementFromThrift.

/**
 * Converts a Thrift {@link TIncrement} into a client {@link Increment}.
 *
 * @param tincrement the Thrift version of an increment
 * @return the equivalent {@link Increment}, or {@code null} if the Thrift column does not
 *         parse into exactly a family and a qualifier
 */
public static Increment incrementFromThrift(TIncrement tincrement) {
    Increment increment = new Increment(tincrement.getRow());
    // A Thrift column is a single byte[] of the form "family:qualifier"; split it.
    byte[][] familyAndQualifier = CellUtil.parseColumn(tincrement.getColumn());
    if (familyAndQualifier.length == 2) {
        increment.addColumn(familyAndQualifier[0], familyAndQualifier[1], tincrement.getAmmount());
        return increment;
    }
    // Malformed column spec: signal failure to the caller with null.
    return null;
}
Also used : Increment(org.apache.hadoop.hbase.client.Increment) TIncrement(org.apache.hadoop.hbase.thrift.generated.TIncrement)

Aggregations

Increment (org.apache.hadoop.hbase.client.Increment)81 Test (org.junit.Test)42 Put (org.apache.hadoop.hbase.client.Put)31 Append (org.apache.hadoop.hbase.client.Append)25 Result (org.apache.hadoop.hbase.client.Result)25 Delete (org.apache.hadoop.hbase.client.Delete)21 Get (org.apache.hadoop.hbase.client.Get)19 IOException (java.io.IOException)16 TableName (org.apache.hadoop.hbase.TableName)15 Table (org.apache.hadoop.hbase.client.Table)15 ArrayList (java.util.ArrayList)14 Cell (org.apache.hadoop.hbase.Cell)11 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)11 CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult)9 Mutation (org.apache.hadoop.hbase.client.Mutation)9 RowMutations (org.apache.hadoop.hbase.client.RowMutations)9 List (java.util.List)8 Map (java.util.Map)8 Scan (org.apache.hadoop.hbase.client.Scan)7 KeyValue (org.apache.hadoop.hbase.KeyValue)5