Example 56 with Append

Use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

In class ThriftUtilities, the method appendFromThrift.

/**
 * From a {@link TAppend} create an {@link Append}.
 * @param tappend the Thrift version of an append.
 * @return the {@link Append} that the {@link TAppend} represented.
 */
public static Append appendFromThrift(TAppend tappend) {
    Append append = new Append(tappend.getRow());
    List<ByteBuffer> columns = tappend.getColumns();
    List<ByteBuffer> values = tappend.getValues();
    if (columns.size() != values.size()) {
        throw new IllegalArgumentException("Sizes of columns and values in tappend object do not match");
    }
    int length = columns.size();
    for (int i = 0; i < length; i++) {
        // Each column buffer packs "family:qualifier"; parseColumn splits it in two
        byte[][] famAndQf = CellUtil.parseColumn(getBytes(columns.get(i)));
        append.addColumn(famAndQf[0], famAndQf[1], getBytes(values.get(i)));
    }
    return append;
}
Also used: TAppend (org.apache.hadoop.hbase.thrift.generated.TAppend), Append (org.apache.hadoop.hbase.client.Append), ByteBuffer (java.nio.ByteBuffer)
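
As a usage note, the sketch below is not from the HBase sources; the row, column, and value bytes are hypothetical. It shows how a caller might populate a TAppend and convert it. Each column buffer packs family and qualifier as family:qualifier, which is exactly what CellUtil.parseColumn splits apart above.

// Hypothetical caller-side sketch; assumes java.nio.ByteBuffer,
// java.util.Collections, Bytes, and the Thrift-generated TAppend are imported.
TAppend tappend = new TAppend();
tappend.setRow(Bytes.toBytes("row1"));
// one column packed as "family:qualifier", with a matching value
tappend.setColumns(Collections.singletonList(ByteBuffer.wrap(Bytes.toBytes("cf:q"))));
tappend.setValues(Collections.singletonList(ByteBuffer.wrap(Bytes.toBytes("v"))));
Append append = ThriftUtilities.appendFromThrift(tappend);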

Example 57 with Append

Use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

In class TestHRegion, the method testMutateRowInParallel.

@Test
public void testMutateRowInParallel() throws Exception {
    final int numReaderThreads = 100;
    final CountDownLatch latch = new CountDownLatch(numReaderThreads);
    final byte[] row = Bytes.toBytes("row");
    final byte[] q1 = Bytes.toBytes("q1");
    final byte[] q2 = Bytes.toBytes("q2");
    final byte[] q3 = Bytes.toBytes("q3");
    final byte[] q4 = Bytes.toBytes("q4");
    final String v1 = "v1";
    final String v2 = "v2";
    // We need to ensure the timestamp of the delete operation is more than the previous one
    final AtomicLong deleteTimestamp = new AtomicLong();
    region = initHRegion(tableName, method, CONF, fam1);
    // Initial values
    region.batchMutate(new Mutation[] { new Put(row).addColumn(fam1, q1, Bytes.toBytes(v1)).addColumn(fam1, q2, deleteTimestamp.getAndIncrement(), Bytes.toBytes(v2)).addColumn(fam1, q3, Bytes.toBytes(1L)).addColumn(fam1, q4, Bytes.toBytes("a")) });
    final AtomicReference<AssertionError> assertionError = new AtomicReference<>();
    // Writer thread
    Thread writerThread = new Thread(() -> {
        try {
            while (true) {
                // If all the reader threads finish, then stop the writer thread
                if (latch.await(0, TimeUnit.MILLISECONDS)) {
                    return;
                }
                // Execute the mutations. This should be done atomically
                region.mutateRow(new RowMutations(row).add(Arrays.asList(new Put(row).addColumn(fam1, q1, Bytes.toBytes(v2)), new Delete(row).addColumns(fam1, q2, deleteTimestamp.getAndIncrement()), new Increment(row).addColumn(fam1, q3, 1L), new Append(row).addColumn(fam1, q4, Bytes.toBytes("b")))));
                // We need to ensure the timestamps of the Increment/Append operations are more than the
                // previous ones
                Result result = region.get(new Get(row).addColumn(fam1, q3).addColumn(fam1, q4));
                long tsIncrement = result.getColumnLatestCell(fam1, q3).getTimestamp();
                long tsAppend = result.getColumnLatestCell(fam1, q4).getTimestamp();
                // Put the initial values
                region.batchMutate(new Mutation[] { new Put(row).addColumn(fam1, q1, Bytes.toBytes(v1)).addColumn(fam1, q2, deleteTimestamp.getAndIncrement(), Bytes.toBytes(v2)).addColumn(fam1, q3, tsIncrement + 1, Bytes.toBytes(1L)).addColumn(fam1, q4, tsAppend + 1, Bytes.toBytes("a")) });
            }
        } catch (Exception e) {
            assertionError.set(new AssertionError(e));
        }
    });
    writerThread.start();
    // Reader threads
    for (int i = 0; i < numReaderThreads; i++) {
        new Thread(() -> {
            try {
                for (int j = 0; j < 10000; j++) {
                    // Verify the values
                    Result result = region.get(new Get(row));
                    // The values should be equals to either the initial values or the values after
                    // executing the mutations
                    String q1Value = Bytes.toString(result.getValue(fam1, q1));
                    if (v1.equals(q1Value)) {
                        assertEquals(v2, Bytes.toString(result.getValue(fam1, q2)));
                        assertEquals(1L, Bytes.toLong(result.getValue(fam1, q3)));
                        assertEquals("a", Bytes.toString(result.getValue(fam1, q4)));
                    } else if (v2.equals(q1Value)) {
                        assertNull(Bytes.toString(result.getValue(fam1, q2)));
                        assertEquals(2L, Bytes.toLong(result.getValue(fam1, q3)));
                        assertEquals("ab", Bytes.toString(result.getValue(fam1, q4)));
                    } else {
                        fail("the qualifier " + Bytes.toString(q1) + " should be " + v1 + " or " + v2 + ", but " + q1Value);
                    }
                }
            } catch (Exception e) {
                assertionError.set(new AssertionError(e));
            } catch (AssertionError e) {
                assertionError.set(e);
            }
            latch.countDown();
        }).start();
    }
    writerThread.join();
    if (assertionError.get() != null) {
        throw assertionError.get();
    }
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), AtomicReference (java.util.concurrent.atomic.AtomicReference), ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString), ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString), CountDownLatch (java.util.concurrent.CountDownLatch), Put (org.apache.hadoop.hbase.client.Put), FailedSanityCheckException (org.apache.hadoop.hbase.exceptions.FailedSanityCheckException), RegionTooBusyException (org.apache.hadoop.hbase.RegionTooBusyException), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), NotServingRegionException (org.apache.hadoop.hbase.NotServingRegionException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException), ExpectedException (org.junit.rules.ExpectedException), RepeatingTestThread (org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread), TestThread (org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread), RowMutations (org.apache.hadoop.hbase.client.RowMutations), CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult), Result (org.apache.hadoop.hbase.client.Result), AtomicLong (java.util.concurrent.atomic.AtomicLong), Append (org.apache.hadoop.hbase.client.Append), Increment (org.apache.hadoop.hbase.client.Increment), Get (org.apache.hadoop.hbase.client.Get), Test (org.junit.Test)
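
For reference, the same atomic multi-operation write is available through the public client API. A minimal sketch, assuming an open Connection named conn and a table named "myTable" (both names are hypothetical), with the row, family, and qualifiers from the test above:

// Hypothetical client-side analog of the mutateRow call exercised by the test
try (Table table = conn.getTable(TableName.valueOf("myTable"))) {
    table.mutateRow(new RowMutations(row).add(Arrays.asList(
        new Put(row).addColumn(fam1, q1, Bytes.toBytes("v2")),
        new Delete(row).addColumns(fam1, q2),
        new Increment(row).addColumn(fam1, q3, 1L),
        new Append(row).addColumn(fam1, q4, Bytes.toBytes("b")))));
    // All four mutations are applied atomically under the row lock, which is
    // the property the reader threads in the test verify.
}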

Example 58 with Append

Use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

In class TestHRegion, the method testCheckAndAppend.

@Test
public void testCheckAndAppend() throws Throwable {
    final byte[] FAMILY = Bytes.toBytes("fam");
    // Setting up region
    this.region = initHRegion(tableName, method, CONF, FAMILY);
    region.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")));
    // CheckAndAppend with correct value
    CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a")).build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))));
    assertTrue(res.isSuccess());
    assertEquals("b", Bytes.toString(res.getResult().getValue(FAMILY, Bytes.toBytes("B"))));
    Result result = region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("B")));
    assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B"))));
    // CheckAndAppend with wrong value
    res = region.checkAndMutate(CheckAndMutate.newBuilder(row).ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("b")).build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))));
    assertFalse(res.isSuccess());
    assertNull(res.getResult());
    result = region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("B")));
    assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B"))));
    region.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")));
    // CheckAndAppend with a filter and correct value
    res = region.checkAndMutate(CheckAndMutate.newBuilder(row).ifMatches(new FilterList(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, Bytes.toBytes("a")), new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, Bytes.toBytes("c")))).build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb"))));
    assertTrue(res.isSuccess());
    assertEquals("bbb", Bytes.toString(res.getResult().getValue(FAMILY, Bytes.toBytes("B"))));
    result = region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("B")));
    assertEquals("bbb", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B"))));
    // CheckAndAppend with a filter and wrong value
    res = region.checkAndMutate(CheckAndMutate.newBuilder(row).ifMatches(new FilterList(new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, Bytes.toBytes("b")), new SingleColumnValueFilter(FAMILY, Bytes.toBytes("C"), CompareOperator.EQUAL, Bytes.toBytes("d")))).build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("bb"))));
    assertFalse(res.isSuccess());
    assertNull(res.getResult());
    result = region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("B")));
    assertEquals("bbb", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B"))));
}
Also used: Append (org.apache.hadoop.hbase.client.Append), SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter), CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult), Get (org.apache.hadoop.hbase.client.Get), FilterList (org.apache.hadoop.hbase.filter.FilterList), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), Test (org.junit.Test)
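
The client-side counterpart of the region call is Table.checkAndMutate, available in recent HBase client versions. A minimal sketch, assuming an open Table named table (a hypothetical name) and the row and FAMILY from the test above:

// Append "b" to column B only if column A still holds "a"
CheckAndMutateResult res = table.checkAndMutate(CheckAndMutate.newBuilder(row)
    .ifEquals(FAMILY, Bytes.toBytes("A"), Bytes.toBytes("a"))
    .build(new Append(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b"))));
// On success, res.getResult() carries the post-append value of column B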

Example 59 with Append

Use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

In class TestHRegion, the method testCheckAndIncrementAndAppend.

@Test
public void testCheckAndIncrementAndAppend() throws Throwable {
    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);
    // CheckAndMutate with Increment and Append
    CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(fam1, qual).build(new RowMutations(row).add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L)).add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a"))));
    CheckAndMutateResult result = region.checkAndMutate(checkAndMutate);
    assertTrue(result.isSuccess());
    assertEquals(1L, Bytes.toLong(result.getResult().getValue(fam1, qual1)));
    assertEquals("a", Bytes.toString(result.getResult().getValue(fam1, qual2)));
    Result r = region.get(new Get(row));
    assertEquals(1L, Bytes.toLong(r.getValue(fam1, qual1)));
    assertEquals("a", Bytes.toString(r.getValue(fam1, qual2)));
    // Set return results to false
    checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(fam1, qual).build(new RowMutations(row).add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L).setReturnResults(false)).add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a")).setReturnResults(false)));
    result = region.checkAndMutate(checkAndMutate);
    assertTrue(result.isSuccess());
    assertNull(result.getResult().getValue(fam1, qual1));
    assertNull(result.getResult().getValue(fam1, qual2));
    r = region.get(new Get(row));
    assertEquals(2L, Bytes.toLong(r.getValue(fam1, qual1)));
    assertEquals("aa", Bytes.toString(r.getValue(fam1, qual2)));
    checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(fam1, qual).build(new RowMutations(row).add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L)).add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a")).setReturnResults(false)));
    result = region.checkAndMutate(checkAndMutate);
    assertTrue(result.isSuccess());
    assertEquals(3L, Bytes.toLong(result.getResult().getValue(fam1, qual1)));
    assertNull(result.getResult().getValue(fam1, qual2));
    r = region.get(new Get(row));
    assertEquals(3L, Bytes.toLong(r.getValue(fam1, qual1)));
    assertEquals("aaa", Bytes.toString(r.getValue(fam1, qual2)));
}
Also used: Append (org.apache.hadoop.hbase.client.Append), CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult), Increment (org.apache.hadoop.hbase.client.Increment), Get (org.apache.hadoop.hbase.client.Get), CheckAndMutate (org.apache.hadoop.hbase.client.CheckAndMutate), Mutation (org.apache.hadoop.hbase.client.Mutation), RowMutations (org.apache.hadoop.hbase.client.RowMutations), Result (org.apache.hadoop.hbase.client.Result), Test (org.junit.Test)
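
setReturnResults(false) is not specific to CheckAndMutate; it applies to standalone Increment and Append calls as well. A minimal sketch, assuming an open Table named table (a hypothetical name) and the row, family, and qualifier from the test above:

Append append = new Append(row)
    .addColumn(fam1, qual2, Bytes.toBytes("a"))
    .setReturnResults(false);
Result result = table.append(append);
// The append is still applied server-side, but the new cell value is not
// shipped back, so expect an empty Result (mirroring the assertNull checks above).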

Example 60 with Append

Use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

In class TestHRegion, the method testMutateRow.

@Test
public void testMutateRow() throws Exception {
    final byte[] row = Bytes.toBytes("row");
    final byte[] q1 = Bytes.toBytes("q1");
    final byte[] q2 = Bytes.toBytes("q2");
    final byte[] q3 = Bytes.toBytes("q3");
    final byte[] q4 = Bytes.toBytes("q4");
    final String v1 = "v1";
    region = initHRegion(tableName, method, CONF, fam1);
    // Initial values
    region.batchMutate(new Mutation[] { new Put(row).addColumn(fam1, q2, Bytes.toBytes("toBeDeleted")), new Put(row).addColumn(fam1, q3, Bytes.toBytes(5L)), new Put(row).addColumn(fam1, q4, Bytes.toBytes("a")) });
    // Do mutateRow
    Result result = region.mutateRow(new RowMutations(row).add(Arrays.asList(new Put(row).addColumn(fam1, q1, Bytes.toBytes(v1)), new Delete(row).addColumns(fam1, q2), new Increment(row).addColumn(fam1, q3, 1), new Append(row).addColumn(fam1, q4, Bytes.toBytes("b")))));
    assertNotNull(result);
    assertEquals(6L, Bytes.toLong(result.getValue(fam1, q3)));
    assertEquals("ab", Bytes.toString(result.getValue(fam1, q4)));
    // Verify the value
    result = region.get(new Get(row));
    assertEquals(v1, Bytes.toString(result.getValue(fam1, q1)));
    assertNull(result.getValue(fam1, q2));
    assertEquals(6L, Bytes.toLong(result.getValue(fam1, q3)));
    assertEquals("ab", Bytes.toString(result.getValue(fam1, q4)));
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), Append (org.apache.hadoop.hbase.client.Append), Increment (org.apache.hadoop.hbase.client.Increment), Get (org.apache.hadoop.hbase.client.Get), ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString), ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString), Put (org.apache.hadoop.hbase.client.Put), CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult), Result (org.apache.hadoop.hbase.client.Result), RowMutations (org.apache.hadoop.hbase.client.RowMutations), Test (org.junit.Test)

Aggregations

Append (org.apache.hadoop.hbase.client.Append): 62
Test (org.junit.Test): 31
Result (org.apache.hadoop.hbase.client.Result): 26
Increment (org.apache.hadoop.hbase.client.Increment): 25
Put (org.apache.hadoop.hbase.client.Put): 23
IOException (java.io.IOException): 17
Get (org.apache.hadoop.hbase.client.Get): 17
Delete (org.apache.hadoop.hbase.client.Delete): 16
Table (org.apache.hadoop.hbase.client.Table): 15
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 10
TableName (org.apache.hadoop.hbase.TableName): 10
RowMutations (org.apache.hadoop.hbase.client.RowMutations): 10
Cell (org.apache.hadoop.hbase.Cell): 9
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 8
Mutation (org.apache.hadoop.hbase.client.Mutation): 7
ArrayList (java.util.ArrayList): 5
CheckAndMutate (org.apache.hadoop.hbase.client.CheckAndMutate): 5
MutationProto (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto): 5
ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString): 5
List (java.util.List): 4