
Example 31 with RowMutations

Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.

Class TestHRegion, method testMutateRowInParallel.

@Test
public void testMutateRowInParallel() throws Exception {
    final int numReaderThreads = 100;
    final CountDownLatch latch = new CountDownLatch(numReaderThreads);
    final byte[] row = Bytes.toBytes("row");
    final byte[] q1 = Bytes.toBytes("q1");
    final byte[] q2 = Bytes.toBytes("q2");
    final byte[] q3 = Bytes.toBytes("q3");
    final byte[] q4 = Bytes.toBytes("q4");
    final String v1 = "v1";
    final String v2 = "v2";
    // We need to ensure the timestamp of the delete operation is more than the previous one
    final AtomicLong deleteTimestamp = new AtomicLong();
    region = initHRegion(tableName, method, CONF, fam1);
    // Initial values
    region.batchMutate(new Mutation[] { new Put(row)
        .addColumn(fam1, q1, Bytes.toBytes(v1))
        .addColumn(fam1, q2, deleteTimestamp.getAndIncrement(), Bytes.toBytes(v2))
        .addColumn(fam1, q3, Bytes.toBytes(1L))
        .addColumn(fam1, q4, Bytes.toBytes("a")) });
    final AtomicReference<AssertionError> assertionError = new AtomicReference<>();
    // Writer thread
    Thread writerThread = new Thread(() -> {
        try {
            while (true) {
                // If all the reader threads finish, then stop the writer thread
                if (latch.await(0, TimeUnit.MILLISECONDS)) {
                    return;
                }
                // Execute the mutations. This should be done atomically
                region.mutateRow(new RowMutations(row).add(Arrays.asList(
                    new Put(row).addColumn(fam1, q1, Bytes.toBytes(v2)),
                    new Delete(row).addColumns(fam1, q2, deleteTimestamp.getAndIncrement()),
                    new Increment(row).addColumn(fam1, q3, 1L),
                    new Append(row).addColumn(fam1, q4, Bytes.toBytes("b")))));
                // We need to ensure the timestamps of the Increment/Append operations are more than the
                // previous ones
                Result result = region.get(new Get(row).addColumn(fam1, q3).addColumn(fam1, q4));
                long tsIncrement = result.getColumnLatestCell(fam1, q3).getTimestamp();
                long tsAppend = result.getColumnLatestCell(fam1, q4).getTimestamp();
                // Put the initial values
                region.batchMutate(new Mutation[] { new Put(row)
                    .addColumn(fam1, q1, Bytes.toBytes(v1))
                    .addColumn(fam1, q2, deleteTimestamp.getAndIncrement(), Bytes.toBytes(v2))
                    .addColumn(fam1, q3, tsIncrement + 1, Bytes.toBytes(1L))
                    .addColumn(fam1, q4, tsAppend + 1, Bytes.toBytes("a")) });
            }
        } catch (Exception e) {
            assertionError.set(new AssertionError(e));
        }
    });
    writerThread.start();
    // Reader threads
    for (int i = 0; i < numReaderThreads; i++) {
        new Thread(() -> {
            try {
                for (int j = 0; j < 10000; j++) {
                    // Verify the values
                    Result result = region.get(new Get(row));
                    // The values should be equal to either the initial values or the values after
                    // executing the mutations
                    String q1Value = Bytes.toString(result.getValue(fam1, q1));
                    if (v1.equals(q1Value)) {
                        assertEquals(v2, Bytes.toString(result.getValue(fam1, q2)));
                        assertEquals(1L, Bytes.toLong(result.getValue(fam1, q3)));
                        assertEquals("a", Bytes.toString(result.getValue(fam1, q4)));
                    } else if (v2.equals(q1Value)) {
                        assertNull(Bytes.toString(result.getValue(fam1, q2)));
                        assertEquals(2L, Bytes.toLong(result.getValue(fam1, q3)));
                        assertEquals("ab", Bytes.toString(result.getValue(fam1, q4)));
                    } else {
                        fail("the qualifier " + Bytes.toString(q1) + " should be " + v1 + " or " + v2 + ", but " + q1Value);
                    }
                }
            } catch (Exception e) {
                assertionError.set(new AssertionError(e));
            } catch (AssertionError e) {
                assertionError.set(e);
            }
            latch.countDown();
        }).start();
    }
    writerThread.join();
    if (assertionError.get() != null) {
        throw assertionError.get();
    }
}
Also used: Delete(org.apache.hadoop.hbase.client.Delete), AtomicReference(java.util.concurrent.atomic.AtomicReference), ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString), ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString), CountDownLatch(java.util.concurrent.CountDownLatch), Put(org.apache.hadoop.hbase.client.Put), FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException), RegionTooBusyException(org.apache.hadoop.hbase.RegionTooBusyException), InterruptedIOException(java.io.InterruptedIOException), IOException(java.io.IOException), NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException), DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException), DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException), ExpectedException(org.junit.rules.ExpectedException), RepeatingTestThread(org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread), TestThread(org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread), RowMutations(org.apache.hadoop.hbase.client.RowMutations), CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult), Result(org.apache.hadoop.hbase.client.Result), AtomicLong(java.util.concurrent.atomic.AtomicLong), Append(org.apache.hadoop.hbase.client.Append), Increment(org.apache.hadoop.hbase.client.Increment), Get(org.apache.hadoop.hbase.client.Get), Test(org.junit.Test)
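
The test above verifies at the region level that the Put, Delete, Increment, and Append packed into a RowMutations are applied as one atomic unit even under concurrent readers. For reference, the same atomic multi-operation update can be issued through the client API with Table.mutateRow. The following is a minimal, self-contained sketch; the table name "testtable", the family "fam1", and the qualifiers and values are illustrative placeholders, not taken from the test.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowMutationsClientSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        byte[] row = Bytes.toBytes("row");
        byte[] fam1 = Bytes.toBytes("fam1");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("testtable"))) {
            // All four mutations are applied to the row as one atomic unit,
            // which is the property the region-level test above verifies.
            RowMutations mutations = new RowMutations(row).add(Arrays.asList(
                new Put(row).addColumn(fam1, Bytes.toBytes("q1"), Bytes.toBytes("v2")),
                new Delete(row).addColumns(fam1, Bytes.toBytes("q2")),
                new Increment(row).addColumn(fam1, Bytes.toBytes("q3"), 1L),
                new Append(row).addColumn(fam1, Bytes.toBytes("q4"), Bytes.toBytes("b"))));
            table.mutateRow(mutations);
        }
    }
}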

Example 32 with RowMutations

Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.

Class TestHRegion, method testCheckAndRowMutateTimestampsAreMonotonic.

@Test
public void testCheckAndRowMutateTimestampsAreMonotonic() throws IOException {
    region = initHRegion(tableName, method, CONF, fam1);
    ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
    EnvironmentEdgeManager.injectEdge(edge);
    edge.setValue(10);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.addColumn(fam1, qual1, qual1);
    region.put(p);
    Result result = region.get(new Get(row));
    Cell c = result.getColumnLatestCell(fam1, qual1);
    assertNotNull(c);
    assertEquals(10L, c.getTimestamp());
    // clock goes back
    edge.setValue(1);
    p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.addColumn(fam1, qual1, qual2);
    RowMutations rm = new RowMutations(row);
    rm.add(p);
    assertTrue(region.checkAndRowMutate(row, fam1, qual1, CompareOperator.EQUAL, new BinaryComparator(qual1), rm));
    result = region.get(new Get(row));
    c = result.getColumnLatestCell(fam1, qual1);
    assertEquals(10L, c.getTimestamp());
    LOG.info("c value " + Bytes.toStringBinary(c.getValueArray(), c.getValueOffset(), c.getValueLength()));
    assertTrue(Bytes.equals(c.getValueArray(), c.getValueOffset(), c.getValueLength(), qual2, 0, qual2.length));
}
Also used: Get(org.apache.hadoop.hbase.client.Get), Cell(org.apache.hadoop.hbase.Cell), ManualEnvironmentEdge(org.apache.hadoop.hbase.util.ManualEnvironmentEdge), Put(org.apache.hadoop.hbase.client.Put), BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator), CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult), Result(org.apache.hadoop.hbase.client.Result), RowMutations(org.apache.hadoop.hbase.client.RowMutations), Test(org.junit.Test)
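
The test above drives HRegion.checkAndRowMutate directly; a client-side counterpart is Table.checkAndMutate with a CheckAndMutate request wrapping the RowMutations. The following is a minimal sketch, assuming an open Table handle (for example as in the sketch after Example 31) and the usual org.apache.hadoop.hbase.client and java.io.IOException imports; the row, family, and qualifier names are placeholders.

// Sketch only: applies the Put atomically, but only if fam1:qual1 currently
// holds the value qual1, mirroring the check in the test above.
static boolean checkThenRowMutate(Table table, byte[] row, byte[] fam1,
    byte[] qual1, byte[] qual2) throws IOException {
    CheckAndMutateResult result = table.checkAndMutate(
        CheckAndMutate.newBuilder(row)
            .ifEquals(fam1, qual1, qual1)
            .build(new RowMutations(row)
                .add((Mutation) new Put(row).addColumn(fam1, qual1, qual2))));
    return result.isSuccess();
}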

Example 33 with RowMutations

Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.

Class TestHRegion, method testCheckAndMutate_WithFiltersAndTimeRange.

@Test
@Deprecated
public void testCheckAndMutate_WithFiltersAndTimeRange() throws Throwable {
    final byte[] FAMILY = Bytes.toBytes("fam");
    // Setting up region
    this.region = initHRegion(tableName, method, CONF, FAMILY);
    // Put with specifying the timestamp
    region.put(new Put(row).addColumn(FAMILY, Bytes.toBytes("A"), 100, Bytes.toBytes("a")));
    // Put with success
    boolean ok = region.checkAndMutate(row,
        new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, Bytes.toBytes("a")),
        TimeRange.between(0, 101),
        new Put(row).addColumn(FAMILY, Bytes.toBytes("B"), Bytes.toBytes("b")));
    assertTrue(ok);
    Result result = region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("B")));
    assertEquals("b", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("B"))));
    // Put with failure
    ok = region.checkAndMutate(row,
        new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, Bytes.toBytes("a")),
        TimeRange.between(0, 100),
        new Put(row).addColumn(FAMILY, Bytes.toBytes("C"), Bytes.toBytes("c")));
    assertFalse(ok);
    assertTrue(region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("C"))).isEmpty());
    // Mutate with success
    ok = region.checkAndRowMutate(row,
        new SingleColumnValueFilter(FAMILY, Bytes.toBytes("A"), CompareOperator.EQUAL, Bytes.toBytes("a")),
        TimeRange.between(0, 101),
        new RowMutations(row)
            .add((Mutation) new Put(row).addColumn(FAMILY, Bytes.toBytes("D"), Bytes.toBytes("d")))
            .add((Mutation) new Delete(row).addColumns(FAMILY, Bytes.toBytes("A"))));
    assertTrue(ok);
    result = region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("D")));
    assertEquals("d", Bytes.toString(result.getValue(FAMILY, Bytes.toBytes("D"))));
    assertTrue(region.get(new Get(row).addColumn(FAMILY, Bytes.toBytes("A"))).isEmpty());
}
Also used: Delete(org.apache.hadoop.hbase.client.Delete), SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter), Get(org.apache.hadoop.hbase.client.Get), Mutation(org.apache.hadoop.hbase.client.Mutation), Put(org.apache.hadoop.hbase.client.Put), CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult), Result(org.apache.hadoop.hbase.client.Result), RowMutations(org.apache.hadoop.hbase.client.RowMutations), Test(org.junit.Test)
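
The deprecated region-level overloads used above take the filter and TimeRange as separate arguments; on the client the same condition is expressed through the CheckAndMutate builder. A minimal sketch follows, under the same assumptions as the earlier sketches (an open Table handle, placeholder names, and the relevant SingleColumnValueFilter, CompareOperator, and TimeRange imports).

// Sketch only: the check passes if the filter matches a cell whose timestamp
// falls within the given time range; only then is the Put applied.
static boolean checkAndPutWithFilterAndTimeRange(Table table, byte[] row,
    byte[] family) throws IOException {
    CheckAndMutateResult result = table.checkAndMutate(
        CheckAndMutate.newBuilder(row)
            .ifMatches(new SingleColumnValueFilter(family, Bytes.toBytes("A"),
                CompareOperator.EQUAL, Bytes.toBytes("a")))
            .timeRange(TimeRange.between(0, 101))
            .build(new Put(row).addColumn(family, Bytes.toBytes("B"), Bytes.toBytes("b"))));
    return result.isSuccess();
}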

Example 34 with RowMutations

Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.

Class TestHRegion, method testCheckAndMutate_wrongRow.

@Test
@Deprecated
public void testCheckAndMutate_wrongRow() throws Throwable {
    final byte[] wrongRow = Bytes.toBytes("wrongRow");
    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);
    try {
        region.checkAndMutate(row, fam1, qual1, CompareOperator.EQUAL, new BinaryComparator(value1), new Put(wrongRow).addColumn(fam1, qual1, value1));
        fail("should throw DoNotRetryIOException");
    } catch (DoNotRetryIOException e) {
        assertEquals("The row of the action <wrongRow> doesn't match the original one <rowA>", e.getMessage());
    }
    try {
        region.checkAndMutate(row, new SingleColumnValueFilter(fam1, qual1, CompareOperator.EQUAL, value1), new Put(wrongRow).addColumn(fam1, qual1, value1));
        fail("should throw DoNotRetryIOException");
    } catch (DoNotRetryIOException e) {
        assertEquals("The row of the action <wrongRow> doesn't match the original one <rowA>", e.getMessage());
    }
    try {
        region.checkAndRowMutate(row, fam1, qual1, CompareOperator.EQUAL, new BinaryComparator(value1),
            new RowMutations(wrongRow)
                .add((Mutation) new Put(wrongRow).addColumn(fam1, qual1, value1))
                .add((Mutation) new Delete(wrongRow).addColumns(fam1, qual2)));
        fail("should throw DoNotRetryIOException");
    } catch (DoNotRetryIOException e) {
        assertEquals("The row of the action <wrongRow> doesn't match the original one <rowA>", e.getMessage());
    }
    try {
        region.checkAndRowMutate(row, new SingleColumnValueFilter(fam1, qual1, CompareOperator.EQUAL, value1),
            new RowMutations(wrongRow)
                .add((Mutation) new Put(wrongRow).addColumn(fam1, qual1, value1))
                .add((Mutation) new Delete(wrongRow).addColumns(fam1, qual2)));
        fail("should throw DoNotRetryIOException");
    } catch (DoNotRetryIOException e) {
        assertEquals("The row of the action <wrongRow> doesn't match the original one <rowA>", e.getMessage());
    }
}
Also used: Delete(org.apache.hadoop.hbase.client.Delete), SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter), DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException), Mutation(org.apache.hadoop.hbase.client.Mutation), BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator), Put(org.apache.hadoop.hbase.client.Put), RowMutations(org.apache.hadoop.hbase.client.RowMutations), Test(org.junit.Test)
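
All four cases above assert the same rule: the checked row and the row targeted by the RowMutations must be identical, otherwise the call is rejected with DoNotRetryIOException. The sketch below shows the valid shape of such a request on the client side, under the same assumptions as the earlier sketches; the variable names are placeholders.

// Sketch only: the check and every mutation target the same row. Using a
// different row anywhere, as the test above does deliberately, gets the call
// rejected instead of silently mutating the wrong row.
static void checkThenMutateSameRow(Table table, byte[] row, byte[] fam1,
    byte[] qual1, byte[] qual2, byte[] value1) throws IOException {
    RowMutations sameRow = new RowMutations(row)
        .add((Mutation) new Put(row).addColumn(fam1, qual1, value1))
        .add((Mutation) new Delete(row).addColumns(fam1, qual2));
    table.checkAndMutate(CheckAndMutate.newBuilder(row)
        .ifEquals(fam1, qual1, value1)
        .build(sameRow));
}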

Example 35 with RowMutations

Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.

Class TestHRegion, method testCheckAndIncrementAndAppend.

@Test
public void testCheckAndIncrementAndAppend() throws Throwable {
    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);
    // CheckAndMutate with Increment and Append
    CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(fam1, qual)
        .build(new RowMutations(row)
            .add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L))
            .add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a"))));
    CheckAndMutateResult result = region.checkAndMutate(checkAndMutate);
    assertTrue(result.isSuccess());
    assertEquals(1L, Bytes.toLong(result.getResult().getValue(fam1, qual1)));
    assertEquals("a", Bytes.toString(result.getResult().getValue(fam1, qual2)));
    Result r = region.get(new Get(row));
    assertEquals(1L, Bytes.toLong(r.getValue(fam1, qual1)));
    assertEquals("a", Bytes.toString(r.getValue(fam1, qual2)));
    // Set return results to false
    checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(fam1, qual)
        .build(new RowMutations(row)
            .add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L).setReturnResults(false))
            .add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a")).setReturnResults(false)));
    result = region.checkAndMutate(checkAndMutate);
    assertTrue(result.isSuccess());
    assertNull(result.getResult().getValue(fam1, qual1));
    assertNull(result.getResult().getValue(fam1, qual2));
    r = region.get(new Get(row));
    assertEquals(2L, Bytes.toLong(r.getValue(fam1, qual1)));
    assertEquals("aa", Bytes.toString(r.getValue(fam1, qual2)));
    checkAndMutate = CheckAndMutate.newBuilder(row).ifNotExists(fam1, qual)
        .build(new RowMutations(row)
            .add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L))
            .add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a")).setReturnResults(false)));
    result = region.checkAndMutate(checkAndMutate);
    assertTrue(result.isSuccess());
    assertEquals(3L, Bytes.toLong(result.getResult().getValue(fam1, qual1)));
    assertNull(result.getResult().getValue(fam1, qual2));
    r = region.get(new Get(row));
    assertEquals(3L, Bytes.toLong(r.getValue(fam1, qual1)));
    assertEquals("aaa", Bytes.toString(r.getValue(fam1, qual2)));
}
Also used: Append(org.apache.hadoop.hbase.client.Append), CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult), Increment(org.apache.hadoop.hbase.client.Increment), Get(org.apache.hadoop.hbase.client.Get), CheckAndMutate(org.apache.hadoop.hbase.client.CheckAndMutate), Mutation(org.apache.hadoop.hbase.client.Mutation), RowMutations(org.apache.hadoop.hbase.client.RowMutations), Result(org.apache.hadoop.hbase.client.Result), Test(org.junit.Test)
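
The same ifNotExists plus Increment/Append pattern is available on the client through Table.checkAndMutate. A minimal sketch follows, under the same assumptions as the earlier sketches; the names are placeholders, and the comment about setReturnResults reflects the behavior the test above asserts.

// Sketch only: mutates the row only if fam1:qual has no cell yet.
static void checkAndIncrementAndAppend(Table table, byte[] row, byte[] fam1,
    byte[] qual, byte[] qual1, byte[] qual2) throws IOException {
    CheckAndMutateResult result = table.checkAndMutate(
        CheckAndMutate.newBuilder(row)
            .ifNotExists(fam1, qual)
            .build(new RowMutations(row)
                .add((Mutation) new Increment(row).addColumn(fam1, qual1, 1L))
                .add((Mutation) new Append(row).addColumn(fam1, qual2, Bytes.toBytes("a")))));
    if (result.isSuccess()) {
        // The returned Result carries the new counter and appended value,
        // unless setReturnResults(false) was set on the Increment/Append.
        long counter = Bytes.toLong(result.getResult().getValue(fam1, qual1));
        String appended = Bytes.toString(result.getResult().getValue(fam1, qual2));
        System.out.println(counter + " " + appended);
    }
}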

Aggregations

RowMutations (org.apache.hadoop.hbase.client.RowMutations): 36 usages
Put (org.apache.hadoop.hbase.client.Put): 28 usages
Test (org.junit.Test): 19 usages
Delete (org.apache.hadoop.hbase.client.Delete): 18 usages
Get (org.apache.hadoop.hbase.client.Get): 18 usages
Result (org.apache.hadoop.hbase.client.Result): 14 usages
Append (org.apache.hadoop.hbase.client.Append): 10 usages
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 10 usages
Increment (org.apache.hadoop.hbase.client.Increment): 9 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 9 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 8 usages
Table (org.apache.hadoop.hbase.client.Table): 8 usages
TableName (org.apache.hadoop.hbase.TableName): 5 usages
CheckAndMutate (org.apache.hadoop.hbase.client.CheckAndMutate): 5 usages
SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter): 5 usages
ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString): 4 usages
Action (org.apache.hadoop.hbase.client.Action): 3 usages
BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator): 3 usages
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 3 usages
IOException (java.io.IOException): 2 usages