Example 66 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class TestThriftHBaseServiceHandler, method testAttribute.

@Test
public void testAttribute() throws Exception {
    byte[] rowName = Bytes.toBytes("testAttribute");
    byte[] attributeKey = Bytes.toBytes("attribute1");
    byte[] attributeValue = Bytes.toBytes("value1");
    Map<ByteBuffer, ByteBuffer> attributes = new HashMap<>();
    attributes.put(wrap(attributeKey), wrap(attributeValue));
    // The attribute must survive the TGet -> Get conversion
    TGet tGet = new TGet(wrap(rowName));
    tGet.setAttributes(attributes);
    Get get = getFromThrift(tGet);
    assertArrayEquals(attributeValue, get.getAttribute("attribute1"));
    // ... the TPut -> Put conversion
    List<TColumnValue> columnValues = new ArrayList<>(1);
    columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
    TPut tPut = new TPut(wrap(rowName), columnValues);
    tPut.setAttributes(attributes);
    Put put = putFromThrift(tPut);
    assertArrayEquals(attributeValue, put.getAttribute("attribute1"));
    // ... the TScan -> Scan conversion
    TScan tScan = new TScan();
    tScan.setAttributes(attributes);
    Scan scan = scanFromThrift(tScan);
    assertArrayEquals(attributeValue, scan.getAttribute("attribute1"));
    // ... the TIncrement -> Increment conversion
    List<TColumnIncrement> incrementColumns = new ArrayList<>(1);
    incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
    TIncrement tIncrement = new TIncrement(wrap(rowName), incrementColumns);
    tIncrement.setAttributes(attributes);
    Increment increment = incrementFromThrift(tIncrement);
    assertArrayEquals(attributeValue, increment.getAttribute("attribute1"));
    // ... and the TDelete -> Delete conversion
    TDelete tDelete = new TDelete(wrap(rowName));
    tDelete.setAttributes(attributes);
    Delete delete = deleteFromThrift(tDelete);
    assertArrayEquals(attributeValue, delete.getAttribute("attribute1"));
}
Also used : ByteBuffer(java.nio.ByteBuffer) ArrayList(java.util.ArrayList) HashMap(java.util.HashMap) Delete(org.apache.hadoop.hbase.client.Delete) Get(org.apache.hadoop.hbase.client.Get) Increment(org.apache.hadoop.hbase.client.Increment) Put(org.apache.hadoop.hbase.client.Put) Scan(org.apache.hadoop.hbase.client.Scan) TColumnIncrement(org.apache.hadoop.hbase.thrift2.generated.TColumnIncrement) TColumnValue(org.apache.hadoop.hbase.thrift2.generated.TColumnValue) TDelete(org.apache.hadoop.hbase.thrift2.generated.TDelete) TGet(org.apache.hadoop.hbase.thrift2.generated.TGet) TIncrement(org.apache.hadoop.hbase.thrift2.generated.TIncrement) TPut(org.apache.hadoop.hbase.thrift2.generated.TPut) TScan(org.apache.hadoop.hbase.thrift2.generated.TScan) Test(org.junit.Test)
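
For comparison, attributes can also be set directly on a plain client-side Increment, since Increment inherits setAttribute from OperationWithAttributes. A minimal sketch, assuming the familyAname/qualifierAname byte[] constants from the test and a connected Table named table (the table is an assumption here):

Increment increment = new Increment(Bytes.toBytes("testAttribute"));
increment.addColumn(familyAname, qualifierAname, 1L);
// setAttribute is inherited from OperationWithAttributes and takes a String key
increment.setAttribute("attribute1", Bytes.toBytes("value1"));
table.increment(increment);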

Example 67 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class TestVisibilityLabels, method testLabelsWithIncrement.

@Test
public void testLabelsWithIncrement() throws Throwable {
    TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    try (Table table = TEST_UTIL.createTable(tableName, fam)) {
        byte[] row1 = Bytes.toBytes("row1");
        byte[] val = Bytes.toBytes(1L);
        // Write a cell that requires both SECRET and CONFIDENTIAL to be visible
        Put put = new Put(row1);
        put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, val);
        put.setCellVisibility(new CellVisibility(SECRET + " & " + CONFIDENTIAL));
        table.put(put);
        // With only the SECRET authorization, the cell must not be visible
        Get get = new Get(row1);
        get.setAuthorizations(new Authorizations(SECRET));
        Result result = table.get(get);
        assertTrue(result.isEmpty());
        // An increment without a visibility expression still leaves the result hidden
        table.incrementColumnValue(row1, fam, qual, 2L);
        result = table.get(get);
        assertTrue(result.isEmpty());
        // An Increment carrying a matching visibility expression makes the result visible
        Increment increment = new Increment(row1);
        increment.addColumn(fam, qual, 2L);
        increment.setCellVisibility(new CellVisibility(SECRET));
        table.increment(increment);
        result = table.get(get);
        assertFalse(result.isEmpty());
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) Get(org.apache.hadoop.hbase.client.Get) Increment(org.apache.hadoop.hbase.client.Increment) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Test(org.junit.Test)
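
Once the visibility-tagged increment has been applied, the counter can be read back from the visible Result. A short follow-up sketch using the variables above (the exact counter value depends on how the server combined the earlier increments, so it is not asserted here):

long counter = Bytes.toLong(result.getValue(fam, qual));
// counter holds the value of the latest cell visible under the SECRET authorization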

Example 68 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class ThriftHBaseServiceHandler, method increment.

@Override
public void increment(TIncrement tincrement) throws IOError, TException {
    if (tincrement.getRow().length == 0 || tincrement.getTable().length == 0) {
        throw new TException("Must supply a table and a row key; can't increment");
    }
    // When coalescing is enabled, queue the increment instead of applying it immediately
    if (conf.getBoolean(COALESCE_INC_KEY, false)) {
        this.coalescer.queueIncrement(tincrement);
        return;
    }
    Table table = null;
    try {
        table = getTable(tincrement.getTable());
        // Convert the Thrift TIncrement into a client Increment and apply it
        Increment inc = ThriftUtilities.incrementFromThrift(tincrement);
        table.increment(inc);
    } catch (IOException e) {
        LOG.warn(e.getMessage(), e);
        throw getIOError(e);
    } finally {
        closeTable(table);
    }
}
Also used : TException(org.apache.thrift.TException) Table(org.apache.hadoop.hbase.client.Table) Increment(org.apache.hadoop.hbase.client.Increment) TIncrement(org.apache.hadoop.hbase.thrift.generated.TIncrement) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException)
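
The coalescing branch batches increments to hot counters before they reach the region server. A minimal sketch of enabling it programmatically; the property name is an assumption here and should be verified against the COALESCE_INC_KEY constant in your HBase release:

Configuration conf = HBaseConfiguration.create();
// Assumed key for COALESCE_INC_KEY; check the constant in your HBase version
conf.setBoolean("hbase.regionserver.thrift.coalesceIncrement", true);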

Example 69 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class TestHRegion, method testMutateRowInParallel.

@Test
public void testMutateRowInParallel() throws Exception {
    final int numReaderThreads = 100;
    final CountDownLatch latch = new CountDownLatch(numReaderThreads);
    final byte[] row = Bytes.toBytes("row");
    final byte[] q1 = Bytes.toBytes("q1");
    final byte[] q2 = Bytes.toBytes("q2");
    final byte[] q3 = Bytes.toBytes("q3");
    final byte[] q4 = Bytes.toBytes("q4");
    final String v1 = "v1";
    final String v2 = "v2";
    // We need to ensure the timestamp of the delete operation is more than the previous one
    final AtomicLong deleteTimestamp = new AtomicLong();
    region = initHRegion(tableName, method, CONF, fam1);
    // Initial values
    region.batchMutate(new Mutation[] { new Put(row)
        .addColumn(fam1, q1, Bytes.toBytes(v1))
        .addColumn(fam1, q2, deleteTimestamp.getAndIncrement(), Bytes.toBytes(v2))
        .addColumn(fam1, q3, Bytes.toBytes(1L))
        .addColumn(fam1, q4, Bytes.toBytes("a")) });
    final AtomicReference<AssertionError> assertionError = new AtomicReference<>();
    // Writer thread
    Thread writerThread = new Thread(() -> {
        try {
            while (true) {
                // If all the reader threads finish, then stop the writer thread
                if (latch.await(0, TimeUnit.MILLISECONDS)) {
                    return;
                }
                // Execute the mutations. This should be done atomically
                region.mutateRow(new RowMutations(row).add(Arrays.asList(
                    new Put(row).addColumn(fam1, q1, Bytes.toBytes(v2)),
                    new Delete(row).addColumns(fam1, q2, deleteTimestamp.getAndIncrement()),
                    new Increment(row).addColumn(fam1, q3, 1L),
                    new Append(row).addColumn(fam1, q4, Bytes.toBytes("b")))));
                // We need to ensure the timestamps of the Increment/Append operations are more than the
                // previous ones
                Result result = region.get(new Get(row).addColumn(fam1, q3).addColumn(fam1, q4));
                long tsIncrement = result.getColumnLatestCell(fam1, q3).getTimestamp();
                long tsAppend = result.getColumnLatestCell(fam1, q4).getTimestamp();
                // Put the initial values
                region.batchMutate(new Mutation[] { new Put(row)
                    .addColumn(fam1, q1, Bytes.toBytes(v1))
                    .addColumn(fam1, q2, deleteTimestamp.getAndIncrement(), Bytes.toBytes(v2))
                    .addColumn(fam1, q3, tsIncrement + 1, Bytes.toBytes(1L))
                    .addColumn(fam1, q4, tsAppend + 1, Bytes.toBytes("a")) });
            }
        } catch (Exception e) {
            assertionError.set(new AssertionError(e));
        }
    });
    writerThread.start();
    // Reader threads
    for (int i = 0; i < numReaderThreads; i++) {
        new Thread(() -> {
            try {
                for (int j = 0; j < 10000; j++) {
                    // Verify the values
                    Result result = region.get(new Get(row));
                    // The values should be equals to either the initial values or the values after
                    // executing the mutations
                    String q1Value = Bytes.toString(result.getValue(fam1, q1));
                    if (v1.equals(q1Value)) {
                        assertEquals(v2, Bytes.toString(result.getValue(fam1, q2)));
                        assertEquals(1L, Bytes.toLong(result.getValue(fam1, q3)));
                        assertEquals("a", Bytes.toString(result.getValue(fam1, q4)));
                    } else if (v2.equals(q1Value)) {
                        assertNull(Bytes.toString(result.getValue(fam1, q2)));
                        assertEquals(2L, Bytes.toLong(result.getValue(fam1, q3)));
                        assertEquals("ab", Bytes.toString(result.getValue(fam1, q4)));
                    } else {
                        fail("the qualifier " + Bytes.toString(q1) + " should be " + v1 + " or " + v2 + ", but " + q1Value);
                    }
                }
            } catch (Exception e) {
                assertionError.set(new AssertionError(e));
            } catch (AssertionError e) {
                assertionError.set(e);
            }
            latch.countDown();
        }).start();
    }
    writerThread.join();
    if (assertionError.get() != null) {
        throw assertionError.get();
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) CountDownLatch(java.util.concurrent.CountDownLatch) Put(org.apache.hadoop.hbase.client.Put) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) RegionTooBusyException(org.apache.hadoop.hbase.RegionTooBusyException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) ExpectedException(org.junit.rules.ExpectedException) RepeatingTestThread(org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread) TestThread(org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread) RowMutations(org.apache.hadoop.hbase.client.RowMutations) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) Result(org.apache.hadoop.hbase.client.Result) AtomicLong(java.util.concurrent.atomic.AtomicLong) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Get(org.apache.hadoop.hbase.client.Get) Test(org.junit.Test)
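
The same atomic single-row batch is available through the client API. A minimal sketch, assuming a connected Table named table, an HBase version whose mutateRow accepts Increment and Append (as the test above exercises region-side), and the row/family/qualifier constants from the test:

RowMutations mutations = new RowMutations(row);
mutations.add(Arrays.asList(
    new Put(row).addColumn(fam1, q1, Bytes.toBytes("v2")),
    new Increment(row).addColumn(fam1, q3, 1L),
    new Append(row).addColumn(fam1, q4, Bytes.toBytes("b"))));
// All mutations in the batch are applied atomically to the single row
table.mutateRow(mutations);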

Example 70 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

From the class TestHRegion, method testCheckAndMutate_wrongMutationType.

@Test
@Deprecated
public void testCheckAndMutate_wrongMutationType() throws Throwable {
    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);
    // The deprecated checkAndMutate overloads accept only Put and Delete,
    // so an Increment must be rejected with DoNotRetryIOException
    try {
        region.checkAndMutate(row, fam1, qual1, CompareOperator.EQUAL, new BinaryComparator(value1), new Increment(row).addColumn(fam1, qual1, 1));
        fail("should throw DoNotRetryIOException");
    } catch (DoNotRetryIOException e) {
        assertEquals("Unsupported mutate type: INCREMENT", e.getMessage());
    }
    try {
        region.checkAndMutate(row, new SingleColumnValueFilter(fam1, qual1, CompareOperator.EQUAL, value1), new Increment(row).addColumn(fam1, qual1, 1));
        fail("should throw DoNotRetryIOException");
    } catch (DoNotRetryIOException e) {
        assertEquals("Unsupported mutate type: INCREMENT", e.getMessage());
    }
}
Also used : SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Increment(org.apache.hadoop.hbase.client.Increment) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator) Test(org.junit.Test)
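
On versions where the deprecated overloads reject Increment, the CheckAndMutate builder (HBase 2.4+) is the supported path for conditional increments. A minimal sketch, assuming a connected Table named table and the row/family/qualifier/value constants from the test:

CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
    .ifEquals(fam1, qual1, value1)
    .build(new Increment(row).addColumn(fam1, qual1, 1L));
CheckAndMutateResult result = table.checkAndMutate(checkAndMutate);
if (result.isSuccess()) {
    // The condition matched and the increment was applied
}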

Aggregations

Increment (org.apache.hadoop.hbase.client.Increment): 81
Test (org.junit.Test): 42
Put (org.apache.hadoop.hbase.client.Put): 31
Append (org.apache.hadoop.hbase.client.Append): 25
Result (org.apache.hadoop.hbase.client.Result): 25
Delete (org.apache.hadoop.hbase.client.Delete): 21
Get (org.apache.hadoop.hbase.client.Get): 19
IOException (java.io.IOException): 16
TableName (org.apache.hadoop.hbase.TableName): 15
Table (org.apache.hadoop.hbase.client.Table): 15
ArrayList (java.util.ArrayList): 14
Cell (org.apache.hadoop.hbase.Cell): 11
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 11
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 9
Mutation (org.apache.hadoop.hbase.client.Mutation): 9
RowMutations (org.apache.hadoop.hbase.client.RowMutations): 9
List (java.util.List): 8
Map (java.util.Map): 8
Scan (org.apache.hadoop.hbase.client.Scan): 7
KeyValue (org.apache.hadoop.hbase.KeyValue): 5