Example 11 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From the class HRegion, method doPreBatchMutateHook:

private void doPreBatchMutateHook(BatchOperation<?> batchOp) throws IOException {
    /* Run coprocessor pre hook outside of locks to avoid deadlock */
    WALEdit walEdit = new WALEdit();
    if (coprocessorHost != null) {
        for (int i = 0; i < batchOp.operations.length; i++) {
            Mutation m = batchOp.getMutation(i);
            if (m instanceof Put) {
                if (coprocessorHost.prePut((Put) m, walEdit, m.getDurability())) {
                    // pre hook says skip this Put
                    // mark as success and skip in doMiniBatchMutation
                    batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
                }
            } else if (m instanceof Delete) {
                Delete curDel = (Delete) m;
                if (curDel.getFamilyCellMap().isEmpty()) {
                    // handle deleting a row case
                    prepareDelete(curDel);
                }
                if (coprocessorHost.preDelete(curDel, walEdit, m.getDurability())) {
                    // pre hook says skip this Delete
                    // mark as success and skip in doMiniBatchMutation
                    batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
                }
            } else {
                // In case of passing Append mutations along with the Puts and Deletes in batchMutate
                // mark the operation return code as failure so that it will not be considered in
                // the doMiniBatchMutation
                batchOp.retCodeDetails[i] = new OperationStatus(OperationStatusCode.FAILURE, "Put/Delete mutations only supported in batchMutate() now");
            }
            if (!walEdit.isEmpty()) {
                batchOp.walEditsFromCoprocessors[i] = walEdit;
                walEdit = new WALEdit();
            }
        }
    }
}
Also used: Delete(org.apache.hadoop.hbase.client.Delete) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) Mutation(org.apache.hadoop.hbase.client.Mutation) Put(org.apache.hadoop.hbase.client.Put)
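
The hook above only skips a mutation when the coprocessor's pre-hook returns true. Below is a minimal sketch of a RegionObserver that triggers that path, assuming the HBase 1.x-style BaseRegionObserver API; the class name and the "blocked-" row prefix are purely illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative observer: bypassing in prePut makes coprocessorHost.prePut(...) return true,
// which doPreBatchMutateHook above turns into OperationStatus.SUCCESS for that slot.
public class SkipBlockedRowsObserver extends BaseRegionObserver {

    private static final byte[] BLOCKED_PREFIX = Bytes.toBytes("blocked-");

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> ctx, Put put,
                       WALEdit edit, Durability durability) throws IOException {
        // Skip (bypass) any Put whose row key starts with the illustrative prefix.
        if (Bytes.startsWith(put.getRow(), BLOCKED_PREFIX)) {
            ctx.bypass();
        }
    }
}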

Example 12 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project storm by apache.

From the class HBaseBolt, method execute:

@Override
public void execute(Tuple tuple) {
    try {
        if (batchHelper.shouldHandle(tuple)) {
            byte[] rowKey = this.mapper.rowKey(tuple);
            ColumnList cols = this.mapper.columns(tuple);
            List<Mutation> mutations = hBaseClient.constructMutationReq(rowKey, cols, writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
            batchMutations.addAll(mutations);
            batchHelper.addBatch(tuple);
        }
        if (batchHelper.shouldFlush()) {
            this.hBaseClient.batchMutate(batchMutations);
            LOG.debug("acknowledging tuples after batchMutate");
            batchHelper.ack();
            batchMutations.clear();
        }
    } catch (Exception e) {
        batchHelper.fail(e);
        batchMutations.clear();
    }
}
Also used: ColumnList(org.apache.storm.hbase.common.ColumnList) Mutation(org.apache.hadoop.hbase.client.Mutation)
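
For readers without the storm-hbase helpers (ColumnList, HBaseClient, BatchHelper), here is a self-contained sketch of the kind of Mutation batch the bolt accumulates, applied with the plain HBase client; the table name, column family, and values are illustrative, and this is not the storm-hbase implementation.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MutationBatchSketch {
    public static void main(String[] args) throws Exception {
        boolean writeToWAL = true;
        List<Mutation> batch = new ArrayList<>();

        // Build one Put per tuple, mirroring the bolt's writeToWAL durability switch.
        Put put = new Put(Bytes.toBytes("row-1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("word"), Bytes.toBytes("apple"));
        put.setDurability(writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
        batch.add(put);

        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("WordCount"))) {
            // Apply the whole list as one client-side batch, roughly what the bolt's
            // hBaseClient.batchMutate(batchMutations) call is expected to do.
            Object[] results = new Object[batch.size()];
            table.batch(batch, results);
        }
    }
}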

Example 13 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From the class TestHRegion, method testBatchMutateWithWrongRegionException:

@Test
public void testBatchMutateWithWrongRegionException() throws Exception {
    final byte[] a = Bytes.toBytes("a");
    final byte[] b = Bytes.toBytes("b");
    // exclusive end key of the region [a, c)
    final byte[] c = Bytes.toBytes("c");
    int prevLockTimeout = CONF.getInt("hbase.rowlock.wait.duration", 30000);
    CONF.setInt("hbase.rowlock.wait.duration", 1000);
    final HRegion region = initHRegion(tableName, a, c, method, CONF, false, fam1);
    Mutation[] mutations = new Mutation[] {
        new Put(a).addImmutable(fam1, null, null),
        // this Put's row key is outside the region boundary [a, c)
        new Put(c).addImmutable(fam1, null, null),
        new Put(b).addImmutable(fam1, null, null) };
    OperationStatus[] status = region.batchMutate(mutations);
    assertEquals(status[0].getOperationStatusCode(), OperationStatusCode.SUCCESS);
    assertEquals(status[1].getOperationStatusCode(), OperationStatusCode.SANITY_CHECK_FAILURE);
    assertEquals(status[2].getOperationStatusCode(), OperationStatusCode.SUCCESS);
    // test with a row lock held for a long time
    final CountDownLatch obtainedRowLock = new CountDownLatch(1);
    ExecutorService exec = Executors.newFixedThreadPool(2);
    Future<Void> f1 = exec.submit(new Callable<Void>() {

        @Override
        public Void call() throws Exception {
            LOG.info("Acquiring row lock");
            RowLock rl = region.getRowLock(b);
            obtainedRowLock.countDown();
            LOG.info("Waiting for 5 seconds before releasing lock");
            Threads.sleep(5000);
            LOG.info("Releasing row lock");
            rl.release();
            return null;
        }
    });
    obtainedRowLock.await(30, TimeUnit.SECONDS);
    Future<Void> f2 = exec.submit(new Callable<Void>() {

        @Override
        public Void call() throws Exception {
            Mutation[] mutations = new Mutation[] { new Put(a).addImmutable(fam1, null, null), new Put(b).addImmutable(fam1, null, null) };
            // this will wait for the row lock, and it will eventually succeed
            OperationStatus[] status = region.batchMutate(mutations);
            assertEquals(status[0].getOperationStatusCode(), OperationStatusCode.SUCCESS);
            assertEquals(status[1].getOperationStatusCode(), OperationStatusCode.SUCCESS);
            return null;
        }
    });
    f1.get();
    f2.get();
    CONF.setInt("hbase.rowlock.wait.duration", prevLockTimeout);
}
Also used: CountDownLatch(java.util.concurrent.CountDownLatch) Put(org.apache.hadoop.hbase.client.Put) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) RegionTooBusyException(org.apache.hadoop.hbase.RegionTooBusyException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) ExecutorService(java.util.concurrent.ExecutorService) Mutation(org.apache.hadoop.hbase.client.Mutation) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock) Test(org.junit.Test)
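
The test reads the per-mutation OperationStatus array that batchMutate returns. The following is a hypothetical helper, not part of HBase, sketching how a caller might collect the indexes that did not succeed so they can be logged or retried selectively.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.regionserver.OperationStatus;

public final class BatchStatusInspector {

    private BatchStatusInspector() {
    }

    /** Returns the indexes of mutations whose status is anything other than SUCCESS. */
    public static List<Integer> failedIndexes(OperationStatus[] statuses) {
        List<Integer> failed = new ArrayList<>();
        for (int i = 0; i < statuses.length; i++) {
            if (statuses[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) {
                failed.add(i);
            }
        }
        return failed;
    }
}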

Example 14 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project hbase by apache.

From the class TestHRegion, method testMemstoreSizeAccountingWithFailedPostBatchMutate:

@Test
public void testMemstoreSizeAccountingWithFailedPostBatchMutate() throws IOException {
    String testName = "testMemstoreSizeAccountingWithFailedPostBatchMutate";
    FileSystem fs = FileSystem.get(CONF);
    Path rootDir = new Path(dir + testName);
    FSHLog hLog = new FSHLog(fs, rootDir, testName, CONF);
    HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES);
    Store store = region.getStore(COLUMN_FAMILY_BYTES);
    assertEquals(0, region.getMemstoreSize());
    // Put one value
    byte[] value = Bytes.toBytes(method);
    Put put = new Put(value);
    put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
    region.put(put);
    long onePutSize = region.getMemstoreSize();
    assertTrue(onePutSize > 0);
    RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
    doThrow(new IOException()).when(mockedCPHost).postBatchMutate(Mockito.<MiniBatchOperationInProgress<Mutation>>any());
    region.setCoprocessorHost(mockedCPHost);
    put = new Put(value);
    put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("dfg"), value);
    try {
        region.put(put);
        fail("Should have failed with IOException");
    } catch (IOException expected) {
        // expected: the mocked coprocessor host's postBatchMutate throws
    }
    long expectedSize = onePutSize * 2;
    assertEquals("memstoreSize should be incremented", expectedSize, region.getMemstoreSize());
    assertEquals("flushable size should be incremented", expectedSize, store.getSizeToFlush().getDataSize());
    region.setCoprocessorHost(null);
    HBaseTestingUtility.closeRegionAndWAL(region);
}
Also used: Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) FaultyFileSystem(org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) Mutation(org.apache.hadoop.hbase.client.Mutation) Put(org.apache.hadoop.hbase.client.Put) FSHLog(org.apache.hadoop.hbase.regionserver.wal.FSHLog) Test(org.junit.Test)
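
The stubbing line above uses Mockito's explicit type witness, Mockito.<MiniBatchOperationInProgress<Mutation>>any(), because the mocked method takes a generic parameter. Here is a minimal sketch of the same pattern against a made-up interface, so the syntax can be tried in isolation; the interface and exception message are illustrative.

import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;

import java.io.IOException;
import java.util.List;
import org.mockito.Mockito;

public class TypedAnySketch {

    // Hypothetical collaborator with a generic parameter, standing in for
    // RegionCoprocessorHost#postBatchMutate(MiniBatchOperationInProgress<Mutation>).
    interface BatchSink {
        void accept(List<String> batch) throws IOException;
    }

    public static void main(String[] args) throws Exception {
        BatchSink sink = mock(BatchSink.class);
        // The explicit type witness <List<String>> pins the matcher's type where a bare
        // any() could be ambiguous under older Java/Mockito type inference.
        doThrow(new IOException("simulated postBatchMutate failure"))
            .when(sink).accept(Mockito.<List<String>>any());
    }
}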

Example 15 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project honeycomb by altamiracorp.

From the class MutationFactoryTest, method countRowTypes:

private byte[] countRowTypes(List<? extends Mutation> mutations) {
    // The test relies on the row type being encoded as the first byte of each row key
    // (ordinals 0-8), so the mutations can be tallied by indexing directly on that byte.
    int numRowTypes = 9;
    byte[] rowCounts = new byte[numRowTypes];
    for (Mutation mutation : mutations) {
        rowCounts[mutation.getRow()[0]]++;
    }
    return rowCounts;
}
Also used: Mutation(org.apache.hadoop.hbase.client.Mutation)
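
A tiny self-contained sketch of driving the same tally outside the Honeycomb test suite; the row-key layout (a type ordinal in the first byte) mirrors what countRowTypes assumes, and all names and values are illustrative.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class RowTypeCountSketch {
    public static void main(String[] args) {
        // Hypothetical ordinal for "data" rows; the first row-key byte carries the type.
        byte dataRowType = 2;
        Put p1 = new Put(new byte[] { dataRowType, 0x01 });
        p1.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        Put p2 = new Put(new byte[] { dataRowType, 0x02 });
        p2.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        List<? extends Mutation> mutations = Arrays.asList(p1, p2);

        // Same tally as countRowTypes: index the count array by the first row-key byte.
        byte[] counts = new byte[9];
        for (Mutation m : mutations) {
            counts[m.getRow()[0]]++;
        }
        System.out.println("rows of type " + dataRowType + ": " + counts[dataRowType]);
    }
}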

Aggregations

Mutation (org.apache.hadoop.hbase.client.Mutation): 139 usages
Put (org.apache.hadoop.hbase.client.Put): 53 usages
ArrayList (java.util.ArrayList): 46 usages
IOException (java.io.IOException): 35 usages
Delete (org.apache.hadoop.hbase.client.Delete): 32 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 31 usages
List (java.util.List): 28 usages
Cell (org.apache.hadoop.hbase.Cell): 25 usages
Pair (org.apache.hadoop.hbase.util.Pair): 23 usages
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 23 usages
HashMap (java.util.HashMap): 19 usages
PTable (org.apache.phoenix.schema.PTable): 18 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 17 usages
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 15 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 14 usages
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 14 usages
Test (org.junit.Test): 14 usages
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 13 usages
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 12 usages
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 12 usages