Search in sources:

Example 16 with MutationProto

use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto in project hbase by apache.

In class RSRpcServices, method checkAndMutate:

/**
 * Performs an atomic conditional mutation on a single region: converts the protobuf
 * {@code actions} into client {@link Mutation}s, then applies them all-or-nothing,
 * gated by {@code condition}, via {@link HRegion#checkAndMutate}.
 *
 * <p>Each action's cells live in the shared {@code cellScanner}; every successful
 * {@code ProtobufUtil.toX} conversion consumes that action's cells from it. The
 * {@code finally} block skips the cells of any actions that were never converted
 * (e.g. because an earlier action threw), keeping the scanner consistent for the
 * rest of the RPC.
 *
 * @param region                the region the mutations target
 * @param actions               protobuf actions; mutations only — a Get is rejected
 * @param cellScanner           carries the cell payloads for the actions, in order
 * @param condition             the check that must hold for the mutations to apply
 * @param nonceGroup            client nonce group for idempotent retry detection
 * @param spaceQuotaEnforcement space-quota policy checked per mutation
 * @return the check-and-mutate result; {@code success=true} with no result when
 *         there were no mutations to apply
 * @throws IOException if a conversion, quota/size check, or the region operation fails;
 *         DoNotRetryIOException for a Get action or an unknown mutation type
 */
private CheckAndMutateResult checkAndMutate(HRegion region, List<ClientProtos.Action> actions, CellScanner cellScanner, Condition condition, long nonceGroup, ActivePolicyEnforcement spaceQuotaEnforcement) throws IOException {
    // Count of actions whose cells have been fully consumed from cellScanner;
    // the finally block uses it to skip the cells of unprocessed actions.
    int countOfCompleteMutation = 0;
    try {
        if (!region.getRegionInfo().isMetaRegion()) {
            // Give the memstore a chance to free memory before taking on more writes.
            server.getMemStoreFlusher().reclaimMemStoreMemory();
        }
        List<Mutation> mutations = new ArrayList<>();
        long nonce = HConstants.NO_NONCE;
        for (ClientProtos.Action action : actions) {
            if (action.hasGet()) {
                // Only mutations are legal inside a check-and-mutate batch.
                throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + action.getGet());
            }
            MutationProto mutation = action.getMutation();
            MutationType type = mutation.getMutateType();
            switch(type) {
                case PUT:
                    Put put = ProtobufUtil.toPut(mutation, cellScanner);
                    // toPut has consumed this action's cells — safe to count it complete.
                    ++countOfCompleteMutation;
                    checkCellSizeLimit(region, put);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(put);
                    mutations.add(put);
                    break;
                case DELETE:
                    Delete del = ProtobufUtil.toDelete(mutation, cellScanner);
                    ++countOfCompleteMutation;
                    // No cell-size check: deletes carry tombstones, not data cells.
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(del);
                    mutations.add(del);
                    break;
                case INCREMENT:
                    Increment increment = ProtobufUtil.toIncrement(mutation, cellScanner);
                    // Increments are non-idempotent, so honor the client-supplied nonce.
                    nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
                    ++countOfCompleteMutation;
                    checkCellSizeLimit(region, increment);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(increment);
                    mutations.add(increment);
                    break;
                case APPEND:
                    Append append = ProtobufUtil.toAppend(mutation, cellScanner);
                    // Appends are non-idempotent, so honor the client-supplied nonce.
                    nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
                    ++countOfCompleteMutation;
                    checkCellSizeLimit(region, append);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(append);
                    mutations.add(append);
                    break;
                default:
                    throw new DoNotRetryIOException("invalid mutation type : " + type);
            }
        }
        if (mutations.size() == 0) {
            // Nothing to mutate: trivially successful, no result payload.
            return new CheckAndMutateResult(true, null);
        } else {
            CheckAndMutate checkAndMutate = ProtobufUtil.toCheckAndMutate(condition, mutations);
            CheckAndMutateResult result = null;
            if (region.getCoprocessorHost() != null) {
                // A coprocessor may short-circuit the operation by returning a result here.
                result = region.getCoprocessorHost().preCheckAndMutate(checkAndMutate);
            }
            if (result == null) {
                result = region.checkAndMutate(checkAndMutate, nonceGroup, nonce);
                if (region.getCoprocessorHost() != null) {
                    result = region.getCoprocessorHost().postCheckAndMutate(checkAndMutate, result);
                }
            }
            return result;
        }
    } finally {
        // Skip the cells of any actions we never converted so the shared
        // cellScanner stays positioned correctly for the remainder of the RPC,
        // even if the malformed cells are not skipped.
        for (int i = countOfCompleteMutation; i < actions.size(); ++i) {
            skipCellsForMutation(actions.get(i), cellScanner);
        }
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) ArrayList(java.util.ArrayList) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) CheckAndMutate(org.apache.hadoop.hbase.client.CheckAndMutate) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) Put(org.apache.hadoop.hbase.client.Put) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Mutation(org.apache.hadoop.hbase.client.Mutation) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)

Example 17 with MutationProto

use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto in project hbase by apache.

In class TestFromClientSide5, method testMultiRowMutationWithMultipleConditionsWhenConditionsMatch:

@Test
public void testMultiRowMutationWithMultipleConditionsWhenConditionsMatch() throws Exception {
    final TableName tableName = name.getTableName();
    final byte[] ROW1 = Bytes.toBytes("testRow1");
    final byte[] ROW2 = Bytes.toBytes("testRow2");
    final byte[] VALUE1 = Bytes.toBytes("testValue1");
    final byte[] VALUE2 = Bytes.toBytes("testValue2");
    try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) {
        // Seed the row that the delete mutation below will target.
        table.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2));
        // Build a MultiRowMutation request: two puts plus a delete, guarded by
        // two conditions — one asserting ROW is absent, one asserting ROW2
        // currently holds VALUE2. Both hold, so the batch should apply.
        MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT,
            new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)));
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT,
            new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1)));
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(ROW2)));
        builder.addCondition(ProtobufUtil.toCondition(ROW, FAMILY, QUALIFIER, CompareOperator.EQUAL, null, null));
        builder.addCondition(ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2, null));
        // Execute via the MultiRowMutation coprocessor endpoint.
        CoprocessorRpcChannel channel = table.coprocessorService(ROW);
        MultiRowMutationService.BlockingInterface service = MultiRowMutationService.newBlockingStub(channel);
        MutateRowsResponse response = service.mutateRows(null, builder.build());
        // All conditions matched, so every mutation must have been applied.
        assertTrue(response.getProcessed());
        Result result = table.get(new Get(ROW));
        assertEquals(Bytes.toString(VALUE), Bytes.toString(result.getValue(FAMILY, QUALIFIER)));
        result = table.get(new Get(ROW1));
        assertEquals(Bytes.toString(VALUE1), Bytes.toString(result.getValue(FAMILY, QUALIFIER)));
        result = table.get(new Get(ROW2));
        assertTrue(result.isEmpty());
    }
}
Also used : CoprocessorRpcChannel(org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel) MultiRowMutationService(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) TableName(org.apache.hadoop.hbase.TableName) MutateRowsResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse) MutateRowsRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest) Test(org.junit.Test)

Example 18 with MutationProto

use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto in project hbase by apache.

In class TestFromClientSide5, method testMultiRowMutation:

@Test
public void testMultiRowMutation() throws Exception {
    LOG.info("Starting testMultiRowMutation");
    final TableName tableName = name.getTableName();
    final byte[] ROW1 = Bytes.toBytes("testRow1");
    final byte[] ROW2 = Bytes.toBytes("testRow2");
    final byte[] ROW3 = Bytes.toBytes("testRow3");
    try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) {
        // Seed the rows that the delete, increment, and append will operate on.
        table.batch(Arrays.asList(
            new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE),
            new Put(ROW2).addColumn(FAMILY, QUALIFIER, Bytes.toBytes(1L)),
            new Put(ROW3).addColumn(FAMILY, QUALIFIER, VALUE)), new Object[3]);
        // Build an unconditional MultiRowMutation exercising all four mutation types.
        MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT,
            new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)));
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(ROW1)));
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.INCREMENT,
            new Increment(ROW2).addColumn(FAMILY, QUALIFIER, 1L)));
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.APPEND,
            new Append(ROW3).addColumn(FAMILY, QUALIFIER, VALUE)));
        // Execute via the MultiRowMutation coprocessor endpoint.
        CoprocessorRpcChannel channel = table.coprocessorService(ROW);
        MultiRowMutationService.BlockingInterface service = MultiRowMutationService.newBlockingStub(channel);
        MutateRowsResponse response = service.mutateRows(null, builder.build());
        // No conditions were attached, so the batch must have been applied.
        assertTrue(response.getProcessed());
        Result result = table.get(new Get(ROW));
        assertEquals(Bytes.toString(VALUE), Bytes.toString(result.getValue(FAMILY, QUALIFIER)));
        result = table.get(new Get(ROW1));
        assertTrue(result.isEmpty());
        result = table.get(new Get(ROW2));
        assertEquals(2L, Bytes.toLong(result.getValue(FAMILY, QUALIFIER)));
        result = table.get(new Get(ROW3));
        assertEquals(Bytes.toString(VALUE) + Bytes.toString(VALUE),
            Bytes.toString(result.getValue(FAMILY, QUALIFIER)));
    }
}
Also used : CoprocessorRpcChannel(org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel) MultiRowMutationService(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) TableName(org.apache.hadoop.hbase.TableName) MutateRowsResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse) MutateRowsRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest) Test(org.junit.Test)

Example 19 with MutationProto

use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto in project hbase by apache.

In class TestFromClientSide5, method testMultiRowMutationWithSingleConditionWhenConditionNotMatch:

@Test
public void testMultiRowMutationWithSingleConditionWhenConditionNotMatch() throws Exception {
    final TableName tableName = name.getTableName();
    final byte[] ROW1 = Bytes.toBytes("testRow1");
    final byte[] ROW2 = Bytes.toBytes("testRow2");
    final byte[] VALUE1 = Bytes.toBytes("testValue1");
    final byte[] VALUE2 = Bytes.toBytes("testValue2");
    try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) {
        // Seed ROW2 with VALUE2; the condition below checks it against VALUE1 and fails.
        table.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2));
        // Build a MultiRowMutation request (two puts, one delete) guarded by a
        // single condition that does NOT match the stored value.
        MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT,
            new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)));
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT,
            new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1)));
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(ROW2)));
        builder.addCondition(ProtobufUtil.toCondition(ROW2, FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE1, null));
        // Execute via the MultiRowMutation coprocessor endpoint.
        CoprocessorRpcChannel channel = table.coprocessorService(ROW);
        MultiRowMutationService.BlockingInterface service = MultiRowMutationService.newBlockingStub(channel);
        MutateRowsResponse response = service.mutateRows(null, builder.build());
        // The condition failed, so no mutation may have been applied.
        assertFalse(response.getProcessed());
        Result result = table.get(new Get(ROW));
        assertTrue(result.isEmpty());
        result = table.get(new Get(ROW1));
        assertTrue(result.isEmpty());
        result = table.get(new Get(ROW2));
        assertEquals(Bytes.toString(VALUE2), Bytes.toString(result.getValue(FAMILY, QUALIFIER)));
    }
}
Also used : CoprocessorRpcChannel(org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel) MultiRowMutationService(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) TableName(org.apache.hadoop.hbase.TableName) MutateRowsResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse) MutateRowsRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest) Test(org.junit.Test)

Example 20 with MutationProto

use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto in project hbase by apache.

In class TestFromClientSide5, method testMultiRowMutationWithFilterConditionWhenConditionNotMatch:

@Test
public void testMultiRowMutationWithFilterConditionWhenConditionNotMatch() throws Exception {
    final TableName tableName = name.getTableName();
    final byte[] ROW1 = Bytes.toBytes("testRow1");
    final byte[] ROW2 = Bytes.toBytes("testRow2");
    final byte[] QUALIFIER2 = Bytes.toBytes("testQualifier2");
    final byte[] VALUE1 = Bytes.toBytes("testValue1");
    final byte[] VALUE2 = Bytes.toBytes("testValue2");
    final byte[] VALUE3 = Bytes.toBytes("testValue3");
    try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) {
        // Seed ROW2 with two columns; the second filter below expects VALUE2 in
        // QUALIFIER2 but the stored value is VALUE3, so the condition fails.
        table.put(new Put(ROW2).addColumn(FAMILY, QUALIFIER, VALUE2).addColumn(FAMILY, QUALIFIER2, VALUE3));
        // Build a MultiRowMutation request (two puts, one delete) guarded by a
        // filter-based condition that does NOT match.
        MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT,
            new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)));
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT,
            new Put(ROW1).addColumn(FAMILY, QUALIFIER, VALUE1)));
        builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(ROW2)));
        builder.addCondition(ProtobufUtil.toCondition(ROW2, new FilterList(
            new SingleColumnValueFilter(FAMILY, QUALIFIER, CompareOperator.EQUAL, VALUE2),
            new SingleColumnValueFilter(FAMILY, QUALIFIER2, CompareOperator.EQUAL, VALUE2)), null));
        // Execute via the MultiRowMutation coprocessor endpoint.
        CoprocessorRpcChannel channel = table.coprocessorService(ROW);
        MultiRowMutationService.BlockingInterface service = MultiRowMutationService.newBlockingStub(channel);
        MutateRowsResponse response = service.mutateRows(null, builder.build());
        // The filter condition failed, so no mutation may have been applied.
        assertFalse(response.getProcessed());
        Result result = table.get(new Get(ROW));
        assertTrue(result.isEmpty());
        result = table.get(new Get(ROW1));
        assertTrue(result.isEmpty());
        result = table.get(new Get(ROW2));
        assertEquals(Bytes.toString(VALUE2), Bytes.toString(result.getValue(FAMILY, QUALIFIER)));
    }
}
Also used : SingleColumnValueFilter(org.apache.hadoop.hbase.filter.SingleColumnValueFilter) CoprocessorRpcChannel(org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel) MultiRowMutationService(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService) FilterList(org.apache.hadoop.hbase.filter.FilterList) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) TableName(org.apache.hadoop.hbase.TableName) MutateRowsResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse) MutateRowsRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest) Test(org.junit.Test)

Aggregations

MutationProto (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto)27 Test (org.junit.Test)13 TableName (org.apache.hadoop.hbase.TableName)10 Mutation (org.apache.hadoop.hbase.client.Mutation)10 ClientProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)10 IOException (java.io.IOException)9 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)8 Delete (org.apache.hadoop.hbase.client.Delete)8 Put (org.apache.hadoop.hbase.client.Put)8 MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType)8 Append (org.apache.hadoop.hbase.client.Append)7 Increment (org.apache.hadoop.hbase.client.Increment)7 Cell (org.apache.hadoop.hbase.Cell)6 CoprocessorRpcChannel (org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel)6 ColumnValue (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue)6 RegionAction (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction)6 MultiRowMutationService (org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService)6 MutateRowsRequest (org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest)6 MutateRowsResponse (org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse)6 QualifierValue (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue)4