
Example 11 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

the class RequestConverter method buildNoDataRegionActions.

/**
 * Create a protocol buffer multirequest with NO data for a list of actions (data is carried
 * otherwise than via protobuf).  This means it just notes attributes, whether to write the
 * WAL, etc., and the presence in protobuf serves as place holder for the data which is
 * coming along otherwise.  Note that Get is different.  It does not contain 'data' and is always
 * carried by protobuf.  We return references to the data by adding them to the passed in
 * <code>data</code> param.
 * <p> Propagates Actions original index.
 * <p> The passed in multiRequestBuilder will be populated with region actions.
 * @param regionName The region name of the actions.
 * @param actions The actions that are grouped by the same region name.
 * @param cells Place to stuff references to actual data.
 * @param multiRequestBuilder The multiRequestBuilder to be populated with region actions.
 * @param regionActionBuilder regionActionBuilder to be used to build region action.
 * @param actionBuilder actionBuilder to be used to build action.
 * @param mutationBuilder mutationBuilder to be used to build mutation.
 * @param nonceGroup nonceGroup to be applied.
 * @param indexMap Map of created RegionAction to the original index for a
 *   RowMutations/CheckAndMutate within the original list of actions
 * @throws IOException
 */
public static void buildNoDataRegionActions(final byte[] regionName,
        final Iterable<Action> actions, final List<CellScannable> cells,
        final MultiRequest.Builder multiRequestBuilder,
        final RegionAction.Builder regionActionBuilder,
        final ClientProtos.Action.Builder actionBuilder,
        final MutationProto.Builder mutationBuilder, long nonceGroup,
        final Map<Integer, Integer> indexMap) throws IOException {
    regionActionBuilder.clear();
    RegionAction.Builder builder = getRegionActionBuilderWithRegion(regionActionBuilder, regionName);
    ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
    boolean hasNonce = false;
    List<Action> rowMutationsList = new ArrayList<>();
    List<Action> checkAndMutates = new ArrayList<>();
    for (Action action : actions) {
        Row row = action.getAction();
        actionBuilder.clear();
        actionBuilder.setIndex(action.getOriginalIndex());
        mutationBuilder.clear();
        if (row instanceof Get) {
            Get g = (Get) row;
            builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
        } else if (row instanceof Put) {
            buildNoDataRegionAction((Put) row, cells, builder, actionBuilder, mutationBuilder);
        } else if (row instanceof Delete) {
            buildNoDataRegionAction((Delete) row, cells, builder, actionBuilder, mutationBuilder);
        } else if (row instanceof Append) {
            buildNoDataRegionAction((Append) row, cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
            hasNonce = true;
        } else if (row instanceof Increment) {
            buildNoDataRegionAction((Increment) row, cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
            hasNonce = true;
        } else if (row instanceof RegionCoprocessorServiceExec) {
            RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
            // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
            org.apache.hbase.thirdparty.com.google.protobuf.ByteString value =
                org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations
                    .unsafeWrap(exec.getRequest().toByteArray());
            if (cpBuilder == null) {
                cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
            } else {
                cpBuilder.clear();
            }
            builder.addAction(actionBuilder.setServiceCall(cpBuilder
                .setRow(UnsafeByteOperations.unsafeWrap(exec.getRow()))
                .setServiceName(exec.getMethod().getService().getFullName())
                .setMethodName(exec.getMethod().getName())
                .setRequest(value)));
        } else if (row instanceof RowMutations) {
            rowMutationsList.add(action);
        } else if (row instanceof CheckAndMutate) {
            checkAndMutates.add(action);
        } else {
            throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
        }
    }
    if (builder.getActionCount() > 0) {
        multiRequestBuilder.addRegionAction(builder.build());
    }
    // We maintain a map to keep track of this RegionAction and the original Action index.
    for (Action action : rowMutationsList) {
        builder.clear();
        getRegionActionBuilderWithRegion(builder, regionName);
        boolean hasIncrementOrAppend = buildNoDataRegionAction((RowMutations) action.getAction(),
            cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
        if (hasIncrementOrAppend) {
            hasNonce = true;
        }
        builder.setAtomic(true);
        multiRequestBuilder.addRegionAction(builder.build());
        // This rowMutations region action is at (multiRequestBuilder.getRegionActionCount() - 1)
        // in the overall multiRequest.
        indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex());
    }
    // We maintain a map to keep track of this RegionAction and the original Action index.
    for (Action action : checkAndMutates) {
        builder.clear();
        getRegionActionBuilderWithRegion(builder, regionName);
        CheckAndMutate cam = (CheckAndMutate) action.getAction();
        builder.setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(),
            cam.getQualifier(), cam.getCompareOp(), cam.getValue(), cam.getFilter(),
            cam.getTimeRange()));
        if (cam.getAction() instanceof Put) {
            actionBuilder.clear();
            mutationBuilder.clear();
            buildNoDataRegionAction((Put) cam.getAction(), cells, builder, actionBuilder, mutationBuilder);
        } else if (cam.getAction() instanceof Delete) {
            actionBuilder.clear();
            mutationBuilder.clear();
            buildNoDataRegionAction((Delete) cam.getAction(), cells, builder, actionBuilder, mutationBuilder);
        } else if (cam.getAction() instanceof Increment) {
            actionBuilder.clear();
            mutationBuilder.clear();
            buildNoDataRegionAction((Increment) cam.getAction(), cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
            hasNonce = true;
        } else if (cam.getAction() instanceof Append) {
            actionBuilder.clear();
            mutationBuilder.clear();
            buildNoDataRegionAction((Append) cam.getAction(), cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
            hasNonce = true;
        } else if (cam.getAction() instanceof RowMutations) {
            boolean hasIncrementOrAppend = buildNoDataRegionAction((RowMutations) cam.getAction(),
                cells, action.getNonce(), builder, actionBuilder, mutationBuilder);
            if (hasIncrementOrAppend) {
                hasNonce = true;
            }
            builder.setAtomic(true);
        } else {
            throw new DoNotRetryIOException("CheckAndMutate doesn't support " + cam.getAction().getClass().getName());
        }
        multiRequestBuilder.addRegionAction(builder.build());
        // This CheckAndMutate region action is at (multiRequestBuilder.getRegionActionCount() - 1)
        // in the overall multiRequest.
        indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex());
    }
    if (!multiRequestBuilder.hasNonceGroup() && hasNonce) {
        multiRequestBuilder.setNonceGroup(nonceGroup);
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Action(org.apache.hadoop.hbase.client.Action) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) ArrayList(java.util.ArrayList) CheckAndMutate(org.apache.hadoop.hbase.client.CheckAndMutate) Put(org.apache.hadoop.hbase.client.Put) RegionCoprocessorServiceExec(org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec) RowMutations(org.apache.hadoop.hbase.client.RowMutations) Append(org.apache.hadoop.hbase.client.Append) Get(org.apache.hadoop.hbase.client.Get) Increment(org.apache.hadoop.hbase.client.Increment) Row(org.apache.hadoop.hbase.client.Row)
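
For context, here is a hedged sketch of how a caller might drive buildNoDataRegionActions when assembling a multi request for a single region. The driver class and method names are illustrative assumptions, not HBase API; only the buildNoDataRegionActions signature is taken from the code above. The collected cells would then travel to the server as an RPC cell block rather than inside the protobuf message.

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.client.Action;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;

public class MultiRequestSketch {

    // Hypothetical driver: regionName and actions come from the caller's
    // per-region grouping, nonceGroup from the connection's nonce generator.
    static MultiRequest buildForRegion(byte[] regionName, List<Action> actions,
            long nonceGroup) throws IOException {
        // References to the actual cell data accumulate here; they are shipped
        // via the RPC cell block instead of being serialized into the protobuf.
        List<CellScannable> cells = new ArrayList<>();
        // Maps RegionAction position to the original action index for
        // RowMutations/CheckAndMutate, as described in the javadoc above.
        Map<Integer, Integer> indexMap = new HashMap<>();
        MultiRequest.Builder multi = MultiRequest.newBuilder();
        RequestConverter.buildNoDataRegionActions(regionName, actions, cells,
            multi, RegionAction.newBuilder(), ClientProtos.Action.newBuilder(),
            MutationProto.newBuilder(), nonceGroup, indexMap);
        return multi.build();
    }
}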

Example 12 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

the class TestHRegion method testAppendWithReadOnlyTable.

@Test
public void testAppendWithReadOnlyTable() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    this.region = initHRegion(tableName, method, CONF, true, Bytes.toBytes("somefamily"));
    boolean exceptionCaught = false;
    Append append = new Append(Bytes.toBytes("somerow"));
    append.setDurability(Durability.SKIP_WAL);
    append.addColumn(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"), Bytes.toBytes("somevalue"));
    try {
        region.append(append);
    } catch (IOException e) {
        exceptionCaught = true;
    }
    assertTrue(exceptionCaught);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Append(org.apache.hadoop.hbase.client.Append) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Test(org.junit.Test)
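
On JUnit 4.13 or later, the flag-and-catch pattern above can be collapsed with Assert.assertThrows. A hedged equivalent sketch, assuming the same test fixture (requires import static org.junit.Assert.assertThrows):

@Test
public void testAppendWithReadOnlyTableUsingAssertThrows() throws Exception {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    this.region = initHRegion(tableName, method, CONF, true, Bytes.toBytes("somefamily"));
    Append append = new Append(Bytes.toBytes("somerow"));
    append.setDurability(Durability.SKIP_WAL);
    append.addColumn(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"),
        Bytes.toBytes("somevalue"));
    // A read-only region must reject the append with an IOException.
    assertThrows(IOException.class, () -> region.append(append));
}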

Example 13 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

the class TestSyncReplicationStandBy method testStandby.

@Test
public void testStandby() throws Exception {
    MasterFileSystem mfs = UTIL2.getHBaseCluster().getMaster().getMasterFileSystem();
    Path remoteWALDir = getRemoteWALDir(mfs, PEER_ID);
    assertFalse(mfs.getWALFileSystem().exists(remoteWALDir));
    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.STANDBY);
    assertTrue(mfs.getWALFileSystem().exists(remoteWALDir));
    try (Table table = UTIL2.getConnection().getTable(TABLE_NAME)) {
        assertDisallow(table, t -> t.get(new Get(Bytes.toBytes("row"))));
        assertDisallow(table, t -> t.put(new Put(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("row"))));
        assertDisallow(table, t -> t.delete(new Delete(Bytes.toBytes("row"))));
        assertDisallow(table, t -> t.incrementColumnValue(Bytes.toBytes("row"), CF, CQ, 1));
        assertDisallow(table, t -> t.append(new Append(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("row"))));
        assertDisallow(table, t -> t.get(Arrays.asList(new Get(Bytes.toBytes("row")), new Get(Bytes.toBytes("row1")))));
        assertDisallow(table, t -> t.put(Arrays.asList(
            new Put(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("row")),
            new Put(Bytes.toBytes("row1")).addColumn(CF, CQ, Bytes.toBytes("row1")))));
        assertDisallow(table, t -> t.mutateRow(new RowMutations(Bytes.toBytes("row"))
            .add((Mutation) new Put(Bytes.toBytes("row")).addColumn(CF, CQ, Bytes.toBytes("row")))));
    }
    // We should still allow replication writes
    writeAndVerifyReplication(UTIL1, UTIL2, 0, 100);
    // Remove the peers in ACTIVE & STANDBY cluster.
    FileSystem fs2 = REMOTE_WAL_DIR2.getFileSystem(UTIL2.getConfiguration());
    Assert.assertTrue(fs2.exists(getRemoteWALDir(REMOTE_WAL_DIR2, PEER_ID)));
    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID, SyncReplicationState.DOWNGRADE_ACTIVE);
    Assert.assertFalse(fs2.exists(getRemoteWALDir(REMOTE_WAL_DIR2, PEER_ID)));
    Assert.assertFalse(fs2.exists(getReplayRemoteWALs(REMOTE_WAL_DIR2, PEER_ID)));
    UTIL1.getAdmin().removeReplicationPeer(PEER_ID);
    verifyRemovedPeer(PEER_ID, REMOTE_WAL_DIR1, UTIL1);
    // The peer remoteWAL dir will be renamed to the replay WAL dir when transiting from STANDBY
    // to DOWNGRADE_ACTIVE, and the replay WAL dir is removed after all WALs are replayed, so
    // create an empty dir here to test whether removeReplicationPeer removes the remoteWAL dir.
    fs2.create(getRemoteWALDir(REMOTE_WAL_DIR2, PEER_ID));
    fs2.create(getReplayRemoteWALs(REMOTE_WAL_DIR2, PEER_ID));
    Assert.assertTrue(fs2.exists(getRemoteWALDir(REMOTE_WAL_DIR2, PEER_ID)));
    Assert.assertTrue(fs2.exists(getReplayRemoteWALs(REMOTE_WAL_DIR2, PEER_ID)));
    UTIL2.getAdmin().removeReplicationPeer(PEER_ID);
    verifyRemovedPeer(PEER_ID, REMOTE_WAL_DIR2, UTIL2);
}
Also used : MasterFileSystem(org.apache.hadoop.hbase.master.MasterFileSystem) Path(org.apache.hadoop.fs.Path) Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) Append(org.apache.hadoop.hbase.client.Append) Get(org.apache.hadoop.hbase.client.Get) FileSystem(org.apache.hadoop.fs.FileSystem) Put(org.apache.hadoop.hbase.client.Put) RowMutations(org.apache.hadoop.hbase.client.RowMutations) Test(org.junit.Test)
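
The assertDisallow helper is defined in the test's base class and not shown here. A plausible shape, sketched under the assumption that a STANDBY cluster rejects client operations with DoNotRetryIOException (or a RetriesExhaustedException wrapping it):

// Assumed imports: org.apache.hadoop.hbase.DoNotRetryIOException,
// org.apache.hadoop.hbase.client.RetriesExhaustedException,
// static org.junit.Assert.fail
@FunctionalInterface
interface TableAction {
    void call(Table table) throws IOException;
}

private void assertDisallow(Table table, TableAction action) throws IOException {
    try {
        action.call(table);
        fail("Should fail because the cluster is in STANDBY state");
    } catch (DoNotRetryIOException | RetriesExhaustedException e) {
        // expected: data operations are refused while the peer is STANDBY
    }
}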

Example 14 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

the class SpaceQuotaHelperForTests method verifyNoViolation.

/**
 * Verifies that no policy has been violated on the given table
 */
void verifyNoViolation(TableName tn, Mutation m) throws Exception {
    // Try a few times to write data before failing
    boolean sawSuccess = false;
    for (int i = 0; i < NUM_RETRIES && !sawSuccess; i++) {
        try (Table table = testUtil.getConnection().getTable(tn)) {
            if (m instanceof Put) {
                table.put((Put) m);
            } else if (m instanceof Delete) {
                table.delete((Delete) m);
            } else if (m instanceof Append) {
                table.append((Append) m);
            } else if (m instanceof Increment) {
                table.increment((Increment) m);
            } else {
                fail("Failed to apply " + m.getClass().getSimpleName() + " to the table." + " Programming error");
            }
            sawSuccess = true;
        } catch (Exception e) {
            LOG.info("Rejected the " + m.getClass().getSimpleName() + ", will sleep and retry");
            Thread.sleep(2000);
        }
    }
    if (!sawSuccess) {
        try (Table quotaTable = testUtil.getConnection().getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
            ResultScanner scanner = quotaTable.getScanner(new Scan());
            Result result = null;
            LOG.info("Dumping contents of hbase:quota table");
            while ((result = scanner.next()) != null) {
                LOG.info(Bytes.toString(result.getRow()) + " => " + result.toString());
            }
            scanner.close();
        }
    }
    assertTrue("Expected to succeed in writing data to a table not having quota ", sawSuccess);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) Append(org.apache.hadoop.hbase.client.Append) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Increment(org.apache.hadoop.hbase.client.Increment) Scan(org.apache.hadoop.hbase.client.Scan) Put(org.apache.hadoop.hbase.client.Put) TableNotEnabledException(org.apache.hadoop.hbase.TableNotEnabledException) IOException(java.io.IOException) Result(org.apache.hadoop.hbase.client.Result)
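
A hedged usage sketch for this helper; the table name, column coordinates, and helper instance below are illustrative assumptions:

// Verify that an Append against a table with no space quota is accepted.
TableName tn = TableName.valueOf("no_quota_table");
Append append = new Append(Bytes.toBytes("row1"))
    .addColumn(Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
helper.verifyNoViolation(tn, append);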

Example 15 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

the class SpaceQuotaHelperForTests method verifyViolation.

/**
 * Verifies that the given policy on the given table has been violated
 */
void verifyViolation(SpaceViolationPolicy policyToViolate, TableName tn, Mutation m) throws Exception {
    // Try a few times to get the exception before failing
    boolean sawError = false;
    String msg = "";
    for (int i = 0; i < NUM_RETRIES && !sawError; i++) {
        try (Table table = testUtil.getConnection().getTable(tn)) {
            if (m instanceof Put) {
                table.put((Put) m);
            } else if (m instanceof Delete) {
                table.delete((Delete) m);
            } else if (m instanceof Append) {
                table.append((Append) m);
            } else if (m instanceof Increment) {
                table.increment((Increment) m);
            } else {
                fail("Failed to apply " + m.getClass().getSimpleName() + " to the table. Programming error");
            }
            LOG.info("Did not reject the " + m.getClass().getSimpleName() + ", will sleep and retry");
            Thread.sleep(2000);
        } catch (Exception e) {
            msg = StringUtils.stringifyException(e);
            if ((policyToViolate.equals(SpaceViolationPolicy.DISABLE) && e instanceof TableNotEnabledException) || msg.contains(policyToViolate.name())) {
                LOG.info("Got the expected exception={}", msg);
                sawError = true;
                break;
            } else {
                LOG.warn("Did not get the expected exception, will sleep and retry", e);
                Thread.sleep(2000);
            }
        }
    }
    if (!sawError) {
        try (Table quotaTable = testUtil.getConnection().getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
            ResultScanner scanner = quotaTable.getScanner(new Scan());
            Result result = null;
            LOG.info("Dumping contents of hbase:quota table");
            while ((result = scanner.next()) != null) {
                LOG.info(Bytes.toString(result.getRow()) + " => " + result.toString());
            }
            scanner.close();
        }
    } else {
        if (policyToViolate.equals(SpaceViolationPolicy.DISABLE)) {
            assertTrue(msg.contains("TableNotEnabledException") || msg.contains(policyToViolate.name()));
        } else {
            assertTrue("Expected exception message to contain the word '" + policyToViolate.name() + "', but was " + msg, msg.contains(policyToViolate.name()));
        }
    }
    assertTrue("Expected to see an exception writing data to a table exceeding its quota", sawError);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Put(org.apache.hadoop.hbase.client.Put) TableNotEnabledException(org.apache.hadoop.hbase.TableNotEnabledException) IOException(java.io.IOException) Result(org.apache.hadoop.hbase.client.Result) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Scan(org.apache.hadoop.hbase.client.Scan)
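
And the mirror-image check, again with illustrative names; the table is assumed to already be in violation (for example, after writing past its quota):

// Confirm that an Append is rejected once the NO_WRITES policy is in effect.
TableName tn = TableName.valueOf("quota_table");
Append append = new Append(Bytes.toBytes("row1"))
    .addColumn(Bytes.toBytes("f1"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
helper.verifyViolation(SpaceViolationPolicy.NO_WRITES, tn, append);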

Aggregations

Append (org.apache.hadoop.hbase.client.Append): 62
Test (org.junit.Test): 31
Result (org.apache.hadoop.hbase.client.Result): 26
Increment (org.apache.hadoop.hbase.client.Increment): 25
Put (org.apache.hadoop.hbase.client.Put): 23
IOException (java.io.IOException): 17
Get (org.apache.hadoop.hbase.client.Get): 17
Delete (org.apache.hadoop.hbase.client.Delete): 16
Table (org.apache.hadoop.hbase.client.Table): 15
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 10
TableName (org.apache.hadoop.hbase.TableName): 10
RowMutations (org.apache.hadoop.hbase.client.RowMutations): 10
Cell (org.apache.hadoop.hbase.Cell): 9
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 8
Mutation (org.apache.hadoop.hbase.client.Mutation): 7
ArrayList (java.util.ArrayList): 5
CheckAndMutate (org.apache.hadoop.hbase.client.CheckAndMutate): 5
MutationProto (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto): 5
ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString): 5
List (java.util.List): 4