
Example 6 with CellScanner

Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.

Source: class VisibilityController, method preBatchMutate.

@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) {
        return;
    }
    // TODO this can be made as a global LRU cache at HRS level?
    Map<String, List<Tag>> labelCache = new HashMap<>();
    for (int i = 0; i < miniBatchOp.size(); i++) {
        Mutation m = miniBatchOp.getOperation(i);
        CellVisibility cellVisibility = null;
        try {
            cellVisibility = m.getCellVisibility();
        } catch (DeserializationException de) {
            miniBatchOp.setOperationStatus(i, new OperationStatus(SANITY_CHECK_FAILURE, de.getMessage()));
            continue;
        }
        boolean sanityFailure = false;
        boolean modifiedTagFound = false;
        Pair<Boolean, Tag> pair = new Pair<>(false, null);
        for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) {
            pair = checkForReservedVisibilityTagPresence(cellScanner.current(), pair);
            if (!pair.getFirst()) {
                // Don't disallow reserved tags if authorization is disabled
                if (authorizationEnabled) {
                    miniBatchOp.setOperationStatus(i, new OperationStatus(SANITY_CHECK_FAILURE, "Mutation contains cell with reserved type tag"));
                    sanityFailure = true;
                }
                break;
            } else {
                // Indicates that the cell carries the tag which was modified in the source replication cluster
                Tag tag = pair.getSecond();
                if (cellVisibility == null && tag != null) {
                    // May need to store only the first one
                    cellVisibility = new CellVisibility(TagUtil.getValueAsString(tag));
                    modifiedTagFound = true;
                }
            }
        }
        if (!sanityFailure) {
            if (cellVisibility != null) {
                String labelsExp = cellVisibility.getExpression();
                List<Tag> visibilityTags = labelCache.get(labelsExp);
                if (visibilityTags == null) {
                    // Don't check user auths for labels with Mutations when the user is super user
                    boolean authCheck = authorizationEnabled && checkAuths && !(isSystemOrSuperUser());
                    try {
                        visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, true, authCheck);
                    } catch (InvalidLabelException e) {
                        miniBatchOp.setOperationStatus(i, new OperationStatus(SANITY_CHECK_FAILURE, e.getMessage()));
                    }
                    if (visibilityTags != null) {
                        labelCache.put(labelsExp, visibilityTags);
                    }
                }
                if (visibilityTags != null) {
                    List<Cell> updatedCells = new ArrayList<>();
                    for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) {
                        Cell cell = cellScanner.current();
                        List<Tag> tags = CellUtil.getTags(cell);
                        if (modifiedTagFound) {
                            // Rewrite the tags by removing the modified tags.
                            removeReplicationVisibilityTag(tags);
                        }
                        tags.addAll(visibilityTags);
                        Cell updatedCell = CellUtil.createCell(cell, tags);
                        updatedCells.add(updatedCell);
                    }
                    m.getFamilyCellMap().clear();
                    // Clear and add new Cells to the Mutation.
                    for (Cell cell : updatedCells) {
                        if (m instanceof Put) {
                            Put p = (Put) m;
                            p.add(cell);
                        } else if (m instanceof Delete) {
                            Delete d = (Delete) m;
                            d.addDeleteMarker(cell);
                        }
                    }
                }
            }
        }
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ByteString(com.google.protobuf.ByteString) CellScanner(org.apache.hadoop.hbase.CellScanner) ReplicationEndpoint(org.apache.hadoop.hbase.replication.ReplicationEndpoint) DeserializationException(org.apache.hadoop.hbase.exceptions.DeserializationException) Put(org.apache.hadoop.hbase.client.Put) OperationStatus(org.apache.hadoop.hbase.regionserver.OperationStatus) List(java.util.List) FilterList(org.apache.hadoop.hbase.filter.FilterList) ArrayList(java.util.ArrayList) Mutation(org.apache.hadoop.hbase.client.Mutation) Tag(org.apache.hadoop.hbase.Tag) Cell(org.apache.hadoop.hbase.Cell) Pair(org.apache.hadoop.hbase.util.Pair) NameBytesPair(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair)
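
The coprocessor above repeatedly walks a Mutation's Cells with the advance()/current() idiom. Below is a minimal, self-contained sketch of that idiom over a client-side Put, using only the public HBase client API; the class name, row, and column names are made up for illustration and are not part of the source above.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MutationCellScanSketch {
    public static void main(String[] args) throws Exception {
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"), Bytes.toBytes("v2"));
        // Same loop shape as preBatchMutate: advance() moves to the next Cell,
        // current() exposes it for inspection (e.g. to read its tags or value).
        for (CellScanner cellScanner = put.cellScanner(); cellScanner.advance(); ) {
            Cell cell = cellScanner.current();
            System.out.println(Bytes.toString(CellUtil.cloneQualifier(cell)) + " = "
                + Bytes.toString(CellUtil.cloneValue(cell)));
        }
    }
}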

Example 7 with CellScanner

Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.

Source: class RSRpcServices, method replay.

/**
   * Replay the given changes when distributedLogReplay replays WAL edits from a failed RS. The guarantee is
   * that the given mutations will be durable on the receiving RS if this method returns without any
   * exception.
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
@Override
@QosPriority(priority = HConstants.REPLAY_QOS)
public ReplicateWALEntryResponse replay(final RpcController controller, final ReplicateWALEntryRequest request) throws ServiceException {
    long before = EnvironmentEdgeManager.currentTime();
    CellScanner cells = ((HBaseRpcController) controller).cellScanner();
    try {
        checkOpen();
        List<WALEntry> entries = request.getEntryList();
        if (entries == null || entries.isEmpty()) {
            // empty input
            return ReplicateWALEntryResponse.newBuilder().build();
        }
        ByteString regionName = entries.get(0).getKey().getEncodedRegionName();
        Region region = regionServer.getRegionByEncodedName(regionName.toStringUtf8());
        RegionCoprocessorHost coprocessorHost = ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())
            ? region.getCoprocessorHost()
            // do not invoke coprocessors if this is a secondary region replica
            : null;
        List<Pair<WALKey, WALEdit>> walEntries = new ArrayList<>();
        // Skip adding the edits to WAL if this is a secondary region replica
        boolean isPrimary = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
        Durability durability = isPrimary ? Durability.USE_DEFAULT : Durability.SKIP_WAL;
        for (WALEntry entry : entries) {
            if (!regionName.equals(entry.getKey().getEncodedRegionName())) {
                throw new NotServingRegionException("Replay request contains entries from multiple " + "regions. First region:" + regionName.toStringUtf8() + " , other region:" + entry.getKey().getEncodedRegionName());
            }
            if (regionServer.nonceManager != null && isPrimary) {
                long nonceGroup = entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
                long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
                regionServer.nonceManager.reportOperationFromWal(nonceGroup, nonce, entry.getKey().getWriteTime());
            }
            Pair<WALKey, WALEdit> walEntry = (coprocessorHost == null) ? null : new Pair<>();
            List<WALSplitter.MutationReplay> edits = WALSplitter.getMutationsFromWALEntry(entry, cells, walEntry, durability);
            if (coprocessorHost != null) {
                // Run the coprocessor preWALRestore hook on this entry's WALKey/WALEdit before replaying it.
                if (coprocessorHost.preWALRestore(region.getRegionInfo(), walEntry.getFirst(), walEntry.getSecond())) {
                    // if the coprocessor bypasses this log entry, ignore it ...
                    continue;
                }
                walEntries.add(walEntry);
            }
            if (edits != null && !edits.isEmpty()) {
                long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ? entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
                OperationStatus[] result = doReplayBatchOp(region, edits, replaySeqId);
                // check if it's a partial success
                for (int i = 0; result != null && i < result.length; i++) {
                    if (result[i] != OperationStatus.SUCCESS) {
                        throw new IOException(result[i].getExceptionMsg());
                    }
                }
            }
        }
        // Sync the WAL at the end because ASYNC_WAL is used above
        WAL wal = getWAL(region);
        if (wal != null) {
            wal.sync();
        }
        if (coprocessorHost != null) {
            for (Pair<WALKey, WALEdit> entry : walEntries) {
                coprocessorHost.postWALRestore(region.getRegionInfo(), entry.getFirst(), entry.getSecond());
            }
        }
        return ReplicateWALEntryResponse.newBuilder().build();
    } catch (IOException ie) {
        throw new ServiceException(ie);
    } finally {
        if (regionServer.metricsRegionServer != null) {
            regionServer.metricsRegionServer.updateReplay(EnvironmentEdgeManager.currentTime() - before);
        }
    }
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ArrayList(java.util.ArrayList) CellScanner(org.apache.hadoop.hbase.CellScanner) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) Pair(org.apache.hadoop.hbase.util.Pair) NameInt64Pair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair) NameBytesPair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) Durability(org.apache.hadoop.hbase.client.Durability) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) WALEntry(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry) QosPriority(org.apache.hadoop.hbase.ipc.QosPriority)
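
In the replay method above, the Cells for every WALEntry in the request arrive through one shared CellScanner taken from the RPC controller, and WALSplitter.getMutationsFromWALEntry drains the next slice of that scanner for each entry. The following is a minimal sketch of building and draining such a scanner with CellUtil.createCellScanner; the KeyValues and row names are made up for illustration and do not come from the server code.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class SharedCellScannerSketch {
    public static void main(String[] args) throws Exception {
        // Two made-up cells standing in for the cells that back a batch of WAL entries.
        List<Cell> cells = Arrays.<Cell>asList(
            new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1")),
            new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v2")));
        CellScanner scanner = CellUtil.createCellScanner(cells);
        // A consumer (as getMutationsFromWALEntry does per entry) pulls cells in order until exhausted.
        while (scanner.advance()) {
            Cell cell = scanner.current();
            System.out.println(Bytes.toString(CellUtil.cloneRow(cell)));
        }
    }
}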

Example 8 with CellScanner

Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.

Source: class RSRpcServices, method multi.

/**
   * Execute multiple actions on a table: get, mutate, and/or execCoprocessor
   *
   * @param rpcc the RPC controller
   * @param request the multi request
   * @throws ServiceException
   */
@Override
public MultiResponse multi(final RpcController rpcc, final MultiRequest request) throws ServiceException {
    try {
        checkOpen();
    } catch (IOException ie) {
        throw new ServiceException(ie);
    }
    // rpc controller is how we bring in data via the back door;  it is unprotobuf'ed data.
    // It is also the conduit via which we pass back data.
    HBaseRpcController controller = (HBaseRpcController) rpcc;
    CellScanner cellScanner = controller != null ? controller.cellScanner() : null;
    if (controller != null) {
        controller.setCellScanner(null);
    }
    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
    // this will contain all the cells that we need to return. It's created later, if needed.
    List<CellScannable> cellsToReturn = null;
    MultiResponse.Builder responseBuilder = MultiResponse.newBuilder();
    RegionActionResult.Builder regionActionResultBuilder = RegionActionResult.newBuilder();
    Boolean processed = null;
    RegionScannersCloseCallBack closeCallBack = null;
    RpcCallContext context = RpcServer.getCurrentCall();
    this.rpcMultiRequestCount.increment();
    Map<RegionSpecifier, ClientProtos.RegionLoadStats> regionStats = new HashMap<>(request.getRegionActionCount());
    for (RegionAction regionAction : request.getRegionActionList()) {
        this.requestCount.add(regionAction.getActionCount());
        OperationQuota quota;
        Region region;
        regionActionResultBuilder.clear();
        RegionSpecifier regionSpecifier = regionAction.getRegion();
        try {
            region = getRegion(regionSpecifier);
            quota = getQuotaManager().checkQuota(region, regionAction.getActionList());
        } catch (IOException e) {
            rpcServer.getMetrics().exception(e);
            regionActionResultBuilder.setException(ResponseConverter.buildException(e));
            responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
            // The Region isn't available on this server, so skip the Cells in the CellScanner corresponding to these Mutations.
            if (cellScanner != null) {
                skipCellsForMutations(regionAction.getActionList(), cellScanner);
            }
            // For this region it's a failure.
            continue;
        }
        if (regionAction.hasAtomic() && regionAction.getAtomic()) {
            // Need to return an item per Action along w/ Action index.  TODO.
            try {
                if (request.hasCondition()) {
                    Condition condition = request.getCondition();
                    byte[] row = condition.getRow().toByteArray();
                    byte[] family = condition.getFamily().toByteArray();
                    byte[] qualifier = condition.getQualifier().toByteArray();
                    CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
                    ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
                    processed = checkAndRowMutate(region, regionAction.getActionList(), cellScanner, row, family, qualifier, compareOp, comparator, regionActionResultBuilder);
                } else {
                    mutateRows(region, regionAction.getActionList(), cellScanner, regionActionResultBuilder);
                    processed = Boolean.TRUE;
                }
            } catch (IOException e) {
                rpcServer.getMetrics().exception(e);
                // As it's atomic, we may expect it's a global failure.
                regionActionResultBuilder.setException(ResponseConverter.buildException(e));
            }
        } else {
            // doNonAtomicRegionMutation manages the exception internally
            if (context != null && closeCallBack == null) {
                // An RpcCallBack that creates a list of scanners that needs to perform callBack
                // operation on completion of multiGets.
                // Set this only once
                closeCallBack = new RegionScannersCloseCallBack();
                context.setCallBack(closeCallBack);
            }
            cellsToReturn = doNonAtomicRegionMutation(region, quota, regionAction, cellScanner, regionActionResultBuilder, cellsToReturn, nonceGroup, closeCallBack, context);
        }
        responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
        quota.close();
        ClientProtos.RegionLoadStats regionLoadStats = ((HRegion) region).getLoadStatistics();
        if (regionLoadStats != null) {
            regionStats.put(regionSpecifier, regionLoadStats);
        }
    }
    // Load the controller with the Cells to return.
    if (cellsToReturn != null && !cellsToReturn.isEmpty() && controller != null) {
        controller.setCellScanner(CellUtil.createCellScanner(cellsToReturn));
    }
    if (processed != null) {
        responseBuilder.setProcessed(processed);
    }
    MultiRegionLoadStats.Builder builder = MultiRegionLoadStats.newBuilder();
    for (Entry<RegionSpecifier, ClientProtos.RegionLoadStats> stat : regionStats.entrySet()) {
        builder.addRegion(stat.getKey());
        builder.addStat(stat.getValue());
    }
    responseBuilder.setRegionStatistics(builder);
    return responseBuilder.build();
}
Also used : CellScannable(org.apache.hadoop.hbase.CellScannable) MultiResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiResponse) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) CellScanner(org.apache.hadoop.hbase.CellScanner) RegionSpecifier(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier) ByteArrayComparable(org.apache.hadoop.hbase.filter.ByteArrayComparable) MultiRegionLoadStats(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRegionLoadStats) RpcCallContext(org.apache.hadoop.hbase.ipc.RpcCallContext) Condition(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition) OperationQuota(org.apache.hadoop.hbase.quotas.OperationQuota) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) MultiRegionLoadStats(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRegionLoadStats) CompareOp(org.apache.hadoop.hbase.filter.CompareFilter.CompareOp) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)
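
The non-atomic branch above accumulates CellScannable results and finally hands them back through the controller as a single scanner via CellUtil.createCellScanner(cellsToReturn). Since each HBase Result implements CellScannable, that folding step can be sketched in isolation as below; the rows, values, and class name are made up for illustration and are not part of the server code.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class CellsToReturnSketch {
    public static void main(String[] args) throws Exception {
        List<CellScannable> cellsToReturn = new ArrayList<>();
        // Each Result implements CellScannable; these cells are made up for illustration.
        cellsToReturn.add(Result.create(new Cell[] {
            new KeyValue(Bytes.toBytes("r1"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1")) }));
        cellsToReturn.add(Result.create(new Cell[] {
            new KeyValue(Bytes.toBytes("r2"), Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v2")) }));
        // Equivalent of controller.setCellScanner(CellUtil.createCellScanner(cellsToReturn)) above.
        CellScanner scanner = CellUtil.createCellScanner(cellsToReturn);
        while (scanner.advance()) {
            System.out.println(Bytes.toString(CellUtil.cloneRow(scanner.current())));
        }
    }
}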

Example 9 with CellScanner

Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.

Source: class TestPutDeleteEtcCellIteration, method testIncrementIteration.

@Test
public void testIncrementIteration() throws IOException {
    Increment increment = new Increment(ROW);
    for (int i = 0; i < COUNT; i++) {
        byte[] bytes = Bytes.toBytes(i);
        increment.addColumn(bytes, bytes, i);
    }
    int index = 0;
    for (CellScanner cellScanner = increment.cellScanner(); cellScanner.advance(); ) {
        Cell cell = cellScanner.current();
        int value = index;
        byte[] bytes = Bytes.toBytes(index++);
        KeyValue kv = (KeyValue) cell;
        assertTrue(Bytes.equals(CellUtil.cloneFamily(kv), bytes));
        long a = Bytes.toLong(CellUtil.cloneValue(kv));
        assertEquals(value, a);
    }
    assertEquals(COUNT, index);
}
Also used : KeyValue(org.apache.hadoop.hbase.KeyValue) CellScanner(org.apache.hadoop.hbase.CellScanner) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
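
The test above walks an Increment with the same advance()/current() idiom. As a small variation, the same loop can aggregate the increment amounts instead of asserting on them; this is a sketch with made-up column names, not part of the test code.

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementSumSketch {
    public static void main(String[] args) throws Exception {
        Increment increment = new Increment(Bytes.toBytes("row"));
        increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("a"), 1L);
        increment.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("b"), 2L);
        long total = 0;
        // Each Cell's value holds the 8-byte increment amount for its column.
        for (CellScanner cellScanner = increment.cellScanner(); cellScanner.advance(); ) {
            Cell cell = cellScanner.current();
            total += Bytes.toLong(CellUtil.cloneValue(cell));
        }
        System.out.println("total increment amount = " + total); // prints 3
    }
}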

Example 10 with CellScanner

Use of org.apache.hadoop.hbase.CellScanner in project hbase by apache.

Source: class TestFromClientSideNoCodec, method testBasics.

@Test
public void testBasics() throws IOException {
    final TableName tableName = TableName.valueOf(name.getMethodName());
    final byte[][] fs = new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") };
    Table ht = TEST_UTIL.createTable(tableName, fs);
    // Check put and get.
    final byte[] row = Bytes.toBytes("row");
    Put p = new Put(row);
    for (byte[] f : fs) {
        p.addColumn(f, f, f);
    }
    ht.put(p);
    Result r = ht.get(new Get(row));
    int i = 0;
    for (CellScanner cellScanner = r.cellScanner(); cellScanner.advance(); ) {
        Cell cell = cellScanner.current();
        byte[] f = fs[i++];
        assertTrue(Bytes.toString(f), Bytes.equals(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength(), f, 0, f.length));
    }
    // Check a Get restricted to a single family.
    byte[] f = fs[0];
    Get get = new Get(row);
    get.addFamily(f);
    r = ht.get(get);
    assertTrue(r.toString(), r.containsColumn(f, f));
    // Check scan.
    ResultScanner scanner = ht.getScanner(new Scan());
    int count = 0;
    while ((r = scanner.next()) != null) {
        assertTrue(r.listCells().size() == 3);
        count++;
    }
    assertTrue(count == 1);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) CellScanner(org.apache.hadoop.hbase.CellScanner) Cell(org.apache.hadoop.hbase.Cell) Test(org.junit.Test)
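
TestFromClientSideNoCodec exercises the path where the client ships Cells inside the protobuf message rather than in a trailing cellblock. Below is a minimal sketch of the client configuration such a test relies on; the key name "hbase.client.rpc.codec" and the empty-value convention are assumptions based on HBase's client RPC settings, not something shown in the excerpt above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class NoCodecConfigSketch {
    // Returns a client Configuration with the cellblock codec disabled (assumed key name),
    // so request/response Cells travel inline in the protobuf payload.
    public static Configuration noCellBlockCodec() {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.client.rpc.codec", "");
        return conf;
    }
}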

Aggregations

CellScanner (org.apache.hadoop.hbase.CellScanner) 82
Cell (org.apache.hadoop.hbase.Cell) 69
Test (org.junit.Test) 58
Result (org.apache.hadoop.hbase.client.Result) 47
Scan (org.apache.hadoop.hbase.client.Scan) 43
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner) 42
Table (org.apache.hadoop.hbase.client.Table) 41
IOException (java.io.IOException) 40
TableName (org.apache.hadoop.hbase.TableName) 40
InterruptedIOException (java.io.InterruptedIOException) 35
Connection (org.apache.hadoop.hbase.client.Connection) 34
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction) 32
Delete (org.apache.hadoop.hbase.client.Delete) 32
Put (org.apache.hadoop.hbase.client.Put) 15
KeyValue (org.apache.hadoop.hbase.KeyValue) 14
ArrayList (java.util.ArrayList) 10
RetriesExhaustedWithDetailsException (org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException) 7
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 6
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 6
HBaseRpcController (org.apache.hadoop.hbase.ipc.HBaseRpcController) 5