
Example 41 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

In the class RSRpcServices, method increment.

/**
   * Execute an increment mutation.
   *
   * @param region the Region to apply the increment to
   * @param quota the OperationQuota to charge for this mutation
   * @param mutation the protobuf Mutate describing the increment
   * @param cells the cell data accompanying the mutation, carried outside of protobuf
   * @param nonceGroup the nonce group used to detect duplicate operations
   * @return the Result of the increment
   * @throws IOException if the increment fails
   */
private Result increment(final Region region, final OperationQuota quota, final MutationProto mutation, final CellScanner cells, long nonceGroup) throws IOException {
    long before = EnvironmentEdgeManager.currentTime();
    Increment increment = ProtobufUtil.toIncrement(mutation, cells);
    quota.addMutation(increment);
    Result r = null;
    if (region.getCoprocessorHost() != null) {
        r = region.getCoprocessorHost().preIncrement(increment);
    }
    if (r == null) {
        boolean canProceed = startNonceOperation(mutation, nonceGroup);
        boolean success = false;
        try {
            long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
            if (canProceed) {
                r = region.increment(increment, nonceGroup, nonce);
            } else {
                // convert duplicate increment to get
                List<Cell> results = region.get(ProtobufUtil.toGet(mutation, cells), false, nonceGroup, nonce);
                r = Result.create(results);
            }
            success = true;
        } finally {
            if (canProceed) {
                endNonceOperation(mutation, nonceGroup, success);
            }
        }
        if (region.getCoprocessorHost() != null) {
            r = region.getCoprocessorHost().postIncrement(increment, r);
        }
    }
    if (regionServer.metricsRegionServer != null) {
        regionServer.metricsRegionServer.updateIncrement(EnvironmentEdgeManager.currentTime() - before);
    }
    return r;
}
Also used: Increment (org.apache.hadoop.hbase.client.Increment), Cell (org.apache.hadoop.hbase.Cell), ByteBufferCell (org.apache.hadoop.hbase.ByteBufferCell), RegionActionResult (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult), Result (org.apache.hadoop.hbase.client.Result)
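
For context, the client-side call that ultimately reaches this server path is a plain Table.increment. A minimal sketch, assuming a hypothetical table "counters" with family "cf" (none of these names come from the example above):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementClientSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("counters"))) {
            Increment inc = new Increment(Bytes.toBytes("row-1"));
            // Add 1 to cf:hits; the server applies the delta atomically in the method above.
            inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
            Result r = table.increment(inc);
            long hits = Bytes.toLong(r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("hits")));
            System.out.println("hits is now " + hits);
        }
    }
}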

Example 42 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

In the class HRegion, method reckonDeltasByStore.

/**
   * Reckon the Cells to apply to WAL, memstore, and to return to the Client in the passed
   * column family/Store.
   *
   * Does a Get of the current value and then adds the passed-in deltas for this Store,
   * returning the result.
   *
   * @param store the column family/Store to reconcile against
   * @param op whether Increment or Append
   * @param mutation the encompassing Mutation object
   * @param effectiveDurability the durability to apply to the mutation
   * @param now the timestamp to stamp on the new Cells
   * @param deltas changes to apply to this Store; either increment amount or data to append
   * @param results accumulates all the Cells we are to return to the client; this List
   *  can be larger than what we return in the case where a delta is zero, i.e. we don't write
   *  out new values, just return the current value. If null, the client doesn't want results.
   * @return resulting Cells after <code>deltas</code> have been applied to current
   *  values. Side effect is our filling out of the <code>results</code> List.
   */
private List<Cell> reckonDeltasByStore(final Store store, final Operation op, final Mutation mutation, final Durability effectiveDurability, final long now, final List<Cell> deltas, final List<Cell> results) throws IOException {
    byte[] columnFamily = store.getFamily().getName();
    List<Cell> toApply = new ArrayList<>(deltas.size());
    // Get previous values for all columns in this family.
    List<Cell> currentValues = get(mutation, store, deltas,
        null /* default IsolationLevel */,
        op == Operation.INCREMENT ? ((Increment) mutation).getTimeRange() : null);
    // Iterate the input columns and update existing values if they were found, otherwise
    // add new column initialized to the delta amount
    int currentValuesIndex = 0;
    for (int i = 0; i < deltas.size(); i++) {
        Cell delta = deltas.get(i);
        Cell currentValue = null;
        boolean firstWrite = false;
        if (currentValuesIndex < currentValues.size() && CellUtil.matchingQualifier(currentValues.get(currentValuesIndex), delta)) {
            currentValue = currentValues.get(currentValuesIndex);
            if (i < (deltas.size() - 1) && !CellUtil.matchingQualifier(delta, deltas.get(i + 1))) {
                currentValuesIndex++;
            }
        } else {
            firstWrite = true;
        }
        // Switch on whether this is an increment or an append, building the new Cell to apply.
        Cell newCell = null;
        MutationType mutationType = null;
        boolean apply = true;
        switch(op) {
            case INCREMENT:
                mutationType = MutationType.INCREMENT;
                // If delta amount to apply is 0, don't write WAL or MemStore.
                long deltaAmount = getLongValue(delta);
                apply = deltaAmount != 0;
                newCell = reckonIncrement(delta, deltaAmount, currentValue, columnFamily, now, (Increment) mutation);
                break;
            case APPEND:
                mutationType = MutationType.APPEND;
                // Always apply Append. TODO: Does empty delta value mean reset Cell? It seems to.
                newCell = reckonAppend(delta, currentValue, now, (Append) mutation);
                break;
            default:
                throw new UnsupportedOperationException(op.toString());
        }
        // Give coprocessors a chance to update the new cell
        if (coprocessorHost != null) {
            newCell = coprocessorHost.postMutationBeforeWAL(mutationType, mutation, currentValue, newCell);
        }
        // If apply, we need to update memstore/WAL with new value; add it toApply.
        if (apply || firstWrite) {
            toApply.add(newCell);
        }
        // Add to results to get returned to the Client. If null, the client does not want results.
        if (results != null) {
            results.add(newCell);
        }
    }
    return toApply;
}
Also used: MutationType (org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType), Append (org.apache.hadoop.hbase.client.Append), Increment (org.apache.hadoop.hbase.client.Increment), ArrayList (java.util.ArrayList), Cell (org.apache.hadoop.hbase.Cell)
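
The arithmetic inside the INCREMENT branch reduces to: read the current value as a long, add the delta, and stamp a new Cell at now. A minimal sketch of that step, assuming the simple case with no tags, no TimeRange filtering, and no coprocessor rewrite (the helper below is illustrative, not the real reckonIncrement):

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

final class ReckonSketch {
    // Combine an existing cell (may be null on first write) with an increment delta.
    static Cell reckon(Cell currentValue, Cell delta, byte[] columnFamily, long now) {
        long deltaAmount = Bytes.toLong(delta.getValueArray(),
            delta.getValueOffset(), delta.getValueLength());
        long base = currentValue == null ? 0L
            : Bytes.toLong(currentValue.getValueArray(),
                currentValue.getValueOffset(), currentValue.getValueLength());
        // The new Cell carries the summed value at the supplied timestamp.
        return new KeyValue(CellUtil.cloneRow(delta), columnFamily,
            CellUtil.cloneQualifier(delta), now, Bytes.toBytes(base + deltaAmount));
    }
}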

Example 43 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

In the class RequestConverter, method buildNoDataRegionAction.

/**
   * Create a protocol buffer multirequest with NO data for a list of actions (the data is
   * carried outside of protobuf).  The request just notes attributes, whether to write the
   * WAL, etc., and its presence in protobuf serves as a placeholder for the data, which
   * travels alongside it.  Note that Get is different: it does not contain 'data' and is
   * always carried by protobuf.  We return references to the data by adding them to the
   * passed-in <code>cells</code> param.
   *
   * <p>Propagates each Action's original index.
   *
   * @param regionName the region the actions apply to
   * @param actions the actions to encode
   * @param cells place to stuff references to the actual data
   * @param regionActionBuilder reusable builder for the RegionAction
   * @param actionBuilder reusable builder for each Action, cleared between uses
   * @param mutationBuilder reusable builder for each MutationProto, cleared between uses
   * @return a multi request that does not carry any data
   * @throws IOException if an action cannot be converted
   */
public static RegionAction.Builder buildNoDataRegionAction(final byte[] regionName, final Iterable<Action> actions, final List<CellScannable> cells, final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) throws IOException {
    RegionAction.Builder builder = getRegionActionBuilderWithRegion(regionActionBuilder, regionName);
    ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
    for (Action action : actions) {
        Row row = action.getAction();
        actionBuilder.clear();
        actionBuilder.setIndex(action.getOriginalIndex());
        mutationBuilder.clear();
        if (row instanceof Get) {
            Get g = (Get) row;
            builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
        } else if (row instanceof Put) {
            Put p = (Put) row;
            cells.add(p);
            builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, p, mutationBuilder)));
        } else if (row instanceof Delete) {
            Delete d = (Delete) row;
            int size = d.size();
            // If the Delete carries cells, put metadata only in the pb and send the KVs
            // along the side in cells; an empty (full-row) Delete fits entirely in the pb.
            if (size > 0) {
                cells.add(d);
                builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.DELETE, d, mutationBuilder)));
            } else {
                builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, d, mutationBuilder)));
            }
        } else if (row instanceof Append) {
            Append a = (Append) row;
            cells.add(a);
            builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.APPEND, a, mutationBuilder, action.getNonce())));
        } else if (row instanceof Increment) {
            Increment i = (Increment) row;
            cells.add(i);
            builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.INCREMENT, i, mutationBuilder, action.getNonce())));
        } else if (row instanceof RegionCoprocessorServiceExec) {
            RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
            // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value = org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations.unsafeWrap(exec.getRequest().toByteArray());
            if (cpBuilder == null) {
                cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
            } else {
                cpBuilder.clear();
            }
            builder.addAction(actionBuilder.setServiceCall(cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())).setServiceName(exec.getMethod().getService().getFullName()).setMethodName(exec.getMethod().getName()).setRequest(value)));
        } else if (row instanceof RowMutations) {
            throw new UnsupportedOperationException("No RowMutations in multi calls; use mutateRow");
        } else {
            throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
        }
    }
    return builder;
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), Action (org.apache.hadoop.hbase.client.Action), RegionAction (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), Put (org.apache.hadoop.hbase.client.Put), RegionCoprocessorServiceExec (org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec), RowMutations (org.apache.hadoop.hbase.client.RowMutations), Append (org.apache.hadoop.hbase.client.Append), Get (org.apache.hadoop.hbase.client.Get), Increment (org.apache.hadoop.hbase.client.Increment), Row (org.apache.hadoop.hbase.client.Row)
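
From the client side, this no-data encoding is what allows one multi request to carry heterogeneous actions. A hedged sketch of a batch call that would exercise the branches above (the rows, family, and values are made up):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class BatchSketch {
    static void mixedBatch(Table table) throws Exception {
        List<Row> actions = Arrays.asList(
            new Put(Bytes.toBytes("r1"))
                .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")),
            new Increment(Bytes.toBytes("r2"))
                .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("n"), 5L),
            new Delete(Bytes.toBytes("r3")),
            new Get(Bytes.toBytes("r4")));
        Object[] results = new Object[actions.size()];
        // Each Row becomes one Action in the RegionAction built above: Put/Delete/Increment
        // data rides along in the cell block, while the Get stays entirely in protobuf.
        table.batch(actions, results);
    }
}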

Example 44 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

In the class ProtobufUtil, method toIncrement.

/**
   * Convert a protocol buffer Mutate to an Increment.
   *
   * @param proto the protocol buffer Mutate to convert
   * @param cellScanner carries the cell data when the proto holds metadata only
   * @return the converted client Increment
   * @throws IOException if the proto is malformed or the expected cells are missing
   */
public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner) throws IOException {
    MutationType type = proto.getMutateType();
    assert type == MutationType.INCREMENT : type.name();
    byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
    Increment increment = null;
    int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
    if (cellCount > 0) {
        // The proto has metadata only and the data is separate to be found in the cellScanner.
        if (cellScanner == null) {
            throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + TextFormat.shortDebugString(proto));
        }
        for (int i = 0; i < cellCount; i++) {
            if (!cellScanner.advance()) {
                throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + TextFormat.shortDebugString(proto));
            }
            Cell cell = cellScanner.current();
            if (increment == null) {
                increment = new Increment(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
            }
            increment.add(cell);
        }
    } else {
        increment = new Increment(row);
        for (ColumnValue column : proto.getColumnValueList()) {
            byte[] family = column.getFamily().toByteArray();
            for (QualifierValue qv : column.getQualifierValueList()) {
                byte[] qualifier = qv.getQualifier().toByteArray();
                if (!qv.hasValue()) {
                    throw new DoNotRetryIOException("Missing required field: qualifier value");
                }
                byte[] value = qv.getValue().toByteArray();
                byte[] tags = null;
                if (qv.hasTags()) {
                    tags = qv.getTags().toByteArray();
                }
                increment.add(CellUtil.createCell(row, family, qualifier, qv.getTimestamp(), KeyValue.Type.Put, value, tags));
            }
        }
    }
    if (proto.hasTimeRange()) {
        TimeRange timeRange = protoToTimeRange(proto.getTimeRange());
        increment.setTimeRange(timeRange.getMin(), timeRange.getMax());
    }
    increment.setDurability(toDurability(proto.getDurability()));
    for (NameBytesPair attribute : proto.getAttributeList()) {
        increment.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
    }
    return increment;
}
Also used: TimeRange (org.apache.hadoop.hbase.io.TimeRange), MutationType (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType), NameBytesPair (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), Increment (org.apache.hadoop.hbase.client.Increment), QualifierValue (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue), ColumnValue (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue), Cell (org.apache.hadoop.hbase.Cell)
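
The fields this converter restores map one-to-one onto the client-side setters. A minimal sketch of an Increment that exercises each of them (the row, column, and attribute names are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.util.Bytes;

final class ToIncrementFieldsSketch {
    static Increment build() throws IOException {
        Increment inc = new Increment(Bytes.toBytes("row-1"));
        // Becomes a ColumnValue/QualifierValue pair in the MutationProto.
        inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), 2L);
        // Restored via protoToTimeRange(proto.getTimeRange()) above.
        inc.setTimeRange(0L, System.currentTimeMillis());
        // Restored via toDurability(proto.getDurability()) above.
        inc.setDurability(Durability.SYNC_WAL);
        // Restored from the proto's NameBytesPair attribute list above.
        inc.setAttribute("trace-id", Bytes.toBytes("abc"));
        return inc;
    }
}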

Example 45 with Increment

Use of org.apache.hadoop.hbase.client.Increment in project phoenix by apache.

In the class Sequence, method newIncrement.

public Increment newIncrement(long timestamp, Sequence.ValueOp action, long numToAllocate) {
    byte[] incKey = key.getKey();
    byte[] incValue = Bytes.toBytes((long) action.ordinal());
    Increment inc = new Increment(incKey);
    // All columns that get added are returned with their current value.
    try {
        inc.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp);
        inc.setAttribute(SequenceRegionObserver.NUM_TO_ALLOCATE, Bytes.toBytes(numToAllocate));
    } catch (IOException e) {
        // Impossible
        throw new RuntimeException(e);
    }
    for (KeyValue kv : SEQUENCE_KV_COLUMNS) {
        try {
            // Store the timestamp on the cell as well, since HBase 1.2 seems to not
            // serialize over the time range (see HBASE-15698).
            Cell cell = new KeyValue(incKey, 0, incKey.length, kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), timestamp, KeyValue.Type.Put, incValue, 0, incValue.length);
            inc.add(cell);
        } catch (IOException e) {
            // Impossible
            throw new RuntimeException(e);
        }
    }
    return inc;
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), Increment (org.apache.hadoop.hbase.client.Increment), IOException (java.io.IOException), Cell (org.apache.hadoop.hbase.Cell)
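
The Increment built here is shipped like any other; in Phoenix, the SequenceRegionObserver's preIncrement hook on the sequence table short-circuits it, interpreting the ValueOp ordinal and the NUM_TO_ALLOCATE attribute instead of doing a plain numeric add. A hedged sketch of a call site (the wiring below is illustrative, not Phoenix's actual code):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.schema.Sequence;

final class SequenceIncrementSketch {
    // Ship the Increment to the sequence table; the coprocessor returns the
    // sequence state as a Result rather than performing a numeric add.
    static Result allocate(Table sequenceTable, Sequence seq, long timestamp,
            Sequence.ValueOp action, long numToAllocate) throws IOException {
        return sequenceTable.increment(seq.newIncrement(timestamp, action, numToAllocate));
    }
}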

Aggregations

Increment (org.apache.hadoop.hbase.client.Increment): 81
Test (org.junit.Test): 42
Put (org.apache.hadoop.hbase.client.Put): 31
Append (org.apache.hadoop.hbase.client.Append): 25
Result (org.apache.hadoop.hbase.client.Result): 25
Delete (org.apache.hadoop.hbase.client.Delete): 21
Get (org.apache.hadoop.hbase.client.Get): 19
IOException (java.io.IOException): 16
TableName (org.apache.hadoop.hbase.TableName): 15
Table (org.apache.hadoop.hbase.client.Table): 15
ArrayList (java.util.ArrayList): 14
Cell (org.apache.hadoop.hbase.Cell): 11
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 11
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 9
Mutation (org.apache.hadoop.hbase.client.Mutation): 9
RowMutations (org.apache.hadoop.hbase.client.RowMutations): 9
List (java.util.List): 8
Map (java.util.Map): 8
Scan (org.apache.hadoop.hbase.client.Scan): 7
KeyValue (org.apache.hadoop.hbase.KeyValue): 5