Example 26 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

the class HRegion method reckonDeltasByStore.

/**
   * Reckon the Cells to apply to the WAL, the memstore, and to return to the Client for the
   * passed column family/Store.
   *
   * Does a Get of the current value and then adds the passed-in deltas for this Store,
   * returning the result.
   *
   * @param op Whether Increment or Append
   * @param mutation The encompassing Mutation object
   * @param deltas Changes to apply to this Store; either increment amount or data to append
   * @param results In here we accumulate all the Cells we are to return to the client; this List
   *  can be larger than what we return in the case where a delta is zero; i.e. don't write
   *  out new values, just return the current value. If null, the client doesn't want results.
   * @return Resulting Cells after <code>deltas</code> have been applied to current
   *  values. Side effect is our filling out of the <code>results</code> List.
   */
private List<Cell> reckonDeltasByStore(final Store store, final Operation op,
        final Mutation mutation, final Durability effectiveDurability, final long now,
        final List<Cell> deltas, final List<Cell> results) throws IOException {
    byte[] columnFamily = store.getFamily().getName();
    List<Cell> toApply = new ArrayList<>(deltas.size());
    // Get previous values for all columns in this family.
    List<Cell> currentValues = get(mutation, store, deltas, null /* default IsolationLevel */,
        op == Operation.INCREMENT ? ((Increment) mutation).getTimeRange() : null);
    // Iterate the input columns and update existing values if they were found, otherwise
    // add new column initialized to the delta amount
    int currentValuesIndex = 0;
    for (int i = 0; i < deltas.size(); i++) {
        Cell delta = deltas.get(i);
        Cell currentValue = null;
        boolean firstWrite = false;
        if (currentValuesIndex < currentValues.size() && CellUtil.matchingQualifier(currentValues.get(currentValuesIndex), delta)) {
            currentValue = currentValues.get(currentValuesIndex);
            if (i < (deltas.size() - 1) && !CellUtil.matchingQualifier(delta, deltas.get(i + 1))) {
                currentValuesIndex++;
            }
        } else {
            firstWrite = true;
        }
        // Switch on whether this is an increment or an append, building the new Cell to apply.
        Cell newCell = null;
        MutationType mutationType = null;
        boolean apply = true;
        switch(op) {
            case INCREMENT:
                mutationType = MutationType.INCREMENT;
                // If delta amount to apply is 0, don't write WAL or MemStore.
                long deltaAmount = getLongValue(delta);
                apply = deltaAmount != 0;
                newCell = reckonIncrement(delta, deltaAmount, currentValue, columnFamily, now, (Increment) mutation);
                break;
            case APPEND:
                mutationType = MutationType.APPEND;
                // Always apply Append. TODO: Does empty delta value mean reset Cell? It seems to.
                newCell = reckonAppend(delta, currentValue, now, (Append) mutation);
                break;
            default:
                throw new UnsupportedOperationException(op.toString());
        }
        // Give coprocessors a chance to update the new cell
        if (coprocessorHost != null) {
            newCell = coprocessorHost.postMutationBeforeWAL(mutationType, mutation, currentValue, newCell);
        }
        // If apply, we need to update the memstore/WAL with the new value; add it to toApply.
        if (apply || firstWrite) {
            toApply.add(newCell);
        }
        // Add to results to get returned to the Client. If null, client does not want results.
        if (results != null) {
            results.add(newCell);
        }
    }
    return toApply;
}
Also used : MutationType(org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) ArrayList(java.util.ArrayList) Cell(org.apache.hadoop.hbase.Cell)
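
For orientation, here is a minimal client-side sketch of the Append call that eventually reaches reckonDeltasByStore on the RegionServer. This is a sketch under assumptions: the open Connection conn and the "t1"/"f1"/"c1" names are illustrative, not taken from the example above.

// Assumes imports of Connection, Table, Append, Result, TableName and Bytes.
try (Table table = conn.getTable(TableName.valueOf("t1"))) {
    Append append = new Append(Bytes.toBytes("row1"));
    // add(family, qualifier, value); newer client versions name this addColumn.
    append.add(Bytes.toBytes("f1"), Bytes.toBytes("c1"), Bytes.toBytes("suffix"));
    // The Result carries the post-append cell values, i.e. the `results` list
    // that reckonDeltasByStore fills in on the server side.
    Result result = table.append(append);
}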

Example 27 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

the class TestProtobufUtil method testAppend.

/**
   * Test Append Mutate conversions.
   *
   * @throws IOException
   */
@Test
public void testAppend() throws IOException {
    long timeStamp = 111111;
    MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
    mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
    mutateBuilder.setMutateType(MutationType.APPEND);
    mutateBuilder.setTimestamp(timeStamp);
    ColumnValue.Builder valueBuilder = ColumnValue.newBuilder();
    valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
    QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
    qualifierBuilder.setValue(ByteString.copyFromUtf8("v1"));
    qualifierBuilder.setTimestamp(timeStamp);
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
    qualifierBuilder.setValue(ByteString.copyFromUtf8("v2"));
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    qualifierBuilder.setTimestamp(timeStamp);
    mutateBuilder.addColumnValue(valueBuilder.build());
    MutationProto proto = mutateBuilder.build();
    // default fields
    assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability());
    // set the default value for equal comparison
    mutateBuilder = MutationProto.newBuilder(proto);
    mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);
    Append append = ProtobufUtil.toAppend(proto, null);
    // Append always uses the latest timestamp, so copy the converted Append's
    // timestamp back into the original mutate for an equal comparison
    mutateBuilder.setTimestamp(append.getTimeStamp());
    assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.APPEND, append));
}
Also used : Append(org.apache.hadoop.hbase.client.Append) QualifierValue(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue) ColumnValue(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue) MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto) Test(org.junit.Test)
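
The round trip the test verifies can also be exercised directly. A hedged sketch using the same ProtobufUtil conversions as above; the row/family/qualifier literals are illustrative and Bytes is assumed imported:

Append append = new Append(Bytes.toBytes("row"));
// add(family, qualifier, value); newer client versions name this addColumn.
append.add(Bytes.toBytes("f1"), Bytes.toBytes("c1"), Bytes.toBytes("v1"));
MutationProto proto = ProtobufUtil.toMutation(MutationType.APPEND, append);
// Passing a null CellScanner is fine here: this proto carries its data inline.
Append roundTripped = ProtobufUtil.toAppend(proto, null);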

Example 28 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

the class RequestConverter method buildNoDataRegionAction.

/**
   * Create a protocol buffer multi request with NO data for a list of actions (the data is
   * carried outside of protobuf).  The request just notes attributes, whether to write the
   * WAL, etc., and the protobuf entry serves as a placeholder for the data, which travels
   * along separately.  Note that Get is different: it carries no 'data' and is always
   * sent fully via protobuf.  We return references to the data by adding them to the
   * passed in <code>cells</code> param.
   *
   * <p>Propagates each Action's original index.
   *
   * @param regionName
   * @param actions
   * @param cells Place to stuff references to actual data.
   * @return a multi request that does not carry any data.
   * @throws IOException
   */
public static RegionAction.Builder buildNoDataRegionAction(final byte[] regionName,
        final Iterable<Action> actions, final List<CellScannable> cells,
        final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder,
        final MutationProto.Builder mutationBuilder) throws IOException {
    RegionAction.Builder builder = getRegionActionBuilderWithRegion(regionActionBuilder, regionName);
    ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
    for (Action action : actions) {
        Row row = action.getAction();
        actionBuilder.clear();
        actionBuilder.setIndex(action.getOriginalIndex());
        mutationBuilder.clear();
        if (row instanceof Get) {
            Get g = (Get) row;
            builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
        } else if (row instanceof Put) {
            Put p = (Put) row;
            cells.add(p);
            builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, p, mutationBuilder)));
        } else if (row instanceof Delete) {
            Delete d = (Delete) row;
            int size = d.size();
            // If the Delete carries cells, put metadata only in the pb and send the
            // KeyValues along the side in cells.
            if (size > 0) {
                cells.add(d);
                builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.DELETE, d, mutationBuilder)));
            } else {
                builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, d, mutationBuilder)));
            }
        } else if (row instanceof Append) {
            Append a = (Append) row;
            cells.add(a);
            builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.APPEND, a, mutationBuilder, action.getNonce())));
        } else if (row instanceof Increment) {
            Increment i = (Increment) row;
            cells.add(i);
            builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData(MutationType.INCREMENT, i, mutationBuilder, action.getNonce())));
        } else if (row instanceof RegionCoprocessorServiceExec) {
            RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
            // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value = org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations.unsafeWrap(exec.getRequest().toByteArray());
            if (cpBuilder == null) {
                cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
            } else {
                cpBuilder.clear();
            }
            builder.addAction(actionBuilder.setServiceCall(cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())).setServiceName(exec.getMethod().getService().getFullName()).setMethodName(exec.getMethod().getName()).setRequest(value)));
        } else if (row instanceof RowMutations) {
            throw new UnsupportedOperationException("No RowMutations in multi calls; use mutateRow");
        } else {
            throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
        }
    }
    return builder;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Action(org.apache.hadoop.hbase.client.Action) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) Put(org.apache.hadoop.hbase.client.Put) RegionCoprocessorServiceExec(org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec) RowMutations(org.apache.hadoop.hbase.client.RowMutations) Append(org.apache.hadoop.hbase.client.Append) Get(org.apache.hadoop.hbase.client.Get) Increment(org.apache.hadoop.hbase.client.Increment) Row(org.apache.hadoop.hbase.client.Row)
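
A hedged sketch of how a caller might drive this builder; regionName and actions are assumed to be in scope, and the builders are reusable across calls:

List<CellScannable> cells = new ArrayList<>();
RegionAction.Builder regionActionBuilder = RegionAction.newBuilder();
ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
MutationProto.Builder mutationBuilder = MutationProto.newBuilder();
RegionAction.Builder built = RequestConverter.buildNoDataRegionAction(regionName, actions,
        cells, regionActionBuilder, actionBuilder, mutationBuilder);
// The protobuf now carries metadata only; `cells` holds references to the actual
// mutation data, which travels beside the protobuf as the RPC's cell payload.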

Example 29 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

the class ProtobufUtil method toAppend.

/**
   * Convert a protocol buffer Mutate to an Append
   * @param proto the protocol buffer Mutate to convert
   * @param cellScanner scanner over the data riding alongside the proto, if any
   * @return the converted client Append
   * @throws IOException
   */
public static Append toAppend(final MutationProto proto, final CellScanner cellScanner) throws IOException {
    MutationType type = proto.getMutateType();
    assert type == MutationType.APPEND : type.name();
    byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null;
    Append append = null;
    int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0;
    if (cellCount > 0) {
        // The proto has metadata only and the data is separate to be found in the cellScanner.
        if (cellScanner == null) {
            throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto));
        }
        for (int i = 0; i < cellCount; i++) {
            if (!cellScanner.advance()) {
                throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + " no cell returned: " + toShortString(proto));
            }
            Cell cell = cellScanner.current();
            if (append == null) {
                append = new Append(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
            }
            append.add(cell);
        }
    } else {
        append = new Append(row);
        for (ColumnValue column : proto.getColumnValueList()) {
            byte[] family = column.getFamily().toByteArray();
            for (QualifierValue qv : column.getQualifierValueList()) {
                byte[] qualifier = qv.getQualifier().toByteArray();
                if (!qv.hasValue()) {
                    throw new DoNotRetryIOException("Missing required field: qualifier value");
                }
                byte[] value = qv.getValue().toByteArray();
                byte[] tags = null;
                if (qv.hasTags()) {
                    tags = qv.getTags().toByteArray();
                }
                append.add(CellUtil.createCell(row, family, qualifier, qv.getTimestamp(), KeyValue.Type.Put, value, tags));
            }
        }
    }
    append.setDurability(toDurability(proto.getDurability()));
    for (NameBytesPair attribute : proto.getAttributeList()) {
        append.setAttribute(attribute.getName(), attribute.getValue().toByteArray());
    }
    return append;
}
Also used : MutationType(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType) Append(org.apache.hadoop.hbase.client.Append) NameBytesPair(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) QualifierValue(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue) ColumnValue(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue) Cell(org.apache.hadoop.hbase.Cell)
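
A hedged sketch of the metadata-only branch above: when the proto sets an associated cell count, the data is recovered from the CellScanner rather than from the proto itself. The proto variable and the cell contents here are assumptions for illustration:

// Stand-in for the cells that arrived beside the protobuf in the RPC.
List<Cell> cellsFromRpc = new ArrayList<>();
cellsFromRpc.add(CellUtil.createCell(Bytes.toBytes("row"), Bytes.toBytes("f1"),
        Bytes.toBytes("c1"), System.currentTimeMillis(), KeyValue.Type.Put.getCode(),
        Bytes.toBytes("v1")));
CellScanner scanner = CellUtil.createCellScanner(cellsFromRpc);
// `proto` is assumed to have setAssociatedCellCount(1) so this branch is taken.
Append append = ProtobufUtil.toAppend(proto, scanner);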

Example 30 with Append

use of org.apache.hadoop.hbase.client.Append in project phoenix by apache.

the class ConnectionQueryServicesImpl method returnSequences.

@SuppressWarnings("deprecation")
@Override
public void returnSequences(List<SequenceKey> keys, long timestamp, SQLException[] exceptions) throws SQLException {
    List<Sequence> sequences = Lists.newArrayListWithExpectedSize(keys.size());
    for (SequenceKey key : keys) {
        Sequence newSequences = new Sequence(key);
        Sequence sequence = sequenceMap.putIfAbsent(key, newSequences);
        if (sequence == null) {
            sequence = newSequences;
        }
        sequences.add(sequence);
    }
    try {
        for (Sequence sequence : sequences) {
            sequence.getLock().lock();
        }
        // Now that we have all the locks we need, attempt to return the unused sequence values
        List<Append> mutations = Lists.newArrayListWithExpectedSize(sequences.size());
        List<Sequence> toReturnList = Lists.newArrayListWithExpectedSize(sequences.size());
        int[] indexes = new int[sequences.size()];
        for (int i = 0; i < sequences.size(); i++) {
            Sequence sequence = sequences.get(i);
            try {
                Append append = sequence.newReturn(timestamp);
                toReturnList.add(sequence);
                mutations.add(append);
                // Remember the original key position so a failure below can be reported
                // in the right slot of the caller's exceptions array.
                indexes[toReturnList.size() - 1] = i;
            } catch (EmptySequenceCacheException ignore) {
            // Nothing to return, so ignore
            }
        }
        if (toReturnList.isEmpty()) {
            return;
        }
        HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        Object[] resultObjects = null;
        SQLException sqlE = null;
        try {
            resultObjects = hTable.batch(mutations);
        } catch (IOException e) {
            sqlE = ServerUtil.parseServerException(e);
        } catch (InterruptedException e) {
            // restore the interrupt status
            Thread.currentThread().interrupt();
            sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
        } finally {
            try {
                hTable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
        for (int i = 0; i < resultObjects.length; i++) {
            Sequence sequence = toReturnList.get(i);
            Result result = (Result) resultObjects[i];
            try {
                sequence.returnValue(result);
            } catch (SQLException e) {
                exceptions[indexes[i]] = e;
            }
        }
    } finally {
        for (Sequence sequence : sequences) {
            sequence.getLock().unlock();
        }
    }
}
Also used : EmptySequenceCacheException(org.apache.phoenix.schema.EmptySequenceCacheException) SQLException(java.sql.SQLException) Sequence(org.apache.phoenix.schema.Sequence) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) PTinyint(org.apache.phoenix.schema.types.PTinyint) PUnsignedTinyint(org.apache.phoenix.schema.types.PUnsignedTinyint) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Result(org.apache.hadoop.hbase.client.Result) Append(org.apache.hadoop.hbase.client.Append) SequenceKey(org.apache.phoenix.schema.SequenceKey) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
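
Worth noting is the positional contract of the batch call used above: results arrive in the same order as the submitted mutations, which is why the index bookkeeping matters. A hedged sketch of that idiom in isolation, assuming hTable and mutations as above:

Object[] results = new Object[mutations.size()];
try {
    // results[i] corresponds to mutations.get(i).
    hTable.batch(mutations, results);
} catch (IOException e) {
    // Phoenix surfaces this as a SQLException via ServerUtil.parseServerException.
} catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // restore the interrupt status
}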

Aggregations

Append (org.apache.hadoop.hbase.client.Append) 62
Test (org.junit.Test) 31
Result (org.apache.hadoop.hbase.client.Result) 26
Increment (org.apache.hadoop.hbase.client.Increment) 25
Put (org.apache.hadoop.hbase.client.Put) 23
IOException (java.io.IOException) 17
Get (org.apache.hadoop.hbase.client.Get) 17
Delete (org.apache.hadoop.hbase.client.Delete) 16
Table (org.apache.hadoop.hbase.client.Table) 15
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 10
TableName (org.apache.hadoop.hbase.TableName) 10
RowMutations (org.apache.hadoop.hbase.client.RowMutations) 10
Cell (org.apache.hadoop.hbase.Cell) 9
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult) 8
Mutation (org.apache.hadoop.hbase.client.Mutation) 7
ArrayList (java.util.ArrayList) 5
CheckAndMutate (org.apache.hadoop.hbase.client.CheckAndMutate) 5
MutationProto (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) 5
ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString) 5
List (java.util.List) 4