Example 21 with RowMutations

Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.

From the class TableBasedReplicationQueuesImpl, method safeQueueUpdate.

/**
   * Delegates to {@link #safeQueueUpdate(RowMutations)}.
   *
   * @param delete the Delete to perform on the queue row
   */
private void safeQueueUpdate(Delete delete) throws ReplicationException, IOException {
    RowMutations mutations = new RowMutations(delete.getRow());
    mutations.add(delete);
    safeQueueUpdate(mutations);
}
Also used: RowMutations (org.apache.hadoop.hbase.client.RowMutations)
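
The Delete overload above simply wraps the row mutation and delegates to the RowMutations variant, which is not shown on this page. For context, here is a minimal hypothetical sketch of such a guarded update using the classic Table.checkAndMutate overload; CF_QUEUE, COL_OWNER, serverNameBytes, and getReplicationTable are assumed names, not the actual fields of TableBasedReplicationQueuesImpl:

// Hypothetical sketch: apply the mutations only while this server still owns the queue row
private void safeQueueUpdate(RowMutations mutate) throws ReplicationException, IOException {
    try (Table replicationTable = getReplicationTable()) { // assumed helper
        boolean success = replicationTable.checkAndMutate(mutate.getRow(),
            CF_QUEUE, COL_OWNER, CompareFilter.CompareOp.EQUAL, serverNameBytes, mutate);
        if (!success) {
            throw new ReplicationException("Queue ownership changed; update aborted");
        }
    }
}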

Example 22 with RowMutations

Use of org.apache.hadoop.hbase.client.RowMutations in project phoenix by apache.

From the class UpgradeIT, method removeBaseColumnCountKV.

private static void removeBaseColumnCountKV(String tenantId, String schemaName, String tableName) throws Exception {
    byte[] rowKey = SchemaUtil.getTableKey(
        tenantId == null ? new byte[0] : Bytes.toBytes(tenantId),
        schemaName == null ? new byte[0] : Bytes.toBytes(schemaName),
        Bytes.toBytes(tableName));
    // Overwrite the BASE_COLUMN_COUNT cell with a null value at LATEST_TIMESTAMP
    Put viewColumnDefinitionPut = new Put(rowKey, HConstants.LATEST_TIMESTAMP);
    viewColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
        PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, HConstants.LATEST_TIMESTAMP, null);
    try (PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        try (HTableInterface htable = conn.getQueryServices()
                .getTable(Bytes.toBytes(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME))) {
            RowMutations mutations = new RowMutations(rowKey);
            mutations.add(viewColumnDefinitionPut);
            htable.mutateRow(mutations);
        }
    }
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Put (org.apache.hadoop.hbase.client.Put), RowMutations (org.apache.hadoop.hbase.client.RowMutations)
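
HTableInterface and Put.add are the older HBase 1.x client APIs; on HBase 2.x the same single-mutation update would be written against Table with Put.addColumn. A minimal sketch under that assumption (the connection handle is illustrative):

try (Table catalog = connection.getTable(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME))) {
    Put put = new Put(rowKey, HConstants.LATEST_TIMESTAMP);
    put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
        PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, HConstants.LATEST_TIMESTAMP, null);
    RowMutations mutations = new RowMutations(rowKey);
    mutations.add(put);
    catalog.mutateRow(mutations);
}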

Example 23 with RowMutations

Use of org.apache.hadoop.hbase.client.RowMutations in project gora by apache.

From the class HBaseTableConnection, method updateRow.

/**
 * Applies a put and a delete to the same row. With autoFlush enabled the
 * changes are sent immediately (atomically, via RowMutations, when both are
 * non-empty); otherwise they are queued in the local write buffer.
 */
public void updateRow(byte[] keyRaw, Mutation put, Mutation delete) throws IOException {
    if (autoFlush) {
        Table tableInstance = getTable();
        if (put.size() > 0 && delete.size() > 0) {
            // Both mutations are non-empty: apply them atomically on the row
            RowMutations update = new RowMutations(keyRaw);
            update.add(delete);
            update.add(put);
            tableInstance.mutateRow(update);
        } else if (put.size() > 0) {
            tableInstance.put((Put) put);
        } else if (delete.size() > 0) {
            tableInstance.delete((Delete) delete);
        }
    } else {
        // Deferred mode: buffer the mutations for a later flush
        if (delete.size() > 0) {
            buffer.add(delete);
        }
        if (put.size() > 0) {
            buffer.add(put);
        }
    }
}
Also used: Table (org.apache.hadoop.hbase.client.Table), RowMutations (org.apache.hadoop.hbase.client.RowMutations)
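
Outside of Gora, the atomic branch above corresponds to the following stand-alone HBase client pattern; a minimal sketch assuming an open Table named table and placeholder column names:

byte[] row = Bytes.toBytes("row-1");
Put put = new Put(row);
put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("a"), Bytes.toBytes("v"));
Delete delete = new Delete(row);
delete.addColumn(Bytes.toBytes("info"), Bytes.toBytes("stale"));

// Both mutations target the same row, so they can be applied atomically
RowMutations update = new RowMutations(row);
update.add(delete);
update.add(put);
table.mutateRow(update);

RowMutations rejects any mutation whose row key differs from the one it was constructed with, which is what makes the single-row atomicity guarantee safe to rely on.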

Example 24 with RowMutations

Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.

From the class TableOperationSpanBuilder, method unpackRowOperations.

private static Set<Operation> unpackRowOperations(final Row row) {
    final Set<Operation> ops = new HashSet<>();
    if (row instanceof CheckAndMutate) {
        // Delegates to the CheckAndMutate overload, which unpacks the wrapped action
        final CheckAndMutate cam = (CheckAndMutate) row;
        ops.addAll(unpackRowOperations(cam));
    }
    if (row instanceof RowMutations) {
        // Record the operation type of every mutation wrapped in the RowMutations
        final RowMutations mutations = (RowMutations) row;
        final List<Operation> operations = mutations.getMutations()
            .stream()
            .map(TableOperationSpanBuilder::valueFrom)
            .collect(Collectors.toList());
        ops.addAll(operations);
    }
    return ops;
}
Also used: CheckAndMutate (org.apache.hadoop.hbase.client.CheckAndMutate), Operation (org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.Operation), HashSet (java.util.HashSet), RowMutations (org.apache.hadoop.hbase.client.RowMutations)
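
A hypothetical caller-side illustration of the shapes this method unpacks, built with the public client API (row, family, qualifier, and value are placeholders):

byte[] row = Bytes.toBytes("r");
byte[] fam = Bytes.toBytes("f");
byte[] qual = Bytes.toBytes("q");

RowMutations rm = new RowMutations(row);
rm.add(new Put(row).addColumn(fam, qual, Bytes.toBytes("v")));
rm.add(new Delete(row));

// For this RowMutations, unpackRowOperations would report both a PUT and a
// DELETE. Wrapped in a CheckAndMutate, the first branch recurses into the action:
CheckAndMutate cam = CheckAndMutate.newBuilder(row)
    .ifEquals(fam, qual, Bytes.toBytes("v"))
    .build(rm);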

Example 25 with RowMutations

Use of org.apache.hadoop.hbase.client.RowMutations in project hbase by apache.

From the class HRegion, method checkAndMutateInternal.

private CheckAndMutateResult checkAndMutateInternal(CheckAndMutate checkAndMutate, long nonceGroup, long nonce) throws IOException {
    byte[] row = checkAndMutate.getRow();
    Filter filter = null;
    byte[] family = null;
    byte[] qualifier = null;
    CompareOperator op = null;
    ByteArrayComparable comparator = null;
    if (checkAndMutate.hasFilter()) {
        filter = checkAndMutate.getFilter();
    } else {
        family = checkAndMutate.getFamily();
        qualifier = checkAndMutate.getQualifier();
        op = checkAndMutate.getCompareOp();
        comparator = new BinaryComparator(checkAndMutate.getValue());
    }
    TimeRange timeRange = checkAndMutate.getTimeRange();
    Mutation mutation = null;
    RowMutations rowMutations = null;
    if (checkAndMutate.getAction() instanceof Mutation) {
        mutation = (Mutation) checkAndMutate.getAction();
    } else {
        rowMutations = (RowMutations) checkAndMutate.getAction();
    }
    if (mutation != null) {
        checkMutationType(mutation);
        checkRow(mutation, row);
    } else {
        checkRow(rowMutations, row);
    }
    checkReadOnly();
    // TODO, add check for value length also move this check to the client
    checkResources();
    startRegionOperation();
    try {
        Get get = new Get(row);
        if (family != null) {
            checkFamily(family);
            get.addColumn(family, qualifier);
        }
        if (filter != null) {
            get.setFilter(filter);
        }
        if (timeRange != null) {
            get.setTimeRange(timeRange.getMin(), timeRange.getMax());
        }
        // Lock row - note that doBatchMutate will relock this row if called
        checkRow(row, "doCheckAndRowMutate");
        RowLock rowLock = getRowLock(get.getRow(), false, null);
        try {
            if (this.getCoprocessorHost() != null) {
                CheckAndMutateResult result = getCoprocessorHost().preCheckAndMutateAfterRowLock(checkAndMutate);
                if (result != null) {
                    return result;
                }
            }
            // NOTE: We used to wait here until mvcc caught up: mvcc.await();
            // Supposition is that now all changes are done under row locks, then when we go to read,
            // we'll get the latest on this row.
            boolean matches = false;
            long cellTs = 0;
            try (RegionScanner scanner = getScanner(new Scan(get))) {
                // NOTE: Please don't use HRegion.get() instead,
                // because it will copy cells to heap. See HBASE-26036
                List<Cell> result = new ArrayList<>(1);
                scanner.next(result);
                if (filter != null) {
                    if (!result.isEmpty()) {
                        matches = true;
                        cellTs = result.get(0).getTimestamp();
                    }
                } else {
                    boolean valueIsNull = comparator.getValue() == null || comparator.getValue().length == 0;
                    if (result.isEmpty() && valueIsNull) {
                        matches = op != CompareOperator.NOT_EQUAL;
                    } else if (result.size() > 0 && valueIsNull) {
                        matches = (result.get(0).getValueLength() == 0) == (op != CompareOperator.NOT_EQUAL);
                        cellTs = result.get(0).getTimestamp();
                    } else if (result.size() == 1) {
                        Cell kv = result.get(0);
                        cellTs = kv.getTimestamp();
                        int compareResult = PrivateCellUtil.compareValue(kv, comparator);
                        matches = matches(op, compareResult);
                    }
                }
            }
            // If matches, perform the mutation or the rowMutations
            if (matches) {
                // We have acquired the row lock already. If the system clock is NOT monotonically
                // non-decreasing (see HBASE-14070) we should make sure that the mutation has a
                // larger timestamp than what was observed via Get. doBatchMutate already does this, but
                // there is no way to pass the cellTs. See HBASE-14054.
                long now = EnvironmentEdgeManager.currentTime();
                // ensure write is not eclipsed
                long ts = Math.max(now, cellTs);
                byte[] byteTs = Bytes.toBytes(ts);
                if (mutation != null) {
                    if (mutation instanceof Put) {
                        updateCellTimestamps(mutation.getFamilyCellMap().values(), byteTs);
                    }
                // And else 'delete' is not needed since it already does a second get, and sets the
                // timestamp from get (see prepareDeleteTimestamps).
                } else {
                    for (Mutation m : rowMutations.getMutations()) {
                        if (m instanceof Put) {
                            updateCellTimestamps(m.getFamilyCellMap().values(), byteTs);
                        }
                    }
                // And else 'delete' is not needed since it already does a second get, and sets the
                // timestamp from get (see prepareDeleteTimestamps).
                }
                // All edits for the given row (across all column families) must happen atomically.
                Result r;
                if (mutation != null) {
                    r = mutate(mutation, true, nonceGroup, nonce).getResult();
                } else {
                    r = mutateRow(rowMutations, nonceGroup, nonce);
                }
                this.checkAndMutateChecksPassed.increment();
                return new CheckAndMutateResult(true, r);
            }
            this.checkAndMutateChecksFailed.increment();
            return new CheckAndMutateResult(false, null);
        } finally {
            rowLock.release();
        }
    } finally {
        closeRegionOperation();
    }
}
Also used: CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult), ArrayList (java.util.ArrayList), BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator), Put (org.apache.hadoop.hbase.client.Put), RowMutations (org.apache.hadoop.hbase.client.RowMutations), Result (org.apache.hadoop.hbase.client.Result), CompareOperator (org.apache.hadoop.hbase.CompareOperator), TimeRange (org.apache.hadoop.hbase.io.TimeRange), ByteArrayComparable (org.apache.hadoop.hbase.filter.ByteArrayComparable), Filter (org.apache.hadoop.hbase.filter.Filter), Get (org.apache.hadoop.hbase.client.Get), Scan (org.apache.hadoop.hbase.client.Scan), Mutation (org.apache.hadoop.hbase.client.Mutation), Cell (org.apache.hadoop.hbase.Cell), ByteBufferExtendedCell (org.apache.hadoop.hbase.ByteBufferExtendedCell)
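
On the client side, this region-server path is reached through Table.checkAndMutate (available since HBase 2.4); a minimal sketch assuming an open Table named table and placeholder names:

byte[] row = Bytes.toBytes("r");
byte[] fam = Bytes.toBytes("f");
byte[] qual = Bytes.toBytes("q");

RowMutations rm = new RowMutations(row);
rm.add(new Put(row).addColumn(fam, qual, Bytes.toBytes("new")));
rm.add(new Delete(row).addColumns(fam, Bytes.toBytes("old")));

CheckAndMutate cam = CheckAndMutate.newBuilder(row)
    .ifEquals(fam, qual, Bytes.toBytes("expected")) // the guard evaluated under the row lock
    .build(rm);
CheckAndMutateResult result = table.checkAndMutate(cam);
if (result.isSuccess()) {
    // The guard matched; the Put and Delete were applied atomically
}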

Aggregations

RowMutations (org.apache.hadoop.hbase.client.RowMutations): 36 usages
Put (org.apache.hadoop.hbase.client.Put): 28 usages
Test (org.junit.Test): 19 usages
Delete (org.apache.hadoop.hbase.client.Delete): 18 usages
Get (org.apache.hadoop.hbase.client.Get): 18 usages
Result (org.apache.hadoop.hbase.client.Result): 14 usages
Append (org.apache.hadoop.hbase.client.Append): 10 usages
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 10 usages
Increment (org.apache.hadoop.hbase.client.Increment): 9 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 9 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 8 usages
Table (org.apache.hadoop.hbase.client.Table): 8 usages
TableName (org.apache.hadoop.hbase.TableName): 5 usages
CheckAndMutate (org.apache.hadoop.hbase.client.CheckAndMutate): 5 usages
SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter): 5 usages
ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString): 4 usages
Action (org.apache.hadoop.hbase.client.Action): 3 usages
BinaryComparator (org.apache.hadoop.hbase.filter.BinaryComparator): 3 usages
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 3 usages
IOException (java.io.IOException): 2 usages