Example 31 with Append

Use of org.apache.hadoop.hbase.client.Append in project phoenix by apache.

The class Sequence, method newReturn:

private Append newReturn(SequenceValue value) {
    byte[] key = this.key.getKey();
    Append append = new Append(key);
    byte[] opBuf = new byte[] { (byte) MetaOp.RETURN_SEQUENCE.ordinal() };
    append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, opBuf);
    append.setAttribute(SequenceRegionObserver.CURRENT_VALUE_ATTRIB, PLong.INSTANCE.toBytes(value.nextValue));
    Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
    familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
        (Cell) KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, value.timestamp,
            PLong.INSTANCE.toBytes(value.currentValue)),
        (Cell) KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, value.timestamp,
            PBoolean.INSTANCE.toBytes(value.limitReached))));
    return append;
}
Also used: Append(org.apache.hadoop.hbase.client.Append) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell)
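
Attributes set on an Append travel with the mutation to the region server, where SequenceRegionObserver inspects them to decide which sequence operation to apply. Below is a minimal, self-contained sketch of that attribute round trip; the attribute name and ordinal are illustrative stand-ins, not the verified Phoenix constants used above.

import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendAttributeSketch {
    public static void main(String[] args) {
        Append append = new Append(Bytes.toBytes("row-key"));
        // Client side: tag the mutation with an operation code, as newReturn() does.
        // "_SEQ_OPERATION_" is an illustrative name, not the Phoenix constant.
        append.setAttribute("_SEQ_OPERATION_", new byte[] { (byte) 2 });
        // Server side (e.g. in a RegionObserver preAppend hook): read the tag back.
        byte[] opBuf = append.getAttribute("_SEQ_OPERATION_");
        System.out.println("operation ordinal = " + opBuf[0]);
    }
}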

Example 32 with Append

Use of org.apache.hadoop.hbase.client.Append in project phoenix by apache.

The class Sequence, method dropSequence:

public Append dropSequence(long timestamp) {
    byte[] key = this.key.getKey();
    Append append = new Append(key);
    append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, new byte[] { (byte) MetaOp.DROP_SEQUENCE.ordinal() });
    if (timestamp != HConstants.LATEST_TIMESTAMP) {
        append.setAttribute(SequenceRegionObserver.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp));
    }
    Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
    familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
        (Cell) KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY)));
    return append;
}
Also used: Append(org.apache.hadoop.hbase.client.Append) List(java.util.List) Cell(org.apache.hadoop.hbase.Cell)
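
Note that dropSequence fills getFamilyCellMap() directly so each cell can carry an explicit timestamp; the convenience overload Append#add(family, qualifier, value) always lets the region server assign the timestamp. A hedged sketch of both options follows; the family and qualifier names are illustrative, and in HBase 2.x the overload is named addColumn.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendCellSketch {
    public static void main(String[] args) {
        byte[] row = Bytes.toBytes("row-key");
        byte[] family = Bytes.toBytes("0");
        byte[] qualifier = Bytes.toBytes("_0");

        // Option 1: convenience overload; the region server assigns the timestamp.
        // (HBase 1.x spelling; HBase 2.x renames this to addColumn.)
        Append a1 = new Append(row);
        a1.add(family, qualifier, new byte[0]);

        // Option 2: add a fully built Cell that carries its own timestamp,
        // the same effect the familyMap route above achieves in bulk.
        Append a2 = new Append(row);
        a2.add(new KeyValue(row, family, qualifier, 42L, new byte[0]));
    }
}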

Example 33 with Append

Use of org.apache.hadoop.hbase.client.Append in project phoenix by apache.

The class Sequence, method createSequence:

public Append createSequence(long startWith, long incrementBy, long cacheSize, long timestamp, long minValue, long maxValue, boolean cycle) {
    byte[] key = this.key.getKey();
    Append append = new Append(key);
    append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, new byte[] { (byte) MetaOp.CREATE_SEQUENCE.ordinal() });
    if (timestamp != HConstants.LATEST_TIMESTAMP) {
        append.setAttribute(SequenceRegionObserver.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp));
    }
    Map<byte[], List<Cell>> familyMap = append.getFamilyCellMap();
    byte[] startWithBuf = PLong.INSTANCE.toBytes(startWith);
    familyMap.put(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, Arrays.<Cell>asList(
        KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY),
        KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf),
        KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf),
        KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            PhoenixDatabaseMetaData.INCREMENT_BY_BYTES, timestamp, PLong.INSTANCE.toBytes(incrementBy)),
        KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            PhoenixDatabaseMetaData.CACHE_SIZE_BYTES, timestamp, PLong.INSTANCE.toBytes(cacheSize)),
        KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            PhoenixDatabaseMetaData.MIN_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(minValue)),
        KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            PhoenixDatabaseMetaData.MAX_VALUE_BYTES, timestamp, PLong.INSTANCE.toBytes(maxValue)),
        KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            PhoenixDatabaseMetaData.CYCLE_FLAG_BYTES, timestamp, PBoolean.INSTANCE.toBytes(cycle)),
        KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES,
            PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, timestamp, PDataType.FALSE_BYTES)));
    return append;
}
Also used: Append(org.apache.hadoop.hbase.client.Append) List(java.util.List)
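
On the region server, SequenceRegionObserver distinguishes these three builders by the single OPERATION_ATTRIB byte. Below is a hypothetical sketch of that dispatch; the enum mirrors the MetaOp references in the examples above, but the attribute name and the ordinal order are assumptions, not the verified Phoenix constants.

import org.apache.hadoop.hbase.client.Append;

public class MetaOpDispatchSketch {
    // Mirrors the MetaOp enum used above; the declaration order is assumed.
    enum MetaOp { CREATE_SEQUENCE, DROP_SEQUENCE, RETURN_SEQUENCE }

    static MetaOp opOf(Append append) {
        // "_SEQ_OPERATION_" is an illustrative attribute name.
        byte[] opBuf = append.getAttribute("_SEQ_OPERATION_");
        return MetaOp.values()[opBuf[0]];
    }
}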

Example 34 with Append

Use of org.apache.hadoop.hbase.client.Append in project phoenix by apache.

The class ConnectionQueryServicesImpl, method dropSequence:

@Override
public long dropSequence(String tenantId, String schemaName, String sequenceName, long timestamp) throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets);
    Sequence newSequences = new Sequence(sequenceKey);
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    try {
        sequence.getLock().lock();
        // Now that we have the lock we need, drop the sequence
        Append append = sequence.dropSequence(timestamp);
        HTableInterface htable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        try {
            Result result = htable.append(append);
            return sequence.dropSequence(result);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } finally {
            Closeables.closeQuietly(htable);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
Also used: Append(org.apache.hadoop.hbase.client.Append) SequenceKey(org.apache.phoenix.schema.SequenceKey) Sequence(org.apache.phoenix.schema.Sequence) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Result(org.apache.hadoop.hbase.client.Result)
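
HTableInterface was deprecated in HBase 1.0 and removed in 2.0. Against a modern client, the append-then-parse step in the middle of this method might look like the sketch below; connection handling is simplified, and SYSTEM.SEQUENCE stands in for the resolved physical table name.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

public class ModernAppendSketch {
    static Result sendAppend(Connection conn, Append append) throws IOException {
        // try-with-resources replaces the explicit Closeables.closeQuietly(htable).
        try (Table table = conn.getTable(TableName.valueOf("SYSTEM.SEQUENCE"))) {
            return table.append(append);
        }
    }
}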

Example 35 with Append

Use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

The class HRegion, method doMiniBatchMutate:

/**
 * Called to do a piece of the batch that came in to {@link #batchMutate(Mutation[])}.
 * In here we also handle replay of edits during region recovery. Also records the
 * change in size brought about by applying {@code batchOp}.
 */
private void doMiniBatchMutate(BatchOperation<?> batchOp) throws IOException {
    boolean success = false;
    WALEdit walEdit = null;
    WriteEntry writeEntry = null;
    boolean locked = false;
    // We try to set up a batch in the range [batchOp.nextIndexToProcess,lastIndexExclusive)
    MiniBatchOperationInProgress<Mutation> miniBatchOp = null;
    /**
     * Keep track of the locks we hold so we can release them in finally clause
     */
    List<RowLock> acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.size());
    // Check for thread interrupt status in case we have been signaled from
    // #interruptRegionOperation.
    checkInterrupt();
    try {
        // STEP 1. Try to acquire as many locks as we can and build mini-batch of operations with
        // locked rows
        miniBatchOp = batchOp.lockRowsAndBuildMiniBatch(acquiredRowLocks);
        // Ensure we acquire at least one.
        if (miniBatchOp.getReadyToWriteCount() <= 0) {
            // NoSuchColumnFamily?
            return;
        }
        // Check for thread interrupt status in case we have been signaled from
        // #interruptRegionOperation. Do it before we take the lock and disable interrupts for
        // the WAL append.
        checkInterrupt();
        lock(this.updatesLock.readLock(), miniBatchOp.getReadyToWriteCount());
        locked = true;
        // From this point until memstore update this operation should not be interrupted.
        disableInterrupts();
        // STEP 2. Update mini batch of all operations in progress with LATEST_TIMESTAMP timestamp
        // We should record the timestamp only after we have acquired the rowLock,
        // otherwise, newer puts/deletes/increment/append are not guaranteed to have a newer
        // timestamp
        long now = EnvironmentEdgeManager.currentTime();
        batchOp.prepareMiniBatchOperations(miniBatchOp, now, acquiredRowLocks);
        // STEP 3. Build WAL edit
        List<Pair<NonceKey, WALEdit>> walEdits = batchOp.buildWALEdits(miniBatchOp);
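        // STEP 4. Append the edits built above to the WAL via doWALAppend.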
        for (Iterator<Pair<NonceKey, WALEdit>> it = walEdits.iterator(); it.hasNext(); ) {
            Pair<NonceKey, WALEdit> nonceKeyWALEditPair = it.next();
            walEdit = nonceKeyWALEditPair.getSecond();
            NonceKey nonceKey = nonceKeyWALEditPair.getFirst();
            if (walEdit != null && !walEdit.isEmpty()) {
                writeEntry = doWALAppend(walEdit, batchOp.durability, batchOp.getClusterIds(), now, nonceKey.getNonceGroup(), nonceKey.getNonce(), batchOp.getOrigLogSeqNum());
            }
            // Complete mvcc for all but last writeEntry (for replay case)
            if (it.hasNext() && writeEntry != null) {
                mvcc.complete(writeEntry);
                writeEntry = null;
            }
        }
        // STEP 5. Write back to memStore
        // NOTE: writeEntry can be null here
        writeEntry = batchOp.writeMiniBatchOperationsToMemStore(miniBatchOp, writeEntry);
        // STEP 6. Complete MiniBatchOperations: If required calls postBatchMutate() CP hook and
        // complete mvcc for last writeEntry
        batchOp.completeMiniBatchOperations(miniBatchOp, writeEntry);
        writeEntry = null;
        success = true;
    } finally {
        // Call complete rather than completeAndWait because we probably had an error if writeEntry != null
        if (writeEntry != null)
            mvcc.complete(writeEntry);
        if (locked) {
            this.updatesLock.readLock().unlock();
        }
        releaseRowLocks(acquiredRowLocks);
        enableInterrupts();
        final int finalLastIndexExclusive = miniBatchOp != null ? miniBatchOp.getLastIndexExclusive() : batchOp.size();
        final boolean finalSuccess = success;
        batchOp.visitBatchOperations(true, finalLastIndexExclusive, (int i) -> {
            Mutation mutation = batchOp.getMutation(i);
            if (mutation instanceof Increment || mutation instanceof Append) {
                if (finalSuccess) {
                    batchOp.retCodeDetails[i] = new OperationStatus(OperationStatusCode.SUCCESS, batchOp.results[i]);
                } else {
                    batchOp.retCodeDetails[i] = OperationStatus.FAILURE;
                }
            } else {
                batchOp.retCodeDetails[i] = finalSuccess ? OperationStatus.SUCCESS : OperationStatus.FAILURE;
            }
            return true;
        });
        batchOp.doPostOpCleanupForMiniBatch(miniBatchOp, walEdit, finalSuccess);
        batchOp.nextIndexToProcess = finalLastIndexExclusive;
    }
}
Also used: WriteEntry(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry) Append(org.apache.hadoop.hbase.client.Append) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) NonceKey(org.apache.hadoop.hbase.util.NonceKey) Increment(org.apache.hadoop.hbase.client.Increment) Mutation(org.apache.hadoop.hbase.client.Mutation) Pair(org.apache.hadoop.hbase.util.Pair)
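
doMiniBatchMutate is internal to the region server; a client reaches it whenever it submits a batch of mutations, for example through Table#batch. Below is a minimal sketch of a mixed batch that would exercise the Increment/Append branch of the status loop above; the table and column names are illustrative, and the add call is the HBase 1.x spelling (addColumn in 2.x).

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MixedBatchSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("demo"))) {
            byte[] row = Bytes.toBytes("r1");
            byte[] cf = Bytes.toBytes("f");
            Append append = new Append(row);
            append.add(cf, Bytes.toBytes("log"), Bytes.toBytes("|entry"));
            Increment incr = new Increment(row);
            incr.addColumn(cf, Bytes.toBytes("count"), 1L);
            List<Row> actions = Arrays.asList(append, incr);
            Object[] results = new Object[actions.size()];
            // Appends and Increments return their updated cells, which is why the
            // status loop above wraps their OperationStatus around a Result.
            table.batch(actions, results);
        }
    }
}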

Aggregations

Append (org.apache.hadoop.hbase.client.Append): 62
Test (org.junit.Test): 31
Result (org.apache.hadoop.hbase.client.Result): 26
Increment (org.apache.hadoop.hbase.client.Increment): 25
Put (org.apache.hadoop.hbase.client.Put): 23
IOException (java.io.IOException): 17
Get (org.apache.hadoop.hbase.client.Get): 17
Delete (org.apache.hadoop.hbase.client.Delete): 16
Table (org.apache.hadoop.hbase.client.Table): 15
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 10
TableName (org.apache.hadoop.hbase.TableName): 10
RowMutations (org.apache.hadoop.hbase.client.RowMutations): 10
Cell (org.apache.hadoop.hbase.Cell): 9
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 8
Mutation (org.apache.hadoop.hbase.client.Mutation): 7
ArrayList (java.util.ArrayList): 5
CheckAndMutate (org.apache.hadoop.hbase.client.CheckAndMutate): 5
MutationProto (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto): 5
ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString): 5
List (java.util.List): 4