
Example 1 with Sequence

Use of org.apache.phoenix.schema.Sequence in project phoenix by apache.

In the class ConnectionQueryServicesImpl, the method returnAllSequences:

// Take no locks, as this only gets run when there are no open connections
// so there's no danger of contention.
@SuppressWarnings("deprecation")
private void returnAllSequences(ConcurrentMap<SequenceKey, Sequence> sequenceMap) throws SQLException {
    List<Append> mutations = Lists.newArrayListWithExpectedSize(sequenceMap.size());
    for (Sequence sequence : sequenceMap.values()) {
        mutations.addAll(sequence.newReturns());
    }
    if (mutations.isEmpty()) {
        return;
    }
    HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
    SQLException sqlE = null;
    try {
        hTable.batch(mutations);
    } catch (IOException e) {
        sqlE = ServerUtil.parseServerException(e);
    } catch (InterruptedException e) {
        // restore the interrupt status
        Thread.currentThread().interrupt();
        sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
    } finally {
        try {
            hTable.close();
        } catch (IOException e) {
            if (sqlE == null) {
                sqlE = ServerUtil.parseServerException(e);
            } else {
                sqlE.setNextException(ServerUtil.parseServerException(e));
            }
        }
        if (sqlE != null) {
            throw sqlE;
        }
    }
}
Also used: Append(org.apache.hadoop.hbase.client.Append) SQLException(java.sql.SQLException) Sequence(org.apache.phoenix.schema.Sequence) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
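
The finally block above is the pre-try-with-resources idiom for cleanup: remember the first failure, attach any failure from close() with setNextException, and rethrow once cleanup is done. A minimal self-contained sketch of the same idiom, with a hypothetical Work callback standing in for hTable.batch(mutations):

import java.sql.SQLException;

public final class ChainedCloseSketch {

    // Hypothetical stand-in for the unit of work (hTable.batch(mutations) above).
    interface Work {
        void run() throws Exception;
    }

    static void runAndClose(Work work, AutoCloseable resource) throws SQLException {
        SQLException sqlE = null;
        try {
            work.run();
        } catch (Exception e) {
            // The first failure wins; cleanup failures are chained onto it below.
            sqlE = new SQLException(e);
        } finally {
            try {
                resource.close();
            } catch (Exception e) {
                if (sqlE == null) {
                    sqlE = new SQLException(e);
                } else {
                    // Record the close() failure without masking the original one.
                    sqlE.setNextException(new SQLException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
    }
}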

Example 2 with Sequence

Use of org.apache.phoenix.schema.Sequence in project phoenix by apache.

In the class ConnectionQueryServicesImpl, the method createSequence:

@Override
public long createSequence(String tenantId, String schemaName, String sequenceName, long startWith, long incrementBy, long cacheSize, long minValue, long maxValue, boolean cycle, long timestamp) throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets);
    Sequence newSequences = new Sequence(sequenceKey);
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    try {
        sequence.getLock().lock();
        // Now that we have the lock we need, create the sequence
        Append append = sequence.createSequence(startWith, incrementBy, cacheSize, timestamp, minValue, maxValue, cycle);
        HTableInterface htable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        htable.setAutoFlush(true);
        try {
            Result result = htable.append(append);
            return sequence.createSequence(result, minValue, maxValue, cycle);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } finally {
            Closeables.closeQuietly(htable);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
Also used: Append(org.apache.hadoop.hbase.client.Append) SequenceKey(org.apache.phoenix.schema.SequenceKey) Sequence(org.apache.phoenix.schema.Sequence) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Result(org.apache.hadoop.hbase.client.Result)
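
createSequence is the connection-services side of Phoenix's CREATE SEQUENCE statement, which writes a row into the SYSTEM.SEQUENCE table. A minimal JDBC sketch of driving it through SQL; the jdbc:phoenix:localhost URL and the my_schema.my_seq name are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public final class CreateSequenceSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder JDBC URL; point this at a real cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // DDL that ends up in ConnectionQueryServicesImpl.createSequence.
            stmt.execute("CREATE SEQUENCE IF NOT EXISTS my_schema.my_seq"
                    + " START WITH 1 INCREMENT BY 1 CACHE 100");
            // NEXT VALUE FOR allocates from the client-side cache of 100 values.
            try (ResultSet rs = stmt.executeQuery("SELECT NEXT VALUE FOR my_schema.my_seq")) {
                if (rs.next()) {
                    System.out.println(rs.getLong(1));
                }
            }
        }
    }
}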

Example 3 with Sequence

Use of org.apache.phoenix.schema.Sequence in project phoenix by apache.

In the class SequenceRegionObserver, the method preAppend:

/**
 * Override the preAppend for checkAndPut and checkAndDelete, as we need the ability to
 * a) set the TimeRange for the Get being done and
 * b) return something back to the client to indicate success/failure
 */
@SuppressWarnings("deprecation")
@Override
public Result preAppend(final ObserverContext<RegionCoprocessorEnvironment> e, final Append append) throws IOException {
    byte[] opBuf = append.getAttribute(OPERATION_ATTRIB);
    if (opBuf == null) {
        return null;
    }
    Sequence.MetaOp op = Sequence.MetaOp.values()[opBuf[0]];
    Cell keyValue = append.getFamilyCellMap().values().iterator().next().iterator().next();
    long clientTimestamp = HConstants.LATEST_TIMESTAMP;
    long minGetTimestamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
    long maxGetTimestamp = HConstants.LATEST_TIMESTAMP;
    boolean hadClientTimestamp;
    byte[] clientTimestampBuf = null;
    if (op == Sequence.MetaOp.RETURN_SEQUENCE) {
        // When returning sequences, this allows us to send the expected timestamp
        // of the sequence to make sure we don't reset any other sequence
        hadClientTimestamp = true;
        clientTimestamp = minGetTimestamp = keyValue.getTimestamp();
        maxGetTimestamp = minGetTimestamp + 1;
    } else {
        clientTimestampBuf = append.getAttribute(MAX_TIMERANGE_ATTRIB);
        if (clientTimestampBuf != null) {
            clientTimestamp = maxGetTimestamp = Bytes.toLong(clientTimestampBuf);
        }
        hadClientTimestamp = (clientTimestamp != HConstants.LATEST_TIMESTAMP);
        if (hadClientTimestamp) {
            // If the client supplied a timestamp and we're creating the sequence,
            // cap the Get's time range just past that timestamp so we detect a
            // sequence that was already created at or before it.
            if (op == Sequence.MetaOp.CREATE_SEQUENCE) {
                maxGetTimestamp = clientTimestamp + 1;
            }
        } else {
            clientTimestamp = maxGetTimestamp = EnvironmentEdgeManager.currentTimeMillis();
            clientTimestampBuf = Bytes.toBytes(clientTimestamp);
        }
    }
    RegionCoprocessorEnvironment env = e.getEnvironment();
    // We need to set this to prevent region.append from being called
    e.bypass();
    e.complete();
    Region region = env.getRegion();
    byte[] row = append.getRow();
    List<RowLock> locks = Lists.newArrayList();
    region.startRegionOperation();
    try {
        acquireLock(region, row, locks);
        try {
            byte[] family = CellUtil.cloneFamily(keyValue);
            byte[] qualifier = CellUtil.cloneQualifier(keyValue);
            Get get = new Get(row);
            get.setTimeRange(minGetTimestamp, maxGetTimestamp);
            get.addColumn(family, qualifier);
            Result result = region.get(get);
            if (result.isEmpty()) {
                if (op == Sequence.MetaOp.DROP_SEQUENCE || op == Sequence.MetaOp.RETURN_SEQUENCE) {
                    return getErrorResult(row, clientTimestamp, SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode());
                }
            } else {
                if (op == Sequence.MetaOp.CREATE_SEQUENCE) {
                    return getErrorResult(row, clientTimestamp, SQLExceptionCode.SEQUENCE_ALREADY_EXIST.getErrorCode());
                }
            }
            Mutation m = null;
            switch(op) {
                case RETURN_SEQUENCE:
                    KeyValue currentValueKV = result.raw()[0];
                    long expectedValue = PLong.INSTANCE.getCodec().decodeLong(append.getAttribute(CURRENT_VALUE_ATTRIB), 0, SortOrder.getDefault());
                    long value = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), currentValueKV.getValueOffset(), SortOrder.getDefault());
                    // Timestamp should match exactly, or we may have the wrong sequence
                    if (expectedValue != value || currentValueKV.getTimestamp() != clientTimestamp) {
                        return Result.create(Collections.singletonList((Cell) KeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, currentValueKV.getTimestamp(), ByteUtil.EMPTY_BYTE_ARRAY)));
                    }
                    m = new Put(row, currentValueKV.getTimestamp());
                    m.getFamilyCellMap().putAll(append.getFamilyCellMap());
                    break;
                case DROP_SEQUENCE:
                    m = new Delete(row, clientTimestamp);
                    break;
                case CREATE_SEQUENCE:
                    m = new Put(row, clientTimestamp);
                    m.getFamilyCellMap().putAll(append.getFamilyCellMap());
                    break;
            }
            if (!hadClientTimestamp) {
                for (List<Cell> kvs : m.getFamilyCellMap().values()) {
                    for (Cell kv : kvs) {
                        ((KeyValue) kv).updateLatestStamp(clientTimestampBuf);
                    }
                }
            }
            Mutation[] mutations = new Mutation[] { m };
            region.batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
            long serverTimestamp = MetaDataUtil.getClientTimeStamp(m);
            // Return the timestamp of when the mutation was actually performed
            // (useful when the server, rather than the client, chose the timestamp).
            return Result.create(Collections.singletonList((Cell) KeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, serverTimestamp, SUCCESS_VALUE)));
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        ServerUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t);
        // Impossible
        return null;
    } finally {
        region.closeRegionOperation();
    }
}
Also used: Delete(org.apache.hadoop.hbase.client.Delete) KeyValue(org.apache.hadoop.hbase.KeyValue) Sequence(org.apache.phoenix.schema.Sequence) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Get(org.apache.hadoop.hbase.client.Get) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)
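
The observer dispatches on a one-byte operation attribute carried by the Append; the cell inside the Append supplies the row, family, qualifier, and (for RETURN_SEQUENCE) the expected timestamp. A client-side sketch of tagging such a mutation; the attribute name strings and the MetaOp ordinal used here are placeholders, since the snippet only shows them as the OPERATION_ATTRIB and MAX_TIMERANGE_ATTRIB constants:

import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.util.Bytes;

public final class SequenceAppendTagging {

    // Placeholder attribute names; in Phoenix these are the constants read
    // back by SequenceRegionObserver.preAppend.
    static final String OPERATION_ATTRIB = "SEQUENCE_OPERATION";
    static final String MAX_TIMERANGE_ATTRIB = "MAX_TIMERANGE";

    static Append tagCreate(byte[] row, byte[] family, byte[] qualifier, long clientTimestamp) {
        Append append = new Append(row);
        // The cell preAppend inspects via getFamilyCellMap(); the value here
        // is a placeholder initial current value.
        append.add(family, qualifier, Bytes.toBytes(0L));
        // One byte, decoded server-side as Sequence.MetaOp.values()[opBuf[0]];
        // 0 stands in for the CREATE_SEQUENCE ordinal.
        append.setAttribute(OPERATION_ATTRIB, new byte[] { 0 });
        // Upper bound for the server-side Get's time range.
        append.setAttribute(MAX_TIMERANGE_ATTRIB, Bytes.toBytes(clientTimestamp));
        return append;
    }
}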

Example 4 with Sequence

Use of org.apache.phoenix.schema.Sequence in project phoenix by apache.

In the class ConnectionQueryServicesImpl, the method returnSequences:

@SuppressWarnings("deprecation")
@Override
public void returnSequences(List<SequenceKey> keys, long timestamp, SQLException[] exceptions) throws SQLException {
    List<Sequence> sequences = Lists.newArrayListWithExpectedSize(keys.size());
    for (SequenceKey key : keys) {
        Sequence newSequences = new Sequence(key);
        Sequence sequence = sequenceMap.putIfAbsent(key, newSequences);
        if (sequence == null) {
            sequence = newSequences;
        }
        sequences.add(sequence);
    }
    try {
        for (Sequence sequence : sequences) {
            sequence.getLock().lock();
        }
        // Now that we have all the locks we need, attempt to return the unused sequence values
        List<Append> mutations = Lists.newArrayListWithExpectedSize(sequences.size());
        List<Sequence> toReturnList = Lists.newArrayListWithExpectedSize(sequences.size());
        int[] indexes = new int[sequences.size()];
        for (int i = 0; i < sequences.size(); i++) {
            Sequence sequence = sequences.get(i);
            try {
                Append append = sequence.newReturn(timestamp);
                // Remember which caller slot this sequence came from so any
                // error below can be reported at the right position.
                indexes[toReturnList.size()] = i;
                toReturnList.add(sequence);
                mutations.add(append);
            } catch (EmptySequenceCacheException ignore) {
                // Nothing to return, so ignore
            }
        }
        if (toReturnList.isEmpty()) {
            return;
        }
        HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        Object[] resultObjects = null;
        SQLException sqlE = null;
        try {
            resultObjects = hTable.batch(mutations);
        } catch (IOException e) {
            sqlE = ServerUtil.parseServerException(e);
        } catch (InterruptedException e) {
            // restore the interrupt status
            Thread.currentThread().interrupt();
            sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
        } finally {
            try {
                hTable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
        for (int i = 0; i < resultObjects.length; i++) {
            Sequence sequence = toReturnList.get(i);
            Result result = (Result) resultObjects[i];
            try {
                sequence.returnValue(result);
            } catch (SQLException e) {
                exceptions[indexes[i]] = e;
            }
        }
    } finally {
        for (Sequence sequence : sequences) {
            sequence.getLock().unlock();
        }
    }
}
Also used: EmptySequenceCacheException(org.apache.phoenix.schema.EmptySequenceCacheException) SQLException(java.sql.SQLException) Sequence(org.apache.phoenix.schema.Sequence) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) PTinyint(org.apache.phoenix.schema.types.PTinyint) PUnsignedTinyint(org.apache.phoenix.schema.types.PUnsignedTinyint) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Result(org.apache.hadoop.hbase.client.Result) Append(org.apache.hadoop.hbase.client.Append) SequenceKey(org.apache.phoenix.schema.SequenceKey) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
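
returnSequences takes every sequence lock up front and releases them all in one finally block, so a failure while returning one sequence cannot leave another locked. A generic sketch of that acquire-all/release-all pattern; the Runnable is the caller's critical section, and unlike the snippet above this variant only releases locks it actually acquired:

import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

public final class MultiLockSketch {

    static void withAllLocks(List<ReentrantLock> locks, Runnable work) {
        int acquired = 0;
        try {
            // Acquire every lock before touching shared state. Callers should
            // always pass the locks in the same order to avoid deadlock.
            for (ReentrantLock lock : locks) {
                lock.lock();
                acquired++;
            }
            work.run();
        } finally {
            // Release in reverse order, and only what was actually acquired.
            for (int i = acquired - 1; i >= 0; i--) {
                locks.get(i).unlock();
            }
        }
    }
}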

Example 5 with Sequence

Use of org.apache.phoenix.schema.Sequence in project phoenix by apache.

In the class ConnectionQueryServicesImpl, the method currentSequenceValue:

/**
 * Gets the current sequence value
 * @throws SQLException if cached sequence cannot be found
 */
@Override
public long currentSequenceValue(SequenceKey sequenceKey, long timestamp) throws SQLException {
    Sequence sequence = sequenceMap.get(sequenceKey);
    if (sequence == null) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE).setSchemaName(sequenceKey.getSchemaName()).setTableName(sequenceKey.getSequenceName()).build().buildException();
    }
    sequence.getLock().lock();
    try {
        return sequence.currentValue(timestamp);
    } catch (EmptySequenceCacheException e) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE).setSchemaName(sequenceKey.getSchemaName()).setTableName(sequenceKey.getSequenceName()).build().buildException();
    } finally {
        sequence.getLock().unlock();
    }
}
Also used: EmptySequenceCacheException(org.apache.phoenix.schema.EmptySequenceCacheException) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) Sequence(org.apache.phoenix.schema.Sequence) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
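
At the SQL level, CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE is what a client sees when CURRENT VALUE FOR is evaluated before NEXT VALUE FOR has populated the client-side sequence cache. A minimal JDBC sketch; the connection URL and sequence name are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public final class CurrentValueSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            try (ResultSet rs = stmt.executeQuery("SELECT CURRENT VALUE FOR my_schema.my_seq")) {
                rs.next();
            } catch (SQLException e) {
                // Expected on a fresh client: CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE
                System.out.println(e.getMessage());
            }
            try (ResultSet rs = stmt.executeQuery("SELECT NEXT VALUE FOR my_schema.my_seq")) {
                rs.next(); // primes the cached sequence for this client
            }
            try (ResultSet rs = stmt.executeQuery("SELECT CURRENT VALUE FOR my_schema.my_seq")) {
                if (rs.next()) {
                    System.out.println(rs.getLong(1)); // now succeeds
                }
            }
        }
    }
}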

Aggregations

Sequence (org.apache.phoenix.schema.Sequence): 7
IOException (java.io.IOException): 5
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 5
Result (org.apache.hadoop.hbase.client.Result): 5
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 5
Append (org.apache.hadoop.hbase.client.Append): 4
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 4
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo): 4
SequenceKey (org.apache.phoenix.schema.SequenceKey): 4
SQLException (java.sql.SQLException): 3
EmptySequenceCacheException (org.apache.phoenix.schema.EmptySequenceCacheException): 3
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint): 2
PTinyint (org.apache.phoenix.schema.types.PTinyint): 2
PUnsignedTinyint (org.apache.phoenix.schema.types.PUnsignedTinyint): 2
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 1
Cell (org.apache.hadoop.hbase.Cell): 1
KeyValue (org.apache.hadoop.hbase.KeyValue): 1
Delete (org.apache.hadoop.hbase.client.Delete): 1
Get (org.apache.hadoop.hbase.client.Get): 1
Increment (org.apache.hadoop.hbase.client.Increment): 1