
Example 1 with EmptySequenceCacheException

Use of org.apache.phoenix.schema.EmptySequenceCacheException in project phoenix by apache.

From the class ConnectionQueryServicesImpl, method returnSequences. The method returns unused cached sequence values to the SYSTEM.SEQUENCE table in a single batch; EmptySequenceCacheException marks a sequence with nothing to return, which is simply skipped.

@SuppressWarnings("deprecation")
@Override
public void returnSequences(List<SequenceKey> keys, long timestamp, SQLException[] exceptions) throws SQLException {
    List<Sequence> sequences = Lists.newArrayListWithExpectedSize(keys.size());
    for (SequenceKey key : keys) {
        Sequence newSequences = new Sequence(key);
        Sequence sequence = sequenceMap.putIfAbsent(key, newSequences);
        if (sequence == null) {
            sequence = newSequences;
        }
        sequences.add(sequence);
    }
    try {
        for (Sequence sequence : sequences) {
            sequence.getLock().lock();
        }
        // Now that we have all the locks we need, attempt to return the unused sequence values
        List<Append> mutations = Lists.newArrayListWithExpectedSize(sequences.size());
        List<Sequence> toReturnList = Lists.newArrayListWithExpectedSize(sequences.size());
        int[] indexes = new int[sequences.size()];
        for (int i = 0; i < sequences.size(); i++) {
            Sequence sequence = sequences.get(i);
            try {
                Append append = sequence.newReturn(timestamp);
                // Record which caller slot this batched return belongs to, so a
                // per-sequence SQLException in the result loop below lands at the
                // right index (mirrors incrementSequenceValues; without this,
                // indexes[] stays all zeros and every error maps to slot 0).
                indexes[toReturnList.size()] = i;
                toReturnList.add(sequence);
                mutations.add(append);
            } catch (EmptySequenceCacheException ignore) {
                // Nothing to return, so ignore
            }
        }
        if (toReturnList.isEmpty()) {
            return;
        }
        HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        Object[] resultObjects = null;
        SQLException sqlE = null;
        try {
            resultObjects = hTable.batch(mutations);
        } catch (IOException e) {
            sqlE = ServerUtil.parseServerException(e);
        } catch (InterruptedException e) {
            // restore the interrupt status
            Thread.currentThread().interrupt();
            sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
        } finally {
            try {
                hTable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
        for (int i = 0; i < resultObjects.length; i++) {
            Sequence sequence = toReturnList.get(i);
            Result result = (Result) resultObjects[i];
            try {
                sequence.returnValue(result);
            } catch (SQLException e) {
                exceptions[indexes[i]] = e;
            }
        }
    } finally {
        for (Sequence sequence : sequences) {
            sequence.getLock().unlock();
        }
    }
}
Also used :
EmptySequenceCacheException (org.apache.phoenix.schema.EmptySequenceCacheException)
SQLException (java.sql.SQLException)
Sequence (org.apache.phoenix.schema.Sequence)
IOException (java.io.IOException)
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException)
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface)
PTinyint (org.apache.phoenix.schema.types.PTinyint)
PUnsignedTinyint (org.apache.phoenix.schema.types.PUnsignedTinyint)
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint)
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
Result (org.apache.hadoop.hbase.client.Result)
Append (org.apache.hadoop.hbase.client.Append)
SequenceKey (org.apache.phoenix.schema.SequenceKey)
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo)
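The method above follows a lock-all, batch, unlock-all shape: every per-sequence lock is acquired up front, the whole batch of Appends is built and sent while the locks are held, and the unlocks sit in a finally block so a failed batch cannot leak a lock. Below is a minimal self-contained sketch of that shape in plain Java; Entry, cached, and drainAll are invented names for illustration, not Phoenix APIs.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

public class LockAllThenBatch {

    static class Entry {
        final ReentrantLock lock = new ReentrantLock();
        // Stand-in for a sequence's locally cached, not-yet-returned values.
        int cached;
        Entry(int cached) { this.cached = cached; }
    }

    // Drains the cached value of every entry in one pass, holding all locks
    // for the duration, the way returnSequences holds all sequence locks
    // around the HBase batch.
    static List<Integer> drainAll(List<Entry> entries) {
        // Phase 1: take every lock before doing any work.
        for (Entry e : entries) {
            e.lock.lock();
        }
        try {
            // Phase 2: build (and here, also "send") the batch under the locks.
            List<Integer> drained = new ArrayList<>();
            for (Entry e : entries) {
                if (e.cached > 0) { // analogous to skipping EmptySequenceCacheException
                    drained.add(e.cached);
                    e.cached = 0;
                }
            }
            return drained;
        } finally {
            // Phase 3: always release, even if the batch step failed.
            for (Entry e : entries) {
                e.lock.unlock();
            }
        }
    }
}

Because ReentrantLock.lock() does not throw, acquiring the locks just before the try (as here) or just inside it (as returnSequences does) are equally safe: every lock that was taken is released by the finally block.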

Example 2 with EmptySequenceCacheException

Use of org.apache.phoenix.schema.EmptySequenceCacheException in project phoenix by apache.

From the class ConnectionQueryServicesImpl, method currentSequenceValue. Here the exception means the sequence's cache was never primed on this connection, and it is translated into a CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE SQLException for the caller.

/**
 * Gets the current sequence value.
 * @throws SQLException if the cached sequence cannot be found
 */
@Override
public long currentSequenceValue(SequenceKey sequenceKey, long timestamp) throws SQLException {
    Sequence sequence = sequenceMap.get(sequenceKey);
    if (sequence == null) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE).setSchemaName(sequenceKey.getSchemaName()).setTableName(sequenceKey.getSequenceName()).build().buildException();
    }
    sequence.getLock().lock();
    try {
        return sequence.currentValue(timestamp);
    } catch (EmptySequenceCacheException e) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE).setSchemaName(sequenceKey.getSchemaName()).setTableName(sequenceKey.getSequenceName()).build().buildException();
    } finally {
        sequence.getLock().unlock();
    }
}
Also used :
EmptySequenceCacheException (org.apache.phoenix.schema.EmptySequenceCacheException)
KeyValueBuilder (org.apache.phoenix.hbase.index.util.KeyValueBuilder)
NonTxIndexBuilder (org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder)
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder)
PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder)
Sequence (org.apache.phoenix.schema.Sequence)
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo)
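At the SQL level this mirrors the rule that CURRENT VALUE FOR a sequence can only be evaluated after NEXT VALUE FOR has primed the sequence cache on that connection. The JDBC sketch below is a hedged illustration, not part of the example above: the connection URL, schema, and sequence name are placeholders, and it assumes the Phoenix JDBC driver is on the classpath.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class CurrentValueDemo {
    public static void main(String[] args) throws SQLException {
        // Placeholder URL; point this at your own HBase quorum.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE SEQUENCE IF NOT EXISTS my_schema.my_seq");
            // Evaluating CURRENT VALUE FOR at this point would surface
            // CANNOT_CALL_CURRENT_BEFORE_NEXT_VALUE, per the catch block above.
            try (ResultSet rs = stmt.executeQuery("SELECT NEXT VALUE FOR my_schema.my_seq")) {
                rs.next(); // primes the connection's sequence cache
            }
            // Now the cache is primed and CURRENT VALUE succeeds.
            try (ResultSet rs = stmt.executeQuery("SELECT CURRENT VALUE FOR my_schema.my_seq")) {
                if (rs.next()) {
                    System.out.println("current value = " + rs.getLong(1));
                }
            }
        }
    }
}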

Example 3 with EmptySequenceCacheException

Use of org.apache.phoenix.schema.EmptySequenceCacheException in project phoenix by apache.

From the class ConnectionQueryServicesImpl, method incrementSequenceValues. Here the exception is the signal that a sequence's local cache is exhausted, so a server-side Increment is batched to refill it; values served from the cache never touch the server.

@SuppressWarnings("deprecation")
private void incrementSequenceValues(List<SequenceAllocation> sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions, Sequence.ValueOp op) throws SQLException {
    List<Sequence> sequences = Lists.newArrayListWithExpectedSize(sequenceAllocations.size());
    for (SequenceAllocation sequenceAllocation : sequenceAllocations) {
        SequenceKey key = sequenceAllocation.getSequenceKey();
        Sequence newSequences = new Sequence(key);
        Sequence sequence = sequenceMap.putIfAbsent(key, newSequences);
        if (sequence == null) {
            sequence = newSequences;
        }
        sequences.add(sequence);
    }
    try {
        for (Sequence sequence : sequences) {
            sequence.getLock().lock();
        }
        // Now that we have all the locks we need, increment the sequences
        List<Increment> incrementBatch = Lists.newArrayListWithExpectedSize(sequences.size());
        List<Sequence> toIncrementList = Lists.newArrayListWithExpectedSize(sequences.size());
        int[] indexes = new int[sequences.size()];
        for (int i = 0; i < sequences.size(); i++) {
            Sequence sequence = sequences.get(i);
            try {
                values[i] = sequence.incrementValue(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
            } catch (EmptySequenceCacheException e) {
                indexes[toIncrementList.size()] = i;
                toIncrementList.add(sequence);
                Increment inc = sequence.newIncrement(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
                incrementBatch.add(inc);
            } catch (SQLException e) {
                exceptions[i] = e;
            }
        }
        if (toIncrementList.isEmpty()) {
            return;
        }
        HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        Object[] resultObjects = null;
        SQLException sqlE = null;
        try {
            resultObjects = hTable.batch(incrementBatch);
        } catch (IOException e) {
            sqlE = ServerUtil.parseServerException(e);
        } catch (InterruptedException e) {
            // restore the interrupt status
            Thread.currentThread().interrupt();
            sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
        } finally {
            try {
                hTable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
        for (int i = 0; i < resultObjects.length; i++) {
            Sequence sequence = toIncrementList.get(i);
            Result result = (Result) resultObjects[i];
            try {
                long numToAllocate = Bytes.toLong(incrementBatch.get(i).getAttribute(SequenceRegionObserver.NUM_TO_ALLOCATE));
                values[indexes[i]] = sequence.incrementValue(result, op, numToAllocate);
            } catch (SQLException e) {
                exceptions[indexes[i]] = e;
            }
        }
    } finally {
        for (Sequence sequence : sequences) {
            sequence.getLock().unlock();
        }
    }
}
Also used :
EmptySequenceCacheException (org.apache.phoenix.schema.EmptySequenceCacheException)
SQLException (java.sql.SQLException)
Sequence (org.apache.phoenix.schema.Sequence)
SequenceAllocation (org.apache.phoenix.schema.SequenceAllocation)
IOException (java.io.IOException)
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException)
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface)
PTinyint (org.apache.phoenix.schema.types.PTinyint)
PUnsignedTinyint (org.apache.phoenix.schema.types.PUnsignedTinyint)
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint)
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
Result (org.apache.hadoop.hbase.client.Result)
SequenceKey (org.apache.phoenix.schema.SequenceKey)
Increment (org.apache.hadoop.hbase.client.Increment)
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo)
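The indexes[] bookkeeping is the subtle part of this method: positions in incrementBatch do not line up with positions in the caller's values and exceptions arrays, because sequences served from the local cache never enter the batch. The sketch below isolates that remapping in plain Java; resolveAll and serveBatch are invented names, and "odd numbers miss the cache" merely stands in for EmptySequenceCacheException.

import java.util.ArrayList;
import java.util.List;

public class IndexRemapDemo {

    // Fills results[i] for every item: even items are "cache hits" served
    // locally, odd items are "cache misses" collected into one batch, and
    // indexes[] maps each batch position back to its caller slot, exactly
    // as incrementSequenceValues does.
    static void resolveAll(int[] items, long[] results) {
        List<Integer> batch = new ArrayList<>();
        int[] indexes = new int[items.length];
        for (int i = 0; i < items.length; i++) {
            if (items[i] % 2 == 0) {
                results[i] = items[i];      // served locally, never batched
            } else {
                indexes[batch.size()] = i;  // remember the caller slot first...
                batch.add(items[i]);        // ...then grow the batch
            }
        }
        long[] batchResults = serveBatch(batch); // one round trip for all misses
        for (int j = 0; j < batchResults.length; j++) {
            results[indexes[j]] = batchResults[j]; // batch position j -> caller slot
        }
    }

    // Stand-in for the HBase batch round trip.
    static long[] serveBatch(List<Integer> batch) {
        long[] out = new long[batch.size()];
        for (int j = 0; j < batch.size(); j++) {
            out[j] = batch.get(j) * 100L;
        }
        return out;
    }
}

The invariant to keep is that indexes[j] is written at the moment the j-th batch entry is added (indexes[toIncrementList.size()] = i in the method above); writing it anywhere else breaks the mapping as soon as one sequence is served from the cache.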

Aggregations

SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo): 3
EmptySequenceCacheException (org.apache.phoenix.schema.EmptySequenceCacheException): 3
Sequence (org.apache.phoenix.schema.Sequence): 3
IOException (java.io.IOException): 2
SQLException (java.sql.SQLException): 2
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 2
Result (org.apache.hadoop.hbase.client.Result): 2
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint): 2
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 2
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 2
SequenceKey (org.apache.phoenix.schema.SequenceKey): 2
PTinyint (org.apache.phoenix.schema.types.PTinyint): 2
PUnsignedTinyint (org.apache.phoenix.schema.types.PUnsignedTinyint): 2
ThreadFactoryBuilder (com.google.common.util.concurrent.ThreadFactoryBuilder): 1
Append (org.apache.hadoop.hbase.client.Append): 1
Increment (org.apache.hadoop.hbase.client.Increment): 1
NonTxIndexBuilder (org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder): 1
KeyValueBuilder (org.apache.phoenix.hbase.index.util.KeyValueBuilder): 1
PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder): 1
SequenceAllocation (org.apache.phoenix.schema.SequenceAllocation): 1