Use of org.apache.phoenix.schema.SequenceKey in project phoenix by apache.
The class ConnectionlessQueryServicesImpl, method incrementSequences.
@Override
public void incrementSequences(List<SequenceAllocation> sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions) throws SQLException {
    int i = 0;
    for (SequenceAllocation sequenceAllocation : sequenceAllocations) {
        SequenceKey key = sequenceAllocation.getSequenceKey();
        SequenceInfo info = sequenceMap.get(key);
        if (info == null) {
            exceptions[i] = new SequenceNotFoundException(key.getSchemaName(), key.getSequenceName());
        } else {
            boolean increaseSeq = info.incrementBy > 0;
            if (info.limitReached) {
                SQLExceptionCode code = increaseSeq ? SQLExceptionCode.SEQUENCE_VAL_REACHED_MAX_VALUE : SQLExceptionCode.SEQUENCE_VAL_REACHED_MIN_VALUE;
                exceptions[i] = new SQLExceptionInfo.Builder(code).build().buildException();
            } else {
                values[i] = info.sequenceValue;
                info.sequenceValue += info.incrementBy * info.cacheSize;
                info.limitReached = SequenceUtil.checkIfLimitReached(info);
                if (info.limitReached && info.cycle) {
                    info.sequenceValue = increaseSeq ? info.minValue : info.maxValue;
                    info.limitReached = false;
                }
            }
        }
        i++;
    }
    i = 0;
    for (SQLException e : exceptions) {
        if (e != null) {
            sequenceMap.remove(sequenceAllocations.get(i).getSequenceKey());
        }
        i++;
    }
}
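The in-memory bookkeeping above is the entire sequence protocol for the connectionless case: hand out the current value, advance by incrementBy * cacheSize, and either latch limitReached or wrap to the opposite bound when CYCLE is set. Here is a minimal, self-contained sketch of that arithmetic with illustrative names only: Phoenix's SequenceInfo fields become plain fields, and SequenceUtil.checkIfLimitReached becomes an inline bounds check that ignores long overflow.

// Minimal sketch of the increment-with-cycle logic above (illustrative
// names; simplified bounds check that does not guard against overflow).
final class InMemorySequence {
    long value; // next value to hand out
    final long incrementBy;
    final long cacheSize;
    final long minValue;
    final long maxValue;
    final boolean cycle;
    boolean limitReached;

    InMemorySequence(long startWith, long incrementBy, long cacheSize,
                     long minValue, long maxValue, boolean cycle) {
        this.value = startWith;
        this.incrementBy = incrementBy;
        this.cacheSize = cacheSize;
        this.minValue = minValue;
        this.maxValue = maxValue;
        this.cycle = cycle;
    }

    long next() {
        if (limitReached) {
            throw new IllegalStateException("sequence limit reached");
        }
        long current = value;
        value += incrementBy * cacheSize;
        // Did the new value move past the max (ascending) or min (descending)?
        limitReached = incrementBy > 0 ? value > maxValue : value < minValue;
        if (limitReached && cycle) {
            // Wrap to the opposite bound instead of failing on the next call.
            value = incrementBy > 0 ? minValue : maxValue;
            limitReached = false;
        }
        return current;
    }
}

Calling next() repeatedly on new InMemorySequence(1, 1, 1, 1, 3, true) yields 1, 2, 3, 1, 2, ..., exercising the cycle branch above.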
Use of org.apache.phoenix.schema.SequenceKey in project phoenix by apache.
The class ConnectionQueryServicesImpl, method createSequence.
@Override
public long createSequence(String tenantId, String schemaName, String sequenceName, long startWith, long incrementBy, long cacheSize, long minValue, long maxValue, boolean cycle, long timestamp) throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets);
    Sequence newSequences = new Sequence(sequenceKey);
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    try {
        sequence.getLock().lock();
        // Now that we have the lock we need, create the sequence
        Append append = sequence.createSequence(startWith, incrementBy, cacheSize, timestamp, minValue, maxValue, cycle);
        HTableInterface htable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        htable.setAutoFlush(true);
        try {
            Result result = htable.append(append);
            return sequence.createSequence(result, minValue, maxValue, cycle);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } finally {
            Closeables.closeQuietly(htable);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
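The putIfAbsent dance at the top of this method guarantees that concurrent callers converge on a single Sequence object, and therefore on a single lock, for a given key. A sketch of the idiom with plain JDK types follows; Registry, Entry, and withLock are illustrative names, not Phoenix API.

// Sketch of the putIfAbsent-then-lock idiom above, using JDK types only.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantLock;

final class Registry<K> {
    static final class Entry {
        final ReentrantLock lock = new ReentrantLock();
        // per-key state would live here
    }

    private final ConcurrentMap<K, Entry> map = new ConcurrentHashMap<>();

    void withLock(K key, Runnable action) {
        Entry fresh = new Entry();
        // Exactly one thread wins the race to install the entry; losers
        // get the winner's instance back and lock on the same object.
        Entry entry = map.putIfAbsent(key, fresh);
        if (entry == null) {
            entry = fresh;
        }
        entry.lock.lock();
        try {
            action.run();
        } finally {
            entry.lock.unlock();
        }
    }
}

If the map already holds an entry, putIfAbsent returns it and the freshly built instance is simply discarded; either way, every caller for that key serializes on the same lock.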
Use of org.apache.phoenix.schema.SequenceKey in project phoenix by apache.
The class ConnectionlessQueryServicesImpl, method createSequence.
@Override
public long createSequence(String tenantId, String schemaName, String sequenceName, long startWith, long incrementBy, long cacheSize, long minValue, long maxValue, boolean cycle, long timestamp) throws SQLException {
    SequenceKey key = new SequenceKey(tenantId, schemaName, sequenceName, getSequenceSaltBuckets());
    if (sequenceMap.get(key) != null) {
        throw new SequenceAlreadyExistsException(schemaName, sequenceName);
    }
    sequenceMap.put(key, new SequenceInfo(startWith, incrementBy, minValue, maxValue, 1L, cycle));
    return timestamp;
}
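Note that the get-then-put pair above is not atomic, which is presumably acceptable for the in-memory, connectionless mode this class backs. If sequenceMap were a ConcurrentMap, a race-free sketch of the same existence check would look like this (a hypothetical variant, not the Phoenix source):

// Hypothetical variant, assuming sequenceMap is a
// ConcurrentMap<SequenceKey, SequenceInfo>.
SequenceInfo newInfo = new SequenceInfo(startWith, incrementBy, minValue, maxValue, 1L, cycle);
if (sequenceMap.putIfAbsent(key, newInfo) != null) {
    throw new SequenceAlreadyExistsException(schemaName, sequenceName);
}
return timestamp;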
Use of org.apache.phoenix.schema.SequenceKey in project phoenix by apache.
The class ConnectionQueryServicesImpl, method returnSequences.
@SuppressWarnings("deprecation")
@Override
public void returnSequences(List<SequenceKey> keys, long timestamp, SQLException[] exceptions) throws SQLException {
    List<Sequence> sequences = Lists.newArrayListWithExpectedSize(keys.size());
    for (SequenceKey key : keys) {
        Sequence newSequences = new Sequence(key);
        Sequence sequence = sequenceMap.putIfAbsent(key, newSequences);
        if (sequence == null) {
            sequence = newSequences;
        }
        sequences.add(sequence);
    }
    try {
        for (Sequence sequence : sequences) {
            sequence.getLock().lock();
        }
        // Now that we have all the locks we need, attempt to return the unused sequence values
        List<Append> mutations = Lists.newArrayListWithExpectedSize(sequences.size());
        List<Sequence> toReturnList = Lists.newArrayListWithExpectedSize(sequences.size());
        int[] indexes = new int[sequences.size()];
        for (int i = 0; i < sequences.size(); i++) {
            Sequence sequence = sequences.get(i);
            try {
                Append append = sequence.newReturn(timestamp);
                // Remember the caller's slot so errors can be mapped back below;
                // as originally written, indexes was never populated and every
                // failure would have landed in exceptions[0].
                indexes[toReturnList.size()] = i;
                toReturnList.add(sequence);
                mutations.add(append);
            } catch (EmptySequenceCacheException ignore) {
                // Nothing to return, so ignore
            }
        }
        if (toReturnList.isEmpty()) {
            return;
        }
        HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        Object[] resultObjects = null;
        SQLException sqlE = null;
        try {
            resultObjects = hTable.batch(mutations);
        } catch (IOException e) {
            sqlE = ServerUtil.parseServerException(e);
        } catch (InterruptedException e) {
            // restore the interrupt status
            Thread.currentThread().interrupt();
            sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
        } finally {
            try {
                hTable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
        for (int i = 0; i < resultObjects.length; i++) {
            Sequence sequence = toReturnList.get(i);
            Result result = (Result) resultObjects[i];
            try {
                sequence.returnValue(result);
            } catch (SQLException e) {
                exceptions[indexes[i]] = e;
            }
        }
    } finally {
        for (Sequence sequence : sequences) {
            sequence.getLock().unlock();
        }
    }
}
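The interplay between toReturnList, mutations, and indexes is a compaction pattern that also appears in incrementSequenceValues below: only some inputs need a server round trip, so they are packed into a dense batch, and indexes[] records where each dense slot came from so results and errors can be written back to the caller's arrays. A self-contained sketch of the pattern with toy data, where the "server" simply doubles negative numbers:

// Self-contained sketch of the indexes[] pattern above: items needing
// remote work are compacted into a dense batch; indexes[] maps each
// dense result back to its original slot.
import java.util.ArrayList;
import java.util.List;

public class IndexMapDemo {
    public static void main(String[] args) {
        int[] inputs = { 5, -3, 7, -8, 2 };
        long[] outputs = new long[inputs.length];
        List<Integer> batch = new ArrayList<>();
        int[] indexes = new int[inputs.length];
        for (int i = 0; i < inputs.length; i++) {
            if (inputs[i] < 0) {            // needs the "server"
                indexes[batch.size()] = i;  // dense slot -> original slot
                batch.add(inputs[i]);
            } else {
                outputs[i] = inputs[i];     // served from the local "cache"
            }
        }
        // One "server" result per dense slot, like hTable.batch(mutations).
        for (int j = 0; j < batch.size(); j++) {
            outputs[indexes[j]] = 2L * batch.get(j);
        }
        System.out.println(java.util.Arrays.toString(outputs)); // [5, -6, 7, -16, 2]
    }
}

This keeps the method at one hTable.batch round trip per invocation, no matter how many sequences miss their local cache.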
Use of org.apache.phoenix.schema.SequenceKey in project phoenix by apache.
The class ConnectionQueryServicesImpl, method incrementSequenceValues.
@SuppressWarnings("deprecation")
private void incrementSequenceValues(List<SequenceAllocation> sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions, Sequence.ValueOp op) throws SQLException {
    List<Sequence> sequences = Lists.newArrayListWithExpectedSize(sequenceAllocations.size());
    for (SequenceAllocation sequenceAllocation : sequenceAllocations) {
        SequenceKey key = sequenceAllocation.getSequenceKey();
        Sequence newSequences = new Sequence(key);
        Sequence sequence = sequenceMap.putIfAbsent(key, newSequences);
        if (sequence == null) {
            sequence = newSequences;
        }
        sequences.add(sequence);
    }
    try {
        for (Sequence sequence : sequences) {
            sequence.getLock().lock();
        }
        // Now that we have all the locks we need, increment the sequences
        List<Increment> incrementBatch = Lists.newArrayListWithExpectedSize(sequences.size());
        List<Sequence> toIncrementList = Lists.newArrayListWithExpectedSize(sequences.size());
        int[] indexes = new int[sequences.size()];
        for (int i = 0; i < sequences.size(); i++) {
            Sequence sequence = sequences.get(i);
            try {
                values[i] = sequence.incrementValue(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
            } catch (EmptySequenceCacheException e) {
                indexes[toIncrementList.size()] = i;
                toIncrementList.add(sequence);
                Increment inc = sequence.newIncrement(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
                incrementBatch.add(inc);
            } catch (SQLException e) {
                exceptions[i] = e;
            }
        }
        if (toIncrementList.isEmpty()) {
            return;
        }
        HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        Object[] resultObjects = null;
        SQLException sqlE = null;
        try {
            resultObjects = hTable.batch(incrementBatch);
        } catch (IOException e) {
            sqlE = ServerUtil.parseServerException(e);
        } catch (InterruptedException e) {
            // restore the interrupt status
            Thread.currentThread().interrupt();
            sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
        } finally {
            try {
                hTable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
        for (int i = 0; i < resultObjects.length; i++) {
            Sequence sequence = toIncrementList.get(i);
            Result result = (Result) resultObjects[i];
            try {
                long numToAllocate = Bytes.toLong(incrementBatch.get(i).getAttribute(SequenceRegionObserver.NUM_TO_ALLOCATE));
                values[indexes[i]] = sequence.incrementValue(result, op, numToAllocate);
            } catch (SQLException e) {
                exceptions[indexes[i]] = e;
            }
        }
    } finally {
        for (Sequence sequence : sequences) {
            sequence.getLock().unlock();
        }
    }
}
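The manual close-and-chain block in the finally clause above predates try-with-resources and the modern HBase Table API (hence the @SuppressWarnings("deprecation") on the method). As a rough sketch of what the equivalent could look like today, assuming an org.apache.hadoop.hbase.client.Connection named connection and the same Phoenix helpers, a close() failure becomes a suppressed exception rather than a value chained with setNextException:

// A sketch only: 'connection' is an assumed HBase client Connection, and
// "SYSTEM.SEQUENCE" stands in for the physical name that the original code
// resolves via SchemaUtil.getPhysicalName. try-with-resources attaches a
// close() failure to the primary exception as a suppressed exception.
try (Table table = connection.getTable(TableName.valueOf("SYSTEM.SEQUENCE"))) {
    Object[] results = new Object[incrementBatch.size()];
    table.batch(incrementBatch, results);
    // use results as above
} catch (IOException e) {
    throw ServerUtil.parseServerException(e);
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
}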