Usage of com.scalar.db.io.Value in the scalardb project by scalar-labs:
class PrepareMutationComposer, method add(Delete, TransactionResult).
/**
 * Composes a prepare-phase {@code Put} for the record targeted by the given {@code Delete},
 * marking it with state {@code DELETED}, this transaction's id, and the prepared-at timestamp
 * taken from {@code current}. The composed mutation is appended to {@code mutations}.
 *
 * @param base the Delete operation being prepared
 * @param result the latest read result for the record, or {@code null} if the record does not
 *     exist yet (the Put is then guarded by {@code PutIfNotExists})
 */
private void add(Delete base, TransactionResult result) {
  Put put =
      new Put(base.getPartitionKey(), getClusteringKey(base, result).orElse(null))
          .forNamespace(base.forNamespace().get())
          .forTable(base.forTable().get())
          .withConsistency(Consistency.LINEARIZABLE);

  List<Value<?>> values = new ArrayList<>();
  values.add(Attribute.toIdValue(id));
  values.add(Attribute.toStateValue(TransactionState.DELETED));
  values.add(Attribute.toPreparedAtValue(current));

  if (result != null) {
    values.addAll(createBeforeValues(base, result));
    int version = result.getVersion();
    values.add(Attribute.toVersionValue(version + 1));

    // check if the record is not interrupted by other conflicting transactions
    put.withCondition(
        new PutIf(
            new ConditionalExpression(VERSION, toVersionValue(version), Operator.EQ),
            new ConditionalExpression(ID, toIdValue(result.getId()), Operator.EQ)));
  } else {
    // Consistency: route the initial version through the shared values list like every other
    // attribute (and like add(Get)), instead of a separate withValue() call. The final Put
    // contents are identical either way.
    values.add(Attribute.toVersionValue(1));
    // check if the record is not created by other conflicting transactions
    put.withCondition(new PutIfNotExists());
  }
  put.withValues(values);
  mutations.add(put);
}
Usage of com.scalar.db.io.Value in the scalardb project by scalar-labs:
class PrepareMutationComposer, method createBeforeValues.
/**
 * Builds the before-image copies of the given result's column values, excluding columns for
 * which {@code isBeforeRequired} returns false (per the partition/clustering key check). Each
 * selected value is copied with {@code Attribute.BEFORE_PREFIX} prepended to its name.
 *
 * @param base the mutation being prepared (source of the partition/clustering key)
 * @param result the read result whose values are turned into before-image values
 * @return the list of before-image values
 */
private List<Value<?>> createBeforeValues(Mutation base, TransactionResult result) {
  Key partitionKey = base.getPartitionKey();
  Optional<Key> clusteringKey = getClusteringKey(base, result);

  List<Value<?>> beforeValues = new ArrayList<>();
  for (Value<?> value : result.getValues().values()) {
    if (isBeforeRequired(value, partitionKey, clusteringKey)) {
      beforeValues.add(value.copyWith(Attribute.BEFORE_PREFIX + value.getName()));
    }
  }
  return beforeValues;
}
Usage of com.scalar.db.io.Value in the scalardb project by scalar-labs:
class RollbackMutationComposer, method composePut.
/**
 * Composes a rollback {@code Put} that restores the record's before-image columns, i.e. copies
 * each {@code before_}-prefixed column in {@code result} back to its unprefixed name. The Put is
 * guarded by a condition on this transaction's id and the result's state, so it only applies if
 * the record is still in the expected PREPARED/DELETED state.
 *
 * @param base the operation being rolled back (source of the keys and table location)
 * @param result the current stored result containing the before-image columns
 * @return the rollback Put
 * @throws ExecutionException if the transactional table metadata cannot be retrieved
 */
private Put composePut(Operation base, TransactionResult result) throws ExecutionException {
  assert result.getState().equals(TransactionState.PREPARED)
      || result.getState().equals(TransactionState.DELETED);

  TransactionalTableMetadata metadata = tableMetadataManager.getTransactionalTableMetadata(base);
  LinkedHashSet<String> beforeImageColumnNames = metadata.getBeforeImageColumnNames();

  // Strip the BEFORE_PREFIX from every before-image column to rebuild the original values.
  Map<String, Value<?>> restored = new HashMap<>();
  for (Map.Entry<String, Value<?>> entry : result.getValues().entrySet()) {
    String column = entry.getKey();
    if (!beforeImageColumnNames.contains(column)) {
      continue;
    }
    String restoredName = column.substring(Attribute.BEFORE_PREFIX.length());
    restored.put(restoredName, entry.getValue().copyWith(restoredName));
  }

  return new Put(getPartitionKey(base, result), getClusteringKey(base, result).orElse(null))
      .forNamespace(base.forNamespace().get())
      .forTable(base.forTable().get())
      .withCondition(
          new PutIf(
              new ConditionalExpression(ID, toIdValue(id), Operator.EQ),
              new ConditionalExpression(STATE, toStateValue(result.getState()), Operator.EQ)))
      .withConsistency(Consistency.LINEARIZABLE)
      .withValues(restored.values());
}
Usage of com.scalar.db.io.Value in the scalardb project by scalar-labs:
class PrepareMutationComposer, method add(Get).
// This prepares a record that was read but didn't exist to avoid anti-dependency for the record.
// This is only called when Serializable with Extra-write strategy is enabled.
// This prepares a record that was read but didn't exist to avoid anti-dependency for the record.
// This is only called when Serializable with Extra-write strategy is enabled.
private void add(Get base) {
  // Build the transactional attributes first: id, DELETED state, prepared-at, initial version.
  List<Value<?>> attributes = new ArrayList<>();
  attributes.add(Attribute.toIdValue(id));
  attributes.add(Attribute.toStateValue(TransactionState.DELETED));
  attributes.add(Attribute.toPreparedAtValue(current));
  attributes.add(Attribute.toVersionValue(1));

  Put put =
      new Put(base.getPartitionKey(), getClusteringKey(base, null).orElse(null))
          .forNamespace(base.forNamespace().get())
          .forTable(base.forTable().get())
          .withConsistency(Consistency.LINEARIZABLE);
  // check if the record is not interrupted by other conflicting transactions
  put.withCondition(new PutIfNotExists());
  put.withValues(attributes);

  mutations.add(put);
}
Usage of com.scalar.db.io.Value in the scalardb project by scalar-labs:
class DistributedStorageSingleClusteringKeyScanIntegrationTestBase, method prepareRecords.
/**
 * Seeds the storage with {@code CLUSTERING_KEY_NUM} records (all boolean values when the
 * clustering key type is BOOLEAN; otherwise the min/max values plus unique random values) and
 * returns the clustering key values sorted according to {@code clusteringOrder}.
 *
 * @param clusteringKeyType the data type of the clustering key
 * @param clusteringOrder the expected scan order (ASC or DESC) used to sort the returned values
 * @return the prepared clustering key values, sorted per {@code clusteringOrder}
 * @throws ExecutionException if writing the records to storage fails
 */
private List<Value<?>> prepareRecords(DataType clusteringKeyType, Order clusteringOrder)
    throws ExecutionException {
  RANDOM.setSeed(seed);

  List<Value<?>> clusteringKeyValues = new ArrayList<>();
  List<Put> puts = new ArrayList<>();

  if (clusteringKeyType == DataType.BOOLEAN) {
    // Only two possible values for a boolean clustering key.
    for (Value<?> value : TestUtils.booleanValues(CLUSTERING_KEY)) {
      clusteringKeyValues.add(value);
      puts.add(preparePut(clusteringKeyType, clusteringOrder, value));
    }
  } else {
    Set<Value<?>> seen = new HashSet<>();

    // Always cover the boundaries: min and max clustering key values.
    for (Value<?> boundary :
        Arrays.asList(
            getMinValue(CLUSTERING_KEY, clusteringKeyType),
            getMaxValue(CLUSTERING_KEY, clusteringKeyType))) {
      seen.add(boundary);
      clusteringKeyValues.add(boundary);
      puts.add(preparePut(clusteringKeyType, clusteringOrder, boundary));
    }

    // Fill the remaining slots with random values, re-rolling on duplicates.
    for (int i = 0; i < CLUSTERING_KEY_NUM - 2; i++) {
      Value<?> candidate = getRandomValue(RANDOM, CLUSTERING_KEY, clusteringKeyType);
      while (seen.contains(candidate)) {
        candidate = getRandomValue(RANDOM, CLUSTERING_KEY, clusteringKeyType);
      }
      seen.add(candidate);
      clusteringKeyValues.add(candidate);
      puts.add(preparePut(clusteringKeyType, clusteringOrder, candidate));
    }
  }

  try {
    // Write the puts in batches of at most 20 mutations, preserving order.
    for (int start = 0; start < puts.size(); start += 20) {
      storage.mutate(puts.subList(start, Math.min(start + 20, puts.size())));
    }
  } catch (ExecutionException e) {
    throw new ExecutionException("put data to database failed", e);
  }

  clusteringKeyValues.sort(
      clusteringOrder == Order.ASC
          ? com.google.common.collect.Ordering.natural()
          : com.google.common.collect.Ordering.natural().reverse());
  return clusteringKeyValues;
}
Aggregations (end of usage listing).