Use of com.scalar.db.exception.storage.ExecutionException in project scalardb by scalar-labs.
Example from the class StorageMultipleClusteringKeyScanIntegrationTestBase, method prepareRecords.
/**
 * Prepares and persists test records covering combinations of first/second clustering key values,
 * then returns the expected clustering keys sorted with the given orders.
 *
 * <p>The first clustering key values always include the type's min and max; the rest are drawn
 * randomly without duplicates. BOOLEAN is special-cased since it has only two possible values.
 *
 * @param firstClusteringKeyType data type of the first clustering key
 * @param firstClusteringOrder clustering order of the first clustering key
 * @param secondClusteringKeyType data type of the second clustering key
 * @param secondClusteringOrder clustering order of the second clustering key
 * @return the inserted clustering keys, sorted by the expected scan order
 * @throws ExecutionException if writing the records to the storage fails
 */
private List<ClusteringKey> prepareRecords(
    DataType firstClusteringKeyType,
    Order firstClusteringOrder,
    DataType secondClusteringKeyType,
    Order secondClusteringOrder)
    throws ExecutionException {
  // Fixed seed so every run (and the expected-result list) is reproducible
  RANDOM.setSeed(seed);
  List<ClusteringKey> ret = new ArrayList<>();
  List<Put> puts = new ArrayList<>();
  if (firstClusteringKeyType == DataType.BOOLEAN) {
    // BOOLEAN has only two values, so enumerate both rather than sampling randomly
    TestUtils.booleanValues(FIRST_CLUSTERING_KEY)
        .forEach(
            firstClusteringKeyValue ->
                prepareRecords(
                    firstClusteringKeyType,
                    firstClusteringOrder,
                    firstClusteringKeyValue,
                    secondClusteringKeyType,
                    secondClusteringOrder,
                    puts,
                    ret));
  } else {
    Set<Value<?>> valueSet = new HashSet<>();
    // Add min and max first clustering key values
    Arrays.asList(
            getMinValue(FIRST_CLUSTERING_KEY, firstClusteringKeyType),
            getMaxValue(FIRST_CLUSTERING_KEY, firstClusteringKeyType))
        .forEach(
            firstClusteringKeyValue -> {
              valueSet.add(firstClusteringKeyValue);
              prepareRecords(
                  firstClusteringKeyType,
                  firstClusteringOrder,
                  firstClusteringKeyValue,
                  secondClusteringKeyType,
                  secondClusteringOrder,
                  puts,
                  ret);
            });
    IntStream.range(0, FIRST_CLUSTERING_KEY_NUM - 2)
        .forEach(
            i -> {
              Value<?> firstClusteringKeyValue;
              // Redraw on duplicates; Set.add returns false when the value is
              // already present, so it serves as both the check and the insert.
              do {
                firstClusteringKeyValue = getFirstClusteringKeyValue(firstClusteringKeyType);
              } while (!valueSet.add(firstClusteringKeyValue));
              prepareRecords(
                  firstClusteringKeyType,
                  firstClusteringOrder,
                  firstClusteringKeyValue,
                  secondClusteringKeyType,
                  secondClusteringOrder,
                  puts,
                  ret);
            });
  }
  try {
    // Write in small batches to stay within the storage's mutate batch limits
    final int batchSize = 20;
    List<Put> buffer = new ArrayList<>(batchSize);
    for (Put put : puts) {
      buffer.add(put);
      if (buffer.size() == batchSize) {
        storage.mutate(buffer);
        buffer.clear();
      }
    }
    if (!buffer.isEmpty()) {
      storage.mutate(buffer);
    }
  } catch (ExecutionException e) {
    throw new ExecutionException("put data to database failed", e);
  }
  ret.sort(getClusteringKeyComparator(firstClusteringOrder, secondClusteringOrder));
  return ret;
}
Use of com.scalar.db.exception.storage.ExecutionException in project scalardb by scalar-labs.
Example from the class StorageSingleClusteringKeyScanIntegrationTestBase, method prepareRecords.
/**
 * Prepares and persists test records for every clustering key value used in the scan tests,
 * then returns those values sorted according to the given clustering order.
 *
 * <p>The values always include the type's min and max; the rest are drawn randomly without
 * duplicates. BOOLEAN is special-cased since it has only two possible values.
 *
 * @param clusteringKeyType data type of the clustering key
 * @param clusteringOrder clustering order of the clustering key
 * @return the inserted clustering key values, sorted by the expected scan order
 * @throws ExecutionException if writing the records to the storage fails
 */
private List<Value<?>> prepareRecords(DataType clusteringKeyType, Order clusteringOrder)
    throws ExecutionException {
  // Fixed seed so every run (and the expected-result list) is reproducible
  RANDOM.setSeed(seed);
  List<Value<?>> ret = new ArrayList<>();
  List<Put> puts = new ArrayList<>();
  if (clusteringKeyType == DataType.BOOLEAN) {
    // BOOLEAN has only two values, so enumerate both rather than sampling randomly
    TestUtils.booleanValues(CLUSTERING_KEY)
        .forEach(
            clusteringKeyValue -> {
              ret.add(clusteringKeyValue);
              puts.add(preparePut(clusteringKeyType, clusteringOrder, clusteringKeyValue));
            });
  } else {
    Set<Value<?>> valueSet = new HashSet<>();
    // Add min and max clustering key values
    Arrays.asList(
            getMinValue(CLUSTERING_KEY, clusteringKeyType),
            getMaxValue(CLUSTERING_KEY, clusteringKeyType))
        .forEach(
            clusteringKeyValue -> {
              valueSet.add(clusteringKeyValue);
              ret.add(clusteringKeyValue);
              puts.add(preparePut(clusteringKeyType, clusteringOrder, clusteringKeyValue));
            });
    IntStream.range(0, CLUSTERING_KEY_NUM - 2)
        .forEach(
            i -> {
              Value<?> clusteringKeyValue;
              // Redraw on duplicates; Set.add returns false when the value is
              // already present, so it serves as both the check and the insert.
              do {
                clusteringKeyValue = getRandomValue(RANDOM, CLUSTERING_KEY, clusteringKeyType);
              } while (!valueSet.add(clusteringKeyValue));
              ret.add(clusteringKeyValue);
              puts.add(preparePut(clusteringKeyType, clusteringOrder, clusteringKeyValue));
            });
  }
  try {
    // Write in small batches to stay within the storage's mutate batch limits
    final int batchSize = 20;
    List<Put> buffer = new ArrayList<>(batchSize);
    for (Put put : puts) {
      buffer.add(put);
      if (buffer.size() == batchSize) {
        storage.mutate(buffer);
        buffer.clear();
      }
    }
    if (!buffer.isEmpty()) {
      storage.mutate(buffer);
    }
  } catch (ExecutionException e) {
    throw new ExecutionException("put data to database failed", e);
  }
  // Natural order for ASC, reversed for DESC
  ret.sort(
      clusteringOrder == Order.ASC
          ? com.google.common.collect.Ordering.natural()
          : com.google.common.collect.Ordering.natural().reverse());
  return ret;
}
Use of com.scalar.db.exception.storage.ExecutionException in project scalardb by scalar-labs.
Example from the class CosmosAdmin, method addTableMetadata.
/**
 * Upserts the ScalarDB table metadata for the given table into the Cosmos DB metadata container,
 * creating the metadata database/container first if they do not exist yet.
 *
 * @param namespace the namespace of the table
 * @param table the table name
 * @param metadata the ScalarDB table metadata to store
 * @throws ExecutionException if any Cosmos DB operation fails
 */
private void addTableMetadata(String namespace, String table, TableMetadata metadata)
    throws ExecutionException {
  try {
    createMetadataDatabaseAndContainerIfNotExists();
    String fullTableName = getFullTableName(namespace, table);
    // Convert to the Cosmos-specific representation, keyed by the fully qualified table name
    CosmosTableMetadata cosmosTableMetadata = convertToCosmosTableMetadata(fullTableName, metadata);
    // Upsert so re-creating an existing table simply overwrites its metadata entry
    getMetadataContainer().upsertItem(cosmosTableMetadata);
  } catch (RuntimeException e) {
    throw new ExecutionException("adding the table metadata failed", e);
  }
}
Use of com.scalar.db.exception.storage.ExecutionException in project scalardb by scalar-labs.
Example from the class DynamoAdmin, method addTableMetadata.
/**
 * Writes the ScalarDB table metadata for the given table as a single item in the DynamoDB
 * metadata table, creating that table first if it does not exist yet.
 *
 * <p>The item stores the column name/type map, the partition key names, and (when present) the
 * clustering key names with their orders and the secondary index names.
 *
 * @param namespace the namespace of the table
 * @param table the table name
 * @param metadata the ScalarDB table metadata to store
 * @throws ExecutionException if putting the metadata item fails
 */
private void addTableMetadata(String namespace, String table, TableMetadata metadata)
    throws ExecutionException {
  createMetadataTableIfNotExists();

  String fullTableName = getFullTableName(namespace, table);
  Map<String, AttributeValue> itemValues = new HashMap<>();
  itemValues.put(METADATA_ATTR_TABLE, AttributeValue.builder().s(fullTableName).build());

  // Column name -> lower-cased data type name
  Map<String, AttributeValue> columns =
      metadata.getColumnNames().stream()
          .collect(
              Collectors.toMap(
                  columnName -> columnName,
                  columnName ->
                      AttributeValue.builder()
                          .s(metadata.getColumnDataType(columnName).name().toLowerCase())
                          .build()));
  itemValues.put(METADATA_ATTR_COLUMNS, AttributeValue.builder().m(columns).build());

  List<AttributeValue> partitionKeys =
      metadata.getPartitionKeyNames().stream()
          .map(keyName -> AttributeValue.builder().s(keyName).build())
          .collect(Collectors.toList());
  itemValues.put(METADATA_ATTR_PARTITION_KEY, AttributeValue.builder().l(partitionKeys).build());

  if (!metadata.getClusteringKeyNames().isEmpty()) {
    List<AttributeValue> clusteringKeys =
        metadata.getClusteringKeyNames().stream()
            .map(keyName -> AttributeValue.builder().s(keyName).build())
            .collect(Collectors.toList());
    itemValues.put(METADATA_ATTR_CLUSTERING_KEY, AttributeValue.builder().l(clusteringKeys).build());

    // Clustering key name -> order (ASC/DESC)
    Map<String, AttributeValue> clusteringOrders =
        metadata.getClusteringKeyNames().stream()
            .collect(
                Collectors.toMap(
                    keyName -> keyName,
                    keyName ->
                        AttributeValue.builder()
                            .s(metadata.getClusteringOrder(keyName).name())
                            .build()));
    itemValues.put(METADATA_ATTR_CLUSTERING_ORDERS, AttributeValue.builder().m(clusteringOrders).build());
  }

  if (!metadata.getSecondaryIndexNames().isEmpty()) {
    itemValues.put(
        METADATA_ATTR_SECONDARY_INDEX,
        AttributeValue.builder().ss(metadata.getSecondaryIndexNames()).build());
  }

  try {
    client.putItem(PutItemRequest.builder().tableName(getMetadataTable()).item(itemValues).build());
  } catch (Exception e) {
    throw new ExecutionException("adding the meta data for table " + fullTableName + " failed", e);
  }
}
Use of com.scalar.db.exception.storage.ExecutionException in project scalardb by scalar-labs.
Example from the class PutStatementHandler, method handle.
/**
 * Executes a {@link Put} operation against DynamoDB, translating SDK exceptions into the
 * storage-layer exception hierarchy.
 *
 * @param operation the operation to execute; must be a {@link Put}
 * @return an empty list (a put produces no result records)
 * @throws NoMutationException if the put's condition was not satisfied
 * @throws RetriableExecutionException if a transaction conflict occurred (safe to retry)
 * @throws ExecutionException for any other DynamoDB failure
 */
@Nonnull
@Override
public List<Map<String, AttributeValue>> handle(Operation operation) throws ExecutionException {
  checkArgument(operation, Put.class);
  Put putOperation = (Put) operation;
  TableMetadata metadata = metadataManager.getTableMetadata(operation);
  // Subtypes of DynamoDbException must be caught before the general case below.
  try {
    execute(putOperation, metadata);
  } catch (ConditionalCheckFailedException e) {
    // The put's condition did not hold, so no mutation took place
    throw new NoMutationException("no mutation was applied.", e);
  } catch (TransactionConflictException e) {
    // Conflicting concurrent transaction; the caller may retry
    throw new RetriableExecutionException(e.getMessage(), e);
  } catch (DynamoDbException e) {
    throw new ExecutionException(e.getMessage(), e);
  }
  return Collections.emptyList();
}
Aggregations