Example usage of com.scalar.db.exception.storage.ExecutionException in the scalardb project by scalar-labs.
The following snippet is from the class CosmosAdmin, method deleteTableMetadata.
/**
 * Deletes the metadata entry for the given table; when no metadata entries remain
 * afterwards, also drops the metadata container and the metadata database.
 *
 * @param namespace the namespace of the table whose metadata is deleted
 * @param table the name of the table whose metadata is deleted
 * @throws ExecutionException if any Cosmos DB operation fails
 */
private void deleteTableMetadata(String namespace, String table) throws ExecutionException {
String fullTableName = getFullTableName(namespace, table);
try {
// The metadata item is keyed (id and partition key) by the full table name
getMetadataContainer()
    .deleteItem(fullTableName, new PartitionKey(fullTableName), new CosmosItemRequestOptions());
// Probe for any remaining metadata entry (LIMIT 1 is enough to answer "is it empty?")
boolean anyMetadataLeft =
    getMetadataContainer()
        .queryItems(
            "SELECT 1 FROM " + METADATA_CONTAINER + " OFFSET 0 LIMIT 1",
            new CosmosQueryRequestOptions(),
            Object.class)
        .stream()
        .findFirst()
        .isPresent();
if (!anyMetadataLeft) {
  // Nothing stored anymore: remove the container and its database as well
  getMetadataContainer().delete();
  client.getDatabase(metadataDatabase).delete();
}
} catch (RuntimeException e) {
throw new ExecutionException("deleting the table metadata failed", e);
}
}
Example usage of com.scalar.db.exception.storage.ExecutionException in the scalardb project by scalar-labs.
The following snippet is from the class StorageSinglePartitionKeyIntegrationTestBase, method prepareRecords.
/**
 * Writes test records for every generated partition key value and returns those values.
 *
 * <p>For BOOLEAN keys all possible values are used; for other types the minimum and
 * maximum values plus {@code PARTITION_KEY_NUM - 2} distinct random values are used.
 * The random generator is re-seeded with {@code seed} so runs are reproducible.
 *
 * @param partitionKeyType the data type of the partition key
 * @return the partition key values that were written, in insertion order
 * @throws ExecutionException if writing the records to storage fails
 */
private List<Value<?>> prepareRecords(DataType partitionKeyType) throws ExecutionException {
RANDOM.setSeed(seed);
List<Value<?>> ret = new ArrayList<>();
List<Put> puts = new ArrayList<>();
if (partitionKeyType == DataType.BOOLEAN) {
  // Booleans only have two values, so enumerate them all
  for (Value<?> partitionKeyValue : TestUtils.booleanValues(PARTITION_KEY)) {
    ret.add(partitionKeyValue);
    puts.add(preparePut(partitionKeyType, partitionKeyValue));
  }
} else {
  Set<Value<?>> valueSet = new HashSet<>();
  // Always cover the boundary (min and max) partition key values first
  for (Value<?> partitionKeyValue :
      Arrays.asList(
          getMinValue(PARTITION_KEY, partitionKeyType),
          getMaxValue(PARTITION_KEY, partitionKeyType))) {
    valueSet.add(partitionKeyValue);
    ret.add(partitionKeyValue);
    puts.add(preparePut(partitionKeyType, partitionKeyValue));
  }
  // Fill the rest with random values, retrying until each one is unique
  for (int i = 0; i < PARTITION_KEY_NUM - 2; i++) {
    Value<?> partitionKeyValue;
    do {
      partitionKeyValue = getRandomValue(RANDOM, PARTITION_KEY, partitionKeyType);
    } while (valueSet.contains(partitionKeyValue));
    valueSet.add(partitionKeyValue);
    ret.add(partitionKeyValue);
    puts.add(preparePut(partitionKeyType, partitionKeyValue));
  }
}
try {
  for (Put put : puts) {
    storage.put(put);
  }
} catch (ExecutionException e) {
  throw new ExecutionException("put data to database failed", e);
}
return ret;
}
Example usage of com.scalar.db.exception.storage.ExecutionException in the scalardb project by scalar-labs.
The following snippet is from the class StorageMultiplePartitionKeyIntegrationTestBase, method prepareRecords.
/**
 * Writes test records for every generated first-partition-key value and returns the
 * resulting composite partition keys.
 *
 * <p>For BOOLEAN first keys all possible values are used; for other types the minimum and
 * maximum values plus {@code FIRST_PARTITION_KEY_NUM - 2} distinct random values are used.
 * Second-key expansion and record accumulation are delegated to the overloaded
 * {@code prepareRecords} helper. The random generator is re-seeded with {@code seed}.
 *
 * @param firstPartitionKeyType the data type of the first partition key
 * @param secondPartitionKeyType the data type of the second partition key
 * @return the composite partition keys that were written, in insertion order
 * @throws ExecutionException if writing the records to storage fails
 */
private List<PartitionKey> prepareRecords(DataType firstPartitionKeyType, DataType secondPartitionKeyType) throws ExecutionException {
RANDOM.setSeed(seed);
List<Put> puts = new ArrayList<>();
List<PartitionKey> ret = new ArrayList<>();
if (firstPartitionKeyType == DataType.BOOLEAN) {
  // Booleans only have two values, so enumerate them all
  for (Value<?> firstPartitionKeyValue : TestUtils.booleanValues(FIRST_PARTITION_KEY)) {
    prepareRecords(firstPartitionKeyType, firstPartitionKeyValue, secondPartitionKeyType, puts, ret);
  }
} else {
  Set<Value<?>> valueSet = new HashSet<>();
  // Always cover the boundary (min and max) first-partition-key values first
  for (Value<?> firstPartitionKeyValue :
      Arrays.asList(
          getMinValue(FIRST_PARTITION_KEY, firstPartitionKeyType),
          getMaxValue(FIRST_PARTITION_KEY, firstPartitionKeyType))) {
    valueSet.add(firstPartitionKeyValue);
    prepareRecords(firstPartitionKeyType, firstPartitionKeyValue, secondPartitionKeyType, puts, ret);
  }
  // Fill the rest with random values, retrying until each one is unique
  for (int i = 0; i < FIRST_PARTITION_KEY_NUM - 2; i++) {
    Value<?> firstPartitionKeyValue;
    do {
      firstPartitionKeyValue = getRandomValue(RANDOM, FIRST_PARTITION_KEY, firstPartitionKeyType);
    } while (valueSet.contains(firstPartitionKeyValue));
    valueSet.add(firstPartitionKeyValue);
    prepareRecords(firstPartitionKeyType, firstPartitionKeyValue, secondPartitionKeyType, puts, ret);
  }
}
try {
  for (Put put : puts) {
    storage.put(put);
  }
} catch (ExecutionException e) {
  throw new ExecutionException("put data to database failed", e);
}
return ret;
}
Example usage of com.scalar.db.exception.storage.ExecutionException in the scalardb project by scalar-labs.
The following snippet is from the class CassandraAdmin, method createSecondaryIndex.
@VisibleForTesting
/**
 * Creates a Cassandra secondary index on each of the given columns of the table.
 *
 * <p>Index names follow the pattern {@code <table>_<INDEX_NAME_PREFIX>_<column>}.
 *
 * @param fullKeyspace the fully qualified keyspace of the table
 * @param table the table to index
 * @param secondaryIndexNames the columns to create secondary indexes on
 * @throws ExecutionException if creating any of the indexes fails
 */
void createSecondaryIndex(String fullKeyspace, String table, Set<String> secondaryIndexNames) throws ExecutionException {
for (String column : secondaryIndexNames) {
  String indexName = String.format("%s_%s_%s", table, INDEX_NAME_PREFIX, column);
  SchemaStatement statement =
      SchemaBuilder.createIndex(indexName)
          .onTable(quoteIfNecessary(fullKeyspace), quoteIfNecessary(table))
          .andColumn(quoteIfNecessary(column));
  try {
    clusterManager.getSession().execute(statement.getQueryString());
  } catch (RuntimeException e) {
    throw new ExecutionException(
        String.format("creating the secondary index for %s.%s.%s failed", fullKeyspace, table, column),
        e);
  }
}
}
Example usage of com.scalar.db.exception.storage.ExecutionException in the scalardb project by scalar-labs.
The following snippet is from the class DynamoAdmin, method enableAutoScaling.
/**
 * Enables read/write auto scaling for a DynamoDB table and its global secondary indexes.
 *
 * <p>All scalable-target registrations are built first and issued together, followed by
 * all scaling-policy requests, preserving the per-resource request order.
 *
 * @param namespace the namespace of the table
 * @param table the table name
 * @param secondaryIndexes the secondary index names to enable scaling for
 * @param ru the provisioned request-unit capacity used as the scaling baseline
 * @throws ExecutionException if any auto-scaling request fails
 */
private void enableAutoScaling(String namespace, String table, Set<String> secondaryIndexes, long ru) throws ExecutionException {
List<RegisterScalableTargetRequest> targetRequests = new ArrayList<>();
List<PutScalingPolicyRequest> policyRequests = new ArrayList<>();
// Read and write scaling for the table itself
String tableResourceId = getTableResourceID(namespace, table);
for (String scalingType : TABLE_SCALING_TYPE_SET) {
  targetRequests.add(buildRegisterScalableTargetRequest(tableResourceId, scalingType, (int) ru));
  policyRequests.add(buildPutScalingPolicyRequest(tableResourceId, scalingType));
}
// Read and write scaling for each global (secondary) index
for (String secondaryIndex : secondaryIndexes) {
  String indexResourceId = getGlobalIndexResourceID(namespace, table, secondaryIndex);
  for (String scalingType : SECONDARY_INDEX_SCALING_TYPE_SET) {
    targetRequests.add(buildRegisterScalableTargetRequest(indexResourceId, scalingType, (int) ru));
    policyRequests.add(buildPutScalingPolicyRequest(indexResourceId, scalingType));
  }
}
// Register every scalable target before attaching any scaling policy
for (RegisterScalableTargetRequest request : targetRequests) {
  try {
    applicationAutoScalingClient.registerScalableTarget(request);
  } catch (Exception e) {
    throw new ExecutionException("Unable to register scalable target for " + request.resourceId(), e);
  }
}
for (PutScalingPolicyRequest request : policyRequests) {
  try {
    applicationAutoScalingClient.putScalingPolicy(request);
  } catch (Exception e) {
    throw new ExecutionException("Unable to put scaling policy request for " + request.resourceId(), e);
  }
}
}
Aggregations