Use of `com.scalar.db.exception.storage.ExecutionException` in the scalardb project by scalar-labs.
The class `DistributedStorageMultipleClusteringKeyScanIntegrationTestBase`, method `prepareRecords`.
/**
 * Prepares and persists test records covering combinations of the given first and second
 * clustering key types, and returns the clustering keys sorted by the given orders.
 *
 * <p>For BOOLEAN first clustering keys, both values are enumerated. Otherwise, the min and max
 * values plus {@code FIRST_CLUSTERING_KEY_NUM - 2} distinct random values are used.
 *
 * @param firstClusteringKeyType data type of the first clustering key
 * @param firstClusteringOrder clustering order of the first clustering key
 * @param secondClusteringKeyType data type of the second clustering key
 * @param secondClusteringOrder clustering order of the second clustering key
 * @return the prepared clustering keys, sorted according to the given orders
 * @throws ExecutionException if writing the records to the storage fails
 */
private List<ClusteringKey> prepareRecords(
    DataType firstClusteringKeyType,
    Order firstClusteringOrder,
    DataType secondClusteringKeyType,
    Order secondClusteringOrder)
    throws ExecutionException {
  // Maximum number of puts per mutate call, to keep each mutation reasonably sized.
  final int mutationBatchSize = 20;

  // Fixed seed so each test run generates the same record set.
  RANDOM.setSeed(seed);

  List<ClusteringKey> ret = new ArrayList<>();
  List<Put> puts = new ArrayList<>();
  if (firstClusteringKeyType == DataType.BOOLEAN) {
    // BOOLEAN has only two values, so enumerate them instead of sampling.
    TestUtils.booleanValues(FIRST_CLUSTERING_KEY)
        .forEach(
            firstClusteringKeyValue ->
                prepareRecords(
                    firstClusteringKeyType,
                    firstClusteringOrder,
                    firstClusteringKeyValue,
                    secondClusteringKeyType,
                    secondClusteringOrder,
                    puts,
                    ret));
  } else {
    Set<Value<?>> valueSet = new HashSet<>();

    // Add min and max first clustering key values
    Arrays.asList(
            getMinValue(FIRST_CLUSTERING_KEY, firstClusteringKeyType),
            getMaxValue(FIRST_CLUSTERING_KEY, firstClusteringKeyType))
        .forEach(
            firstClusteringKeyValue -> {
              valueSet.add(firstClusteringKeyValue);
              prepareRecords(
                  firstClusteringKeyType,
                  firstClusteringOrder,
                  firstClusteringKeyValue,
                  secondClusteringKeyType,
                  secondClusteringOrder,
                  puts,
                  ret);
            });

    IntStream.range(0, FIRST_CLUSTERING_KEY_NUM - 2)
        .forEach(
            i -> {
              // Resample until we draw a value not seen before; Set.add() returns
              // false on duplicates, which rejects them in a single call.
              Value<?> firstClusteringKeyValue;
              do {
                firstClusteringKeyValue = getFirstClusteringKeyValue(firstClusteringKeyType);
              } while (!valueSet.add(firstClusteringKeyValue));
              prepareRecords(
                  firstClusteringKeyType,
                  firstClusteringOrder,
                  firstClusteringKeyValue,
                  secondClusteringKeyType,
                  secondClusteringOrder,
                  puts,
                  ret);
            });
  }

  try {
    // Write the puts in fixed-size batches; subList views avoid copying into a buffer.
    for (int start = 0; start < puts.size(); start += mutationBatchSize) {
      storage.mutate(puts.subList(start, Math.min(start + mutationBatchSize, puts.size())));
    }
  } catch (ExecutionException e) {
    throw new ExecutionException("put data to database failed", e);
  }

  ret.sort(getClusteringKeyComparator(firstClusteringOrder, secondClusteringOrder));
  return ret;
}
Use of `com.scalar.db.exception.storage.ExecutionException` in the scalardb project by scalar-labs.
The class `DistributedStorageMultiplePartitionKeyIntegrationTestBase`, method `prepareRecords`.
/**
 * Prepares and persists test records covering combinations of the given first and second
 * partition key types, and returns the prepared partition keys.
 *
 * <p>For BOOLEAN first partition keys, both values are enumerated. Otherwise, the min and max
 * values plus {@code FIRST_PARTITION_KEY_NUM - 2} distinct random values are used.
 *
 * @param firstPartitionKeyType data type of the first partition key
 * @param secondPartitionKeyType data type of the second partition key
 * @return the prepared partition keys
 * @throws ExecutionException if writing the records to the storage fails
 */
private List<PartitionKey> prepareRecords(
    DataType firstPartitionKeyType, DataType secondPartitionKeyType) throws ExecutionException {
  // Fixed seed so each test run generates the same record set.
  RANDOM.setSeed(seed);

  List<Put> puts = new ArrayList<>();
  List<PartitionKey> ret = new ArrayList<>();
  if (firstPartitionKeyType == DataType.BOOLEAN) {
    // BOOLEAN has only two values, so enumerate them instead of sampling.
    TestUtils.booleanValues(FIRST_PARTITION_KEY)
        .forEach(
            firstPartitionKeyValue ->
                prepareRecords(
                    firstPartitionKeyType, firstPartitionKeyValue, secondPartitionKeyType, puts,
                    ret));
  } else {
    Set<Value<?>> valueSet = new HashSet<>();

    // Add min and max partition key values
    Arrays.asList(
            getMinValue(FIRST_PARTITION_KEY, firstPartitionKeyType),
            getMaxValue(FIRST_PARTITION_KEY, firstPartitionKeyType))
        .forEach(
            firstPartitionKeyValue -> {
              valueSet.add(firstPartitionKeyValue);
              prepareRecords(
                  firstPartitionKeyType, firstPartitionKeyValue, secondPartitionKeyType, puts,
                  ret);
            });

    IntStream.range(0, FIRST_PARTITION_KEY_NUM - 2)
        .forEach(
            i -> {
              // Resample until we draw a value not seen before; Set.add() returns
              // false on duplicates, which rejects them in a single call.
              Value<?> firstPartitionKeyValue;
              do {
                firstPartitionKeyValue =
                    getRandomValue(RANDOM, FIRST_PARTITION_KEY, firstPartitionKeyType);
              } while (!valueSet.add(firstPartitionKeyValue));
              prepareRecords(
                  firstPartitionKeyType, firstPartitionKeyValue, secondPartitionKeyType, puts,
                  ret);
            });
  }

  try {
    for (Put put : puts) {
      storage.put(put);
    }
  } catch (ExecutionException e) {
    throw new ExecutionException("put data to database failed", e);
  }
  return ret;
}
Use of `com.scalar.db.exception.storage.ExecutionException` in the scalardb project by scalar-labs.
The class `DistributedStorageSinglePartitionKeyIntegrationTestBase`, method `prepareRecords`.
/**
 * Prepares and persists test records for the given partition key type, and returns the partition
 * key values that were written.
 *
 * <p>For BOOLEAN partition keys, both values are enumerated. Otherwise, the min and max values
 * plus {@code PARTITION_KEY_NUM - 2} distinct random values are used.
 *
 * @param partitionKeyType data type of the partition key
 * @return the partition key values that were written
 * @throws ExecutionException if writing the records to the storage fails
 */
private List<Value<?>> prepareRecords(DataType partitionKeyType) throws ExecutionException {
  // Fixed seed so each test run generates the same record set.
  RANDOM.setSeed(seed);

  List<Value<?>> ret = new ArrayList<>();
  List<Put> puts = new ArrayList<>();
  if (partitionKeyType == DataType.BOOLEAN) {
    // BOOLEAN has only two values, so enumerate them instead of sampling.
    TestUtils.booleanValues(PARTITION_KEY)
        .forEach(
            partitionKeyValue -> {
              ret.add(partitionKeyValue);
              puts.add(preparePut(partitionKeyType, partitionKeyValue));
            });
  } else {
    Set<Value<?>> valueSet = new HashSet<>();

    // Add min and max partition key values
    Arrays.asList(
            getMinValue(PARTITION_KEY, partitionKeyType),
            getMaxValue(PARTITION_KEY, partitionKeyType))
        .forEach(
            partitionKeyValue -> {
              valueSet.add(partitionKeyValue);
              ret.add(partitionKeyValue);
              puts.add(preparePut(partitionKeyType, partitionKeyValue));
            });

    IntStream.range(0, PARTITION_KEY_NUM - 2)
        .forEach(
            i -> {
              // Resample until we draw a value not seen before; Set.add() returns
              // false on duplicates, which rejects them in a single call.
              Value<?> partitionKeyValue;
              do {
                partitionKeyValue = getRandomValue(RANDOM, PARTITION_KEY, partitionKeyType);
              } while (!valueSet.add(partitionKeyValue));
              ret.add(partitionKeyValue);
              puts.add(preparePut(partitionKeyType, partitionKeyValue));
            });
  }

  try {
    for (Put put : puts) {
      storage.put(put);
    }
  } catch (ExecutionException e) {
    throw new ExecutionException("put data to database failed", e);
  }
  return ret;
}
Use of `com.scalar.db.exception.storage.ExecutionException` in the scalardb project by scalar-labs.
The class `CommitHandlerTest`, method `commit_RetriableExecutionExceptionThrownInPrepareRecords_ShouldThrowCCException`.
@Test
public void commit_RetriableExecutionExceptionThrownInPrepareRecords_ShouldThrowCCException()
    throws ExecutionException, CoordinatorException {
  // Arrange: the storage layer fails with a retriable error during the prepare phase,
  // while the coordinator and rollback succeed.
  Snapshot snapshot = prepareSnapshotWithDifferentPartitionPut();
  ExecutionException retriableFailure = mock(RetriableExecutionException.class);
  doThrow(retriableFailure).when(storage).mutate(anyList());
  doNothing().when(coordinator).putState(any(Coordinator.State.class));
  doNothing().when(handler).rollbackRecords(any(Snapshot.class));

  // Act: committing should surface the storage failure as a commit conflict.
  assertThatThrownBy(() -> handler.commit(snapshot))
      .isInstanceOf(CommitConflictException.class)
      .hasCause(retriableFailure);

  // Assert: the transaction is aborted (never committed) and its records rolled back.
  verify(coordinator).putState(new Coordinator.State(ANY_ID, TransactionState.ABORTED));
  verify(coordinator, never()).putState(new Coordinator.State(ANY_ID, TransactionState.COMMITTED));
  verify(handler).rollbackRecords(snapshot);
}
Use of `com.scalar.db.exception.storage.ExecutionException` in the scalardb project by scalar-labs.
The class `CommitHandlerTest`, method `commit_ExceptionThrownInPrepareRecordsAndFailedInCoordinatorAbortThenAbortedReturnedInGetState_ShouldRollbackRecords`.
@Test
public void
    commit_ExceptionThrownInPrepareRecordsAndFailedInCoordinatorAbortThenAbortedReturnedInGetState_ShouldRollbackRecords()
        throws ExecutionException, CoordinatorException {
  // Arrange: the prepare phase fails at the storage layer, and writing the ABORTED state to
  // the coordinator also fails.
  Snapshot snapshot = prepareSnapshotWithDifferentPartitionPut();
  ExecutionException storageFailure = mock(ExecutionException.class);
  CoordinatorException coordinatorFailure = mock(CoordinatorException.class);
  doThrow(storageFailure).when(storage).mutate(anyList());
  doThrow(coordinatorFailure)
      .when(coordinator)
      .putState(new Coordinator.State(ANY_ID, TransactionState.ABORTED));
  // assume that it will call Coordinator.getState() if Coordinator.putState() failed
  doReturn(Optional.of(new Coordinator.State(ANY_ID, TransactionState.ABORTED)))
      .when(coordinator)
      .getState(ANY_ID);
  doNothing().when(handler).rollbackRecords(any(Snapshot.class));

  // Act: the commit should fail with a CommitException.
  assertThatThrownBy(() -> handler.commit(snapshot)).isInstanceOf(CommitException.class);

  // Assert: abort was attempted, the state was re-read after the failed put, the transaction
  // was never committed, and its records were rolled back.
  verify(coordinator).putState(new Coordinator.State(ANY_ID, TransactionState.ABORTED));
  verify(coordinator, never()).putState(new Coordinator.State(ANY_ID, TransactionState.COMMITTED));
  verify(coordinator).getState(ANY_ID);
  verify(handler).rollbackRecords(snapshot);
}
Aggregations