Use of io.pravega.test.common.AssertExtensions.assertFutureThrows in project pravega by pravega.
From the class StreamMetadataTasksTest, method truncateStreamTest:
@Test(timeout = 30000)
public void truncateStreamTest() throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
assertNotEquals(0, consumer.getCurrentSegments(SCOPE, "test", 0L).get().size());
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
List<Map.Entry<Double, Double>> newRanges = new ArrayList<>();
newRanges.add(new AbstractMap.SimpleEntry<>(0.5, 0.75));
newRanges.add(new AbstractMap.SimpleEntry<>(0.75, 1.0));
ScaleResponse scaleOpResult = streamMetadataTasks.manualScale(SCOPE, "test", Collections.singletonList(1L), newRanges, 30, 0L).get();
assertTrue(scaleOpResult.getStatus().equals(ScaleStreamStatus.STARTED));
ScaleOperationTask scaleTask = new ScaleOperationTask(streamMetadataTasks, streamStorePartialMock, executor);
assertTrue(Futures.await(scaleTask.execute((ScaleOpEvent) requestEventWriter.eventQueue.take())));
// start truncation
StreamTruncationRecord truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
assertFalse(truncProp.isUpdating());
// 1. happy path test
// truncation should succeed
Map<Long, Long> streamCut = new HashMap<>();
streamCut.put(0L, 1L);
streamCut.put(1L, 11L);
CompletableFuture<UpdateStreamStatus.Status> truncateFuture = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut, 0L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
assertEquals(UpdateStreamStatus.Status.SUCCESS, truncateFuture.join());
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
assertTrue(truncProp.getStreamCut().equals(streamCut));
// 2. change state to scaling
streamStorePartialMock.setState(SCOPE, "test", State.SCALING, null, executor).get();
// call truncate; the event is posted, but processing it should fail while the stream is in SCALING state
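// NameUtils.computeSegmentId combines a segment number and its creation epoch into a single long segment id; segments 2 and 3 were created in epoch 1 by the scale above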
long two = NameUtils.computeSegmentId(2, 1);
long three = NameUtils.computeSegmentId(3, 1);
Map<Long, Long> streamCut2 = new HashMap<>();
streamCut2.put(0L, 1L);
streamCut2.put(two, 1L);
streamCut2.put(three, 1L);
streamMetadataTasks.truncateStream(SCOPE, "test", streamCut2, 0L);
AtomicBoolean loop = new AtomicBoolean(false);
Futures.loop(() -> !loop.get(),
        () -> Futures.delayedFuture(() -> streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor), 1000, executor)
                .thenApply(x -> x.getObject().isUpdating())
                .thenAccept(loop::set),
        executor).join();
// event posted, first step performed. now pick the event for processing
TruncateStreamTask truncateStreamTask = new TruncateStreamTask(streamMetadataTasks, streamStorePartialMock, executor);
TruncateStreamEvent taken = (TruncateStreamEvent) requestEventWriter.eventQueue.take();
AssertExtensions.assertFutureThrows("", truncateStreamTask.execute(taken), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
// now with state = active, process the same event. it should succeed now.
assertTrue(Futures.await(truncateStreamTask.execute(taken)));
// 3. multiple back to back updates.
Map<Long, Long> streamCut3 = new HashMap<>();
streamCut3.put(0L, 12L);
streamCut3.put(two, 12L);
streamCut3.put(three, 12L);
CompletableFuture<UpdateStreamStatus.Status> truncateOp1 = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut3, 0L);
// ensure that the previous truncateStream has posted the event and set the truncation record to updating,
// only then issue the second truncateStream
AtomicBoolean loop2 = new AtomicBoolean(false);
Futures.loop(() -> !loop2.get(), () -> streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).thenApply(x -> x.getObject().isUpdating()).thenAccept(loop2::set), executor).join();
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
assertTrue(truncProp.getStreamCut().equals(streamCut3) && truncProp.isUpdating());
// post the second truncation request. This should fail right away because the previous one is still in progress.
Map<Long, Long> streamCut4 = new HashMap<>();
streamCut4.put(0L, 14L);
streamCut4.put(two, 14L);
streamCut4.put(three, 14L);
CompletableFuture<UpdateStreamStatus.Status> truncateOpFuture2 = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut4, 0L);
assertEquals(UpdateStreamStatus.Status.FAILURE, truncateOpFuture2.join());
// process event
assertTrue(Futures.await(processEvent(requestEventWriter)));
// verify that the first truncation request also completes with success.
assertEquals(UpdateStreamStatus.Status.SUCCESS, truncateOp1.join());
truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
assertTrue(truncProp.getStreamCut().equals(streamCut3) && !truncProp.isUpdating());
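// set the state to TRUNCATING with no new truncation pending and verify that a fresh event still completes and restores the state to ACTIVE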
streamStorePartialMock.setState(SCOPE, "test", State.TRUNCATING, null, executor).join();
TruncateStreamEvent event = new TruncateStreamEvent(SCOPE, "test", System.nanoTime());
assertTrue(Futures.await(truncateStreamTask.execute(event)));
// execute the event again. It should complete without doing anything.
truncateStreamTask.execute(event).join();
assertEquals(State.ACTIVE, streamStorePartialMock.getState(SCOPE, "test", true, null, executor).join());
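// stub the scope-sealed check, then retry createStream on the existing stream; it should report EXISTS_ACTIVE instead of creating a new stream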
doReturn(CompletableFuture.completedFuture(true)).when(streamStorePartialMock).isScopeSealed(anyString(), any(), any());
CompletableFuture<CreateStreamResponse> streamResponse = streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor);
assertEquals(CreateStreamResponse.CreateStatus.EXISTS_ACTIVE, streamResponse.get().getStatus());
}
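The processEvent(requestEventWriter) helper used throughout these tests belongs to StreamMetadataTasksTest but is not shown on this page. A plausible minimal sketch is given below, assuming the helper simply dequeues the next event from the WriterMock and routes it to a matching task; the routing logic and the task fields are illustrative assumptions, not the actual Pravega implementation.
private CompletableFuture<Void> processEvent(WriterMock requestEventWriter) throws InterruptedException {
    // take the next event that the metadata tasks posted to the mock request-event writer
    ControllerEvent event = requestEventWriter.getEventQueue().take();
    // hand it to a task that can process it (hypothetical routing; the real test delegates
    // to the controller's request-handling machinery)
    if (event instanceof TruncateStreamEvent) {
        return truncateStreamTask.execute((TruncateStreamEvent) event);
    } else if (event instanceof UpdateStreamEvent) {
        return updateStreamTask.execute((UpdateStreamEvent) event);
    }
    return Futures.failedFuture(new IllegalArgumentException("unexpected event type: " + event.getClass()));
}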
Use of io.pravega.test.common.AssertExtensions.assertFutureThrows in project pravega by pravega.
From the class StreamMetadataTasksTest, method updateStreamTest:
@Test(timeout = 30000)
public void updateStreamTest() throws Exception {
assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(5)).build();
StreamConfigurationRecord configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
assertFalse(configProp.isUpdating());
// 1. happy path test
// update should succeed
CompletableFuture<UpdateStreamStatus.Status> updateOperationFuture = streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration, 0L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
assertEquals(UpdateStreamStatus.Status.SUCCESS, updateOperationFuture.join());
configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
assertTrue(configProp.getStreamConfiguration().equals(streamConfiguration));
streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(6)).build();
// 2. change state to scaling
streamStorePartialMock.setState(SCOPE, stream1, State.SCALING, null, executor).get();
// call update; the event is posted, but processing it should fail while the stream is in SCALING state
streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration, 0L);
AtomicBoolean loop = new AtomicBoolean(false);
Futures.loop(() -> !loop.get(), () -> streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).thenApply(x -> x.getObject().isUpdating()).thenAccept(loop::set), executor).join();
// event posted, first step performed. now pick the event for processing
UpdateStreamTask updateStreamTask = new UpdateStreamTask(streamMetadataTasks, streamStorePartialMock, bucketStore, executor);
UpdateStreamEvent taken = (UpdateStreamEvent) requestEventWriter.eventQueue.take();
AssertExtensions.assertFutureThrows("", updateStreamTask.execute(taken), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
// now with state = active, process the same event. it should succeed now.
assertTrue(Futures.await(updateStreamTask.execute(taken)));
// 3. multiple back to back updates.
StreamConfiguration streamConfiguration1 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 1, 2)).build();
CompletableFuture<UpdateStreamStatus.Status> updateOperationFuture1 = streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration1, 0L);
// ensure that the previous updateStream has posted the event and set the configuration record to updating,
// only then issue the second updateStream
AtomicBoolean loop2 = new AtomicBoolean(false);
Futures.loop(() -> !loop2.get(), () -> streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).thenApply(x -> x.getObject().isUpdating()).thenAccept(loop2::set), executor).join();
configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
assertTrue(configProp.getStreamConfiguration().equals(streamConfiguration1) && configProp.isUpdating());
StreamConfiguration streamConfiguration2 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(7)).build();
// post the second update request. This should fail right away because the previous one is still in progress.
CompletableFuture<UpdateStreamStatus.Status> updateOperationFuture2 = streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration2, 0L);
assertEquals(UpdateStreamStatus.Status.FAILURE, updateOperationFuture2.join());
// process event
assertTrue(Futures.await(processEvent(requestEventWriter)));
// verify that the first update request also completes with success.
assertEquals(UpdateStreamStatus.Status.SUCCESS, updateOperationFuture1.join());
configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
assertTrue(configProp.getStreamConfiguration().equals(streamConfiguration1) && !configProp.isUpdating());
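// set the state to UPDATING with no new update pending and verify that a fresh event still completes and restores the state to ACTIVE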
streamStorePartialMock.setState(SCOPE, stream1, State.UPDATING, null, executor).join();
UpdateStreamEvent event = new UpdateStreamEvent(SCOPE, stream1, System.nanoTime());
assertTrue(Futures.await(updateStreamTask.execute(event)));
// execute the event again. It should complete without doing anything.
updateStreamTask.execute(event).join();
assertEquals(State.ACTIVE, streamStorePartialMock.getState(SCOPE, stream1, true, null, executor).join());
}
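Both tests wait for the store record to flip to updating by inlining Futures.loop over an AtomicBoolean. The same polling idiom can be captured in a small helper; the sketch below only reuses calls already shown above, while the method name waitUntilConfigurationUpdating is an illustrative assumption.
private CompletableFuture<Void> waitUntilConfigurationUpdating(String scope, String stream) {
    // poll the configuration record until isUpdating() becomes true
    AtomicBoolean updating = new AtomicBoolean(false);
    return Futures.loop(() -> !updating.get(),
            () -> streamStorePartialMock.getConfigurationRecord(scope, stream, null, executor)
                    .thenApply(record -> record.getObject().isUpdating())
                    .thenAccept(updating::set),
            executor);
}
The truncation test applies the same pattern to getTruncationRecord, optionally wrapped in Futures.delayedFuture to pause between polls.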
Use of io.pravega.test.common.AssertExtensions.assertFutureThrows in project pravega by pravega.
From the class StreamMetadataTasksTest, method sealStreamWithTxnTest:
@Test(timeout = 30000)
public void sealStreamWithTxnTest() throws Exception {
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
String streamWithTxn = "streamWithTxn";
// region seal a stream with transactions
long start = System.currentTimeMillis();
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(policy).build();
streamStorePartialMock.createStream(SCOPE, streamWithTxn, config, start, null, executor).get();
streamStorePartialMock.setState(SCOPE, streamWithTxn, State.ACTIVE, null, executor).get();
// create three txns: one left open, one to be moved to COMMITTING, one to ABORTING
VersionedTransactionData openTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();
VersionedTransactionData committingTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();
VersionedTransactionData abortingTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();
// set transaction to committing
streamStorePartialMock.sealTransaction(SCOPE, streamWithTxn, committingTxn.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
// set transaction to aborting
streamStorePartialMock.sealTransaction(SCOPE, streamWithTxn, abortingTxn.getId(), false, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
// Mock the getActiveTxns call so that the committing txn is reported back as OPEN.
Map<UUID, ActiveTxnRecord> activeTxns = streamStorePartialMock.getActiveTxns(SCOPE, streamWithTxn, null, executor).join();
Map<UUID, ActiveTxnRecord> retVal = activeTxns.entrySet().stream().map(tx -> {
    if (!tx.getValue().getTxnStatus().equals(TxnStatus.OPEN) && !tx.getValue().getTxnStatus().equals(TxnStatus.ABORTING)) {
        ActiveTxnRecord txRecord = tx.getValue();
        return new AbstractMap.SimpleEntry<>(tx.getKey(),
                new ActiveTxnRecord(txRecord.getTxCreationTimestamp(), txRecord.getLeaseExpiryTime(),
                        txRecord.getMaxExecutionExpiryTime(), TxnStatus.OPEN));
    } else {
        return tx;
    }
}).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
doReturn(CompletableFuture.completedFuture(retVal)).when(streamStorePartialMock).getActiveTxns(eq(SCOPE), eq(streamWithTxn), any(), any());
List<AbortEvent> abortListBefore = abortWriter.getEventList();
streamMetadataTasks.sealStream(SCOPE, streamWithTxn, 0L);
AssertExtensions.assertFutureThrows("seal stream did not fail processing with correct exception", processEvent(requestEventWriter), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
requestEventWriter.eventQueue.take();
reset(streamStorePartialMock);
// verify that the txn status is set to aborting
VersionedTransactionData txnData = streamStorePartialMock.getTransactionData(SCOPE, streamWithTxn, openTxn.getId(), null, executor).join();
assertEquals(TxnStatus.ABORTING, txnData.getStatus());
assertEquals(1, requestEventWriter.getEventQueue().size());
// verify that events are posted for the abort txn.
List<AbortEvent> abortListAfter = abortWriter.getEventList();
assertEquals(abortListBefore.size() + 2, abortListAfter.size());
assertTrue(abortListAfter.stream().anyMatch(x -> x.getTxid().equals(openTxn.getId())));
assertTrue(abortListAfter.stream().anyMatch(x -> x.getTxid().equals(abortingTxn.getId())));
txnData = streamStorePartialMock.getTransactionData(SCOPE, streamWithTxn, committingTxn.getId(), null, executor).join();
assertEquals(TxnStatus.COMMITTING, txnData.getStatus());
// Mock the getActiveTxns call to return a single random, non-existent transaction id so that a DataNotFound
// exception is simulated when its abort is attempted.
retVal = new HashMap<>();
retVal.put(UUID.randomUUID(), new ActiveTxnRecord(1L, 1L, 1L, TxnStatus.OPEN));
doReturn(CompletableFuture.completedFuture(retVal)).when(streamStorePartialMock).getActiveTxns(eq(SCOPE), eq(streamWithTxn), any(), any());
AssertExtensions.assertFutureThrows("seal stream did not fail processing with correct exception", processEvent(requestEventWriter), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
reset(streamStorePartialMock);
// Now complete all existing transactions and verify that seal completes
streamStorePartialMock.abortTransaction(SCOPE, streamWithTxn, openTxn.getId(), null, executor).join();
streamStorePartialMock.abortTransaction(SCOPE, streamWithTxn, abortingTxn.getId(), null, executor).join();
((AbstractStreamMetadataStore) streamStorePartialMock).commitTransaction(SCOPE, streamWithTxn, committingTxn.getId(), null, executor).join();
activeTxns = streamStorePartialMock.getActiveTxns(SCOPE, streamWithTxn, null, executor).join();
assertTrue(activeTxns.isEmpty());
assertTrue(Futures.await(processEvent(requestEventWriter)));
// endregion
}
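For reference, every assertFutureThrows call in these tests follows the same shape: a message, the future under test, and a predicate applied to the failure cause after Exceptions.unwrap. A minimal, self-contained sketch of that pattern is shown below; the IllegalStateException is only a stand-in for the store exceptions asserted above.
// a future that has already failed, standing in for a rejected metadata operation
CompletableFuture<Void> failing = new CompletableFuture<>();
failing.completeExceptionally(new IllegalStateException("operation not allowed in current state"));
// assertFutureThrows waits for the future and asserts that it completes exceptionally
// with an exception satisfying the given predicate
AssertExtensions.assertFutureThrows("expected the future to fail",
        failing,
        e -> Exceptions.unwrap(e) instanceof IllegalStateException);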