Use of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask in project pravega by pravega.
From class RequestHandlersTest, method testScaleIgnoreFairness.
@Test
public void testScaleIgnoreFairness() {
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            System.currentTimeMillis(), 0L).join();
    // 1. set segment helper mock to throw exception
    doAnswer(x -> Futures.failedFuture(new RuntimeException()))
            .when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 2. start scale --> this should fail with a retryable exception while talking to segment store!
    ScaleOpEvent scaleEvent = new ScaleOpEvent(fairness, fairness, Collections.singletonList(0L),
            Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0)),
            false, System.currentTimeMillis(), 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(scaleEvent, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    // verify that scale was started
    assertEquals(State.SCALING, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 3. set waiting processor to "random name"
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();
    // 4. reset segment helper to return success
    doAnswer(x -> CompletableFuture.completedFuture(true))
            .when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 5. process again. it should succeed while ignoring waiting processor
    streamRequestHandler.process(scaleEvent, () -> false).join();
    EpochRecord activeEpoch = streamStore.getActiveEpoch(fairness, fairness, null, true, executor).join();
    assertEquals(1, activeEpoch.getEpoch());
    assertEquals(State.ACTIVE, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 6. run a new scale. it should fail because of waiting processor.
    ScaleOpEvent scaleEvent2 = new ScaleOpEvent(fairness, fairness,
            Collections.singletonList(NameUtils.computeSegmentId(1, 1)),
            Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0)),
            false, System.currentTimeMillis(), 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(scaleEvent2, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
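The StreamRequestHandler wiring above, including the DeleteScopeTask argument, is repeated verbatim in the next three tests. A small factory method along the following lines could keep each test focused on the behaviour under test; it is a hypothetical refactoring, not part of RequestHandlersTest, and it assumes the same instance fields (streamMetadataTasks, streamStore, bucketStore, kvtStore, kvtTasks, streamTransactionMetadataTasks, executor) used by the tests on this page.

// Hypothetical helper: builds the StreamRequestHandler exactly as the tests on
// this page do, including the DeleteScopeTask, so each test needs only one call.
private StreamRequestHandler newStreamRequestHandler() {
    return new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
}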
Use of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask in project pravega by pravega.
From class RequestHandlersTest, method testTruncateIgnoreFairness.
@Test
public void testTruncateIgnoreFairness() {
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            System.currentTimeMillis(), 0L).join();
    // 1. set segment helper mock to throw exception
    doAnswer(x -> Futures.failedFuture(new RuntimeException()))
            .when(segmentHelper).truncateSegment(anyString(), anyString(), anyLong(), anyLong(), anyString(), anyLong());
    // 2. start process --> this should fail with a retryable exception while talking to segment store!
    streamStore.startTruncation(fairness, fairness, Collections.singletonMap(0L, 0L), null, executor).join();
    streamStore.setState(fairness, fairness, State.TRUNCATING, null, executor).join();
    assertEquals(State.TRUNCATING, streamStore.getState(fairness, fairness, true, null, executor).join());
    TruncateStreamEvent event = new TruncateStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce()).truncateSegment(anyString(), anyString(), anyLong(), anyLong(), anyString(), anyLong());
    // 3. set waiting processor to "random name"
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();
    // 4. reset segment helper to return success
    doAnswer(x -> CompletableFuture.completedFuture(null))
            .when(segmentHelper).truncateSegment(anyString(), anyString(), anyLong(), anyLong(), anyString(), anyLong());
    // 5. process again. it should succeed while ignoring waiting processor
    streamRequestHandler.process(event, () -> false).join();
    assertEquals(State.ACTIVE, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 6. run a new update. it should fail because of waiting processor.
    TruncateStreamEvent event2 = new TruncateStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event2, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
Use of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask in project pravega by pravega.
From class RequestHandlersTest, method testSealIgnoreFairness.
@Test
public void testSealIgnoreFairness() {
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            System.currentTimeMillis(), 0L).join();
    // 1. set segment helper mock to throw exception
    doAnswer(x -> Futures.failedFuture(new RuntimeException()))
            .when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 2. start process --> this should fail with a retryable exception while talking to segment store!
    streamStore.setState(fairness, fairness, State.SEALING, null, executor).join();
    assertEquals(State.SEALING, streamStore.getState(fairness, fairness, true, null, executor).join());
    SealStreamEvent event = new SealStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce()).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 3. set waiting processor to "random name"
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();
    // 4. reset segment helper to return success
    doAnswer(x -> CompletableFuture.completedFuture(null))
            .when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 5. process again. it should succeed while ignoring waiting processor
    streamRequestHandler.process(event, () -> false).join();
    assertEquals(State.SEALED, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 6. run a new update. it should fail because of waiting processor.
    SealStreamEvent event2 = new SealStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event2, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
Use of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask in project pravega by pravega.
From class RequestHandlersTest, method testUpdateIgnoreFairness.
@Test
public void testUpdateIgnoreFairness() {
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            System.currentTimeMillis(), 0L).join();
    // 1. set segment helper mock to throw exception
    doAnswer(x -> Futures.failedFuture(new RuntimeException()))
            .when(segmentHelper).updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());
    // 2. start process --> this should fail with a retryable exception while talking to segment store!
    streamStore.startUpdateConfiguration(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), null, executor).join();
    streamStore.setState(fairness, fairness, State.UPDATING, null, executor).join();
    assertEquals(State.UPDATING, streamStore.getState(fairness, fairness, true, null, executor).join());
    UpdateStreamEvent event = new UpdateStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce()).updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());
    // 3. set waiting processor to "random name"
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();
    // 4. reset segment helper to return success
    doAnswer(x -> CompletableFuture.completedFuture(null))
            .when(segmentHelper).updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());
    // 5. process again. it should succeed while ignoring waiting processor
    streamRequestHandler.process(event, () -> false).join();
    assertEquals(State.ACTIVE, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 6. run a new update. it should fail because of waiting processor and our state does not allow us to ignore waiting processor
    UpdateStreamEvent event2 = new UpdateStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event2, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
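Steps 1 and 4 of the four tests above all follow the same Mockito pattern: first stub the segment-store call to fail with a RuntimeException, then later re-stub it to succeed. A minimal sketch of how those two answers could be factored out is shown below; the helper names are hypothetical and not part of the original class, and the sketch assumes org.mockito.stubbing.Answer plus the same io.pravega.common.concurrent.Futures utility already used in the tests.

// Hypothetical helpers condensing the stubbing used in steps 1 and 4 above.
private static <T> Answer<CompletableFuture<T>> failingCall() {
    // Mirrors: doAnswer(x -> Futures.failedFuture(new RuntimeException()))
    return invocation -> Futures.failedFuture(new RuntimeException());
}

private static <T> Answer<CompletableFuture<T>> successfulCall(T result) {
    // Mirrors: doAnswer(x -> CompletableFuture.completedFuture(result))
    return invocation -> CompletableFuture.completedFuture(result);
}

With these, step 1 of testSealIgnoreFairness would read doAnswer(failingCall()).when(segmentHelper).sealSegment(...) and step 4 would read doAnswer(successfulCall(null)).when(segmentHelper).sealSegment(...); the scale test would pass true instead of null, as in the original code.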
Use of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteScopeTask in project pravega by pravega.
From class RequestHandlersTest, method scopeDeleteTest.
@Test
public void scopeDeleteTest() {
    final String testScope = "testScope";
    final String testStream = "testStream";
    final String testRG = "_RGTestRG";
    final String testKVT = "testKVT";
    StreamMetadataStore streamStoreSpied = spy(getStore());
    KVTableMetadataStore kvtStoreSpied = spy(getKvtStore());
    StreamMetadataTasks streamMetadataTasks1 = mock(StreamMetadataTasks.class);
    TableMetadataTasks kvtTasksMocked = mock(TableMetadataTasks.class);
    streamStoreSpied.createScope(testScope, null, executor).join();
    OperationContext ctx = new OperationContext() {
        @Override
        public long getOperationStartTime() {
            return 0;
        }

        @Override
        public long getRequestId() {
            return 0;
        }
    };
    UUID scopeId = streamStoreSpied.getScopeId(testScope, ctx, executor).join();
    doAnswer(x -> {
        CompletableFuture<UUID> cf = new CompletableFuture<>();
        cf.complete(scopeId);
        return cf;
    }).when(streamStoreSpied).getScopeId(eq(testScope), eq(ctx), eq(executor));
    doAnswer(invocation -> {
        CompletableFuture<Boolean> cf = new CompletableFuture<>();
        cf.complete(true);
        return cf;
    }).when(streamStoreSpied).isScopeSealed(eq(testScope), any(), any());
    createStreamInStore(testStream, testScope);
    createStreamInStore(testRG, testScope);
    assertTrue(streamStore.checkStreamExists(testScope, testStream, ctx, executor).join());
    doAnswer(invocation -> {
        CompletableFuture<Controller.UpdateStreamStatus.Status> future = new CompletableFuture<>();
        future.complete(Controller.UpdateStreamStatus.Status.SUCCESS);
        return future;
    }).when(streamMetadataTasks1).sealStream(anyString(), anyString(), anyLong());
    doAnswer(invocation -> {
        CompletableFuture<Controller.DeleteStreamStatus.Status> future = new CompletableFuture<>();
        future.complete(Controller.DeleteStreamStatus.Status.SUCCESS);
        return future;
    }).when(streamMetadataTasks1).deleteStream(anyString(), anyString(), anyLong());
    // Create Reader Group
    ReaderGroupConfig rgConfig = ReaderGroupConfig.builder()
            .stream(NameUtils.getScopedStreamName(testScope, testStream))
            .build();
    final ReaderGroupConfig config = ReaderGroupConfig.cloneConfig(rgConfig, UUID.randomUUID(), 123L);
    Controller.ReaderGroupConfiguration expectedConfig = ModelHelper.decode(testScope, testRG, config);
    doAnswer(invocationOnMock -> {
        CompletableFuture<Controller.CreateReaderGroupResponse.Status> createRG = new CompletableFuture<>();
        createRG.complete(Controller.CreateReaderGroupResponse.Status.SUCCESS);
        return createRG;
    }).when(streamMetadataTasks1).createReaderGroup(anyString(), any(), any(), anyLong(), anyLong());
    doAnswer(invocation -> CompletableFuture.completedFuture(
            Controller.ReaderGroupConfigResponse.newBuilder()
                    .setStatus(Controller.ReaderGroupConfigResponse.Status.SUCCESS)
                    .setConfig(expectedConfig)
                    .build()))
            .when(streamMetadataTasks1).getReaderGroupConfig(eq(testScope), anyString(), anyLong());
    doAnswer(invocationOnMock -> {
        CompletableFuture<Controller.DeleteReaderGroupStatus.Status> future = new CompletableFuture<>();
        future.complete(Controller.DeleteReaderGroupStatus.Status.SUCCESS);
        return future;
    }).when(streamMetadataTasks1).deleteReaderGroup(anyString(), anyString(), anyString(), anyLong());
    // Create KVT
    KeyValueTableConfiguration kvtConfig = KeyValueTableConfiguration.builder()
            .partitionCount(1).primaryKeyLength(1).secondaryKeyLength(1).build();
    doAnswer(invocationOnMock -> {
        CompletableFuture<Controller.CreateKeyValueTableStatus.Status> fut = new CompletableFuture<>();
        fut.complete(Controller.CreateKeyValueTableStatus.Status.SUCCESS);
        return fut;
    }).when(kvtTasksMocked).createKeyValueTable(anyString(), anyString(), any(), anyLong(), anyLong());
    List<String> tableList = new ArrayList<>();
    tableList.add(testKVT);
    Pair<List<String>, String> listOfKVTables = new ImmutablePair<>(tableList, "");
    doAnswer(invocationOnMock -> CompletableFuture.completedFuture(listOfKVTables))
            .doAnswer(invocationOnMock -> CompletableFuture.completedFuture(
                    new ImmutablePair<>(Collections.emptyList(), invocationOnMock.getArgument(0))))
            .when(kvtStoreSpied).listKeyValueTables(anyString(), any(), anyInt(), any(), any());
    doAnswer(invocationOnMock -> {
        CompletableFuture<Controller.DeleteKVTableStatus.Status> future = new CompletableFuture<>();
        future.complete(Controller.DeleteKVTableStatus.Status.SUCCESS);
        return future;
    }).when(kvtTasksMocked).deleteKeyValueTable(anyString(), anyString(), anyLong());
    Controller.CreateKeyValueTableStatus.Status status = kvtTasksMocked.createKeyValueTable(
            testScope, testKVT, kvtConfig, System.currentTimeMillis(), 123L).join();
    assertEquals(status, Controller.CreateKeyValueTableStatus.Status.SUCCESS);
    DeleteScopeTask requestHandler = new DeleteScopeTask(streamMetadataTasks1, streamStoreSpied, kvtStoreSpied, kvtTasksMocked, executor);
    DeleteScopeEvent event = new DeleteScopeEvent(testScope, 123L, scopeId);
    CompletableFuture<Void> future = requestHandler.execute(event);
    future.join();
}
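The test only asserts that execute(event) completes. If you also want visibility into what DeleteScopeTask did with the mocked task layers, a follow-up of roughly this shape could be appended after future.join(); these are hypothetical assertions, assuming the task routes stream, reader-group and KVTable cleanup through the streamMetadataTasks1 and kvtTasksMocked stubs wired above, and they reuse the Mockito matchers already imported by the test class.

// Hypothetical follow-up checks (not part of the original test).
verify(streamMetadataTasks1, atLeastOnce()).sealStream(eq(testScope), anyString(), anyLong());
verify(streamMetadataTasks1, atLeastOnce()).deleteStream(eq(testScope), anyString(), anyLong());
verify(streamMetadataTasks1, atLeastOnce()).deleteReaderGroup(eq(testScope), anyString(), anyString(), anyLong());
verify(kvtTasksMocked, atLeastOnce()).deleteKeyValueTable(eq(testScope), anyString(), anyLong());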