Use of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask in project pravega by pravega.
The class RequestHandlersTest, method testTruncateIgnoreFairness.
@Test
public void testTruncateIgnoreFairness() {
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            System.currentTimeMillis(), 0L).join();
    // 1. set segment helper mock to throw exception
    doAnswer(x -> Futures.failedFuture(new RuntimeException())).when(segmentHelper)
            .truncateSegment(anyString(), anyString(), anyLong(), anyLong(), anyString(), anyLong());
    // 2. start process --> this should fail with a retryable exception while talking to segment store!
    streamStore.startTruncation(fairness, fairness, Collections.singletonMap(0L, 0L), null, executor).join();
    streamStore.setState(fairness, fairness, State.TRUNCATING, null, executor).join();
    assertEquals(State.TRUNCATING, streamStore.getState(fairness, fairness, true, null, executor).join());
    TruncateStreamEvent event = new TruncateStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce())
            .truncateSegment(anyString(), anyString(), anyLong(), anyLong(), anyString(), anyLong());
    // 3. set waiting processor to "random name"
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();
    // 4. reset segment helper to return success
    doAnswer(x -> CompletableFuture.completedFuture(null)).when(segmentHelper)
            .truncateSegment(anyString(), anyString(), anyLong(), anyLong(), anyString(), anyLong());
    // 5. process again. it should succeed while ignoring waiting processor
    streamRequestHandler.process(event, () -> false).join();
    assertEquals(State.ACTIVE, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 6. run a new update. it should fail because of waiting processor.
    TruncateStreamEvent event2 = new TruncateStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event2, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
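This test and the two sibling tests that follow share one stubbing pattern: the segmentHelper mock is first made to fail so that the request handler surfaces a retryable error, then re-stubbed to succeed so the retried event can complete. The following is a minimal, self-contained sketch of that pattern; the SegmentClient interface, the class name, and the "injected failure" message are hypothetical stand-ins for the mocked SegmentHelper call used above, while the Mockito and Futures calls are the same ones the tests use.

import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import io.pravega.common.concurrent.Futures;
import java.util.concurrent.CompletableFuture;

public class FailThenSucceedStubbingSketch {

    // Hypothetical stand-in for the mocked SegmentHelper call used in the tests.
    interface SegmentClient {
        CompletableFuture<Void> truncateSegment(String scope, String stream, long segmentId,
                                                long offset, String delegationToken, long requestId);
    }

    public static void main(String[] args) {
        SegmentClient client = mock(SegmentClient.class);

        // Step 1 of the tests: every invocation returns a future failed with a RuntimeException,
        // which the code under test is expected to surface as a retryable failure.
        doAnswer(invocation -> Futures.failedFuture(new RuntimeException("injected failure")))
                .when(client).truncateSegment(anyString(), anyString(), anyLong(), anyLong(), anyString(), anyLong());

        // ... exercise the code under test and assert that the failure propagates ...

        // Step 4 of the tests: re-stub the same call to succeed so the retried event can complete.
        doAnswer(invocation -> CompletableFuture.completedFuture(null))
                .when(client).truncateSegment(anyString(), anyString(), anyLong(), anyLong(), anyString(), anyLong());
    }
}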
Use of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask in project pravega by pravega.
The class RequestHandlersTest, method testSealIgnoreFairness.
@Test
public void testSealIgnoreFairness() {
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            System.currentTimeMillis(), 0L).join();
    // 1. set segment helper mock to throw exception
    doAnswer(x -> Futures.failedFuture(new RuntimeException())).when(segmentHelper)
            .sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 2. start process --> this should fail with a retryable exception while talking to segment store!
    streamStore.setState(fairness, fairness, State.SEALING, null, executor).join();
    assertEquals(State.SEALING, streamStore.getState(fairness, fairness, true, null, executor).join());
    SealStreamEvent event = new SealStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce())
            .sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 3. set waiting processor to "random name"
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();
    // 4. reset segment helper to return success
    doAnswer(x -> CompletableFuture.completedFuture(null)).when(segmentHelper)
            .sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 5. process again. it should succeed while ignoring waiting processor
    streamRequestHandler.process(event, () -> false).join();
    assertEquals(State.SEALED, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 6. run a new update. it should fail because of waiting processor.
    SealStreamEvent event2 = new SealStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event2, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
Use of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask in project pravega by pravega.
The class RequestHandlersTest, method testUpdateIgnoreFairness.
@Test
public void testUpdateIgnoreFairness() {
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            System.currentTimeMillis(), 0L).join();
    // 1. set segment helper mock to throw exception
    doAnswer(x -> Futures.failedFuture(new RuntimeException())).when(segmentHelper)
            .updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());
    // 2. start process --> this should fail with a retryable exception while talking to segment store!
    streamStore.startUpdateConfiguration(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), null, executor).join();
    streamStore.setState(fairness, fairness, State.UPDATING, null, executor).join();
    assertEquals(State.UPDATING, streamStore.getState(fairness, fairness, true, null, executor).join());
    UpdateStreamEvent event = new UpdateStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce())
            .updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());
    // 3. set waiting processor to "random name"
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();
    // 4. reset segment helper to return success
    doAnswer(x -> CompletableFuture.completedFuture(null)).when(segmentHelper)
            .updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());
    // 5. process again. it should succeed while ignoring waiting processor
    streamRequestHandler.process(event, () -> false).join();
    assertEquals(State.ACTIVE, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 6. run a new update. it should fail because of waiting processor and our state does not allow us to ignore waiting processor
    UpdateStreamEvent event2 = new UpdateStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(event2, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
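All three tests above build the StreamRequestHandler with the identical twelve-argument constructor call. The sketch below shows how RequestHandlersTest could factor that into a private helper; the method name newStreamRequestHandler is hypothetical, while every constructor and field it uses appears verbatim in the tests above.

// Hypothetical helper for RequestHandlersTest; not part of the original test class.
private StreamRequestHandler newStreamRequestHandler() {
    return new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
}

Each test could then start with StreamRequestHandler streamRequestHandler = newStreamRequestHandler(); without changing its behaviour.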
Use of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask in project pravega by pravega.
The class ZKControllerServiceImplTest, method getControllerService.
@Override
public ControllerService getControllerService() throws Exception {
    final HostControllerStore hostStore;
    final SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    // Start an embedded ZooKeeper server and a Curator client against it.
    zkServer = new TestingServerStarter().start();
    zkServer.start();
    zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
    zkClient.start();
    storeClient = StoreClientFactory.createZKStoreClient(zkClient);
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(20, "testpool");
    // ZooKeeper-backed metadata stores.
    taskMetadataStore = TaskStoreFactoryForTests.createStore(storeClient, executorService);
    hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    streamStore = StreamStoreFactory.createZKStore(zkClient, executorService);
    kvtStore = KVTableStoreFactory.createZKStore(zkClient, executorService);
    // Key-value table metadata tasks and request handler.
    EventHelper tableEventHelper = EventHelperMock.getEventHelperMock(executorService, "host",
            ((AbstractKVTableMetadataStore) kvtStore).getHostTaskIndex());
    this.kvtStore = KVTableStoreFactory.createZKStore(zkClient, executorService);
    this.kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executorService, executorService, "host",
            GrpcAuthHelper.getDisabledAuthHelper(), tableEventHelper);
    this.tableRequestHandler = new TableRequestHandler(
            new CreateTableTask(this.kvtStore, this.kvtMetadataTasks, executorService),
            new DeleteTableTask(this.kvtStore, this.kvtMetadataTasks, executorService),
            this.kvtStore, executorService);
    // Stream metadata tasks and request handler, including the DeleteReaderGroupTask.
    BucketStore bucketStore = StreamStoreFactory.createZKBucketStore(zkClient, executorService);
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executorService, "host",
            ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper,
            executorService, "host", GrpcAuthHelper.getDisabledAuthHelper(), helperMock);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executorService,
            "host", GrpcAuthHelper.getDisabledAuthHelper());
    this.streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executorService),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executorService),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executorService),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executorService),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executorService),
            executorService);
    // Wire mock event writers so request events are processed in-process.
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executorService));
    streamTransactionMetadataTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    tableEventHelper.setRequestEventWriter(new ControllerEventTableWriterMock(tableRequestHandler, executorService));
    // Register this host in the controller cluster and wait for the membership event.
    cluster = new ClusterZKImpl(zkClient, ClusterType.CONTROLLER);
    final CountDownLatch latch = new CountDownLatch(1);
    cluster.addListener((type, host) -> latch.countDown());
    cluster.registerHost(new Host("localhost", 9090, null));
    latch.await();
    return new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
            streamTransactionMetadataTasks, segmentHelper, executorService, cluster, requestTracker);
}
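The ZooKeeper variant above opens an embedded ZooKeeper server, a Curator client, a scheduled executor, and a cluster registration. Below is a hedged sketch of a matching teardown, assuming it runs once the test is done; the method name stopZkControllerService is hypothetical, and the real test class may clean up additional components (metadata tasks, store client, metrics) as well.

// Hypothetical teardown matching the setup above; not the actual test's code.
public void stopZkControllerService() throws Exception {
    cluster.close();                                   // remove this host from the controller cluster
    zkClient.close();                                  // Curator client
    zkServer.close();                                  // embedded Curator TestingServer
    ExecutorServiceHelpers.shutdown(executorService);  // orderly shutdown of the "testpool" executor
}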
Use of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteReaderGroupTask in project pravega by pravega.
The class InMemoryControllerServiceImplTest, method getControllerService.
@Override
public ControllerService getControllerService() {
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(20, "testpool");
    // In-memory metadata stores.
    taskMetadataStore = TaskStoreFactoryForTests.createInMemoryStore(executorService);
    streamStore = StreamStoreFactory.createInMemoryStore();
    BucketStore bucketStore = StreamStoreFactory.createInMemoryBucketStore();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    segmentHelper = SegmentHelperMock.getSegmentHelperMockForTables(executorService);
    // Stream metadata tasks.
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executorService, "host",
            ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper,
            executorService, "host", GrpcAuthHelper.getDisabledAuthHelper(), helperMock);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executorService,
            "host", GrpcAuthHelper.getDisabledAuthHelper());
    // Key-value table metadata tasks and request handler.
    this.kvtStore = KVTableStoreFactory.createInMemoryStore(streamStore, executorService);
    EventHelper tableEventHelper = EventHelperMock.getEventHelperMock(executorService, "host",
            ((AbstractKVTableMetadataStore) kvtStore).getHostTaskIndex());
    this.kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executorService, executorService, "host",
            GrpcAuthHelper.getDisabledAuthHelper(), tableEventHelper);
    this.tableRequestHandler = new TableRequestHandler(
            new CreateTableTask(this.kvtStore, this.kvtMetadataTasks, executorService),
            new DeleteTableTask(this.kvtStore, this.kvtMetadataTasks, executorService),
            this.kvtStore, executorService);
    // Stream request handler, including the DeleteReaderGroupTask.
    this.streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executorService),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executorService),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executorService),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executorService),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executorService),
            executorService);
    // Wire mock event writers so request events are processed in-process.
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executorService));
    streamTransactionMetadataTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    tableEventHelper.setRequestEventWriter(new ControllerEventTableWriterMock(tableRequestHandler, executorService));
    // A mocked single-member cluster is enough for this variant.
    Cluster mockCluster = mock(Cluster.class);
    when(mockCluster.getClusterMembers()).thenReturn(Collections.singleton(new Host("localhost", 9090, null)));
    return new ControllerService(kvtStore, kvtMetadataTasks, streamStore, StreamStoreFactory.createInMemoryBucketStore(),
            streamMetadataTasks, streamTransactionMetadataTasks, SegmentHelperMock.getSegmentHelperMock(),
            executorService, mockCluster, requestTracker);
}