Usage example of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask from the pravega project: class RequestHandlersTest, method testDeleteStreamReplay.
@Test
public void testDeleteStreamReplay() {
    // Verifies that a DeleteStreamEvent is bound to a specific stream incarnation
    // through its creation time: replaying a stale delete event against a
    // recreated stream of the same name must fail rather than delete the new stream.
    final String stream = "delete";
    createStreamInStore(stream, scope);
    final SealStreamTask sealTask =
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor);
    final DeleteStreamTask deleteTask =
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor);

    // Seal the first incarnation; the seal task requires the state to be SEALING beforehand.
    streamStore.setState(scope, stream, State.SEALING, null, executor).join();
    sealTask.execute(new SealStreamEvent(scope, stream, 0L)).join();
    assertEquals(State.SEALED, streamStore.getState(scope, stream, true, null, executor).join());

    // Delete it, keeping the event around — it captures the first creation time.
    final long creationTime = streamStore.getCreationTime(scope, stream, null, executor).join();
    final DeleteStreamEvent firstDeleteEvent = new DeleteStreamEvent(scope, stream, 0L, creationTime);
    deleteTask.execute(firstDeleteEvent).join();

    // Recreate a stream under the same name; it must get a distinct creation time.
    createStreamInStore(stream, scope);
    final long newCreationTime = streamStore.getCreationTime(scope, stream, null, executor).join();
    assertNotEquals(creationTime, newCreationTime);

    // Seal the second incarnation as well.
    streamStore.setState(scope, stream, State.SEALING, null, executor).join();
    sealTask.execute(new SealStreamEvent(scope, stream, 0L)).join();
    assertEquals(State.SEALED, streamStore.getState(scope, stream, true, null, executor).join());

    // Replaying the stale delete event must be rejected with IllegalArgumentException...
    AssertExtensions.assertFutureThrows("Replaying older delete event should have no effect",
            deleteTask.execute(firstDeleteEvent),
            e -> Exceptions.unwrap(e) instanceof IllegalArgumentException);
    // ...while a fresh event carrying the new creation time deletes the stream.
    deleteTask.execute(new DeleteStreamEvent(scope, stream, 0L, newCreationTime)).join();
}
Usage example of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask from the pravega project: class RequestHandlersTest, method testSealIgnoreFairness.
@Test
public void testSealIgnoreFairness() {
    // Checks that an in-flight seal operation completes even when another processor
    // has registered itself as waiting, and that a subsequent event is then blocked
    // by that waiting processor.
    final StreamRequestHandler requestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    final String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            System.currentTimeMillis(), 0L).join();

    // 1. Make the segment-store mock fail every sealSegment call.
    doAnswer(x -> Futures.failedFuture(new RuntimeException()))
            .when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());

    // 2. Start processing — it must fail while talking to the segment store.
    streamStore.setState(fairness, fairness, State.SEALING, null, executor).join();
    assertEquals(State.SEALING, streamStore.getState(fairness, fairness, true, null, executor).join());
    final SealStreamEvent event = new SealStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", requestHandler.process(event, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce())
            .sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());

    // 3. Register an unrelated waiting processor.
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();

    // 4. Let sealSegment succeed again.
    doAnswer(x -> CompletableFuture.completedFuture(null))
            .when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());

    // 5. Reprocessing the same event succeeds, ignoring the waiting processor.
    requestHandler.process(event, () -> false).join();
    assertEquals(State.SEALED, streamStore.getState(fairness, fairness, true, null, executor).join());

    // 6. A brand-new event must now be rejected because of the waiting processor.
    final SealStreamEvent laterEvent = new SealStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", requestHandler.process(laterEvent, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
Usage example of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask from the pravega project: class RequestHandlersTest, method testDeleteAssociatedStream.
@Test
public void testDeleteAssociatedStream() {
    // Verifies that deleting a stream also removes its associated mark stream,
    // while sealing the stream leaves the mark stream untouched.
    final String stream = "deleteAssociated";
    createStreamInStore(stream, scope);
    final String markStream = NameUtils.getMarkStreamForStream(stream);
    createStreamInStore(markStream, scope);
    final SealStreamTask sealTask =
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor);
    final DeleteStreamTask deleteTask =
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor);

    // Seal the main stream; the seal task requires the state to be SEALING beforehand.
    streamStore.setState(scope, stream, State.SEALING, null, executor).join();
    sealTask.execute(new SealStreamEvent(scope, stream, 0L)).join();
    assertEquals(State.SEALED, streamStore.getState(scope, stream, true, null, executor).join());

    // Sealing must not touch the mark stream: still present and ACTIVE.
    assertTrue(streamStore.checkStreamExists(scope, markStream, null, executor).join());
    assertEquals(streamStore.getState(scope, markStream, true, null, executor).join(), State.ACTIVE);

    // Delete the main stream...
    final long creationTime = streamStore.getCreationTime(scope, stream, null, executor).join();
    deleteTask.execute(new DeleteStreamEvent(scope, stream, 0L, creationTime)).join();
    // ...and confirm the mark stream was deleted along with it.
    assertFalse(streamStore.checkStreamExists(scope, markStream, null, executor).join());
}
Usage example of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask from the pravega project: class RequestHandlersTest, method testUpdateIgnoreFairness.
@Test
public void testUpdateIgnoreFairness() {
    // Checks that an in-flight update operation completes even when another
    // processor has registered itself as waiting, and that a subsequent update
    // event is then blocked by that waiting processor.
    final StreamRequestHandler requestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    final String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            System.currentTimeMillis(), 0L).join();

    // 1. Make the segment-store mock fail every updatePolicy call.
    doAnswer(x -> Futures.failedFuture(new RuntimeException()))
            .when(segmentHelper).updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());

    // 2. Start processing — it must fail while talking to the segment store.
    streamStore.startUpdateConfiguration(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), null, executor).join();
    streamStore.setState(fairness, fairness, State.UPDATING, null, executor).join();
    assertEquals(State.UPDATING, streamStore.getState(fairness, fairness, true, null, executor).join());
    final UpdateStreamEvent event = new UpdateStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", requestHandler.process(event, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce())
            .updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());

    // 3. Register an unrelated waiting processor.
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();

    // 4. Let updatePolicy succeed again.
    doAnswer(x -> CompletableFuture.completedFuture(null))
            .when(segmentHelper).updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());

    // 5. Reprocessing the same event succeeds, ignoring the waiting processor.
    requestHandler.process(event, () -> false).join();
    assertEquals(State.ACTIVE, streamStore.getState(fairness, fairness, true, null, executor).join());

    // 6. A brand-new update must now be rejected: our state no longer allows us
    // to ignore the waiting processor.
    final UpdateStreamEvent laterEvent = new UpdateStreamEvent(fairness, fairness, 0L);
    AssertExtensions.assertFutureThrows("", requestHandler.process(laterEvent, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
Usage example of io.pravega.controller.server.eventProcessor.requesthandlers.DeleteStreamTask from the pravega project: class ZKControllerServiceImplTest, method getControllerService.
@Override
public ControllerService getControllerService() throws Exception {
    // Builds a full ControllerService wired against an embedded ZooKeeper server,
    // with a mocked SegmentHelper and mocked event writers. All stores and task
    // helpers are stashed in instance fields so tearDown can dispose of them.
    final HostControllerStore hostStore;
    final SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    zkServer = new TestingServerStarter().start();
    zkServer.start();
    zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
    zkClient.start();
    storeClient = StoreClientFactory.createZKStoreClient(zkClient);
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(20, "testpool");
    taskMetadataStore = TaskStoreFactoryForTests.createStore(storeClient, executorService);
    hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    streamStore = StreamStoreFactory.createZKStore(zkClient, executorService);
    // Create the KV-table store exactly once. Previously it was created twice,
    // which left tableEventHelper bound to the host-task index of a discarded
    // store instance.
    this.kvtStore = KVTableStoreFactory.createZKStore(zkClient, executorService);
    EventHelper tableEventHelper = EventHelperMock.getEventHelperMock(executorService, "host", ((AbstractKVTableMetadataStore) kvtStore).getHostTaskIndex());
    this.kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executorService, executorService, "host", GrpcAuthHelper.getDisabledAuthHelper(), tableEventHelper);
    this.tableRequestHandler = new TableRequestHandler(new CreateTableTask(this.kvtStore, this.kvtMetadataTasks, executorService), new DeleteTableTask(this.kvtStore, this.kvtMetadataTasks, executorService), this.kvtStore, executorService);
    BucketStore bucketStore = StreamStoreFactory.createZKBucketStore(zkClient, executorService);
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executorService, "host", ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper, executorService, "host", GrpcAuthHelper.getDisabledAuthHelper(), helperMock);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executorService, "host", GrpcAuthHelper.getDisabledAuthHelper());
    this.streamRequestHandler = new StreamRequestHandler(new AutoScaleTask(streamMetadataTasks, streamStore, executorService), new ScaleOperationTask(streamMetadataTasks, streamStore, executorService), new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService), new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executorService), new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService), new TruncateStreamTask(streamMetadataTasks, streamStore, executorService), new CreateReaderGroupTask(streamMetadataTasks, streamStore, executorService), new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executorService), new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executorService), streamStore, new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executorService), executorService);
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executorService));
    streamTransactionMetadataTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    tableEventHelper.setRequestEventWriter(new ControllerEventTableWriterMock(tableRequestHandler, executorService));
    cluster = new ClusterZKImpl(zkClient, ClusterType.CONTROLLER);
    // Block until the cluster listener observes the registered host so the
    // service is fully wired before it is returned to the caller.
    final CountDownLatch latch = new CountDownLatch(1);
    cluster.addListener((type, host) -> latch.countDown());
    cluster.registerHost(new Host("localhost", 9090, null));
    latch.await();
    return new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, segmentHelper, executorService, cluster, requestTracker);
}
Aggregations