Use of io.pravega.controller.store.stream.StreamMetadataStore in project pravega by pravega.
The class ControllerEventProcessorsTest, method testHandleOrphaned.
@Test(timeout = 10000)
public void testHandleOrphaned() throws CheckpointStoreException {
    LocalController localController = mock(LocalController.class);
    CheckpointStore checkpointStore = mock(CheckpointStore.class);
    StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
    BucketStore bucketStore = mock(BucketStore.class);
    ConnectionPool connectionPool = mock(ConnectionPool.class);
    StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
    KVTableMetadataStore kvtStore = mock(KVTableMetadataStore.class);
    TableMetadataTasks kvtTasks = mock(TableMetadataTasks.class);
    ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
    EventProcessorSystem system = mock(EventProcessorSystem.class);
    EventProcessorGroup<ControllerEvent> processor = getProcessor();
    EventProcessorGroup<ControllerEvent> mockProcessor = spy(processor);
    doThrow(new CheckpointStoreException("host not found")).when(mockProcessor).notifyProcessFailure("host3");
    when(system.createEventProcessorGroup(any(), any(), any())).thenReturn(mockProcessor);
    @Cleanup
    ControllerEventProcessors processors = new ControllerEventProcessors("host1", config, localController,
            checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks,
            streamTransactionMetadataTasks, kvtStore, kvtTasks, system, executorService());
    // Case where init has not been called, so kvtRequestProcessors are not initialized and remain null.
    assertTrue(Futures.await(processors.sweepFailedProcesses(() -> Sets.newHashSet("host1"))));
    Assert.assertFalse(processors.isReady());
    Assert.assertFalse(processors.isBootstrapCompleted());
    Assert.assertFalse(processors.isMetadataServiceConnected());
    processors.startAsync();
    processors.awaitRunning();
    assertTrue(Futures.await(processors.sweepFailedProcesses(() -> Sets.newHashSet("host1"))));
    assertTrue(Futures.await(processors.handleFailedProcess("host1")));
    AssertExtensions.assertFutureThrows("host not found", processors.handleFailedProcess("host3"),
            e -> e instanceof CheckpointStoreException);
    processors.shutDown();
}
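Futures.await(...) is Pravega's test utility that returns true when the given future completes without an exception, so the sweeps for "host1" are expected to succeed while "host3" surfaces the injected CheckpointStoreException. A minimal sketch of an equivalent check using plain java.util.concurrent types (illustrative only, not Pravega's implementation):

// Illustrative equivalent of Futures.await(f): true iff f completed normally.
static boolean completedNormally(CompletableFuture<?> f) {
    try {
        f.join();
        return true;
    } catch (CompletionException | CancellationException e) {
        return false;
    }
}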
Use of io.pravega.controller.store.stream.StreamMetadataStore in project pravega by pravega.
The class ScaleRequestHandlerTest, method concurrentIdenticalScaleRun.
private void concurrentIdenticalScaleRun(String stream, String func, boolean isManual,
                                         Predicate<Throwable> firstExceptionPredicate,
                                         boolean expectFailureOnSecondJob,
                                         Predicate<Throwable> secondExceptionPredicate,
                                         Map<String, Integer> invocationCount) throws Exception {
    StreamMetadataStore streamStore1 = getStore();
    StreamMetadataStore streamStore1Spied = spy(getStore());
    StreamConfiguration config = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    streamStore1.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
    streamStore1.setState(scope, stream, State.ACTIVE, null, executor).join();
    CompletableFuture<Void> wait = new CompletableFuture<>();
    CompletableFuture<Void> signal = new CompletableFuture<>();
    ScaleOpEvent event = new ScaleOpEvent(scope, stream, Lists.newArrayList(0L),
            Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), isManual,
            System.currentTimeMillis(), System.currentTimeMillis());
    if (isManual) {
        streamStore1.submitScale(scope, stream, Lists.newArrayList(0L),
                Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)),
                System.currentTimeMillis(), null, null, executor).join();
    }
    StreamMetadataStore streamStore2 = getStore();
    ScaleOperationTask scaleRequestHandler1 = new ScaleOperationTask(streamMetadataTasks, streamStore1Spied, executor);
    ScaleOperationTask scaleRequestHandler2 = new ScaleOperationTask(streamMetadataTasks, streamStore2, executor);
    setMockLatch(streamStore1, streamStore1Spied, func, signal, wait);
    // The first job's processing will stall at the intercepted store method.
    CompletableFuture<Void> future1 = CompletableFuture.completedFuture(null)
            .thenComposeAsync(v -> scaleRequestHandler1.execute(event), executor);
    signal.join();
    // Let the second job run to completion; depending on the scenario it should succeed or fail.
    if (!expectFailureOnSecondJob) {
        scaleRequestHandler2.execute(event).join();
    } else {
        AssertExtensions.assertSuppliedFutureThrows("second job should fail",
                () -> scaleRequestHandler2.execute(event), secondExceptionPredicate);
    }
    // Release the wait latch so the stalled first job resumes, then verify it fails as expected.
    wait.complete(null);
    AssertExtensions.assertSuppliedFutureThrows("first scale should fail", () -> future1, firstExceptionPredicate);
    verify(streamStore1Spied, times(invocationCount.get("startScale")))
            .startScale(anyString(), anyString(), anyBoolean(), any(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("scaleCreateNewEpochs")))
            .scaleCreateNewEpochs(anyString(), anyString(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("scaleSegmentsSealed")))
            .scaleSegmentsSealed(anyString(), anyString(), any(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("completeScale")))
            .completeScale(anyString(), anyString(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("updateVersionedState")))
            .updateVersionedState(anyString(), anyString(), any(), any(), any(), any());
    // Validate that the scale is done.
    VersionedMetadata<EpochTransitionRecord> versioned = streamStore1.getEpochTransition(scope, stream, null, executor).join();
    assertEquals(EpochTransitionRecord.EMPTY, versioned.getObject());
    assertEquals(2, getVersionNumber(versioned));
    assertEquals(1, streamStore1.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
    assertEquals(State.ACTIVE, streamStore1.getState(scope, stream, true, null, executor).join());
    streamStore1.close();
    streamStore2.close();
}
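setMockLatch is a helper defined elsewhere in the test class. A plausible sketch of how such a signal/wait latch can be wired with Mockito, assuming it intercepts the store method named by func (startScale shown here); this is an illustration, not the actual helper:

// Hypothetical sketch of setMockLatch: stall the spied store inside startScale.
doAnswer(invocation -> {
    signal.complete(null);               // tell the test the call has begun
    wait.join();                         // block until the test releases the latch
    return invocation.callRealMethod();  // then delegate to the real store
}).when(streamStore1Spied).startScale(anyString(), anyString(), anyBoolean(), any(), any(), any(), any());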
Use of io.pravega.controller.store.stream.StreamMetadataStore in project pravega by pravega.
The class ControllerGrpcAuthFocusedTest, method setup.
@Before
public void setup() throws IOException {
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createInMemoryStore(EXECUTOR);
    StreamMetadataStore streamStore = StreamStoreFactory.createInMemoryStore();
    this.kvtStore = spy(KVTableStoreFactory.createInMemoryStore(streamStore, EXECUTOR));
    BucketStore bucketStore = StreamStoreFactory.createInMemoryBucketStore();
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    RequestTracker requestTracker = new RequestTracker(true);
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    GrpcAuthHelper authHelper = new GrpcAuthHelper(true, "secret", 300);
    EventHelper helper = EventHelperMock.getEventHelperMock(EXECUTOR, "host",
            ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper,
            EXECUTOR, "host", authHelper, helper);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, EXECUTOR, "host", authHelper);
    kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, EXECUTOR, EXECUTOR, "host", authHelper, helper);
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, EXECUTOR),
            new ScaleOperationTask(streamMetadataTasks, streamStore, EXECUTOR),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, EXECUTOR),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, EXECUTOR),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, EXECUTOR),
            new TruncateStreamTask(streamMetadataTasks, streamStore, EXECUTOR),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, EXECUTOR),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, EXECUTOR),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, EXECUTOR),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, EXECUTOR),
            EXECUTOR);
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, EXECUTOR));
    streamTransactionMetadataTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    Cluster mockCluster = mock(Cluster.class);
    when(mockCluster.getClusterMembers()).thenReturn(Collections.singleton(new Host("localhost", 9090, null)));
    ControllerServiceGrpc.ControllerServiceImplBase controllerServiceImplBase = new ControllerServiceImpl(
            new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
                    streamTransactionMetadataTasks, segmentHelper, EXECUTOR, mockCluster, requestTracker),
            authHelper, requestTracker, true, true, 2);
    ControllerServiceGrpc.ControllerServiceImplBase controllerServiceImplBaseStrict = new ControllerServiceImpl(
            new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
                    streamTransactionMetadataTasks, segmentHelper, EXECUTOR, mockCluster, requestTracker),
            authHelper, requestTracker, true, false, 2);
    PasswordAuthHandler authHandler = new PasswordAuthHandler();
    authHandler.initialize(AUTH_FILE.getAbsolutePath());
    String uniqueServerName = String.format("Test server name: %s", getClass());
    String uniqueServerNameStrict = String.format("Test server name: %sStrict", getClass());
    // Using a builder that creates a server for servicing in-process requests.
    // Also using a direct executor, which executes app code directly in the transport thread. See
    // https://grpc.io/grpc-java/javadoc/io/grpc/inprocess/InProcessServerBuilder.html for more information.
    grpcServer = InProcessServerBuilder.forName(uniqueServerName)
            .addService(ServerInterceptors.intercept(controllerServiceImplBase, new AuthInterceptor(authHandler)))
            .directExecutor().build().start();
    grpcServerStrict = InProcessServerBuilder.forName(uniqueServerNameStrict)
            .addService(ServerInterceptors.intercept(controllerServiceImplBaseStrict, new AuthInterceptor(authHandler)))
            .directExecutor().build().start();
    inProcessChannel = InProcessChannelBuilder.forName(uniqueServerName).directExecutor().build();
    inProcessChannelStrict = InProcessChannelBuilder.forName(uniqueServerNameStrict).directExecutor().build();
}
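The test methods themselves are not shown in this snippet; they would typically build client stubs over these in-process channels using the factories that gRPC generates for the service, attaching credentials per call. For example (assumed usage):

ControllerServiceGrpc.ControllerServiceBlockingStub stub =
        ControllerServiceGrpc.newBlockingStub(inProcessChannel);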
Use of io.pravega.controller.store.stream.StreamMetadataStore in project pravega by pravega.
The class StreamMetadataTasksTest, method deleteScopeRecursiveTest.
@Test
public void deleteScopeRecursiveTest() {
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    StreamMetadataStore storeSpy = spy(getStore());
    final String testDeleteScope = "testDelete";
    // Call deleteScopeRecursive() without creating the scope; deleting a non-existent scope succeeds.
    Controller.DeleteScopeStatus.Status status = streamMetadataTasks.deleteScopeRecursive(testDeleteScope, 123L).join();
    assertEquals(Controller.DeleteScopeStatus.Status.SUCCESS, status);
    streamStorePartialMock.createScope(testDeleteScope, null, executor).join();
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    DeleteScopeEvent deleteScopeEvent = new DeleteScopeEvent(SCOPE, 2L, UUID.randomUUID());
    requestEventWriter.writeEvent(deleteScopeEvent);
    // Stub the spy itself; stubbing spy(storeSpy) would configure a throwaway object and have no effect.
    doAnswer(x -> {
        CompletableFuture<Boolean> future = new CompletableFuture<>();
        future.complete(true);
        return future;
    }).when(storeSpy).isScopeSealed(testDeleteScope, null, executor);
    doAnswer(x -> {
        CompletableFuture<UUID> future = new CompletableFuture<>();
        future.complete(UUID.randomUUID());
        return future;
    }).when(storeSpy).getScopeId(testDeleteScope, null, executor);
    consumer.deleteScopeRecursive(SCOPE, 123L).join();
}
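The two doAnswer blocks can be written more compactly with CompletableFuture.completedFuture; an equivalent stubbing (same behavior) would be:

doReturn(CompletableFuture.completedFuture(true))
        .when(storeSpy).isScopeSealed(testDeleteScope, null, executor);
doReturn(CompletableFuture.completedFuture(UUID.randomUUID()))
        .when(storeSpy).getScopeId(testDeleteScope, null, executor);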
Use of io.pravega.controller.store.stream.StreamMetadataStore in project pravega by pravega.
The class StreamMetadataTasksTest, method deletePartiallyCreatedStreamTest.
@Test(timeout = 30000)
public void deletePartiallyCreatedStreamTest() throws InterruptedException {
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    StreamMetadataStore store = streamStorePartialMock;
    final String scopeName = "RecreationScopePartial";
    final String streamName = "RecreatedStreamPartial";
    store.createScope(scopeName, null, executor).join();
    Controller.DeleteStreamStatus.Status deleteStatus;
    // region case 1: only add the stream to the scope, without any additional metadata
    StreamMetadataStoreTestHelper.addStreamToScope(store, scopeName, streamName);
    assertTrue(store.checkStreamExists(scopeName, streamName, null, executor).join());
    deleteStatus = streamMetadataTasks.deleteStream(scopeName, streamName, 0L).join();
    assertEquals(Controller.DeleteStreamStatus.Status.SUCCESS, deleteStatus);
    // verify that no event is posted
    assertTrue(requestEventWriter.eventQueue.isEmpty());
    // endregion
    // region case 2: only add the creation time for the stream, then delete it
    StreamMetadataStoreTestHelper.partiallyCreateStream(store, scopeName, streamName, Optional.of(100L), false);
    assertTrue(store.checkStreamExists(scopeName, streamName, null, executor).join());
    deleteStatus = streamMetadataTasks.deleteStream(scopeName, streamName, 0L).join();
    assertEquals(Controller.DeleteStreamStatus.Status.SUCCESS, deleteStatus);
    // verify that no event is posted
    assertTrue(requestEventWriter.eventQueue.isEmpty());
    // endregion
    // region case 3: create the stream again, this time with the `state` record but no history record.
    // This should invoke the delete workflow, because segments also have to be deleted.
    StreamMetadataStoreTestHelper.partiallyCreateStream(store, scopeName, streamName, Optional.of(100L), true);
    assertTrue(store.checkStreamExists(scopeName, streamName, null, executor).join());
    CompletableFuture<Controller.DeleteStreamStatus.Status> future = streamMetadataTasks.deleteStream(scopeName, streamName, 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(Controller.DeleteStreamStatus.Status.SUCCESS, future.join());
    // endregion
    // region case 4: now create the full stream metadata, without setting the state to active.
    // Since there were no active segments, segments are created starting from segment 0.
    // The configuration has 3 segments, so the highest segment number should be 2.
    StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).build();
    store.createStream(scopeName, streamName, configuration, 101L, null, executor).join();
    assertTrue(store.checkStreamExists(scopeName, streamName, null, executor).join());
    assertEquals(2L, store.getActiveEpoch(scopeName, streamName, null, true, executor).join()
            .getSegmentIds().stream().max(Long::compareTo).get().longValue());
    // delete stream should succeed
    future = streamMetadataTasks.deleteStream(scopeName, streamName, 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(Controller.DeleteStreamStatus.Status.SUCCESS, future.join());
    // Recreate once more; segment numbering continues past the deleted stream's segments.
    store.createStream(scopeName, streamName, configuration, 102L, null, executor).join();
    assertEquals(5L, store.getActiveEpoch(scopeName, streamName, null, true, executor).join()
            .getSegmentIds().stream().max(Long::compareTo).get().longValue());
    // endregion
}
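The final assertions rely on segment numbers not being reused: the first creation of the 3-segment stream yields segments 0..2 (max 2), and recreating it after deletion continues numbering at 3, yielding segments 3..5 (max 5). A tiny sketch of that arithmetic, under the numbering behavior the assertions above exercise:

// Segment numbering continues across delete + recreate (assumed behavior).
int segmentCount = 3;
long maxAfterFirstCreate = segmentCount - 1;      // 2
long maxAfterRecreate = 2L * segmentCount - 1;    // 5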