Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
The class BucketServiceTest, method setup.
@Before
public void setup() throws Exception {
    executor = ExecutorServiceHelpers.newScheduledThreadPool(10, "test");
    hostId = UUID.randomUUID().toString();
    streamMetadataStore = createStreamStore(executor);
    bucketStore = createBucketStore(3);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createInMemoryStore(executor);
    connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    streamMetadataTasks = new StreamMetadataTasks(streamMetadataStore, bucketStore, taskMetadataStore, segmentHelper,
            executor, hostId, GrpcAuthHelper.getDisabledAuthHelper());
    BucketServiceFactory bucketStoreFactory = new BucketServiceFactory(hostId, bucketStore, 2);
    // Start the retention bucket service with a 5 ms execution period.
    PeriodicRetention periodicRetention = new PeriodicRetention(streamMetadataStore, streamMetadataTasks, executor, requestTracker);
    retentionService = bucketStoreFactory.createRetentionService(Duration.ofMillis(5), periodicRetention::retention, executor);
    retentionService.startAsync();
    retentionService.awaitRunning();
    // Start the watermarking bucket service with the same period.
    ClientConfig clientConfig = ClientConfig.builder().build();
    periodicWatermarking = new PeriodicWatermarking(streamMetadataStore, bucketStore, clientConfig, executor, requestTracker);
    watermarkingService = bucketStoreFactory.createWatermarkingService(Duration.ofMillis(5), periodicWatermarking::watermark, executor);
    watermarkingService.startAsync();
    watermarkingService.awaitRunning();
}
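The setup starts two bucket services but the excerpt omits the matching teardown. A minimal sketch of what it might look like, assuming the fields shown above are instance fields of the test class (the actual BucketServiceTest teardown may differ):

@After
public void tearDown() throws Exception {
    // Stop the services before closing the resources they depend on.
    watermarkingService.stopAsync();
    watermarkingService.awaitTerminated();
    retentionService.stopAsync();
    retentionService.awaitTerminated();
    streamMetadataTasks.close();
    connectionFactory.close();
    ExecutorServiceHelpers.shutdown(executor);
}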
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
The class ZkStoreBucketServiceTest, method testOwnershipOfExistingBucket.
@Test(timeout = 60000)
public void testOwnershipOfExistingBucket() throws Exception {
    RequestTracker requestTracker = new RequestTracker(true);
    TestingServer zkServer2 = new TestingServerStarter().start();
    zkServer2.start();
    CuratorFramework zkClient2 = CuratorFrameworkFactory.newClient(zkServer2.getConnectString(), 10000, 1000, (r, e, s) -> false);
    zkClient2.start();
    @Cleanup("shutdownNow")
    ScheduledExecutorService executor2 = ExecutorServiceHelpers.newScheduledThreadPool(10, "test");
    String hostId = UUID.randomUUID().toString();
    BucketStore bucketStore2 = StreamStoreFactory.createZKBucketStore(ImmutableMap.of(BucketStore.ServiceType.RetentionService, 1), zkClient2, executor2);
    StreamMetadataStore streamMetadataStore2 = StreamStoreFactory.createZKStore(zkClient2, executor2);
    TaskMetadataStore taskMetadataStore = TaskStoreFactory.createInMemoryStore(executor2);
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    StreamMetadataTasks streamMetadataTasks2 = new StreamMetadataTasks(streamMetadataStore2, bucketStore2, taskMetadataStore,
            segmentHelper, executor2, hostId, GrpcAuthHelper.getDisabledAuthHelper());
    // Register two streams with the retention bucket store before the service starts.
    String scope = "scope1";
    String streamName = "stream1";
    bucketStore2.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope, streamName, executor2).join();
    String scope2 = "scope2";
    String streamName2 = "stream2";
    bucketStore2.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope2, streamName2, executor2).join();
    BucketServiceFactory bucketStoreFactory = new BucketServiceFactory(hostId, bucketStore2, 5);
    BucketManager service2 = bucketStoreFactory.createRetentionService(Duration.ofMillis(5000), stream -> CompletableFuture.completedFuture(null), executor2);
    service2.startAsync();
    service2.awaitRunning();
    // Allow the service time to take ownership of the pre-existing bucket and discover both streams.
    Thread.sleep(10000);
    assertTrue(service2.getBucketServices().values().stream().allMatch(x -> x.getKnownStreams().size() == 2));
    service2.stopAsync();
    service2.awaitTerminated();
    zkClient2.close();
    zkServer2.close();
    streamMetadataTasks2.close();
    ExecutorServiceHelpers.shutdown(executor2);
}
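The assertion above relies on both streams landing in the single configured bucket (the bucket store was created with a bucket count of 1 for the retention service). As an aside, the general idea behind bucketing is a stable mapping from scoped stream name to a bucket index. A hash-modulo sketch of that idea follows; this is illustrative only and not Pravega's actual BucketStore implementation:

// Illustrative sketch: a stable mapping from a scoped stream name to one of
// `bucketCount` buckets. Pravega's BucketStore encapsulates its own scheme;
// this only demonstrates the hash-modulo idea behind partitioning streams.
static int bucketOf(String scope, String stream, int bucketCount) {
    String scopedName = scope + "/" + stream;
    return Math.floorMod(scopedName.hashCode(), bucketCount);
}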
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
The class ControllerClusterListenerTest, method clusterListenerStarterTest.
@Test(timeout = 60000L)
@SuppressWarnings("unchecked")
public void clusterListenerStarterTest() throws InterruptedException, ExecutionException {
    String hostName = "localhost";
    Host host = new Host(hostName, 10, "originalhost");
    // The following futures are used as latches: each is completed inside a stubbed
    // sweeper method, and the test waits on it via Futures.await.
    // Future for ensuring that the task sweeper is ready and the sweep happens.
    CompletableFuture<Void> taskSweep = new CompletableFuture<>();
    // Future completed when taskSweeper.handleFailedProcess is called the first time.
    CompletableFuture<Void> taskHostSweep1 = new CompletableFuture<>();
    // Future completed when taskSweeper.handleFailedProcess is called a second time.
    CompletableFuture<Void> taskHostSweep2 = new CompletableFuture<>();
    // Future for the txn sweeper to get ready.
    CompletableFuture<Void> txnSweep = new CompletableFuture<>();
    // Future completed when txnSweeper.isReady is consulted while still not ready.
    CompletableFuture<Void> txnHostSweepIgnore = new CompletableFuture<>();
    CompletableFuture<Void> txnHostSweep2 = new CompletableFuture<>();
    // Create task sweeper.
    TaskMetadataStore taskStore = TaskStoreFactory.createZKStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
    TaskSweeper taskSweeper = spy(new TaskSweeper(taskStore, host.getHostId(), executor, new TestTasks(taskStore, executor, host.getHostId())));
    doAnswer(invocation -> {
        if (!taskSweep.isDone()) {
            // Complete the future when this method is called for the first time.
            taskSweep.complete(null);
        }
        return CompletableFuture.completedFuture(null);
    }).when(taskSweeper).sweepFailedProcesses(any(Supplier.class));
    doAnswer(invocation -> {
        if (!taskHostSweep1.isDone()) {
            // Complete this future when the task sweeper handles a failed host the first time.
            taskHostSweep1.complete(null);
        } else if (!taskHostSweep2.isDone()) {
            // Complete this future when the task sweeper handles a failed host a second time.
            taskHostSweep2.complete(null);
        }
        return CompletableFuture.completedFuture(null);
    }).when(taskSweeper).handleFailedProcess(anyString());
    // Create txn sweeper.
    StreamMetadataStore streamStore = StreamStoreFactory.createInMemoryStore();
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    // Create StreamTransactionMetadataTasks but don't initialize it with writers;
    // it will not be ready until writers are supplied.
    StreamTransactionMetadataTasks txnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executor, host.getHostId(), GrpcAuthHelper.getDisabledAuthHelper());
    TxnSweeper txnSweeper = spy(new TxnSweeper(streamStore, txnTasks, 100, executor));
    // Until the real isReady is restored, report "not ready" so any attempt to sweep the
    // failed txn host is ignored; txnHostSweepIgnore records that the check happened.
    AtomicBoolean txnSweeperRealMethod = new AtomicBoolean(false);
    doAnswer(invocation -> {
        if (txnSweeperRealMethod.get()) {
            return invocation.callRealMethod();
        }
        txnHostSweepIgnore.complete(null);
        return false;
    }).when(txnSweeper).isReady();
    doAnswer(invocation -> {
        if (!txnSweep.isDone()) {
            txnSweep.complete(null);
        }
        return CompletableFuture.completedFuture(null);
    }).when(txnSweeper).sweepFailedProcesses(any());
    doAnswer(invocation -> {
        if (!txnHostSweep2.isDone()) {
            txnHostSweep2.complete(null);
        }
        return CompletableFuture.completedFuture(null);
    }).when(txnSweeper).handleFailedProcess(anyString());
    // Create request sweeper.
    StreamMetadataTasks streamMetadataTasks = new StreamMetadataTasks(streamStore, mock(BucketStore.class), taskStore,
            segmentHelper, executor, host.getHostId(), GrpcAuthHelper.getDisabledAuthHelper(), mock(EventHelper.class));
    RequestSweeper requestSweeper = spy(new RequestSweeper(streamStore, executor, streamMetadataTasks));
    // As with the txn sweeper, report "not ready" so any attempt to sweep requests is
    // ignored; requestHostSweepIgnore records that the readiness check happened.
    CompletableFuture<Void> requestSweep = new CompletableFuture<>();
    CompletableFuture<Void> requestHostSweepIgnore = new CompletableFuture<>();
    CompletableFuture<Void> requestHostSweep2 = new CompletableFuture<>();
    AtomicBoolean requestSweeperRealMethod = new AtomicBoolean(false);
    doAnswer(invocation -> {
        if (requestSweeperRealMethod.get()) {
            return invocation.callRealMethod();
        }
        requestHostSweepIgnore.complete(null);
        return false;
    }).when(requestSweeper).isReady();
    doAnswer(invocation -> {
        if (!requestSweep.isDone()) {
            requestSweep.complete(null);
        }
        return CompletableFuture.completedFuture(null);
    }).when(requestSweeper).sweepFailedProcesses(any());
    doAnswer(invocation -> {
        if (!requestHostSweep2.isDone()) {
            requestHostSweep2.complete(null);
        }
        return CompletableFuture.completedFuture(null);
    }).when(requestSweeper).handleFailedProcess(anyString());
    // Create ControllerClusterListener.
    ControllerClusterListener clusterListener = new ControllerClusterListener(host, clusterZK, executor,
            Lists.newArrayList(taskSweeper, txnSweeper, requestSweeper));
    clusterListener.startAsync();
    clusterListener.awaitRunning();
    log.info("cluster started");
    // Ensure that the task sweep happens after the cluster listener becomes ready.
    assertTrue(Futures.await(taskSweep, 3000));
    log.info("task sweeper completed");
    // Ensure only tasks are swept.
    verify(taskSweeper, times(1)).sweepFailedProcesses(any(Supplier.class));
    verify(txnSweeper, times(0)).sweepFailedProcesses(any());
    verify(requestSweeper, times(0)).sweepFailedProcesses(any());
    verify(taskSweeper, times(0)).handleFailedProcess(anyString());
    verify(txnSweeper, times(0)).handleFailedProcess(anyString());
    verify(requestSweeper, times(0)).handleFailedProcess(anyString());
    validateAddedNode(host.getHostId());
    log.info("adding new host");
    // Now add and remove a new host.
    Host newHost = new Host(hostName, 20, "newHost1");
    clusterZK.registerHost(newHost);
    validateAddedNode(newHost.getHostId());
    clusterZK.deregisterHost(newHost);
    validateRemovedNode(newHost.getHostId());
    log.info("deregistering new host");
    assertTrue(Futures.await(taskHostSweep1, 3000));
    assertTrue(Futures.await(txnHostSweepIgnore, 10000));
    log.info("task sweep for new host done");
    // Verify that tasks are not swept again.
    verify(taskSweeper, times(1)).sweepFailedProcesses(any(Supplier.class));
    // Verify that the host-specific sweep happens at least once.
    verify(taskSweeper, atLeast(1)).handleFailedProcess(anyString());
    // Verify that txns are not yet swept, as the txn sweeper is not yet ready.
    verify(txnSweeper, times(0)).sweepFailedProcesses(any());
    verify(txnSweeper, times(0)).handleFailedProcess(anyString());
    // Verify that the txn sweeper's readiness was checked; at this point it would have reported not ready.
    verify(txnSweeper, atLeast(1)).isReady();
    // Same checks for the request sweeper: no requests swept yet, as it is not yet ready.
    verify(requestSweeper, times(0)).sweepFailedProcesses(any());
    verify(requestSweeper, times(0)).handleFailedProcess(anyString());
    // Verify that the request sweeper's readiness was checked; it too would have reported not ready.
    verify(requestSweeper, atLeast(1)).isReady();
    // Reset the stub to call the real method on txnSweeper.isReady.
    txnSweeperRealMethod.set(true);
    // Complete txn sweeper initialization by adding event writers.
    txnTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    txnSweeper.awaitInitialization();
    assertTrue(Futures.await(txnSweep, 3000));
    // Verify that, post initialization, txns are swept and the host-specific txn sweep is also performed.
    verify(txnSweeper, times(1)).sweepFailedProcesses(any());
    // Reset the stub to call the real method on requestSweeper.isReady.
    requestSweeperRealMethod.set(true);
    // Complete request sweeper initialization by adding the event writer.
    streamMetadataTasks.setRequestEventWriter(new EventStreamWriterMock<>());
    assertTrue(Futures.await(requestSweep, 3000));
    // Verify that, post initialization, requests are swept and the host-specific request sweep is also performed.
    verify(requestSweeper, times(1)).sweepFailedProcesses(any());
    // Now add and remove another host.
    newHost = new Host(hostName, 20, "newHost2");
    clusterZK.registerHost(newHost);
    validateAddedNode(newHost.getHostId());
    clusterZK.deregisterHost(newHost);
    log.info("removing newhost2");
    validateRemovedNode(newHost.getHostId());
    assertTrue(Futures.await(taskHostSweep2, 3000));
    assertTrue(Futures.await(txnHostSweep2, 3000));
    assertTrue(Futures.await(requestHostSweep2, 3000));
    verify(taskSweeper, atLeast(2)).handleFailedProcess(anyString());
    verify(txnSweeper, atLeast(1)).handleFailedProcess(anyString());
    verify(requestSweeper, atLeast(1)).handleFailedProcess(anyString());
    clusterListener.stopAsync();
    clusterListener.awaitTerminated();
}
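The stubbing in this test repeats one pattern: a spied sweeper method completes a CompletableFuture on its first invocation, turning a Mockito stub into a latch the test can await. Distilled into a standalone helper for the same test class (the helper name is illustrative; note that CompletableFuture.complete is idempotent, so the isDone guards above are optional):

// Illustrative helper: stub a spied TaskSweeper so the returned future
// completes the first time sweepFailedProcesses is invoked.
static CompletableFuture<Void> latchOnSweep(TaskSweeper spiedSweeper) {
    CompletableFuture<Void> firstCall = new CompletableFuture<>();
    doAnswer(invocation -> {
        firstCall.complete(null); // completing an already-done future is a no-op
        return CompletableFuture.completedFuture(null);
    }).when(spiedSweeper).sweepFailedProcesses(any(Supplier.class));
    return firstCall;
}
// Usage, mirroring the test: assertTrue(Futures.await(latchOnSweep(taskSweeper), 3000));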
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
The class PravegaTablesControllerServiceImplTest, method getControllerService.
@Override
public ControllerService getControllerService() throws Exception {
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(20, "testpool");
    segmentHelper = SegmentHelperMock.getSegmentHelperMockForTables(executorService);
    taskMetadataStore = TaskStoreFactoryForTests.createStore(PRAVEGA_ZK_CURATOR_RESOURCE.storeClient, executorService);
    streamStore = StreamStoreFactory.createPravegaTablesStore(segmentHelper, GrpcAuthHelper.getDisabledAuthHelper(),
            PRAVEGA_ZK_CURATOR_RESOURCE.client, executorService);
    // KVTable
    kvtStore = KVTableStoreFactory.createPravegaTablesStore(segmentHelper, GrpcAuthHelper.getDisabledAuthHelper(),
            PRAVEGA_ZK_CURATOR_RESOURCE.client, executorService);
    EventHelper tableEventHelper = EventHelperMock.getEventHelperMock(executorService, "host",
            ((AbstractKVTableMetadataStore) kvtStore).getHostTaskIndex());
    this.kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executorService, executorService, "host",
            GrpcAuthHelper.getDisabledAuthHelper(), tableEventHelper);
    this.tableRequestHandler = new TableRequestHandler(
            new CreateTableTask(this.kvtStore, this.kvtMetadataTasks, executorService),
            new DeleteTableTask(this.kvtStore, this.kvtMetadataTasks, executorService),
            this.kvtStore, executorService);
    BucketStore bucketStore = StreamStoreFactory.createZKBucketStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executorService);
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executorService, "host",
            ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper,
            executorService, "host", GrpcAuthHelper.getDisabledAuthHelper(), helperMock);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executorService,
            "host", GrpcAuthHelper.getDisabledAuthHelper());
    this.streamRequestHandler = spy(new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executorService),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executorService),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executorService),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executorService),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executorService),
            executorService));
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executorService));
    streamTransactionMetadataTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    tableEventHelper.setRequestEventWriter(new ControllerEventTableWriterMock(tableRequestHandler, executorService));
    cluster = new ClusterZKImpl(PRAVEGA_ZK_CURATOR_RESOURCE.client, ClusterType.CONTROLLER);
    // Block until this controller's registration is observed by the cluster listener.
    final CountDownLatch latch = new CountDownLatch(1);
    cluster.addListener((type, host) -> latch.countDown());
    cluster.registerHost(new Host("localhost", 9090, null));
    latch.await();
    return new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
            streamTransactionMetadataTasks, segmentHelper, executorService, cluster, requestTracker);
}
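getControllerService wires up a large object graph. A hedged sketch of the matching cleanup, releasing resources in roughly reverse order of creation; the test base class's actual tearDown may differ, and the calls marked as assumed are not confirmed by this excerpt:

public void cleanupControllerService() throws Exception {
    cluster.close();
    streamTransactionMetadataTasks.close();
    streamMetadataTasks.close();
    kvtMetadataTasks.close();
    streamStore.close();   // assumed: store exposes close()
    kvtStore.close();      // assumed: store exposes close()
    ExecutorServiceHelpers.shutdown(executorService);
    StreamMetrics.reset();        // assumed reset hook mirroring initialize()
    TransactionMetrics.reset();   // assumed reset hook mirroring initialize()
}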
Use of io.pravega.controller.task.Stream.StreamMetadataTasks in project pravega by pravega.
The class ControllerServiceTest, method setup.
@Before
public void setup() throws Exception {
    final TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
    final HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    BucketStore bucketStore = StreamStoreFactory.createInMemoryBucketStore();
    connectionPool = new ConnectionPoolImpl(ClientConfig.builder().build(), new SocketConnectionFactoryImpl(ClientConfig.builder().build()));
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper, executor,
            "host", GrpcAuthHelper.getDisabledAuthHelper());
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executor, "host",
            GrpcAuthHelper.getDisabledAuthHelper());
    kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executor, executor, "host",
            GrpcAuthHelper.getDisabledAuthHelper());
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
            streamTransactionMetadataTasks, new SegmentHelper(connectionPool, hostStore, executor), executor, null, requestTracker);
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final ScalingPolicy policy2 = ScalingPolicy.fixed(3);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    final StreamConfiguration configuration2 = StreamConfiguration.builder().scalingPolicy(policy2).build();
    // createScope
    streamStore.createScope(SCOPE, null, executor).get();
    // region createStream
    startTs = System.currentTimeMillis();
    OperationContext context = streamStore.createStreamContext(SCOPE, stream1, 0L);
    streamStore.createStream(SCOPE, stream1, configuration1, startTs, context, executor).get();
    streamStore.setState(SCOPE, stream1, State.ACTIVE, context, executor).get();
    OperationContext context2 = streamStore.createStreamContext(SCOPE, stream2, 0L);
    streamStore.createStream(SCOPE, stream2, configuration2, startTs, context2, executor).get();
    streamStore.setState(SCOPE, stream2, State.ACTIVE, context2, executor).get();
    // endregion
    // region scaleSegments
    SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.75, 1.0);
    List<Long> sealedSegments = Collections.singletonList(1L);
    scaleTs = System.currentTimeMillis();
    VersionedMetadata<EpochTransitionRecord> record = streamStore.submitScale(SCOPE, stream1, sealedSegments,
            Arrays.asList(segment1, segment2), startTs, null, null, executor).get();
    VersionedMetadata<State> state = streamStore.getVersionedState(SCOPE, stream1, null, executor).get();
    state = streamStore.updateVersionedState(SCOPE, stream1, State.SCALING, state, null, executor).get();
    record = streamStore.startScale(SCOPE, stream1, false, record, state, null, executor).get();
    streamStore.scaleCreateNewEpochs(SCOPE, stream1, record, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream1, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)),
            record, null, executor).get();
    streamStore.completeScale(SCOPE, stream1, record, null, executor).get();
    streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.0, 0.5);
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.75, 1.0);
    sealedSegments = Arrays.asList(0L, 1L, 2L);
    record = streamStore.submitScale(SCOPE, stream2, sealedSegments, Arrays.asList(segment3, segment4, segment5),
            scaleTs, null, null, executor).get();
    state = streamStore.getVersionedState(SCOPE, stream2, null, executor).get();
    state = streamStore.updateVersionedState(SCOPE, stream2, State.SCALING, state, null, executor).get();
    record = streamStore.startScale(SCOPE, stream2, false, record, state, null, executor).get();
    streamStore.scaleCreateNewEpochs(SCOPE, stream2, record, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream2, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)),
            record, null, executor).get();
    streamStore.completeScale(SCOPE, stream2, record, null, executor).get();
    streamStore.setState(SCOPE, stream2, State.ACTIVE, null, executor).get();
    // endregion
}
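The identical scale sequence runs twice above, once per stream. Factoring it into a helper (a hypothetical refactor; the store calls are exactly those used in setup) makes the steps of a manual scale explicit: submit the scale, move the stream to SCALING, start the scale, create the new epochs, seal the old segments, complete the scale, and return the stream to ACTIVE:

// Hypothetical helper consolidating the scale sequence used twice in setup.
private void scaleStream(String scope, String stream, List<Long> sealedSegments,
                         List<SimpleEntry<Double, Double>> newRanges, long ts) throws Exception {
    VersionedMetadata<EpochTransitionRecord> record = streamStore.submitScale(scope, stream, sealedSegments,
            newRanges, ts, null, null, executor).get();
    VersionedMetadata<State> state = streamStore.getVersionedState(scope, stream, null, executor).get();
    state = streamStore.updateVersionedState(scope, stream, State.SCALING, state, null, executor).get();
    record = streamStore.startScale(scope, stream, false, record, state, null, executor).get();
    streamStore.scaleCreateNewEpochs(scope, stream, record, null, executor).get();
    streamStore.scaleSegmentsSealed(scope, stream,
            sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
    streamStore.completeScale(scope, stream, record, null, executor).get();
    streamStore.setState(scope, stream, State.ACTIVE, null, executor).get();
}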