Use of io.pravega.common.tracing.RequestTracker in project pravega by pravega.
The class WatermarkWorkflowTest, method testWatermarkClientClose.
@Test(timeout = 10000L)
public void testWatermarkClientClose() {
String scope = "scope1";
String streamName = "stream1";
StreamImpl stream = new StreamImpl(scope, streamName);
SynchronizerClientFactory clientFactory = spy(SynchronizerClientFactory.class);
String markStreamName = NameUtils.getMarkStreamForStream(streamName);
@Cleanup MockRevisionedStreamClient revisionedClient = new MockRevisionedStreamClient();
doAnswer(x -> revisionedClient).when(clientFactory).createRevisionedStreamClient(anyString(), any(), any());
doNothing().when(clientFactory).close();
PeriodicWatermarking.WatermarkClient client = new PeriodicWatermarking.WatermarkClient(stream, clientFactory);
client.close();
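// closing the watermark client must not close the shared client factory.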
verify(clientFactory, never()).close();
client = new PeriodicWatermarking.WatermarkClient(stream, clientFactory);
client.close();
verify(clientFactory, never()).close();
String s = "failing creation";
doThrow(new RuntimeException(s)).when(clientFactory).createRevisionedStreamClient(anyString(), any(), any());
AssertExtensions.assertThrows("constructor should throw",
() -> new PeriodicWatermarking.WatermarkClient(stream, clientFactory),
e -> e instanceof RuntimeException && s.equals(e.getMessage()));
@Cleanup PeriodicWatermarking periodicWatermarking = new PeriodicWatermarking(streamMetadataStore, bucketStore, sp -> clientFactory, executor, new RequestTracker(false));
streamMetadataStore.createScope(scope, null, executor).join();
streamMetadataStore.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).timestampAggregationTimeout(10000L).build(), System.currentTimeMillis(), null, executor).join();
streamMetadataStore.createStream(scope, markStreamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), System.currentTimeMillis(), null, executor).join();
streamMetadataStore.setState(scope, markStreamName, State.ACTIVE, null, executor).join();
streamMetadataStore.setState(scope, streamName, State.ACTIVE, null, executor).join();
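// 1. note a writer mark for writer1.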
String writer1 = "writer1";
Map<Long, Long> map1 = ImmutableMap.of(0L, 100L, 1L, 100L);
streamMetadataStore.noteWriterMark(scope, streamName, writer1, 100L, map1, null, executor).join();
// 2. run watermarking workflow.
periodicWatermarking.watermark(stream).join();
assertTrue(periodicWatermarking.checkExistsInCache(scope));
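// evicting the scope from the cache releases its client factory.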
periodicWatermarking.evictFromCache(scope);
// verify that the synchronizer client factory was closed.
verify(clientFactory, times(1)).close();
}
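The pattern above boils down to Mockito spy/stub accounting on a shared factory. A minimal, self-contained sketch of the same close-counting idea, written against a hypothetical Factory interface (the interface and its names are illustrative, not Pravega API):
import static org.mockito.Mockito.*;
import org.junit.Test;

public class FactoryCloseSketch {
    // Hypothetical factory interface, for illustration only.
    interface Factory extends AutoCloseable {
        Object createClient(String name);
        @Override
        void close();
    }

    @Test
    public void factoryClosedExactlyOnce() {
        Factory factory = mock(Factory.class);
        doAnswer(invocation -> new Object()).when(factory).createClient(anyString());
        factory.createClient("a");
        // creating and discarding clients must not close the shared factory.
        verify(factory, never()).close();
        factory.close();
        // exactly one close, mirroring the times(1) assertion in the test above.
        verify(factory, times(1)).close();
    }
}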
Use of io.pravega.common.tracing.RequestTracker in project pravega by pravega.
The class WatermarkWorkflowTest, method testWriterTimeout.
@Test(timeout = 30000L)
public void testWriterTimeout() {
SynchronizerClientFactory clientFactory = spy(SynchronizerClientFactory.class);
ConcurrentHashMap<String, MockRevisionedStreamClient> revisionedStreamClientMap = new ConcurrentHashMap<>();
doAnswer(x -> {
String streamName = x.getArgument(0);
return revisionedStreamClientMap.computeIfAbsent(streamName, s -> new MockRevisionedStreamClient());
}).when(clientFactory).createRevisionedStreamClient(anyString(), any(), any());
StreamMetadataStore streamMetadataStoreSpied = spy(this.streamMetadataStore);
BucketStore bucketStoreSpied = spy(this.bucketStore);
@Cleanup PeriodicWatermarking periodicWatermarking = new PeriodicWatermarking(streamMetadataStoreSpied, bucketStoreSpied, sp -> clientFactory, executor, new RequestTracker(false));
String streamName = "stream";
String scope = "scope";
streamMetadataStoreSpied.createScope(scope, null, executor).join();
streamMetadataStoreSpied.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).timestampAggregationTimeout(3000L).build(), System.currentTimeMillis(), null, executor).join();
streamMetadataStoreSpied.setState(scope, streamName, State.ACTIVE, null, executor).join();
// 2. note writer1, writer2, writer3 marks.
// Each writer reports positions for all three segments:
// writer1 advances segment 0, writer2 advances segment 1, and writer3 advances segment 2.
String writer1 = "writer1";
streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer1, 102L, ImmutableMap.of(0L, 100L, 1L, 0L, 2L, 0L), null, executor).join();
String writer2 = "writer2";
streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer2, 101L, ImmutableMap.of(0L, 0L, 1L, 100L, 2L, 0L), null, executor).join();
String writer3 = "writer3";
streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer3, 100L, ImmutableMap.of(0L, 0L, 1L, 0L, 2L, 100L), null, executor).join();
// 3. run watermarking workflow.
StreamImpl stream = new StreamImpl(scope, streamName);
periodicWatermarking.watermark(stream).join();
// verify that a watermark has been emitted.
MockRevisionedStreamClient revisionedClient = revisionedStreamClientMap.get(NameUtils.getMarkStreamForStream(streamName));
assertEquals(revisionedClient.watermarks.size(), 1);
// Don't report time from writer3
streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer1, 200L, ImmutableMap.of(0L, 200L, 1L, 0L, 2L, 0L), null, executor).join();
streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer2, 200L, ImmutableMap.of(0L, 0L, 1L, 200L, 2L, 0L), null, executor).join();
// no new watermark should be emitted, writers should be tracked for inactivity
periodicWatermarking.watermark(stream).join();
assertEquals(revisionedClient.watermarks.size(), 1);
verify(streamMetadataStoreSpied, never()).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
// call again. Still no new watermark should be emitted as writers have not timed out
periodicWatermarking.watermark(stream).join();
assertEquals(revisionedClient.watermarks.size(), 1);
verify(streamMetadataStoreSpied, never()).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
// call watermark after a further delay of 5 seconds. writer3 should time out because the stream's timestampAggregationTimeout is 3 seconds.
Futures.delayedFuture(() -> periodicWatermarking.watermark(stream), 5000L, executor).join();
verify(streamMetadataStoreSpied, times(1)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
// a watermark should now be emitted without considering writer3.
assertEquals(revisionedClient.watermarks.size(), 2);
Watermark watermark = revisionedClient.watermarks.get(1).getValue();
assertEquals(watermark.getLowerTimeBound(), 200L);
assertEquals(watermark.getStreamCut().size(), 3);
assertEquals(getSegmentOffset(watermark, 0L), 200L);
assertEquals(getSegmentOffset(watermark, 1L), 200L);
assertEquals(getSegmentOffset(watermark, 2L), 100L);
// call watermark workflow again so that both writers are tracked for inactivity
periodicWatermarking.watermark(stream).join();
assertEquals(revisionedClient.watermarks.size(), 2);
verify(streamMetadataStoreSpied, times(1)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
// introduce a further delay and verify that the remaining writers are removed and the stream is discontinued from watermarking computation.
Futures.delayedFuture(() -> periodicWatermarking.watermark(stream), 5000L, executor).join();
// verify that stream is discontinued from tracking for watermarking
verify(streamMetadataStoreSpied, times(3)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
verify(bucketStoreSpied, times(1)).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
// note a new time for writer3 and verify that a watermark is emitted.
streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer3, 300L, ImmutableMap.of(0L, 300L, 1L, 0L, 2L, 0L), null, executor).join();
periodicWatermarking.watermark(stream).join();
assertEquals(revisionedClient.watermarks.size(), 3);
watermark = revisionedClient.watermarks.get(2).getValue();
assertEquals(watermark.getLowerTimeBound(), 300L);
assertEquals(watermark.getStreamCut().size(), 3);
assertEquals(getSegmentOffset(watermark, 0L), 300L);
assertEquals(getSegmentOffset(watermark, 1L), 200L);
assertEquals(getSegmentOffset(watermark, 2L), 100L);
}
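The asserted watermark values follow a combination rule that the test makes visible: the lower time bound is the minimum time across the writers that are still active, and the stream cut takes, per segment, the maximum of the active writers' reported offsets and the previous watermark's cut. A plain-Java sketch of that rule as derived from the assertions above (not the Pravega implementation itself):
import com.google.common.collect.ImmutableMap;
import java.util.*;

// active writers after writer3 timed out: writer1 and writer2, both at time 200.
List<Long> times = Arrays.asList(200L, 200L);
List<Map<Long, Long>> positions = Arrays.asList(
        ImmutableMap.of(0L, 200L, 1L, 0L, 2L, 0L),
        ImmutableMap.of(0L, 0L, 1L, 200L, 2L, 0L));
// previous watermark's stream cut (from the first round of marks): every segment at offset 100.
Map<Long, Long> streamCut = new HashMap<>(ImmutableMap.of(0L, 100L, 1L, 100L, 2L, 100L));
long lowerTimeBound = Collections.min(times); // 200, as asserted.
for (Map<Long, Long> position : positions) {
    position.forEach((segment, offset) -> streamCut.merge(segment, offset, Math::max));
}
// streamCut is now {0=200, 1=200, 2=100}, matching the getSegmentOffset assertions.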
Use of io.pravega.common.tracing.RequestTracker in project pravega by pravega.
The class ZkStoreBucketServiceTest, method testOwnershipOfExistingBucket.
@Test(timeout = 60000)
public void testOwnershipOfExistingBucket() throws Exception {
RequestTracker requestTracker = new RequestTracker(true);
TestingServer zkServer2 = new TestingServerStarter().start();
zkServer2.start();
CuratorFramework zkClient2 = CuratorFrameworkFactory.newClient(zkServer2.getConnectString(), 10000, 1000, (r, e, s) -> false);
zkClient2.start();
@Cleanup("shutdownNow") ScheduledExecutorService executor2 = ExecutorServiceHelpers.newScheduledThreadPool(10, "test");
String hostId = UUID.randomUUID().toString();
BucketStore bucketStore2 = StreamStoreFactory.createZKBucketStore(ImmutableMap.of(BucketStore.ServiceType.RetentionService, 1), zkClient2, executor2);
StreamMetadataStore streamMetadataStore2 = StreamStoreFactory.createZKStore(zkClient2, executor2);
TaskMetadataStore taskMetadataStore = TaskStoreFactory.createInMemoryStore(executor2);
SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
StreamMetadataTasks streamMetadataTasks2 = new StreamMetadataTasks(streamMetadataStore2, bucketStore2, taskMetadataStore, segmentHelper, executor2, hostId, GrpcAuthHelper.getDisabledAuthHelper());
String scope = "scope1";
String streamName = "stream1";
bucketStore2.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope, streamName, executor2).join();
String scope2 = "scope2";
String streamName2 = "stream2";
bucketStore2.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope2, streamName2, executor2).join();
BucketServiceFactory bucketStoreFactory = new BucketServiceFactory(hostId, bucketStore2, 5);
BucketManager service2 = bucketStoreFactory.createRetentionService(Duration.ofMillis(5000), stream -> CompletableFuture.completedFuture(null), executor2);
service2.startAsync();
service2.awaitRunning();
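// allow the bucket service time to take ownership of the two pre-existing streams.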
Thread.sleep(10000);
assertTrue(service2.getBucketServices().values().stream().allMatch(x -> x.getKnownStreams().size() == 2));
service2.stopAsync();
service2.awaitTerminated();
zkClient2.close();
zkServer2.close();
streamMetadataTasks2.close();
ExecutorServiceHelpers.shutdown(executor2);
}
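The ZooKeeper wiring in this test is the standard curator-test pattern: an embedded TestingServer plus a client built by CuratorFrameworkFactory. A minimal standalone sketch of that setup and teardown using only Curator API (no Pravega types; a stock RetryOneTime policy stands in for the test's always-fail retry lambda):
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.RetryOneTime;
import org.apache.curator.test.TestingServer;

try (TestingServer zk = new TestingServer()) {
    // session timeout 10s and connection timeout 1s, matching the values used above.
    CuratorFramework client = CuratorFrameworkFactory.newClient(
            zk.getConnectString(), 10000, 1000, new RetryOneTime(100));
    client.start();
    client.blockUntilConnected();
    // ... exercise code that needs a live ZooKeeper ...
    client.close();
}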
Use of io.pravega.common.tracing.RequestTracker in project pravega by pravega.
The class ControllerServiceTest, method setup.
@Before
public void setup() throws Exception {
final TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
final HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
BucketStore bucketStore = StreamStoreFactory.createInMemoryBucketStore();
connectionPool = new ConnectionPoolImpl(ClientConfig.builder().build(), new SocketConnectionFactoryImpl(ClientConfig.builder().build()));
SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executor, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, new SegmentHelper(connectionPool, hostStore, executor), executor, null, requestTracker);
final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
final ScalingPolicy policy2 = ScalingPolicy.fixed(3);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
final StreamConfiguration configuration2 = StreamConfiguration.builder().scalingPolicy(policy2).build();
// createScope
streamStore.createScope(SCOPE, null, executor).get();
// region createStream
startTs = System.currentTimeMillis();
OperationContext context = streamStore.createStreamContext(SCOPE, stream1, 0L);
streamStore.createStream(SCOPE, stream1, configuration1, startTs, context, executor).get();
streamStore.setState(SCOPE, stream1, State.ACTIVE, context, executor).get();
OperationContext context2 = streamStore.createStreamContext(SCOPE, stream2, 0L);
streamStore.createStream(SCOPE, stream2, configuration2, startTs, context2, executor).get();
streamStore.setState(SCOPE, stream2, State.ACTIVE, context2, executor).get();
// endregion
// region scaleSegments
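// scale stream1: seal segment 1 and split its key-space range [0.5, 1.0) into two new segments.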
SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.5, 0.75);
SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.75, 1.0);
List<Long> sealedSegments = Collections.singletonList(1L);
scaleTs = System.currentTimeMillis();
VersionedMetadata<EpochTransitionRecord> record = streamStore.submitScale(SCOPE, stream1, sealedSegments, Arrays.asList(segment1, segment2), startTs, null, null, executor).get();
VersionedMetadata<State> state = streamStore.getVersionedState(SCOPE, stream1, null, executor).get();
state = streamStore.updateVersionedState(SCOPE, stream1, State.SCALING, state, null, executor).get();
record = streamStore.startScale(SCOPE, stream1, false, record, state, null, executor).get();
streamStore.scaleCreateNewEpochs(SCOPE, stream1, record, null, executor).get();
streamStore.scaleSegmentsSealed(SCOPE, stream1, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
streamStore.completeScale(SCOPE, stream1, record, null, executor).get();
streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
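// scale stream2: seal all three segments and replace them with three new segments covering the full key space.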
SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.0, 0.5);
SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.5, 0.75);
SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.75, 1.0);
sealedSegments = Arrays.asList(0L, 1L, 2L);
record = streamStore.submitScale(SCOPE, stream2, sealedSegments, Arrays.asList(segment3, segment4, segment5), scaleTs, null, null, executor).get();
state = streamStore.getVersionedState(SCOPE, stream2, null, executor).get();
state = streamStore.updateVersionedState(SCOPE, stream2, State.SCALING, state, null, executor).get();
record = streamStore.startScale(SCOPE, stream2, false, record, state, null, executor).get();
streamStore.scaleCreateNewEpochs(SCOPE, stream2, record, null, executor).get();
streamStore.scaleSegmentsSealed(SCOPE, stream2, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
streamStore.completeScale(SCOPE, stream2, record, null, executor).get();
streamStore.setState(SCOPE, stream2, State.ACTIVE, null, executor).get();
// endregion
}
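The two scale operations above repeat the same seven-step store sequence: submitScale, move the state to SCALING, startScale, scaleCreateNewEpochs, scaleSegmentsSealed, completeScale, and back to ACTIVE. A sketch that extracts the sequence into a helper; every call matches the usage above, only the helper method itself is illustrative:
// Illustrative helper; mirrors the sequence applied to stream1 and stream2 above.
private void scaleStream(String scope, String stream, List<Long> sealedSegments,
                         List<SimpleEntry<Double, Double>> newRanges, long ts) throws Exception {
    VersionedMetadata<EpochTransitionRecord> record = streamStore.submitScale(
            scope, stream, sealedSegments, newRanges, ts, null, null, executor).get();
    VersionedMetadata<State> state = streamStore.getVersionedState(scope, stream, null, executor).get();
    state = streamStore.updateVersionedState(scope, stream, State.SCALING, state, null, executor).get();
    record = streamStore.startScale(scope, stream, false, record, state, null, executor).get();
    streamStore.scaleCreateNewEpochs(scope, stream, record, null, executor).get();
    streamStore.scaleSegmentsSealed(scope, stream,
            sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
    streamStore.completeScale(scope, stream, record, null, executor).get();
    streamStore.setState(scope, stream, State.ACTIVE, null, executor).get();
}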