Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From class WatermarkWorkflowTest, method testWriterTimeout:
@Test(timeout = 30000L)
public void testWriterTimeout() {
    SynchronizerClientFactory clientFactory = spy(SynchronizerClientFactory.class);
    ConcurrentHashMap<String, MockRevisionedStreamClient> revisionedStreamClientMap = new ConcurrentHashMap<>();
    // Hand out one shared mock revisioned stream client per stream name so that emitted watermarks can be inspected later.
    doAnswer(x -> {
        String streamName = x.getArgument(0);
        return revisionedStreamClientMap.compute(streamName, (s, rsc) -> {
            if (rsc != null) {
                return rsc;
            } else {
                return new MockRevisionedStreamClient();
            }
        });
    }).when(clientFactory).createRevisionedStreamClient(anyString(), any(), any());
    StreamMetadataStore streamMetadataStoreSpied = spy(this.streamMetadataStore);
    BucketStore bucketStoreSpied = spy(this.bucketStore);
    @Cleanup
    PeriodicWatermarking periodicWatermarking = new PeriodicWatermarking(streamMetadataStoreSpied, bucketStoreSpied,
            sp -> clientFactory, executor, new RequestTracker(false));
    // 1. create scope and stream with a timestampAggregationTimeout of 3 seconds.
    String streamName = "stream";
    String scope = "scope";
    streamMetadataStoreSpied.createScope(scope, null, executor).join();
    streamMetadataStoreSpied.createStream(scope, streamName,
            StreamConfiguration.builder()
                               .scalingPolicy(ScalingPolicy.fixed(3))
                               .timestampAggregationTimeout(3000L)
                               .build(),
            System.currentTimeMillis(), null, executor).join();
    streamMetadataStoreSpied.setState(scope, streamName, State.ACTIVE, null, executor).join();
    // 2. note writer1, writer2, writer3 marks.
    // writer 1 reports segments 0, 1.
    // writer 2 reports segments 1, 2.
    // writer 3 reports segments 0, 2.
    String writer1 = "writer1";
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer1, 102L, ImmutableMap.of(0L, 100L, 1L, 0L, 2L, 0L), null, executor).join();
    String writer2 = "writer2";
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer2, 101L, ImmutableMap.of(0L, 0L, 1L, 100L, 2L, 0L), null, executor).join();
    String writer3 = "writer3";
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer3, 100L, ImmutableMap.of(0L, 0L, 1L, 0L, 2L, 100L), null, executor).join();
    // 3. run the watermarking workflow.
    StreamImpl stream = new StreamImpl(scope, streamName);
    periodicWatermarking.watermark(stream).join();
    // verify that a watermark has been emitted.
    MockRevisionedStreamClient revisionedClient = revisionedStreamClientMap.get(NameUtils.getMarkStreamForStream(streamName));
    assertEquals(revisionedClient.watermarks.size(), 1);
    // don't report time from writer3.
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer1, 200L, ImmutableMap.of(0L, 200L, 1L, 0L, 2L, 0L), null, executor).join();
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer2, 200L, ImmutableMap.of(0L, 0L, 1L, 200L, 2L, 0L), null, executor).join();
    // no new watermark should be emitted; writers should be tracked for inactivity.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 1);
    verify(streamMetadataStoreSpied, never()).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // call again. Still no new watermark should be emitted because the writers have not timed out yet.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 1);
    verify(streamMetadataStoreSpied, never()).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // call watermark after a delay of 5 more seconds. writer3 should time out because the stream's
    // timestampAggregationTimeout is 3 seconds.
    Futures.delayedFuture(() -> periodicWatermarking.watermark(stream), 5000L, executor).join();
    verify(streamMetadataStoreSpied, times(1)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // a watermark should be emitted, computed without considering writer3.
    assertEquals(revisionedClient.watermarks.size(), 2);
    Watermark watermark = revisionedClient.watermarks.get(1).getValue();
    assertEquals(watermark.getLowerTimeBound(), 200L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 200L);
    assertEquals(getSegmentOffset(watermark, 1L), 200L);
    assertEquals(getSegmentOffset(watermark, 2L), 100L);
    // call the watermarking workflow again so that both remaining writers are tracked for inactivity.
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 2);
    verify(streamMetadataStoreSpied, times(1)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, never()).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // now introduce a further delay and verify that all writers are removed and the stream is
    // discontinued from watermarking computation.
    Futures.delayedFuture(() -> periodicWatermarking.watermark(stream), 5000L, executor).join();
    verify(streamMetadataStoreSpied, times(3)).removeWriter(anyString(), anyString(), anyString(), any(), any(), any());
    verify(bucketStoreSpied, times(1)).removeStreamFromBucketStore(any(), anyString(), anyString(), any());
    // note a new time for writer3 and verify that a watermark is emitted.
    streamMetadataStoreSpied.noteWriterMark(scope, streamName, writer3, 300L, ImmutableMap.of(0L, 300L, 1L, 0L, 2L, 0L), null, executor).join();
    periodicWatermarking.watermark(stream).join();
    assertEquals(revisionedClient.watermarks.size(), 3);
    watermark = revisionedClient.watermarks.get(2).getValue();
    assertEquals(watermark.getLowerTimeBound(), 300L);
    assertEquals(watermark.getStreamCut().size(), 3);
    assertEquals(getSegmentOffset(watermark, 0L), 300L);
    assertEquals(getSegmentOffset(watermark, 1L), 200L);
    assertEquals(getSegmentOffset(watermark, 2L), 100L);
}
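The assertions above rely on a getSegmentOffset helper that is not part of this excerpt. A minimal sketch of what such a helper might look like, assuming Watermark.getStreamCut() maps SegmentWithRange entries to offsets (Watermark and SegmentWithRange are from Pravega's watermark model; the helper body itself is an assumption):

// Hypothetical helper (not shown in the excerpt above): looks up the offset recorded
// in a watermark's stream cut for the segment with the given segment id.
private long getSegmentOffset(Watermark watermark, long segmentId) {
    return watermark.getStreamCut().entrySet().stream()
                    .filter(entry -> entry.getKey().getSegmentId() == segmentId)
                    .findAny()
                    .map(Map.Entry::getValue)
                    .orElseThrow(() -> new AssertionError("segment not found in stream cut: " + segmentId));
}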
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From class StreamMetadataTest, method testMetadataOperations:
@Test(timeout = 60000)
public void testMetadataOperations() throws Exception {
    @Cleanup
    TestingServer zkTestServer = new TestingServerStarter().start();
    ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    int servicePort = TestUtils.getAvailableListenPort();
    TableStore tableStore = serviceBuilder.createTableStoreService();
    @Cleanup
    PravegaConnectionListener server = new PravegaConnectionListener(false, servicePort, store, tableStore, serviceBuilder.getLowPriorityExecutor());
    server.startListening();
    int controllerPort = TestUtils.getAvailableListenPort();
    @Cleanup
    ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), false, controllerPort, "localhost", servicePort, 4);
    Controller controller = controllerWrapper.getController();
    final String scope1 = "scope1";
    final String streamName1 = "stream1";
    final String scopeSeal = "scopeSeal";
    final String streamNameSeal = "streamSeal";
    final String scope2 = "scope2";
    final String streamName2 = "stream2";
    assertEquals(CreateScopeStatus.Status.SUCCESS, controllerWrapper.getControllerService().createScope(scope1, 0L).get().getStatus());
    final ScalingPolicy scalingPolicy = ScalingPolicy.fixed(2);
    final StreamConfiguration config1 = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
    // create stream and seal stream
    // CS1: create a stream, given a streamName, scope and config.
    assertTrue(controller.createStream(scope1, streamName1, config1).get());
    // Seal a stream, given a streamName and scope.
    controllerWrapper.getControllerService().createScope(scopeSeal, 0L).get();
    final StreamConfiguration configSeal = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
    assertTrue(controller.createStream(scopeSeal, streamNameSeal, configSeal).get());
    controller.getCurrentSegments(scopeSeal, streamNameSeal).get();
    assertTrue(controller.sealStream(scopeSeal, streamNameSeal).get());
    assertTrue("FAILURE: No active segments should be present in a sealed stream", controller.getCurrentSegments(scopeSeal, streamNameSeal).get().getSegments().isEmpty());
    // Sealing an already sealed stream should succeed idempotently.
    assertTrue(controller.sealStream(scopeSeal, streamNameSeal).get());
    assertTrue("FAILURE: No active segments should be present in a sealed stream", controller.getCurrentSegments(scopeSeal, streamNameSeal).get().getSegments().isEmpty());
    // Sealing a non-existent stream should fail.
    assertFutureThrows("FAILURE: Seal operation on a non-existent stream should fail", controller.sealStream(scopeSeal, "nonExistentStream"), t -> true);
    // CS2: stream duplication is not allowed.
    assertFalse(controller.createStream(scope1, streamName1, config1).get());
    // CS3: create a stream with the same stream name in a different scope.
    controllerWrapper.getControllerService().createScope(scope2, 0L).get();
    final StreamConfiguration config2 = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
    assertTrue(controller.createStream(scope2, streamName1, config2).get());
    // CS4: create a stream with a different stream name and config in the same scope.
    final StreamConfiguration config3 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).build();
    assertTrue(controller.createStream(scope1, streamName2, config3).get());
    // update stream config (updateStream)
    // AS3: update the type of scaling policy.
    final StreamConfiguration config6 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byDataRate(100, 2, 2)).build();
    assertTrue(controller.updateStream(scope1, streamName1, config6).get());
    // AS4: update the target rate of the scaling policy.
    final StreamConfiguration config7 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byDataRate(200, 2, 2)).build();
    assertTrue(controller.updateStream(scope1, streamName1, config7).get());
    // AS5: update the scale factor of the scaling policy.
    final StreamConfiguration config8 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byDataRate(200, 4, 2)).build();
    assertTrue(controller.updateStream(scope1, streamName1, config8).get());
    // AS6: update the minNumSegments of the scaling policy.
    final StreamConfiguration config9 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byDataRate(200, 4, 3)).build();
    assertTrue(controller.updateStream(scope1, streamName1, config9).get());
    // the number of segments in the stream should now be 3.
    // AS7: update the configuration of a non-existent stream.
    final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build();
    CompletableFuture<Boolean> updateStatus = controller.updateStream("scope", "streamName", config);
    assertFutureThrows("FAILURE: Updating the configuration of a non-existent stream should fail", updateStatus, t -> true);
    // get currently active segments
    // GCS1: get active segments of the stream.
    assertFalse(controller.getCurrentSegments(scope1, streamName1).get().getSegments().isEmpty());
    // GCS2: get active segments for a non-existent stream.
    assertFutureThrows("Active segments cannot be fetched for a non-existent stream", controller.getCurrentSegments("scope", "streamName"), t -> true);
    // get positions at a given timestamp
    // PS1: get positions at a given timestamp, given stream, timestamp and count.
    Stream stream1 = new StreamImpl(scope1, streamName1);
    CompletableFuture<Map<Segment, Long>> segments = controller.getSegmentsAtTime(stream1, System.currentTimeMillis());
    assertEquals(2, segments.get().size());
    // PS2: get positions of a stream with a different count.
    Stream stream2 = new StreamImpl(scope1, streamName2);
    segments = controller.getSegmentsAtTime(stream2, System.currentTimeMillis());
    assertEquals(3, segments.get().size());
    // PS4: get positions at a given timestamp for a non-existent stream.
    Stream stream = new StreamImpl("scope", "streamName");
    assertFutureThrows("Fetching segments at a given timestamp for a non-existent stream", controller.getSegmentsAtTime(stream, System.currentTimeMillis()), t -> true);
    // PS5: get positions at a time before stream creation.
    segments = controller.getSegmentsAtTime(stream1, System.currentTimeMillis() - 36000);
    assertEquals(segments.join().size(), 2);
    assertEquals(controller.getCurrentSegments(scope1, streamName1).get().getSegments().size(), 3);
    // PS6: get positions at a time in the future, after stream creation.
    segments = controller.getSegmentsAtTime(stream1, System.currentTimeMillis() + 3600);
    assertFalse(segments.get().isEmpty());
}
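The update sequence above (AS3 through AS6) changes one scaling-policy field at a time. For reference, a short sketch spelling out which positional argument is which; the parameter descriptions here are explanatory glosses, not names copied from the API:

// ScalingPolicy.byDataRate(targetRate, scaleFactor, minNumSegments):
//   targetRate     - desired write rate per segment (in KB/s for byDataRate)
//   scaleFactor    - maximum number of splits of a segment on scale-up
//   minNumSegments - floor on the segment count; raising it to 3 (config9) is why
//                    the later getCurrentSegments assertion expects 3 segments.
ScalingPolicy policy = ScalingPolicy.byDataRate(200, 4, 3);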
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From class EndToEndCBRTest, method testReaderGroupAutoRetention:
@Test(timeout = 60000)
public void testReaderGroupAutoRetention() throws Exception {
    String scope = "test";
    String streamName = "testReaderGroupAutoRetention";
    String groupName = "testReaderGroupAutoRetention-group";
    StreamConfiguration config = StreamConfiguration.builder()
                                                    .scalingPolicy(ScalingPolicy.fixed(1))
                                                    .retentionPolicy(RetentionPolicy.bySizeBytes(10, Long.MAX_VALUE))
                                                    .build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).get();
    controller.createStream(scope, streamName, config).get();
    Stream stream = Stream.of(scope, streamName);
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    // write events.
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    writer.writeEvent("1", "e1").join();
    writer.writeEvent("2", "e2").join();
    // create a ReaderGroup with automatic-release-at-last-checkpoint retention.
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
    groupManager.createReaderGroup(groupName, ReaderGroupConfig.builder()
                                                               .disableAutomaticCheckpoints()
                                                               .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
                                                               .stream(stream)
                                                               .build());
    // create a Reader.
    AtomicLong clock = new AtomicLong();
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("reader1", groupName, serializer, ReaderConfig.builder().build(), clock::get, clock::get);
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    EventRead<String> read = reader.readNextEvent(60000);
    assertEquals("e1", read.getEvent());
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    @Cleanup("shutdown")
    final InlineExecutor backgroundExecutor = new InlineExecutor();
    ReaderGroup readerGroup = groupManager.getReaderGroup(groupName);
    CompletableFuture<Checkpoint> checkpoint = readerGroup.initiateCheckpoint("Checkpoint", backgroundExecutor);
    assertFalse(checkpoint.isDone());
    read = reader.readNextEvent(60000);
    assertTrue(read.isCheckpoint());
    assertEquals("Checkpoint", read.getCheckpointName());
    assertNull(read.getEvent());
    clock.addAndGet(CLOCK_ADVANCE_INTERVAL);
    read = reader.readNextEvent(60000);
    assertEquals("e2", read.getEvent());
    Checkpoint cpResult = checkpoint.get(5, TimeUnit.SECONDS);
    assertTrue(checkpoint.isDone());
    assertEquals("Checkpoint", cpResult.getName());
    read = reader.readNextEvent(100);
    assertNull(read.getEvent());
    assertFalse(read.isCheckpoint());
    // once the checkpoint completes, retention should truncate the stream past e1.
    AssertExtensions.assertEventuallyEquals(true, () -> controller.getSegmentsAtTime(new StreamImpl(scope, streamName), 0L).join()
                                                                  .values().stream().anyMatch(off -> off > 0), 30 * 1000L);
    // a fresh reader group should therefore only see events from the truncation point onwards.
    String group2 = groupName + "2";
    groupManager.createReaderGroup(group2, ReaderGroupConfig.builder()
                                                            .disableAutomaticCheckpoints()
                                                            .stream(NameUtils.getScopedStreamName(scope, streamName))
                                                            .build());
    EventStreamReader<String> reader2 = clientFactory.createReader("reader2", group2, serializer, ReaderConfig.builder().build());
    EventRead<String> eventRead2 = reader2.readNextEvent(10000);
    assertEquals("e2", eventRead2.getEvent());
}
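This test refers to a serializer field and a CLOCK_ADVANCE_INTERVAL constant defined elsewhere in the test class. A plausible sketch of those declarations, assuming the reader clock supplied via clock::get ticks in nanoseconds; both values are assumptions for illustration:

// Assumed test-class fields (not part of this excerpt).
private final Serializer<String> serializer = new JavaSerializer<>();
// Advance the reader's clock in nanosecond units.
private static final long CLOCK_ADVANCE_INTERVAL = 6 * 1000000000L;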
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From class EndToEndTransactionOrderTest, method testOrder:
@Ignore
@Test(timeout = 100000)
public void testOrder() throws Exception {
    final AtomicBoolean done = new AtomicBoolean(false);
    CompletableFuture<Void> writer1 = startWriter("1", clientFactory, done);
    CompletableFuture<Void> writer2 = startWriter("2", clientFactory, done);
    CompletableFuture<Void> writer3 = startWriter("3", clientFactory, done);
    CompletableFuture<Void> writer4 = startWriter("4", clientFactory, done);
    // perform multiple scale-stream operations so that rolling transactions may happen.
    Stream s = new StreamImpl("test", "test");
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 1.0);
    @Cleanup("shutdownNow")
    ScheduledExecutorService executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "order");
    controller.scaleStream(s, Collections.singletonList(0L), map, executor).getFuture().get();
    controller.scaleStream(s, Collections.singletonList(NameUtils.computeSegmentId(1, 1)), map, executor).getFuture().get();
    controller.scaleStream(s, Collections.singletonList(NameUtils.computeSegmentId(2, 2)), map, executor).getFuture().get();
    // stop writers.
    done.set(true);
    CompletableFuture.allOf(writer1, writer2, writer3, writer4).join();
    // wait for all transactions to commit.
    Futures.allOf(eventToTxnMap.entrySet().stream()
                               .map(x -> waitTillCommitted(controller, s, x.getValue(), uncommitted))
                               .collect(Collectors.toList())).join();
    assertTrue(uncommitted.isEmpty());
    // read all events using a single reader and verify that, per writer, events arrive
    // in the order in which their transactions were created.
    List<Triple<Integer, UUID, String>> eventOrder = new LinkedList<>();
    while (!eventToTxnMap.isEmpty()) {
        EventRead<Integer> integerEventRead = reader.readNextEvent(SECONDS.toMillis(60));
        if (integerEventRead.getEvent() != null) {
            int event1 = integerEventRead.getEvent();
            UUID txnId = eventToTxnMap.remove(event1);
            String writerId = txnToWriter.get(txnId);
            UUID first = writersList.get(writerId).remove(0);
            eventOrder.add(new ImmutableTriple<>(event1, txnId, writerId));
            assertEquals(first, txnId);
        }
    }
}
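Here startWriter, waitTillCommitted, reader, and the bookkeeping collections (eventToTxnMap, txnToWriter, writersList, uncommitted) are fields and helpers of the test class that are not part of this excerpt. As one illustration, a minimal sketch of what a waitTillCommitted helper could look like, assuming Controller.checkTransactionStatus and the Transaction.Status enum; the polling logic and the executorService field are assumptions:

// Hypothetical helper: polls the transaction status until it reaches COMMITTED,
// then removes the transaction from the uncommitted set.
// Assumes a ScheduledExecutorService field named executorService.
private CompletableFuture<Void> waitTillCommitted(Controller controller, Stream s, UUID txnId, Set<UUID> uncommitted) {
    AtomicBoolean committed = new AtomicBoolean(false);
    return Futures.loop(() -> !committed.get(),
            () -> Futures.delayedFuture(() -> controller.checkTransactionStatus(s, txnId), 100L, executorService)
                         .thenAccept(status -> committed.set(status.equals(Transaction.Status.COMMITTED))),
            executorService)
                  .thenRun(() -> uncommitted.remove(txnId));
}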
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From class EndToEndTruncationTest, method testSegmentTruncationWhileReading:
/**
* This test checks the behavior of a reader (or group of readers) based on whether segment truncation takes place
* while reading (first part of the test) or before starting reading (second part).
*
* @throws InterruptedException If the current thread is interrupted while waiting for the Controller service.
*/
@Test(timeout = 60000)
public void testSegmentTruncationWhileReading() throws InterruptedException {
    final int totalEvents = 100;
    final String scope = "truncationTests";
    final String streamName = "testSegmentTruncationWhileReading";
    final String readerGroupName = "RGTestSegmentTruncationWhileReading";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).join();
    controller.createStream(scope, streamName, config).join();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    // Write half of totalEvents to the Stream.
    writeEvents(clientFactory, streamName, totalEvents / 2);
    // Seal the current segment (0) and split it into two segments (1, 2).
    Stream stream = new StreamImpl(scope, streamName);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    assertTrue(controller.scaleStream(stream, Lists.newArrayList(0L), map, executorService()).getFuture().join());
    long one = computeSegmentId(1, 1);
    long two = computeSegmentId(2, 1);
    // Write the rest of the events to the new Stream segments.
    ReadWriteUtils.writeEvents(clientFactory, streamName, totalEvents, totalEvents / 2);
    // Instantiate readers to consume from the Stream.
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
    groupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder()
                                                                     .automaticCheckpointIntervalMillis(100)
                                                                     .stream(Stream.of(scope, streamName))
                                                                     .build());
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader(String.valueOf(0), readerGroupName, new UTF8StringSerializer(), ReaderConfig.builder().build());
    int read = 0;
    while (read < 75) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null) {
            read++;
        }
    }
    // Let the reader consume some events, then truncate the stream while it is still reading.
    Exceptions.handleInterrupted(() -> Thread.sleep(500));
    Map<Long, Long> streamCutPositions = new HashMap<>();
    streamCutPositions.put(one, 0L);
    streamCutPositions.put(two, 0L);
    assertTrue(controller.truncateStream(scope, streamName, streamCutPositions).join());
    // Wait for the reader to complete and assert that it has read all the events (totalEvents).
    while (read < totalEvents) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null) {
            read++;
        }
    }
    assertEquals(read, totalEvents);
    assertNull(reader.readNextEvent(0).getEvent());
    // Assert that from the truncation call onwards, the available segments are the ones created by scaling.
    List<Long> currentSegments = controller.getCurrentSegments(scope, streamName).join().getSegments().stream()
                                           .map(Segment::getSegmentId).sorted().collect(toList());
    currentSegments.removeAll(Lists.newArrayList(one, two));
    assertTrue(currentSegments.isEmpty());
    // A new set of readers should only read the events beyond the truncation point (segments 1 and 2).
    final String newReaderGroupName = readerGroupName + "new";
    groupManager.createReaderGroup(newReaderGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
    List<CompletableFuture<Integer>> futures = readEvents(clientFactory, newReaderGroupName, 1);
    Futures.allOf(futures).join();
    assertEquals((int) futures.stream().map(CompletableFuture::join).reduce((a, b) -> a + b).get(), totalEvents / 2);
}
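The computeSegmentId calls above (from NameUtils) pack a segment's creation epoch together with its segment number into a single long. A small illustration of the resulting values, assuming the epoch occupies the high 32 bits, which is consistent with how the test derives segments 1 and 2 of epoch 1:

// computeSegmentId(segmentNumber, epoch) combines both into one 64-bit segment id.
long one = computeSegmentId(1, 1); // epoch 1, segment number 1
long two = computeSegmentId(2, 1); // epoch 1, segment number 2
// With the epoch in the high 32 bits, one == (1L << 32) + 1 and two == (1L << 32) + 2.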