Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
The class StreamCutBucketService, method processNotification.
private CompletableFuture<Void> processNotification() {
    return CompletableFuture.runAsync(() -> {
        StreamNotification notification = Exceptions.handleInterrupted(() -> notifications.poll(1, TimeUnit.SECONDS));
        if (notification != null) {
            final StreamImpl stream;
            switch (notification.getType()) {
                case StreamAdded:
                    log.info("New stream {}/{} added to bucket {} ", notification.getScope(), notification.getStream(), bucketId);
                    stream = new StreamImpl(notification.getScope(), notification.getStream());
                    retentionFutureMap.computeIfAbsent(stream, x -> getStreamRetentionFuture(stream));
                    break;
                case StreamRemoved:
                    log.info("Stream {}/{} removed from bucket {} ", notification.getScope(), notification.getStream(), bucketId);
                    stream = new StreamImpl(notification.getScope(), notification.getStream());
                    retentionFutureMap.remove(stream).cancel(true);
                    break;
                case StreamUpdated:
                    // For now we will do nothing.
                    break;
                case ConnectivityError:
                    log.info("Retention.StreamNotification for connectivity error");
                    break;
            }
        }
    }, executor);
}
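The method handles at most one queued notification per invocation, so a service has to call it repeatedly to drain the queue. Below is a minimal, self-contained sketch of that poll-and-dispatch loop using only standard java.util.concurrent types; the NotificationLoop class, its running flag, and processOnce are illustrative names, not Pravega's actual API.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

// Minimal sketch of the poll-and-dispatch pattern used by processNotification above.
final class NotificationLoop {
    private final BlockingQueue<String> notifications = new LinkedBlockingQueue<>();
    private final AtomicBoolean running = new AtomicBoolean(true);

    // One polling step: wait up to a second for a notification, then handle it.
    CompletableFuture<Void> processOnce(Executor executor) {
        return CompletableFuture.runAsync(() -> {
            try {
                String notification = notifications.poll(1, TimeUnit.SECONDS); // may time out and return null
                if (notification != null) {
                    // dispatch on the notification type here
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // mirrors what Exceptions.handleInterrupted guards against
            }
        }, executor);
    }

    // Chain steps until the service is stopped.
    CompletableFuture<Void> loop(Executor executor) {
        return processOnce(executor).thenComposeAsync(
                v -> running.get() ? loop(executor) : CompletableFuture.<Void>completedFuture(null),
                executor);
    }
}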
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
The class ZKStreamMetadataStore, method registerBucketChangeListener.
@Override
@SneakyThrows
public void registerBucketChangeListener(int bucket, BucketChangeListener listener) {
    Preconditions.checkNotNull(listener);
    PathChildrenCacheListener bucketListener = (client, event) -> {
        StreamImpl stream;
        switch (event.getType()) {
            case CHILD_ADDED:
                stream = getStreamFromPath(event.getData().getPath());
                listener.notify(new StreamNotification(stream.getScope(), stream.getStreamName(), NotificationType.StreamAdded));
                break;
            case CHILD_REMOVED:
                stream = getStreamFromPath(event.getData().getPath());
                listener.notify(new StreamNotification(stream.getScope(), stream.getStreamName(), NotificationType.StreamRemoved));
                break;
            case CHILD_UPDATED:
                stream = getStreamFromPath(event.getData().getPath());
                listener.notify(new StreamNotification(stream.getScope(), stream.getStreamName(), NotificationType.StreamUpdated));
                break;
            case CONNECTION_LOST:
                listener.notify(new StreamNotification(null, null, NotificationType.ConnectivityError));
                break;
            default:
                log.warn("Received unknown event {} on bucket {}", event.getType(), bucket);
        }
    };
    String bucketRoot = String.format(ZKStoreHelper.BUCKET_PATH, bucket);
    bucketCacheMap.put(bucket, new PathChildrenCache(storeHelper.getClient(), bucketRoot, true));
    PathChildrenCache pathChildrenCache = bucketCacheMap.get(bucket);
    pathChildrenCache.getListenable().addListener(bucketListener);
    pathChildrenCache.start(PathChildrenCache.StartMode.NORMAL);
    log.info("bucket {} change notification listener registered", bucket);
}
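Each registration starts a Curator PathChildrenCache, which keeps ZooKeeper watches alive until it is closed, so a matching unregister has to close and discard the cache. A minimal sketch of such a counterpart, reusing the bucketCacheMap and log fields from the method above (the method name unregisterBucketChangeListener is an assumption, not necessarily the project's actual signature):

public void unregisterBucketChangeListener(int bucket) {
    PathChildrenCache cache = bucketCacheMap.remove(bucket);
    if (cache != null) {
        try {
            cache.close(); // stops the cache and releases its ZooKeeper watches
        } catch (IOException e) {
            log.warn("unable to close change notification listener for bucket {}", bucket, e);
        }
    }
}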
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
The class MockController, method deleteStream.
@Override
@Synchronized
public CompletableFuture<Boolean> deleteStream(String scope, String streamName) {
    Stream stream = new StreamImpl(scope, streamName);
    if (createdStreams.get(stream) == null) {
        return CompletableFuture.completedFuture(false);
    }
    for (Segment segment : getSegmentsForStream(stream)) {
        deleteSegment(segment.getScopedName(), new PravegaNodeUri(endpoint, port));
    }
    createdStreams.remove(stream);
    createdScopes.get(scope).remove(stream);
    return CompletableFuture.completedFuture(true);
}
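A short usage sketch: the first delete removes the stream's segments and completes with true, while a repeat delete of the now-unknown stream completes with false. Here controller is assumed to be a MockController instance and the scope/stream names are illustrative.

// Hypothetical caller: "myScope"/"myStream" are illustrative names.
boolean deleted = controller.deleteStream("myScope", "myStream").join();      // true if the stream existed
boolean deletedAgain = controller.deleteStream("myScope", "myStream").join(); // false: already removed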
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
The class EndToEndTxnWithScaleTest, method testTxnWithScale.
@Test(timeout = 10000)
public void testTxnWithScale() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scope("test").streamName("test").scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test").get();
    controller.createStream(config).get();
    @Cleanup ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup ClientFactory clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup EventStreamWriter<String> test = clientFactory.createEventWriter("test", new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutScaleGracePeriod(10000).transactionTimeoutTime(10000).build());
    Transaction<String> transaction = test.beginTxn();
    transaction.writeEvent("0", "txntest1");
    transaction.commit();
    // scale
    Stream stream = new StreamImpl("test", "test");
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.33);
    map.put(0.33, 0.66);
    map.put(0.66, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0), map, executorService).getFuture().get();
    assertTrue(result);
    transaction = test.beginTxn();
    transaction.writeEvent("0", "txntest2");
    transaction.commit();
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory, connectionFactory);
    groupManager.createReaderGroup("reader", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/test").build());
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", "reader", new JavaSerializer<>(), ReaderConfig.builder().build());
    EventRead<String> event = reader.readNextEvent(10000);
    assertNotNull(event);
    assertEquals("txntest1", event.getEvent());
    event = reader.readNextEvent(10000);
    assertNotNull(event);
    assertEquals("txntest2", event.getEvent());
}
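The scale succeeds only because the replacement ranges tile the sealed segment's routing-key space [0.0, 1.0) exactly, which the three map entries above do. A minimal sketch of that invariant as a check (validateKeyRanges is a hypothetical helper, not part of Pravega's API; it uses exact double comparison, which suffices for literals like those above):

import java.util.Map;
import java.util.TreeMap;

// Hypothetical helper: true if the ranges cover [0.0, 1.0) with no gaps or overlaps.
static boolean validateKeyRanges(Map<Double, Double> newKeyRanges) {
    double expectedStart = 0.0;
    for (Map.Entry<Double, Double> range : new TreeMap<>(newKeyRanges).entrySet()) {
        if (range.getKey() != expectedStart || range.getValue() <= range.getKey()) {
            return false; // gap, overlap, or empty range
        }
        expectedStart = range.getValue();
    }
    return expectedStart == 1.0;
}
// validateKeyRanges(map) -> true for {0.0->0.33, 0.33->0.66, 0.66->1.0}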
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
The class BatchClientTest, method testBatchClient.
@Test(timeout = 50000)
public void testBatchClient() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scope(SCOPE).streamName(STREAM).scalingPolicy(ScalingPolicy.fixed(1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope(SCOPE).get();
    controller.createStream(config).get();
    // create reader and writer.
    @Cleanup ClientFactory clientFactory = ClientFactory.withScope(SCOPE, controllerUri);
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, STREAM)).build());
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, serializer, EventWriterConfig.builder().build());
    // write events to stream with 1 segment.
    writeEvents(writer);
    // scale up and write events.
    Stream stream = new StreamImpl(SCOPE, STREAM);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.33);
    map.put(0.33, 0.66);
    map.put(0.66, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0), map, executor).getFuture().get();
    assertTrue("Scale up operation", result);
    writeEvents(writer);
    // scale down and write events.
    map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    result = controller.scaleStream(stream, Arrays.asList(1, 2, 3), map, executor).getFuture().get();
    assertTrue("Scale down operation result", result);
    writeEvents(writer);
    BatchClient batchClient = clientFactory.createBatchClient();
    // List out all the segments in the stream.
    ArrayList<SegmentRange> segments = Lists.newArrayList(batchClient.getSegments(stream, null, null).getIterator());
    assertEquals("Expected number of segments", 6, segments.size());
    // Batch read all events from stream.
    List<String> batchEventList = new ArrayList<>();
    segments.forEach(segInfo -> {
        @Cleanup SegmentIterator<String> segmentIterator = batchClient.readSegment(segInfo, serializer);
        batchEventList.addAll(Lists.newArrayList(segmentIterator));
    });
    assertEquals("Event count", 9, batchEventList.size());
    // read from a given offset.
    Segment seg0 = new Segment(SCOPE, STREAM, 0);
    SegmentRange seg0Info = SegmentRangeImpl.builder().segment(seg0).startOffset(60).endOffset(90).build();
    @Cleanup SegmentIterator<String> seg0Iterator = batchClient.readSegment(seg0Info, serializer);
    ArrayList<String> dataAtOffset = Lists.newArrayList(seg0Iterator);
    assertEquals(1, dataAtOffset.size());
    assertEquals(DATA_OF_SIZE_30, dataAtOffset.get(0));
}
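The offset arithmetic in the last step works because every serialized event in this test occupies a fixed 30 bytes in the segment (hence the DATA_OF_SIZE_30 constant), so bytes 60 through 90 hold exactly the third event. A sketch of that mapping (eventRange is a hypothetical helper, assuming the fixed 30-byte-per-event layout):

// Hypothetical helper: byte range of the index-th event when every event occupies eventSize bytes.
static long[] eventRange(int index, int eventSize) {
    long startOffset = (long) index * eventSize;
    return new long[] { startOffset, startOffset + eventSize };
}
// eventRange(2, 30) -> {60, 90}: the startOffset/endOffset used for seg0Info above.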