Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
The class StreamSeekTest, method testStreamSeek.
@Test(timeout = 50000)
public void testStreamSeek() throws Exception {
    createScope(SCOPE);
    createStream(STREAM1);
    createStream(STREAM2);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE,
            ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM1, serializer,
            EventWriterConfig.builder().build());
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
    groupManager.createReaderGroup("group", ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .groupRefreshTimeMillis(0)
            .stream(Stream.of(SCOPE, STREAM1))
            .stream(Stream.of(SCOPE, STREAM2))
            .build());
    @Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup("group");
    // Prep the stream with data.
    // 1. Write two events with an event size of 30.
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(1)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(2)).get();
    // 2. Scale the stream into three segments.
    Map<Double, Double> newKeyRanges = new HashMap<>();
    newKeyRanges.put(0.0, 0.33);
    newKeyRanges.put(0.33, 0.66);
    newKeyRanges.put(0.66, 1.0);
    scaleStream(STREAM1, newKeyRanges);
    // 3. Write three more events with an event size of 30.
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(3)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(4)).get();
    writer1.writeEvent(keyGenerator.get(), getEventData.apply(5)).get();
    // Create a reader.
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());
    // The offset of a stream cut is always set to zero.
    // Stream cut 1: the head of the stream, before anything has been read.
    Map<Stream, StreamCut> streamCut1 = readerGroup.getStreamCuts();
    readAndVerify(reader, 1, 2);
    // The reader sees that the pre-scale segments are now empty.
    assertNull(reader.readNextEvent(100).getEvent());
    // Checkpoint to move past the scale.
    readerGroup.initiateCheckpoint("cp1", executorService());
    // Old segments are released and the new ones can be read.
    readAndVerify(reader, 3, 4, 5);
    // Stream cut 2: after the scale, once all five events have been read.
    Map<Stream, StreamCut> streamCut2 = readerGroup.getStreamCuts();
    // Reset the readers to the pre-scale offset 0.
    readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(streamCut1).build());
    verifyReinitializationRequiredException(reader);
    @Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());
    // Verify that we are at streamCut1.
    readAndVerify(reader1, 1, 2);
    // Reset the readers to the post-scale offset 0.
    readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromStreamCuts(streamCut2).build());
    verifyReinitializationRequiredException(reader1);
    @Cleanup EventStreamReader<String> reader2 = clientFactory.createReader("readerId", "group", serializer,
            ReaderConfig.builder().build());
    // Verify that we are at streamCut2.
    readAndVerify(reader2, 3, 4, 5);
}
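The scaleStream helper used above is defined elsewhere in StreamSeekTest. A minimal sketch of what it plausibly does, mirroring the Controller.scaleStream call that appears in testRollingTxnMetrics below; the helper name is real, but the body here is an assumption, including that the test keeps a Controller handle named controller and that a fresh stream has only segment 0 to seal.

// Hypothetical reconstruction (not from the original test): seals segment 0
// of the freshly created stream and replaces it with the supplied key ranges.
private void scaleStream(String streamName, Map<Double, Double> keyRanges) throws Exception {
    Stream stream = Stream.of(SCOPE, streamName);
    assertTrue(controller.scaleStream(stream, Collections.singletonList(0L), keyRanges, executorService())
                         .getFuture().get());
}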
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
The class UnreadBytesTest, method testUnreadBytesWithEndStreamCuts.
@Test(timeout = 50000)
public void testUnreadBytesWithEndStreamCuts() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1))
            .build();
    String streamName = "testUnreadBytesWithEndStreamCuts";
    Controller controller = PRAVEGA.getLocalController();
    controller.createScope("unreadbytes").get();
    controller.createStream("unreadbytes", streamName, config).get();
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope("unreadbytes",
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
            new JavaSerializer<>(), EventWriterConfig.builder().build());
    // Write just 2 events to simplify simulating a checkpoint.
    writer.writeEvent("0", "data of size 30").get();
    writer.writeEvent("0", "data of size 30").get();
    String group = "testUnreadBytesWithEndStreamCuts-group";
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope("unreadbytes",
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    // Create a bounded reader group: read from the head of the stream up to an end stream cut at offset 90.
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder()
            .disableAutomaticCheckpoints()
            .stream("unreadbytes/" + streamName, StreamCut.UNBOUNDED, getStreamCut(streamName, 90L, 0))
            .build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(group);
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", group,
            new JavaSerializer<>(), ReaderConfig.builder().build());
    EventRead<String> firstEvent = reader.readNextEvent(15000);
    EventRead<String> secondEvent = reader.readNextEvent(15000);
    assertNotNull(firstEvent);
    assertEquals("data of size 30", firstEvent.getEvent());
    assertNotNull(secondEvent);
    assertEquals("data of size 30", secondEvent.getEvent());
    // Trigger a checkpoint.
    CompletableFuture<Checkpoint> chkPointResult = readerGroup.initiateCheckpoint("test", executorService());
    EventRead<String> chkpointEvent = reader.readNextEvent(15000);
    assertEquals("test", chkpointEvent.getCheckpointName());
    EventRead<String> emptyEvent = reader.readNextEvent(100);
    assertFalse(emptyEvent.isCheckpoint());
    assertNull(emptyEvent.getEvent());
    chkPointResult.join();
    // Write two more events, so that 120 bytes are written in total.
    writer.writeEvent("0", "data of size 30").get();
    writer.writeEvent("0", "data of size 30").get();
    long unreadBytes = readerGroup.getMetrics().unreadBytes();
    // Ensure the end offset of 90 bytes is taken into consideration when computing unread bytes.
    assertTrue("Unread bytes: " + unreadBytes, unreadBytes == 30);
}
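getStreamCut is another test-local helper not shown in the snippet. A plausible sketch, assuming it pins each listed segment number at the given offset via StreamCutImpl; the varargs signature and the use of Guava's ImmutableMap are assumptions.

// Hypothetical reconstruction (not from the original test): builds a StreamCut
// that places every listed segment of "unreadbytes/<streamName>" at `offset`.
private StreamCut getStreamCut(String streamName, long offset, int... segmentNumbers) {
    ImmutableMap.Builder<Segment, Long> positions = ImmutableMap.builder();
    Arrays.stream(segmentNumbers)
          .forEach(seg -> positions.put(new Segment("unreadbytes", streamName, seg), offset));
    return new StreamCutImpl(Stream.of("unreadbytes", streamName), positions.build());
}

The arithmetic behind the final assertion: 120 bytes are written in total, the checkpoint records the reader as having consumed the first 60, and the end stream cut caps the readable range at offset 90, so exactly 30 bytes remain unread.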
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
The class DebugStreamSegmentsTest, method testOutOfSequence.
@Test(timeout = 30000)
public void testOutOfSequence() throws Exception {
    // 1. Prepare the scope, stream, and a segment selector backed by mocked segment writers.
    createScope(SCOPE);
    createStream(STREAM);
    SegmentOutputStreamFactory streamFactory = Mockito.mock(SegmentOutputStreamFactory.class);
    when(streamFactory.createOutputStreamForSegment(any(), any(), any(), any()))
            .thenReturn(mock(SegmentOutputStream.class));
    SegmentSelector selector = new SegmentSelector(Stream.of(SCOPE, STREAM),
            controllerWrapper.getController(), streamFactory, EventWriterConfig.builder().build(),
            DelegationTokenProviderFactory.createWithEmptyToken());
    // 2. Create the client factory.
    @Cleanup EventStreamClientFactory clientFactoryInternal = EventStreamClientFactory.withScope("_system",
            ClientConfig.builder().controllerURI(controllerUri).build());
    @Cleanup final Controller controller = controllerWrapper.getController();
    Segment[] lastSegments = new Segment[100];
    for (int i = 0; i < 10; i++) {
        randomScaleUpScaleDown(clientFactoryInternal, controller);
        selector.refreshSegmentEventWriters(segment -> {
        });
        for (int key = 0; key < 100; key++) {
            Segment segment = selector.getSegmentForEvent("key-" + key);
            if (lastSegments[key] != null) {
                int lastEpoch = NameUtils.getEpoch(lastSegments[key].getSegmentId());
                int thisEpoch = NameUtils.getEpoch(segment.getSegmentId());
                // A routing key must never be routed to a segment from an older epoch,
                // and within the same epoch it must stay pinned to the same segment.
                assertTrue(thisEpoch >= lastEpoch);
                if (thisEpoch == lastEpoch) {
                    assertEquals(lastSegments[key], segment);
                }
            }
            lastSegments[key] = segment;
        }
    }
}
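randomScaleUpScaleDown is not shown in the snippet either. A rough sketch under the assumption that it seals all current segments and re-splits the key space into a random number of equal ranges; the body, the range count, and the availability of executorService() are all assumptions (the clientFactory parameter is kept only to match the call site).

// Hypothetical reconstruction (not from the original test): replaces the
// stream's current segments with a random number of evenly spaced key ranges.
private void randomScaleUpScaleDown(EventStreamClientFactory clientFactory, Controller controller) throws Exception {
    Stream stream = Stream.of(SCOPE, STREAM);
    List<Long> toSeal = controller.getCurrentSegments(SCOPE, STREAM).get().getSegments().stream()
                                  .map(Segment::getSegmentId)
                                  .collect(Collectors.toList());
    int newCount = ThreadLocalRandom.current().nextInt(1, 6);
    Map<Double, Double> newRanges = new HashMap<>();
    for (int i = 0; i < newCount; i++) {
        newRanges.put(i / (double) newCount, (i + 1) / (double) newCount);
    }
    assertTrue(controller.scaleStream(stream, toSeal, newRanges, executorService()).getFuture().get());
}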
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
The class DelegationTokenTest, method writeAnEvent.
private void writeAnEvent(int tokenTtlInSeconds) throws ExecutionException, InterruptedException {
    // Use the caller-supplied token TTL (the snippet previously hardcoded 600, ignoring the parameter).
    ClusterWrapper pravegaCluster = ClusterWrapper.builder()
            .authEnabled(true)
            .tokenTtlInSeconds(tokenTtlInSeconds)
            .build();
    try {
        pravegaCluster.start();
        String scope = "testscope";
        String streamName = "teststream";
        int numSegments = 1;
        String message = "test message";
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create(pravegaCluster.controllerUri()))
                .credentials(new DefaultCredentials("1111_aaaa", "admin"))
                .build();
        log.debug("Done creating client config.");
        createScopeStream(scope, streamName, numSegments, clientConfig);
        @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
        @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
                new JavaSerializer<String>(), EventWriterConfig.builder().build());
        // Note: A TokenException is thrown here if token verification fails on the server.
        writer.writeEvent(message).get();
        log.debug("Done writing message '{}' to stream '{} / {}'", message, scope, streamName);
    } finally {
        pravegaCluster.close();
    }
}
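createScopeStream is a helper defined elsewhere in DelegationTokenTest. A self-contained sketch using the public StreamManager API; the fixed scaling policy is an assumption.

// Hypothetical reconstruction (not from the original test): creates the scope
// and a stream with `numSegments` fixed segments via StreamManager.
private void createScopeStream(String scope, String streamName, int numSegments, ClientConfig clientConfig) {
    @Cleanup
    StreamManager streamManager = StreamManager.create(clientConfig);
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(numSegments))
            .build());
}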
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
The class StreamMetricsTest, method testRollingTxnMetrics.
@Test(timeout = 30000)
public void testRollingTxnMetrics() throws Exception {
    String scaleRollingTxnScopeName = "scaleRollingTxnScope";
    String scaleRollingTxnStreamName = "scaleRollingTxnStream";
    controllerWrapper.getControllerService().createScope(scaleRollingTxnScopeName, 0L).get();
    if (!controller.createStream(scaleRollingTxnScopeName, scaleRollingTxnStreamName, config).get()) {
        fail("Stream " + scaleRollingTxnScopeName + "/" + scaleRollingTxnStreamName
                + " for scale testing already existed, test failed");
    }
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scaleRollingTxnScopeName,
            ClientConfig.builder().controllerURI(URI.create("tcp://localhost:" + controllerPort)).build());
    @Cleanup TransactionalEventStreamWriter<String> writer = clientFactory.createTransactionalEventWriter(
            Stream.of(scaleRollingTxnScopeName, scaleRollingTxnStreamName).getStreamName(),
            new JavaSerializer<>(), EventWriterConfig.builder().build());
    Transaction<String> transaction = writer.beginTxn();
    transaction.writeEvent("Transactional content");
    // Split the stream into 3 segments.
    Map<Double, Double> keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.25);
    keyRanges.put(0.25, 0.75);
    keyRanges.put(0.75, 1.0);
    Stream scaleRollingTxnStream = new StreamImpl(scaleRollingTxnScopeName, scaleRollingTxnStreamName);
    if (!controller.scaleStream(scaleRollingTxnStream, Collections.singletonList(0L), keyRanges, executor)
            .getFuture().get()) {
        fail("Scale stream: splitting segment into three failed, exiting");
    }
    assertEquals(3, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_COUNT,
            streamTags(scaleRollingTxnScopeName, scaleRollingTxnStreamName)).value());
    assertEquals(1, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_SPLITS,
            streamTags(scaleRollingTxnScopeName, scaleRollingTxnStreamName)).value());
    assertEquals(0, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_MERGES,
            streamTags(scaleRollingTxnScopeName, scaleRollingTxnStreamName)).value());
    transaction.flush();
    // Committing a transaction that was begun on the pre-scale segments forces a
    // rolling transaction, which should record one more split and one more merge.
    transaction.commit();
    String message = "Inconsistency found between metadata and metrics";
    AssertExtensions.assertEventuallyEquals(message, 3L, () -> (long) MetricRegistryUtils.getGauge(
            MetricsNames.SEGMENTS_COUNT, streamTags(scaleRollingTxnScopeName, scaleRollingTxnStreamName)).value(),
            500, 30000);
    AssertExtensions.assertEventuallyEquals(message, 2L, () -> (long) MetricRegistryUtils.getGauge(
            MetricsNames.SEGMENTS_SPLITS, streamTags(scaleRollingTxnScopeName, scaleRollingTxnStreamName)).value(),
            200, 30000);
    AssertExtensions.assertEventuallyEquals(message, 1L, () -> (long) MetricRegistryUtils.getGauge(
            MetricsNames.SEGMENTS_MERGES, streamTags(scaleRollingTxnScopeName, scaleRollingTxnStreamName)).value(),
            200, 30000);
}
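The config field used when creating the stream is defined outside this snippet. For sealing segment 0 and splitting into three ranges to be a valid scale, it is presumably a single-segment configuration along these lines (an assumption, not the test's actual field):

// Assumed definition of the `config` field: one initial segment, so that
// sealing segment 0 and splitting into three key ranges succeeds.
StreamConfiguration config = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.fixed(1))
        .build();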