Use of io.pravega.client.connection.impl.SocketConnectionFactoryImpl in project pravega by pravega.
The class EndToEndTransactionOrderTest, method setUp.
@Before
public void setUp() throws Exception {
    // Start ZooKeeper and a Controller instance backed by it.
    zkTestServer = new TestingServerStarter().start();
    int port = Config.SERVICE_PORT;
    controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), false, controllerPort,
            serviceHost, servicePort, Config.HOST_STORE_CONTAINER_COUNT);
    controller = controllerWrapper.getController();

    // Client-side plumbing: a connection factory and a client factory for the internal scope.
    connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    internalCF = new ClientFactoryImpl(NameUtils.INTERNAL_SCOPE_NAME, controller, connectionFactory);

    // In-memory segment store and table store.
    ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    TableStore tableStore = serviceBuilder.createTableStoreService();
    controllerWrapper.getControllerService().createScope(NameUtils.INTERNAL_SCOPE_NAME, 0L).get();

    // Segment store listener with auto-scaling mute and cool-down intervals set to zero.
    autoScaleMonitor = new AutoScaleMonitor(store, internalCF,
            AutoScalerConfig.builder().with(AutoScalerConfig.MUTE_IN_SECONDS, 0)
                            .with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0).build());
    server = new PravegaConnectionListener(false, false, "localhost", servicePort, store, tableStore,
            autoScaleMonitor.getStatsRecorder(), autoScaleMonitor.getTableSegmentStatsRecorder(),
            null, null, null, true, serviceBuilder.getLowPriorityExecutor(),
            SecurityConfigDefaults.TLS_PROTOCOL_VERSION);
    server.startListening();
    controllerWrapper.awaitRunning();

    // Test scope, stream, client factory, reader group, and reader used by the test cases.
    controllerWrapper.getControllerService().createScope("test", 0L).get();
    controller.createStream("test", "test", config).get();
    clientFactory = new MockClientFactory("test", controller, internalCF.getConnectionPool());
    readerGroupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    readerGroupManager.createReaderGroup("readergrp",
            ReaderGroupConfig.builder()
                             .automaticCheckpointIntervalMillis(2000)
                             .groupRefreshTimeMillis(1000)
                             .stream("test/test")
                             .build());
    reader = clientFactory.createReader("1", "readergrp", new IntegerSerializer(), ReaderConfig.builder().build());
}
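The matching tearDown is not part of this snippet. A minimal sketch of what it could look like, assuming the fields created above are closed in roughly reverse order of creation (the actual test may release resources differently):

@After
public void tearDown() throws Exception {
    // Assumed cleanup: close client-side objects first, then the server-side components.
    reader.close();
    readerGroupManager.close();
    clientFactory.close();
    internalCF.close();
    connectionFactory.close();
    autoScaleMonitor.close();
    server.close();
    controllerWrapper.close();
    zkTestServer.close();
}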
Use of io.pravega.client.connection.impl.SocketConnectionFactoryImpl in project pravega by pravega.
The class EndToEndTruncationTest, method testSegmentTruncationWhileReading.
/**
 * This test checks the behavior of a reader (or group of readers) depending on whether segment truncation takes
 * place while reading (first part of the test) or before reading starts (second part).
 *
 * @throws InterruptedException If the current thread is interrupted while waiting for the Controller service.
 */
@Test(timeout = 60000)
public void testSegmentTruncationWhileReading() throws InterruptedException {
    final int totalEvents = 100;
    final String scope = "truncationTests";
    final String streamName = "testSegmentTruncationWhileReading";
    final String readerGroupName = "RGTestSegmentTruncationWhileReading";
    StreamConfiguration config = StreamConfiguration.builder()
                                                    .scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1))
                                                    .build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).join();
    controller.createStream(scope, streamName, config).join();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);

    // Write half of totalEvents to the Stream.
    writeEvents(clientFactory, streamName, totalEvents / 2);

    // Seal current segment (0) and split it into two segments (1, 2).
    Stream stream = new StreamImpl(scope, streamName);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    assertTrue(controller.scaleStream(stream, Lists.newArrayList(0L), map, executorService()).getFuture().join());
    long one = computeSegmentId(1, 1);
    long two = computeSegmentId(2, 1);

    // Write the rest of the events to the new Stream segments.
    ReadWriteUtils.writeEvents(clientFactory, streamName, totalEvents, totalEvents / 2);

    // Instantiate readers to consume from the Stream.
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
    groupManager.createReaderGroup(readerGroupName,
            ReaderGroupConfig.builder()
                             .automaticCheckpointIntervalMillis(100)
                             .stream(Stream.of(scope, streamName))
                             .build());
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader(String.valueOf(0), readerGroupName,
            new UTF8StringSerializer(), ReaderConfig.builder().build());
    int read = 0;
    while (read < 75) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null) {
            read++;
        }
    }

    // Let the readers consume some events, then truncate the segment while they are still reading.
    Exceptions.handleInterrupted(() -> Thread.sleep(500));
    Map<Long, Long> streamCutPositions = new HashMap<>();
    streamCutPositions.put(one, 0L);
    streamCutPositions.put(two, 0L);
    assertTrue(controller.truncateStream(scope, streamName, streamCutPositions).join());

    // Wait for readers to complete and assert that they have read all the events (totalEvents).
    while (read < totalEvents) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null) {
            read++;
        }
    }
    assertEquals(read, totalEvents);
    assertEquals(null, reader.readNextEvent(0).getEvent());

    // Assert that from the truncation call onwards, the available segments are the ones after the scale operation.
    List<Long> currentSegments = controller.getCurrentSegments(scope, streamName).join().getSegments().stream()
                                           .map(Segment::getSegmentId).sorted().collect(toList());
    currentSegments.removeAll(Lists.newArrayList(one, two));
    assertTrue(currentSegments.isEmpty());

    // A new set of readers should only read the events beyond the truncation point (segments 1 and 2).
    final String newReaderGroupName = readerGroupName + "new";
    groupManager.createReaderGroup(newReaderGroupName,
            ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
    List<CompletableFuture<Integer>> futures = readEvents(clientFactory, newReaderGroupName, 1);
    Futures.allOf(futures).join();
    assertEquals((int) futures.stream().map(CompletableFuture::join).reduce((a, b) -> a + b).get(), totalEvents / 2);
}
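The writeEvents and readEvents helpers (ReadWriteUtils) referenced above are not shown in this snippet. The following is only a rough, hypothetical approximation of what such helpers might do, assuming string events and a UTF8StringSerializer; the real utility methods in the Pravega test sources may differ:

static void writeEvents(ClientFactoryImpl clientFactory, String streamName, int totalEvents) {
    // Overload matching writeEvents(clientFactory, streamName, count): start from event 0.
    writeEvents(clientFactory, streamName, totalEvents, 0);
}

static void writeEvents(ClientFactoryImpl clientFactory, String streamName, int totalEvents, int initialId) {
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
            new UTF8StringSerializer(), EventWriterConfig.builder().build());
    // Write events initialId .. totalEvents-1 and wait for each acknowledgement.
    for (int i = initialId; i < totalEvents; i++) {
        writer.writeEvent(String.valueOf(i), "event-" + i).join();
    }
    writer.flush();
}

static List<CompletableFuture<Integer>> readEvents(ClientFactoryImpl clientFactory, String readerGroup, int numReaders) {
    List<CompletableFuture<Integer>> futures = new ArrayList<>();
    for (int i = 0; i < numReaders; i++) {
        String readerId = "reader-" + i;
        futures.add(CompletableFuture.supplyAsync(() -> {
            // Each reader counts events until the stream stops delivering data.
            int count = 0;
            try (EventStreamReader<String> reader = clientFactory.createReader(readerId, readerGroup,
                    new UTF8StringSerializer(), ReaderConfig.builder().build())) {
                EventRead<String> event = reader.readNextEvent(5000);
                while (event.getEvent() != null || event.isCheckpoint()) {
                    if (event.getEvent() != null) {
                        count++;
                    }
                    event = reader.readNextEvent(5000);
                }
            }
            return count;
        }));
    }
    return futures;
}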
Use of io.pravega.client.connection.impl.SocketConnectionFactoryImpl in project pravega by pravega.
The class EndToEndTruncationTest, method testTruncationOffsets.
@Test(timeout = 7000)
public void testTruncationOffsets() throws InterruptedException, ExecutionException, TimeoutException,
        TruncatedDataException, ReinitializationRequiredException {
    String scope = "scope";
    String streamName = "testTruncationOffsets";
    String testString = "Hello world\n";
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).join();
    controller.createStream(scope, streamName, StreamConfiguration.builder().build()).join();
    ClientConfig clientConfig = ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    Serializer<String> serializer = new JavaSerializer<>();

    // Write a single event and wait for the acknowledgement.
    @Cleanup
    EventStreamWriter<String> producer = clientFactory.createEventWriter(streamName, serializer,
            EventWriterConfig.builder().build());
    Future<Void> ack = producer.writeEvent(testString);
    ack.get(5, TimeUnit.SECONDS);

    // Inspect segment 0 through a SegmentMetadataClient: the starting offset is still 0 and the
    // write offset already exceeds the raw string length (the serialized event carries overhead).
    SegmentMetadataClientFactory metadataClientFactory = new SegmentMetadataClientFactoryImpl(controller,
            clientFactory.getConnectionPool());
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, clientConfig);
    Segment segment = new Segment(scope, streamName, 0);
    @Cleanup
    SegmentMetadataClient metadataClient = metadataClientFactory.createSegmentMetadataClient(segment,
            DelegationTokenProviderFactory.createWithEmptyToken());
    assertEquals(0, metadataClient.getSegmentInfo().join().getStartingOffset());
    long writeOffset = metadataClient.getSegmentInfo().join().getWriteOffset();
    assertEquals(writeOffset, metadataClient.fetchCurrentSegmentLength().join().longValue());
    assertTrue(metadataClient.getSegmentInfo().join().getWriteOffset() > testString.length());

    // Truncate the segment at the current write offset: start offset, write offset and length now coincide.
    metadataClient.truncateSegment(writeOffset).join();
    assertEquals(writeOffset, metadataClient.getSegmentInfo().join().getStartingOffset());
    assertEquals(writeOffset, metadataClient.getSegmentInfo().join().getWriteOffset());
    assertEquals(writeOffset, metadataClient.fetchCurrentSegmentLength().join().longValue());

    // Write another event after the truncation.
    ack = producer.writeEvent(testString);
    ack.get(5, TimeUnit.SECONDS);

    // A reader starting from offset 0 first observes the truncation (TruncatedDataException),
    // then continues with the event written after it.
    String group = "testTruncationOffsets-group";
    ReaderGroupConfig groupConfig = ReaderGroupConfig.builder().disableAutomaticCheckpoints()
            .stream(new StreamImpl(scope, streamName)).build();
    readerGroupManager.createReaderGroup(group, groupConfig);
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("reader", group, serializer,
            ReaderConfig.builder().build());
    AssertExtensions.assertThrows(TruncatedDataException.class, () -> reader.readNextEvent(2000));
    EventRead<String> event = reader.readNextEvent(2000);
    assertEquals(testString, event.getEvent());
    event = reader.readNextEvent(100);
    assertEquals(null, event.getEvent());
}
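For comparison, truncation can also be requested at stream level rather than per segment. A minimal sketch, assuming StreamManager.truncateStream and a manually built StreamCutImpl over the same segment and offset (illustrative only; the test above deliberately truncates the segment directly through the SegmentMetadataClient):

// Hypothetical alternative: express the same truncation point as a StreamCut and let the
// controller perform the stream-level truncation.
StreamCut cut = new StreamCutImpl(Stream.of(scope, streamName),
        Collections.singletonMap(segment, writeOffset));
@Cleanup
StreamManager streamManager = StreamManager.create(clientConfig);
assertTrue(streamManager.truncateStream(scope, streamName, cut));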
Use of io.pravega.client.connection.impl.SocketConnectionFactoryImpl in project pravega by pravega.
The class EndToEndTruncationTest, method testTruncation.
@Test(timeout = 60000)
public void testTruncation() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 2)).build();
    String streamName = "testTruncation";
    @Cleanup
    StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    String scope = "test";
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, config);
    ClientConfig clientConfig = ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    writer.writeEvent("0", "truncationTest1").get();

    // Scale: seal segments 0 and 1 and replace them with three new segments.
    Stream stream = new StreamImpl("test", streamName);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.33);
    map.put(0.33, 0.66);
    map.put(0.66, 1.0);
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    Boolean result = controller.scaleStream(stream, Lists.newArrayList(0L, 1L), map, executorService()).getFuture().get();
    assertTrue(result);
    writer.writeEvent("0", "truncationTest2").get();

    // Truncate at the start of the post-scale segments, so the first event becomes unreadable.
    Map<Long, Long> streamCutPositions = new HashMap<>();
    streamCutPositions.put(computeSegmentId(2, 1), 0L);
    streamCutPositions.put(computeSegmentId(3, 1), 0L);
    streamCutPositions.put(computeSegmentId(4, 1), 0L);
    controller.truncateStream(stream.getScope(), stream.getStreamName(), streamCutPositions).join();

    // A reader created after the truncation only sees the second event.
    String group = "testTruncation-group";
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", clientConfig, connectionFactory);
    groupManager.createReaderGroup(group,
            ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/" + streamName).build());
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", group, new JavaSerializer<>(),
            ReaderConfig.builder().build());
    EventRead<String> event = reader.readNextEvent(10000);
    assertNotNull(event);
    assertEquals("truncationTest2", event.getEvent());
    event = reader.readNextEvent(1000);
    assertNull(event.getEvent());
}
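The computeSegmentId helper used above (from NameUtils) combines a segment number with the epoch in which the segment was created. The encoding sketched below is an assumption about that helper, shown only to make the values 2/3/4 at epoch 1 easier to interpret; consult NameUtils for the authoritative implementation:

// Assumed encoding: creation epoch in the upper 32 bits, segment number in the lower 32 bits.
long segmentTwoEpochOne = ((long) 1 << 32) + 2;  // would correspond to computeSegmentId(2, 1)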
Use of io.pravega.client.connection.impl.SocketConnectionFactoryImpl in project pravega by pravega.
The class EndToEndTruncationTest, method testWriteOnSealedStream.
@Test(timeout = 50000)
public void testWriteOnSealedStream() throws Exception {
    JavaSerializer<String> serializer = new JavaSerializer<>();
    EventWriterConfig writerConfig = EventWriterConfig.builder().build();
    String scope = "testSeal";
    String streamName = "testWriteOnSealedStream";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 2)).build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).get();
    controller.createStream(scope, streamName, config).get();
    config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    controller.updateStream(scope, streamName, config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, writerConfig);

    // write an event.
    writer.writeEvent("0", "data").get();

    // Seal Stream.
    assertTrue(controller.sealStream(scope, streamName).get());

    // Write by an existing writer to a sealed stream should complete exceptionally.
    assertFutureThrows("Should throw IllegalStateException",
            writer.writeEvent("2", "Write to sealed stream"),
            e -> IllegalStateException.class.isAssignableFrom(e.getClass()));

    // Subsequent writes will throw an exception.
    assertThrows(IllegalStateException.class, () -> writer.writeEvent("testEvent"));

    // Creating a writer against a sealed stream throws an exception.
    assertThrows(IllegalStateException.class, () -> clientFactory.createEventWriter(streamName, serializer, writerConfig));
}
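Sealing a stream only rejects further writes; events written before the seal remain readable. A minimal sketch of that follow-up check, reusing the objects above (the reader group name and reader id are hypothetical):

// Sealing blocks writes but not reads: a reader group created after sealStream() can still
// consume the events that were written before the seal.
@Cleanup
ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
groupManager.createReaderGroup("sealedReaders",
        ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
@Cleanup
EventStreamReader<String> reader = clientFactory.createReader("r1", "sealedReaders", serializer,
        ReaderConfig.builder().build());
assertEquals("data", reader.readNextEvent(10000).getEvent());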