Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class EndToEndTruncationTest, method testTruncationOffsets.
@Test(timeout = 7000)
public void testTruncationOffsets() throws InterruptedException, ExecutionException, TimeoutException, TruncatedDataException, ReinitializationRequiredException {
    String scope = "scope";
    String streamName = "testTruncationOffsets";
    String testString = "Hello world\n";
    // Create the scope and stream, then build a client factory against the in-process controller.
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).join();
    controller.createStream(scope, streamName, StreamConfiguration.builder().build()).join();
    ClientConfig clientConfig = ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    Serializer<String> serializer = new JavaSerializer<>();
    // Write one event and wait for the ack.
    @Cleanup EventStreamWriter<String> producer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    Future<Void> ack = producer.writeEvent(testString);
    ack.get(5, TimeUnit.SECONDS);
    SegmentMetadataClientFactory metadataClientFactory = new SegmentMetadataClientFactoryImpl(controller, clientFactory.getConnectionPool());
    @Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, clientConfig);
    Segment segment = new Segment(scope, streamName, 0);
    @Cleanup SegmentMetadataClient metadataClient = metadataClientFactory.createSegmentMetadataClient(segment, DelegationTokenProviderFactory.createWithEmptyToken());
    // Before truncation the segment starts at offset 0 and its length equals the write offset.
    assertEquals(0, metadataClient.getSegmentInfo().join().getStartingOffset());
    long writeOffset = metadataClient.getSegmentInfo().join().getWriteOffset();
    assertEquals(writeOffset, metadataClient.fetchCurrentSegmentLength().join().longValue());
    assertTrue(metadataClient.getSegmentInfo().join().getWriteOffset() > testString.length());
    // Truncate the segment at the current write offset; the starting offset moves up to match it.
    metadataClient.truncateSegment(writeOffset).join();
    assertEquals(writeOffset, metadataClient.getSegmentInfo().join().getStartingOffset());
    assertEquals(writeOffset, metadataClient.getSegmentInfo().join().getWriteOffset());
    assertEquals(writeOffset, metadataClient.fetchCurrentSegmentLength().join().longValue());
    // Write a second event after the truncation point.
    ack = producer.writeEvent(testString);
    ack.get(5, TimeUnit.SECONDS);
    String group = "testTruncationOffsets-group";
    ReaderGroupConfig groupConfig = ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(new StreamImpl(scope, streamName)).build();
    readerGroupManager.createReaderGroup(group, groupConfig);
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("reader", group, serializer, ReaderConfig.builder().build());
    // The reader first hits the truncated range, then reads the event written after truncation.
    AssertExtensions.assertThrows(TruncatedDataException.class, () -> reader.readNextEvent(2000));
    EventRead<String> event = reader.readNextEvent(2000);
    assertEquals(testString, event.getEvent());
    event = reader.readNextEvent(100);
    assertNull(event.getEvent());
}
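The test above exercises truncation through the full client stack. Distilled down to just the segment-level calls, the pattern looks roughly like the hypothetical helper below; it uses the same classes as the test, assumes the scope and stream already exist, and the method name itself is illustrative rather than part of the Pravega API.

// Hypothetical helper distilled from the test above: truncate segment 0 of an existing stream
// at its current write offset, using the same client classes the test uses.
private void truncateAtCurrentWriteOffset(LocalController controller, ClientConfig clientConfig, String scope, String streamName) {
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    SegmentMetadataClientFactory metadataClientFactory = new SegmentMetadataClientFactoryImpl(controller, clientFactory.getConnectionPool());
    Segment segment = new Segment(scope, streamName, 0);
    @Cleanup SegmentMetadataClient metadataClient = metadataClientFactory.createSegmentMetadataClient(segment, DelegationTokenProviderFactory.createWithEmptyToken());
    // The write offset is the current tail of the segment; truncating there discards all data written so far.
    long writeOffset = metadataClient.getSegmentInfo().join().getWriteOffset();
    metadataClient.truncateSegment(writeOffset).join();
}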
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class EndToEndTruncationTest, method testWriteOnSealedStream.
@Test(timeout = 50000)
public void testWriteOnSealedStream() throws Exception {
    JavaSerializer<String> serializer = new JavaSerializer<>();
    EventWriterConfig writerConfig = EventWriterConfig.builder().build();
    String scope = "testSeal";
    String streamName = "testWriteOnSealedStream";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 2)).build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).get();
    controller.createStream(scope, streamName, config).get();
    config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    controller.updateStream(scope, streamName, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, serializer, writerConfig);
    // write an event.
    writer.writeEvent("0", "data").get();
    // Seal Stream.
    assertTrue(controller.sealStream(scope, streamName).get());
    // Write by an existing writer to a sealed stream should complete exceptionally.
    assertFutureThrows("Should throw IllegalStateException", writer.writeEvent("2", "Write to sealed stream"), e -> IllegalStateException.class.isAssignableFrom(e.getClass()));
    // Subsequent writes will throw an exception.
    assertThrows(IllegalStateException.class, () -> writer.writeEvent("testEvent"));
    // Creating a writer against a sealed stream throws an exception.
    assertThrows(IllegalStateException.class, () -> clientFactory.createEventWriter(streamName, serializer, writerConfig));
}
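Outside of a test, the behavior asserted above translates into ordinary exception handling: once the stream is sealed, the future returned by writeEvent completes exceptionally with an IllegalStateException as its cause. A minimal sketch, assuming the same EventStreamWriter<String> as above and a caller that declares throws Exception; the routing key and payload are illustrative.

// Sketch: handle a write against a stream that may have been sealed concurrently.
try {
    writer.writeEvent("someRoutingKey", "some event").get();
} catch (ExecutionException e) {
    if (e.getCause() instanceof IllegalStateException) {
        // The stream has been sealed; this writer cannot append any further events.
        writer.close();
    } else {
        throw e;
    }
}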
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class EndToEndTruncationTest, method testWriteDuringScaleAndTruncation.
@Test(timeout = 50000)
public void testWriteDuringScaleAndTruncation() throws Exception {
    String streamName = "testWriteDuringScaleAndTruncation";
    Stream stream = new StreamImpl("test", streamName);
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 2)).build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope("test").get();
    controller.createStream("test", streamName, config).get();
    config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    controller.updateStream("test", streamName, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(), EventWriterConfig.builder().build());
    // Routing key "0" translates to key 0.8. This write happens to segment 1.
    writer.writeEvent("0", "truncationTest1").get();
    // Perform scaling operations on the stream.
    ImmutableMap<Double, Double> singleSegmentKeyRange = ImmutableMap.of(0.0, 1.0);
    ImmutableMap<Double, Double> twoSegmentKeyRange = ImmutableMap.of(0.0, 0.5, 0.5, 1.0);
    // Scale down to 1 segment.
    assertTrue("Stream Scale down", controller.scaleStream(stream, Lists.newArrayList(0L, 1L), singleSegmentKeyRange, executorService()).getFuture().get());
    // Scale up to 2 segments.
    assertTrue("Stream Scale up", controller.scaleStream(stream, Lists.newArrayList(computeSegmentId(2, 1)), twoSegmentKeyRange, executorService()).getFuture().get());
    // Scale down to 1 segment.
    assertTrue("Stream Scale down", controller.scaleStream(stream, Lists.newArrayList(computeSegmentId(3, 2), computeSegmentId(4, 2)), singleSegmentKeyRange, executorService()).getFuture().get());
    // Scale up to 2 segments.
    assertTrue("Stream Scale up", controller.scaleStream(stream, Lists.newArrayList(computeSegmentId(5, 3)), twoSegmentKeyRange, executorService()).getFuture().get());
    // Truncate the stream at a stream cut over the segments created in epoch 2.
    Map<Long, Long> streamCutPositions = new HashMap<>();
    streamCutPositions.put(computeSegmentId(3, 2), 0L);
    streamCutPositions.put(computeSegmentId(4, 2), 0L);
    assertTrue("Truncate stream", controller.truncateStream("test", streamName, streamCutPositions).get());
    // Write an event.
    writer.writeEvent("0", "truncationTest3");
    writer.flush();
    // Read the event back.
    String group = "testWriteDuringScaleAndTruncation-group";
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(0).stream("test/" + streamName).build());
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", group, new JavaSerializer<>(), ReaderConfig.builder().build());
    EventRead<String> event = reader.readNextEvent(200);
    assertNull(event.getEvent());
    groupManager.getReaderGroup(group).initiateCheckpoint("cp1", executorService());
    event = reader.readNextEvent(2000);
    assertEquals("cp1", event.getCheckpointName());
    event = reader.readNextEvent(200);
    assertNull(event.getEvent());
    groupManager.getReaderGroup(group).initiateCheckpoint("cp2", executorService());
    event = reader.readNextEvent(2000);
    assertEquals("cp2", event.getCheckpointName());
    event = reader.readNextEvent(10000);
    assertEquals("truncationTest3", event.getEvent());
}
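A detail worth noting in this test: automatic checkpoints are disabled, so after the scale and truncate operations the reader only reaches the newly written event once checkpoints have forced it to refresh its position (the test above needs two of them). A minimal sketch of that pattern, assuming the same groupManager, group, reader, and executorService() as above; the checkpoint name is illustrative.

// Sketch: drive the reader past a scale/truncation boundary with an explicit checkpoint.
groupManager.getReaderGroup(group).initiateCheckpoint("afterTruncation", executorService());
EventRead<String> cp = reader.readNextEvent(2000);
assertTrue(cp.isCheckpoint());
assertEquals("afterTruncation", cp.getCheckpointName());
// Once the checkpoint has been delivered, the reader can move on to events written after truncation.
EventRead<String> next = reader.readNextEvent(10000);
assertNotNull(next.getEvent());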
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class EndToEndWithScaleTest, method testScale.
@Test(timeout = 90000)
public void testScale() throws Exception {
    final String scope = "test";
    final String streamName = "test";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    // Test scale both in a new stream and in a re-created one.
    for (int i = 0; i < 2; i++) {
        @Cleanup Controller controller = controllerWrapper.getController();
        controllerWrapper.getControllerService().createScope(scope, 0L).get();
        controller.createStream(scope, streamName, config).get();
        @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
        @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
        @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new JavaSerializer<>(), EventWriterConfig.builder().build());
        writer.writeEvent("0", "txntest1" + i).get();
        // scale
        Stream stream = new StreamImpl(scope, streamName);
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 0.33);
        map.put(0.33, 0.66);
        map.put(0.66, 1.0);
        Boolean result = controller.scaleStream(stream, Collections.singletonList(i * 4L), map, executorService()).getFuture().get();
        assertTrue(result);
        writer.writeEvent("0", "txntest2" + i).get();
        @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
        groupManager.createReaderGroup("reader" + i, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(0).stream(Stream.of(scope, streamName).getScopedName()).build());
        @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId" + i, "reader" + i, new JavaSerializer<>(), ReaderConfig.builder().build());
        EventRead<String> event = reader.readNextEvent(10000);
        assertNotNull(event.getEvent());
        assertEquals("txntest1" + i, event.getEvent());
        event = reader.readNextEvent(100);
        assertNull(event.getEvent());
        groupManager.getReaderGroup("reader" + i).initiateCheckpoint("cp" + i, executorService());
        event = reader.readNextEvent(10000);
        assertEquals("cp" + i, event.getCheckpointName());
        event = reader.readNextEvent(10000);
        assertEquals("txntest2" + i, event.getEvent());
        assertTrue(controller.sealStream(scope, streamName).join());
        assertTrue(controller.deleteStream(scope, streamName).join());
    }
}
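The scale request itself is the reusable core of this test: pass the list of segment ids to seal and a map describing the replacement key ranges, then wait on the returned future. A minimal sketch against the same Controller API, with an illustrative segment id and split; the new ranges must cover the whole key space from 0.0 to 1.0.

// Sketch: split the single initial segment (id 0) of a fresh stream into three segments.
Stream stream = new StreamImpl(scope, streamName);
Map<Double, Double> newKeyRanges = new HashMap<>();
newKeyRanges.put(0.0, 0.33);
newKeyRanges.put(0.33, 0.66);
newKeyRanges.put(0.66, 1.0);
boolean scaled = controller.scaleStream(stream, Collections.singletonList(0L), newKeyRanges, executorService()).getFuture().get();
assertTrue(scaled);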
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class SynchronizerClientFactory, method withScope.
/**
 * Creates a new instance of the client factory for a given scope.
 *
 * @param scope The scope string.
 * @param config Configuration for the client.
 * @return An instance of SynchronizerClientFactory (backed by ClientFactoryImpl).
 */
static SynchronizerClientFactory withScope(String scope, ClientConfig config) {
    // Change the max number of allowed connections to the segment store to 1.
    val updatedConfig = config.toBuilder()
                              .maxConnectionsPerSegmentStore(1)
                              .enableTlsToSegmentStore(config.isEnableTlsToSegmentStore())
                              .enableTlsToController(config.isEnableTlsToController())
                              .build();
    val connectionFactory = new SocketConnectionFactoryImpl(updatedConfig, 1);
    return new ClientFactoryImpl(scope,
            new ControllerImpl(ControllerImplConfig.builder().clientConfig(updatedConfig).build(), connectionFactory.getInternalExecutor()),
            updatedConfig, connectionFactory);
}
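A typical call site for the factory method above might look like the sketch below. The scope, controller URI, stream name, and serializer are illustrative; createRevisionedStreamClient is one of the methods the returned SynchronizerClientFactory exposes.

// Sketch: obtain a SynchronizerClientFactory for a scope and create a revisioned stream client.
ClientConfig clientConfig = ClientConfig.builder().controllerURI(URI.create("tcp://localhost:9090")).build();
@Cleanup SynchronizerClientFactory factory = SynchronizerClientFactory.withScope("myScope", clientConfig);
RevisionedStreamClient<String> revisioned = factory.createRevisionedStreamClient("myStream", new JavaSerializer<>(), SynchronizerConfig.builder().build());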