Use of io.pravega.client.stream.impl.UTF8StringSerializer in project pravega by pravega.
The class EndToEndTxnWithScaleTest, method testTxnWithScale.
@Test(timeout = 10000)
public void testTxnWithScale() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    Controller controller = PRAVEGA.getLocalController();
    controller.createScope("test").get();
    String streamName = "testTxnWithScale";
    controller.createStream("test", streamName, config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup
    TransactionalEventStreamWriter<String> test = clientFactory.createTransactionalEventWriter("writer", streamName, new UTF8StringSerializer(), EventWriterConfig.builder().transactionTimeoutTime(10000).build());
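    // Events written in a transaction are buffered in transaction segments and become visible to readers only on commit.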
    Transaction<String> transaction1 = test.beginTxn();
    transaction1.writeEvent("0", "txntest1");
    transaction1.commit();
    assertEventuallyEquals(Transaction.Status.COMMITTED, () -> transaction1.checkStatus(), 5000);
    // Scale the stream: seal segment 0 and split it into three segments.
    Stream stream = new StreamImpl("test", streamName);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.33);
    map.put(0.33, 0.66);
    map.put(0.66, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0L), map, executorService()).getFuture().get();
    assertTrue(result);
    Transaction<String> transaction2 = test.beginTxn();
    transaction2.writeEvent("0", "txntest2");
    transaction2.commit();
    String group = "testTxnWithScale-group";
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(0).stream("test/" + streamName).build());
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", group, new UTF8StringSerializer(), ReaderConfig.builder().build());
    EventRead<String> event = reader.readNextEvent(5000);
    assertNotNull(event.getEvent());
    assertEquals("txntest1", event.getEvent());
    assertNull(reader.readNextEvent(100).getEvent());
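    // With automatic checkpoints disabled, the reader cannot advance into the post-scale segments
    // until a checkpoint completes, which is why the read above returned null.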
    groupManager.getReaderGroup(group).initiateCheckpoint("cp", executorService());
    event = reader.readNextEvent(5000);
    assertEquals("cp", event.getCheckpointName());
    event = reader.readNextEvent(5000);
    assertNotNull(event.getEvent());
    assertEquals("txntest2", event.getEvent());
}
Use of io.pravega.client.stream.impl.UTF8StringSerializer in project pravega by pravega.
The class EndToEndTruncationTest, method testSegmentTruncationWhileReading.
/**
 * This test checks the behavior of a reader (or group of readers) based on whether segment truncation takes place
 * while reading (first part of the test) or before starting reading (second part).
 *
 * @throws InterruptedException If the current thread is interrupted while waiting for the Controller service.
 */
@Test(timeout = 60000)
public void testSegmentTruncationWhileReading() throws InterruptedException {
    final int totalEvents = 100;
    final String scope = "truncationTests";
    final String streamName = "testSegmentTruncationWhileReading";
    final String readerGroupName = "RGTestSegmentTruncationWhileReading";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    controller.createScope(scope).join();
    controller.createStream(scope, streamName, config).join();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    // Write half of totalEvents to the Stream.
    writeEvents(clientFactory, streamName, totalEvents / 2);
    // Seal current segment (0) and split it into two segments (1, 2).
    Stream stream = new StreamImpl(scope, streamName);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    assertTrue(controller.scaleStream(stream, Lists.newArrayList(0L), map, executorService()).getFuture().join());
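    // computeSegmentId combines the segment number with the epoch in which the segment was created (here epoch 1, after the scale).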
    long one = computeSegmentId(1, 1);
    long two = computeSegmentId(2, 1);
    // Write the rest of the events to the new Stream segments.
    ReadWriteUtils.writeEvents(clientFactory, streamName, totalEvents, totalEvents / 2);
    // Instantiate readers to consume from the Stream.
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory);
    groupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(100).stream(Stream.of(scope, streamName)).build());
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader(String.valueOf(0), readerGroupName, new UTF8StringSerializer(), ReaderConfig.builder().build());
    int read = 0;
    while (read < 75) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null) {
            read++;
        }
    }
    // Let the reader consume some events, then truncate the segment while it is still reading.
    Exceptions.handleInterrupted(() -> Thread.sleep(500));
    Map<Long, Long> streamCutPositions = new HashMap<>();
    streamCutPositions.put(one, 0L);
    streamCutPositions.put(two, 0L);
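    // A stream cut at offset 0 of segments 1 and 2 removes everything before them, i.e. all of segment 0.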
    assertTrue(controller.truncateStream(scope, streamName, streamCutPositions).join());
    // Wait for the reader to complete and assert that it has read all the events (totalEvents).
    while (read < totalEvents) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null) {
            read++;
        }
    }
    assertEquals(totalEvents, read);
    assertNull(reader.readNextEvent(0).getEvent());
    // Assert that from the truncation call onwards, the available segments are the ones created by scaling.
    List<Long> currentSegments = controller.getCurrentSegments(scope, streamName).join().getSegments().stream().map(Segment::getSegmentId).sorted().collect(toList());
    currentSegments.removeAll(Lists.newArrayList(one, two));
    assertTrue(currentSegments.isEmpty());
    // A new set of readers should only read the events beyond the truncation point (segments 1 and 2).
    final String newReaderGroupName = readerGroupName + "new";
    groupManager.createReaderGroup(newReaderGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
    List<CompletableFuture<Integer>> futures = readEvents(clientFactory, newReaderGroupName, 1);
    Futures.allOf(futures).join();
    assertEquals(totalEvents / 2, (int) futures.stream().map(CompletableFuture::join).reduce((a, b) -> a + b).get());
}
Use of io.pravega.client.stream.impl.UTF8StringSerializer in project pravega by pravega.
The class EndToEndTruncationTest, method testDeleteStreamWhileReading.
/**
 * This test checks the behavior of a reader (or group of readers) that gets a delete event while reading. While the
 * client is reading events (Segment Store), the test deletes the Stream (Controller and metadata). Once the client
 * reads all the events and reaches the end of segment, it contacts the Controller to retrieve subsequent segments
 * (if any). However, the Stream-related metadata to answer this request has been previously deleted.
 */
// @Ignore //TODO: The controller does not currently handle the stream being deleted properly.
// Once it does so the client will need to throw an appropriate exception, and this test should reflect it.
@Test(timeout = 20000)
public void testDeleteStreamWhileReading() {
    final String scope = "truncationTests";
    final String streamName = "testDeleteStreamWhileReading";
    final String readerGroup = "RGTestDeleteStreamWhileReading";
    final int totalEvents = 100;
    final int parallelism = 1;
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(parallelism)).build();
    @Cleanup
    StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, streamConfiguration);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    // Write totalEvents to the Stream.
    writeEvents(clientFactory, streamName, totalEvents);
    // Instantiate readers to consume from the Stream.
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(readerGroup, ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(500).stream(Stream.of(scope, streamName)).build());
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader(String.valueOf(0), readerGroup, new UTF8StringSerializer(), ReaderConfig.builder().build());
    assertEquals(totalEvents / 2, ReadWriteUtils.readEventsUntil(reader, eventRead -> true, totalEvents / 2, 0));
    reader.close();
    val readerRecreated = clientFactory.createReader(String.valueOf(0), readerGroup, new JavaSerializer<>(), ReaderConfig.builder().build());
    assertTrue(streamManager.sealStream(scope, streamName));
    assertTrue(streamManager.deleteStream(scope, streamName));
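    // With the stream deleted, its metadata is gone: creating another reader must fail.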
    assertThrows(InvalidStreamException.class, () -> clientFactory.createReader(String.valueOf(1), readerGroup, new JavaSerializer<>(), ReaderConfig.builder().build()));
    // At the control plane, we expect a RetriesExhaustedException as readers try to get successor segments from a deleted stream.
    assertThrows(TruncatedDataException.class, () -> ReadWriteUtils.readEvents(readerRecreated, totalEvents / 2, 0));
    assertFalse(streamManager.deleteStream(scope, streamName));
}
Use of io.pravega.client.stream.impl.UTF8StringSerializer in project pravega by pravega.
The class EndToEndTransactionTest, method main.
@Test
public static void main(String[] args) throws Exception {
    @Cleanup
    TestingServer zkTestServer = new TestingServerStarter().start();
    ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    int port = Config.SERVICE_PORT;
    @Cleanup
    PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, serviceBuilder.createTableStoreService(), serviceBuilder.getLowPriorityExecutor(), Config.TLS_PROTOCOL_VERSION.toArray(new String[Config.TLS_PROTOCOL_VERSION.size()]));
    server.startListening();
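    // Give the in-process segment store a moment to start listening before wiring up the controller.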
    Thread.sleep(1000);
    @Cleanup
    ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port);
    Controller controller = controllerWrapper.getController();
    controllerWrapper.awaitRunning();
    final String testScope = "testScope";
    final String testStream = "testStream";
    if (!controller.createScope(testScope).get()) {
        log.error("FAILURE: Error creating test scope");
        return;
    }
    ScalingPolicy policy = ScalingPolicy.fixed(5);
    StreamConfiguration streamConfig = StreamConfiguration.builder().scalingPolicy(policy).build();
    if (!controller.createStream(testScope, testStream, streamConfig).get()) {
        log.error("FAILURE: Error creating test stream");
        return;
    }
    final long txnTimeout = 4000;
    ClientConfig config = ClientConfig.builder().build();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(config);
    @Cleanup
    MockClientFactory clientFactory = new MockClientFactory(testScope, controller, new ConnectionPoolImpl(config, connectionFactory));
    @Cleanup
    TransactionalEventStreamWriter<String> producer = clientFactory.createTransactionalEventWriter("writer", testStream, new UTF8StringSerializer(), EventWriterConfig.builder().transactionTimeoutTime(txnTimeout).build());
    // region Successful commit tests
    Transaction<String> transaction = producer.beginTxn();
    for (int i = 0; i < 1; i++) {
        String event = "\n Transactional Publish \n";
        log.info("Producing event: " + event);
        transaction.writeEvent("", event);
        transaction.flush();
        Thread.sleep(500);
    }
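    // commit() only initiates the commit, so the status check below may observe COMMITTING before COMMITTED.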
    CompletableFuture<Object> commit = CompletableFuture.supplyAsync(() -> {
        try {
            transaction.commit();
        } catch (Exception e) {
            log.warn("Error committing transaction", e);
        }
        return null;
    });
    commit.join();
    Transaction.Status txnStatus = transaction.checkStatus();
    assertTrue(txnStatus == Transaction.Status.COMMITTING || txnStatus == Transaction.Status.COMMITTED);
    log.info("SUCCESS: successful in committing transaction. Transaction status=" + txnStatus);
    Thread.sleep(2000);
    txnStatus = transaction.checkStatus();
    assertTrue(txnStatus == Transaction.Status.COMMITTED);
    log.info("SUCCESS: successfully committed transaction. Transaction status=" + txnStatus);
    // endregion
    // region Successful abort tests
    Transaction<String> transaction2 = producer.beginTxn();
    for (int i = 0; i < 1; i++) {
        String event = "\n Transactional Publish \n";
        log.info("Producing event: " + event);
        transaction2.writeEvent("", event);
        transaction2.flush();
        Thread.sleep(500);
    }
    CompletableFuture<Object> drop = CompletableFuture.supplyAsync(() -> {
        try {
            transaction2.abort();
        } catch (Exception e) {
            log.warn("Error aborting transaction", e);
        }
        return null;
    });
    drop.join();
    Transaction.Status txn2Status = transaction2.checkStatus();
    assertTrue(txn2Status == Transaction.Status.ABORTING || txn2Status == Transaction.Status.ABORTED);
    log.info("SUCCESS: successful in dropping transaction. Transaction status=" + txn2Status);
    Thread.sleep(2000);
    txn2Status = transaction2.checkStatus();
    assertTrue(txn2Status == Transaction.Status.ABORTED);
    log.info("SUCCESS: successfully aborted transaction. Transaction status=" + txn2Status);
    // endregion
    // region Successful timeout tests
    Transaction<String> tx1 = producer.beginTxn();
    Thread.sleep((long) (1.3 * txnTimeout));
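    // The transaction saw no writes or pings within txnTimeout, so the controller aborts it automatically.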
    Transaction.Status txStatus = tx1.checkStatus();
    Assert.assertTrue(Transaction.Status.ABORTING == txStatus || Transaction.Status.ABORTED == txStatus);
    log.info("SUCCESS: successfully aborted transaction after timeout. Transaction status=" + txStatus);
    // endregion
    // region Ping failure due to controller going into disconnection state
    // Fill in these tests once we have controller.stop() implemented.
    System.exit(0);
}
Use of io.pravega.client.stream.impl.UTF8StringSerializer in project pravega by pravega.
The class ReadWriteUtils, method writeEvents.
public static void writeEvents(EventStreamClientFactory clientFactory, String streamName, int totalEvents, int offset) {
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new UTF8StringSerializer(), EventWriterConfig.builder().build());
    for (int i = offset; i < totalEvents; i++) {
        writer.writeEvent(String.valueOf(i)).join();
        log.info("Writing event: {} to stream {}", i, streamName);
    }
}
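Several of the tests above also call a three-argument writeEvents overload that this page does not show. Presumably it delegates to the method above with an offset of 0; a minimal sketch under that assumption:

public static void writeEvents(EventStreamClientFactory clientFactory, String streamName, int totalEvents) {
    // Assumed convenience overload (not shown on this page): start writing from event 0.
    writeEvents(clientFactory, streamName, totalEvents, 0);
}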