Use of io.pravega.client.stream.EventStreamReader in project pravega by pravega.
In class StreamTransactionMetadataTasksTest, method failOverTests.
@Test
public void failOverTests() throws CheckpointStoreException, InterruptedException {
    // Create mock writer objects.
    EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
    EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
    EventStreamReader<CommitEvent> commitReader = commitWriter.getReader();
    EventStreamReader<AbortEvent> abortReader = abortWriter.getReader();
    consumer = new ControllerService(streamStore, hostStore, streamMetadataTasks, txnTasks,
            segmentHelperMock, executor, null);

    // Create test scope and stream.
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration1 = StreamConfiguration.builder()
            .scope(SCOPE).streamName(STREAM).scalingPolicy(policy1).build();
    Assert.assertEquals(Controller.CreateScopeStatus.Status.SUCCESS,
            consumer.createScope(SCOPE).join().getStatus());
    Assert.assertEquals(Controller.CreateStreamStatus.Status.SUCCESS,
            streamMetadataTasks.createStream(SCOPE, STREAM, configuration1, System.currentTimeMillis()).join());

    // Set up txn task for creating transactions from a failedHost.
    StreamTransactionMetadataTasks failedTxnTasks = new StreamTransactionMetadataTasks(streamStore,
            hostStore, segmentHelperMock, executor, "failedHost", connectionFactory, false, "");
    failedTxnTasks.initializeStreamWriters("commitStream", new EventStreamWriterMock<>(),
            "abortStream", new EventStreamWriterMock<>());

    // Create 3 transactions from failedHost.
    VersionedTransactionData tx1 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 10000, null).join().getKey();
    VersionedTransactionData tx2 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 10000, null).join().getKey();
    VersionedTransactionData tx3 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 10000, null).join().getKey();

    // Ping another txn from failedHost.
    UUID txnId = UUID.randomUUID();
    streamStore.createTransaction(SCOPE, STREAM, txnId, 10000, 30000, 30000, null, executor).join();
    PingTxnStatus pingStatus = failedTxnTasks.pingTxn(SCOPE, STREAM, txnId, 10000, null).join();
    VersionedTransactionData tx4 = streamStore.getTransactionData(SCOPE, STREAM, txnId, null, executor).join();

    // Validate versions of all txns.
    Assert.assertEquals(0, tx1.getVersion());
    Assert.assertEquals(0, tx2.getVersion());
    Assert.assertEquals(0, tx3.getVersion());
    Assert.assertEquals(1, tx4.getVersion());
    Assert.assertEquals(PingTxnStatus.Status.OK, pingStatus.getStatus());

    // Validate the txn index.
    Assert.assertEquals(1, streamStore.listHostsOwningTxn().join().size());

    // Change state of one txn to COMMITTING.
    TxnStatus txnStatus2 = streamStore.sealTransaction(SCOPE, STREAM, tx2.getId(), true, Optional.empty(), null, executor)
            .thenApply(AbstractMap.SimpleEntry::getKey).join();
    Assert.assertEquals(TxnStatus.COMMITTING, txnStatus2);

    // Change state of another txn to ABORTING.
    TxnStatus txnStatus3 = streamStore.sealTransaction(SCOPE, STREAM, tx3.getId(), false, Optional.empty(), null, executor)
            .thenApply(AbstractMap.SimpleEntry::getKey).join();
    Assert.assertEquals(TxnStatus.ABORTING, txnStatus3);

    // Create transaction tasks for sweeping txns from failedHost.
    txnTasks = new StreamTransactionMetadataTasks(streamStore, hostStore, segmentHelperMock, executor,
            "host", connectionFactory, false, "");
    TxnSweeper txnSweeper = new TxnSweeper(streamStore, txnTasks, 100, executor);
    // Before initialization, txnSweeper.sweepFailedProcesses throws an IllegalStateException.
    AssertExtensions.assertThrows("IllegalStateException before initialization",
            txnSweeper.sweepFailedProcesses(() -> Collections.singleton("host")),
            ex -> ex instanceof IllegalStateException);
    // Initialize stream writers.
    txnTasks.initializeStreamWriters("commitStream", commitWriter, "abortStream", abortWriter);

    // Validate that txnTasks is ready.
    assertTrue(txnTasks.isReady());

    // Sweep txns that were being managed by failedHost.
    txnSweeper.sweepFailedProcesses(() -> Collections.singleton("host")).join();

    // Validate that sweeping completes correctly.
    Assert.assertEquals(0, streamStore.listHostsOwningTxn().join().size());
    Assert.assertEquals(TxnStatus.ABORTING, streamStore.transactionStatus(SCOPE, STREAM, tx1.getId(), null, executor).join());
    Assert.assertEquals(TxnStatus.COMMITTING, streamStore.transactionStatus(SCOPE, STREAM, tx2.getId(), null, executor).join());
    Assert.assertEquals(TxnStatus.ABORTING, streamStore.transactionStatus(SCOPE, STREAM, tx3.getId(), null, executor).join());
    Assert.assertEquals(TxnStatus.ABORTING, streamStore.transactionStatus(SCOPE, STREAM, tx4.getId(), null, executor).join());

    // Create commit and abort event processors.
    ConnectionFactory connectionFactory = Mockito.mock(ConnectionFactory.class);
    BlockingQueue<CommitEvent> processedCommitEvents = new LinkedBlockingQueue<>();
    BlockingQueue<AbortEvent> processedAbortEvents = new LinkedBlockingQueue<>();
    createEventProcessor("commitRG", "commitStream", commitReader, commitWriter,
            () -> new CommitEventProcessor(streamStore, streamMetadataTasks, hostStore, executor,
                    segmentHelperMock, connectionFactory, processedCommitEvents));
    createEventProcessor("abortRG", "abortStream", abortReader, abortWriter,
            () -> new ConcurrentEventProcessor<>(new AbortRequestHandler(streamStore, streamMetadataTasks,
                    hostStore, executor, segmentHelperMock, connectionFactory, processedAbortEvents), executor));

    // Wait until the commit event is processed and ensure that the txn state is COMMITTED.
    CommitEvent commitEvent = processedCommitEvents.take();
    assertEquals(tx2.getId(), commitEvent.getTxid());
    assertEquals(TxnStatus.COMMITTED, streamStore.transactionStatus(SCOPE, STREAM, tx2.getId(), null, executor).join());

    // Wait until 3 abort events are processed and ensure that the txn state is ABORTED.
    Predicate<AbortEvent> predicate = event -> event.getTxid().equals(tx1.getId())
            || event.getTxid().equals(tx3.getId()) || event.getTxid().equals(tx4.getId());
    AbortEvent abortEvent1 = processedAbortEvents.take();
    assertTrue(predicate.test(abortEvent1));
    AbortEvent abortEvent2 = processedAbortEvents.take();
    assertTrue(predicate.test(abortEvent2));
    AbortEvent abortEvent3 = processedAbortEvents.take();
    assertTrue(predicate.test(abortEvent3));
    assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx1.getId(), null, executor).join());
    assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx3.getId(), null, executor).join());
    assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx4.getId(), null, executor).join());
}
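Stripped of assertions, the failover recipe this test exercises reduces to a few calls. The following condensed sketch reuses the test's own fields and mock writers (streamStore, hostStore, segmentHelperMock, connectionFactory, executor, commitWriter, abortWriter), so it only runs inside a similar test harness:

    // Condensed failover sweep sketch; all named objects are assumed to be the
    // same ones the test above sets up.
    StreamTransactionMetadataTasks sweepTasks = new StreamTransactionMetadataTasks(
            streamStore, hostStore, segmentHelperMock, executor, "host", connectionFactory, false, "");
    TxnSweeper sweeper = new TxnSweeper(streamStore, sweepTasks, 100, executor);

    // Sweeping is rejected with IllegalStateException until the writers are wired up.
    sweepTasks.initializeStreamWriters("commitStream", commitWriter, "abortStream", abortWriter);

    // The supplier returns the set of live hosts; txns owned by any host absent
    // from it (here "failedHost") are swept: open txns are aborted, while txns
    // already in COMMITTING/ABORTING get their commit/abort events re-posted.
    sweeper.sweepFailedProcesses(() -> Collections.singleton("host")).join();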
Use of io.pravega.client.stream.EventStreamReader in project pravega by pravega.
In class ReaderGroupNotificationTest, method testEndOfStreamNotifications.
@Test(timeout = 40000)
public void testEndOfStreamNotifications() throws Exception {
    final String streamName = "stream2";
    StreamConfiguration config = StreamConfiguration.builder()
            .scope(SCOPE).streamName(streamName)
            .scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope(SCOPE).get();
    controller.createStream(config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new ConnectionFactoryImpl(
            ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
    @Cleanup
    ClientFactory clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
            new JavaSerializer<>(), EventWriterConfig.builder().build());
    writer.writeEvent("0", "data1").get();

    // scale
    Stream stream = new StreamImpl(SCOPE, streamName);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0), map, executor).getFuture().get();
    assertTrue(result);
    writer.writeEvent("0", "data2").get();

    // seal stream
    assertTrue(controller.sealStream(SCOPE, streamName).get());
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(SCOPE, controller, clientFactory, connectionFactory);
    ReaderGroup readerGroup = groupManager.createReaderGroup("reader",
            ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, streamName)).build());
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("readerId", "reader",
            new JavaSerializer<>(), ReaderConfig.builder().build());
    // Add an end-of-data notification listener.
    Listener<EndOfDataNotification> l1 = notification -> {
        listenerInvoked.set(true);
        listenerLatch.release();
    };
    readerGroup.getEndOfDataNotifier(executor).registerListener(l1);
    EventRead<String> event1 = reader1.readNextEvent(10000);
    EventRead<String> event2 = reader1.readNextEvent(10000);
    EventRead<String> event3 = reader1.readNextEvent(10000);
    assertNotNull(event1);
    assertEquals("data1", event1.getEvent());
    assertNotNull(event2);
    assertEquals("data2", event2.getEvent());
    assertNull(event3.getEvent());
    listenerLatch.await();
    assertTrue("Listener invoked", listenerInvoked.get());
}
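Stripped of the test scaffolding, the notification wiring is a two-step pattern: build a Listener and register it on the reader group's notifier. A minimal sketch, assuming the readerGroup and executor from the test are in scope:

    // Minimal end-of-data wiring; readerGroup and a ScheduledExecutorService
    // named executor are assumed to exist as in the test above.
    Listener<EndOfDataNotification> onEndOfData = notification ->
            System.out.println("Reader group has read all events of its sealed streams");
    readerGroup.getEndOfDataNotifier(executor).registerListener(onEndOfData);
    // The notification fires only after every stream in the group is sealed and
    // read to its tail; until then, readNextEvent returns null payloads once the
    // current tail is reached, as event3 demonstrates above.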
Use of io.pravega.client.stream.EventStreamReader in project pravega by pravega.
In class ReaderGroupNotificationTest, method testSegmentNotifications.
@Test(timeout = 40000)
public void testSegmentNotifications() throws Exception {
    final String streamName = "stream1";
    StreamConfiguration config = StreamConfiguration.builder()
            .scope(SCOPE).streamName(streamName)
            .scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope(SCOPE).get();
    controller.createStream(config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new ConnectionFactoryImpl(
            ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
    @Cleanup
    ClientFactory clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
            new JavaSerializer<>(), EventWriterConfig.builder().build());
    writer.writeEvent("0", "data1").get();

    // scale
    Stream stream = new StreamImpl(SCOPE, streamName);
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0), map, executor).getFuture().get();
    assertTrue(result);
    writer.writeEvent("0", "data2").get();

    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl(SCOPE, controller, clientFactory, connectionFactory);
    ReaderGroup readerGroup = groupManager.createReaderGroup("reader",
            ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, streamName)).build());
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("readerId", "reader",
            new JavaSerializer<>(), ReaderConfig.builder().build());

    // Add segment event listener
    Listener<SegmentNotification> l1 = notification -> {
        listenerInvoked.set(true);
        numberOfReaders.set(notification.getNumOfReaders());
        numberOfSegments.set(notification.getNumOfSegments());
        listenerLatch.release();
    };
    readerGroup.getSegmentNotifier(executor).registerListener(l1);
    EventRead<String> event1 = reader1.readNextEvent(15000);
    EventRead<String> event2 = reader1.readNextEvent(15000);
    assertNotNull(event1);
    assertEquals("data1", event1.getEvent());
    assertNotNull(event2);
    assertEquals("data2", event2.getEvent());
    listenerLatch.await();
    assertTrue("Listener invoked", listenerInvoked.get());
    assertEquals(2, numberOfSegments.get());
    assertEquals(1, numberOfReaders.get());
}
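The same notifier pattern can drive reader-pool sizing in an application: SegmentNotification carries the current segment and reader counts. A sketch assuming an existing readerGroup and executor; the rebalancing reaction is hypothetical, not part of the test:

    // Segment-count monitoring; readerGroup and executor are assumed.
    readerGroup.getSegmentNotifier(executor).registerListener(notification -> {
        int segments = notification.getNumOfSegments();
        int readers = notification.getNumOfReaders();
        // Hypothetical reaction: with fewer readers than segments, some readers
        // own multiple segments, so an application might add readers here.
        if (readers < segments) {
            System.out.printf("%d segments, %d readers: consider adding readers%n", segments, readers);
        }
    });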
Use of io.pravega.client.stream.EventStreamReader in project pravega by pravega.
In class EndToEndAutoScaleUpWithTxnTest, method main.
public static void main(String[] args) throws Exception {
    try {
        @Cleanup
        TestingServer zkTestServer = new TestingServerStarter().start();
        int port = Config.SERVICE_PORT;
        @Cleanup
        ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port);
        Controller controller = controllerWrapper.getController();
        controllerWrapper.getControllerService().createScope(NameUtils.INTERNAL_SCOPE_NAME).get();
        @Cleanup
        ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
        @Cleanup
        ClientFactory internalCF = new ClientFactoryImpl(NameUtils.INTERNAL_SCOPE_NAME, controller, connectionFactory);
        ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
        serviceBuilder.initialize();
        StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
        @Cleanup
        SegmentStatsFactory segmentStatsFactory = new SegmentStatsFactory();
        SegmentStatsRecorder statsRecorder = segmentStatsFactory.createSegmentStatsRecorder(store, internalCF,
                AutoScalerConfig.builder()
                        .with(AutoScalerConfig.MUTE_IN_SECONDS, 0)
                        .with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0)
                        .build());
        @Cleanup
        PravegaConnectionListener server = new PravegaConnectionListener(false, "localhost", 12345, store,
                statsRecorder, null, null, null);
        server.startListening();
        controllerWrapper.awaitRunning();
        controllerWrapper.getControllerService().createScope("test").get();
        controller.createStream(CONFIG).get();
        @Cleanup
        MockClientFactory clientFactory = new MockClientFactory("test", controller);

        // Mocking pravega service by putting scale up and scale down requests for the stream
        EventWriterConfig writerConfig = EventWriterConfig.builder()
                .transactionTimeoutTime(30000)
                .transactionTimeoutScaleGracePeriod(30000)
                .build();
        EventStreamWriter<String> test = clientFactory.createEventWriter("test", new JavaSerializer<>(), writerConfig);

        // region Successful commit tests
        Transaction<String> txn1 = test.beginTxn();
        txn1.writeEvent("1");
        txn1.flush();
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 1.0 / 3.0);
        map.put(1.0 / 3.0, 2.0 / 3.0);
        map.put(2.0 / 3.0, 1.0);
        Stream stream = new StreamImpl("test", "test");
        ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        controller.scaleStream(stream, Collections.singletonList(0), map, executor).getFuture().get();
        Transaction<String> txn2 = test.beginTxn();
        txn2.writeEvent("2");
        txn2.flush();
        txn2.commit();
        txn1.commit();
        Thread.sleep(1000);
        @Cleanup
        ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl("test", controller, clientFactory, connectionFactory);
        readerGroupManager.createReaderGroup("readergrp", ReaderGroupConfig.builder().stream("test").build());
        final EventStreamReader<String> reader = clientFactory.createReader("1", "readergrp",
                new JavaSerializer<>(), ReaderConfig.builder().build());
        String event1 = reader.readNextEvent(SECONDS.toMillis(60)).getEvent();
        String event2 = reader.readNextEvent(SECONDS.toMillis(60)).getEvent();
        assert event1.equals("1");
        assert event2.equals("2");
        final AtomicBoolean done = new AtomicBoolean(false);
        startWriter(test, done);
        Retry.withExpBackoff(10, 10, 100, 10000)
             .retryingOn(NotDoneException.class)
             .throwingOn(RuntimeException.class)
             .runAsync(() -> controller.getCurrentSegments("test", "test").thenAccept(streamSegments -> {
                 if (streamSegments.getSegments().size() > 3) {
                     System.err.println("Success");
                     log.info("Success");
                     System.exit(0);
                 } else {
                     throw new NotDoneException();
                 }
             }), Executors.newSingleThreadScheduledExecutor())
             .exceptionally(e -> {
                 System.err.println("Failure");
                 log.error("Failure");
                 System.exit(1);
                 return null;
             }).get();
    } catch (Throwable e) {
        System.err.println("Test failed with exception: " + e.getMessage());
        log.error("Test failed with exception", e);
        System.exit(-1);
    }
    System.exit(0);
}
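The transactional write path exercised above follows the usual begin/write/flush/commit sequence; every call below appears verbatim in the test. A minimal sketch, assuming an existing ClientFactory for the "test" scope (stream name, payload, and timeout value are illustrative):

    // Transactional write sketch; clientFactory is assumed and stream "test" exists.
    EventWriterConfig cfg = EventWriterConfig.builder()
            .transactionTimeoutTime(30000)
            .build();
    EventStreamWriter<String> txnWriter = clientFactory.createEventWriter("test", new JavaSerializer<>(), cfg);
    Transaction<String> txn = txnWriter.beginTxn();
    txn.writeEvent("payload");   // buffered in the txn's own segments, invisible to readers
    txn.flush();                 // push buffered events to the server
    txn.commit();                // atomically expose all txn events to readers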
Use of io.pravega.client.stream.EventStreamReader in project pravega by pravega.
In class MultiReadersEndToEndTest, method readAllEvents.
private Collection<Integer> readAllEvents(final int numParallelReaders, ClientFactory clientFactory,
                                          final String readerGroupName, final int numSegments) {
    ConcurrentLinkedQueue<Integer> read = new ConcurrentLinkedQueue<>();
    final ExecutorService executorService = Executors.newFixedThreadPool(numParallelReaders,
            new ThreadFactoryBuilder().setNameFormat("testreader-pool-%d").build());
    List<Future<?>> futures = new ArrayList<>();
    for (int i = 0; i < numParallelReaders; i++) {
        futures.add(executorService.submit(() -> {
            final String readerId = UUID.randomUUID().toString();
            @Cleanup
            final EventStreamReader<Integer> reader = clientFactory.createReader(readerId, readerGroupName,
                    new IntegerSerializer(), ReaderConfig.builder().build());
            int emptyCount = 0;
            while (emptyCount <= numSegments) {
                try {
                    final Integer integerEventRead = reader.readNextEvent(100).getEvent();
                    if (integerEventRead != null) {
                        read.add(integerEventRead);
                        emptyCount = 0;
                    } else {
                        emptyCount++;
                    }
                } catch (ReinitializationRequiredException e) {
                    throw new RuntimeException(e);
                }
            }
        }));
    }
    // Wait until all readers are done.
    futures.forEach(f -> Futures.getAndHandleExceptions(f, RuntimeException::new));
    executorService.shutdownNow();
    return read;
}
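The termination condition is a heuristic rather than a guarantee: each reader gives up only after numSegments + 1 consecutive empty reads, long enough for a reader whose assigned segments are drained to have polled past the tail of every segment it might own. A hypothetical call site (the reader-group name and counts are illustrative, not from the test):

    // Hypothetical driver; assumes a ClientFactory and a reader group
    // "testReaderGroup" over a four-segment stream were created beforehand.
    Collection<Integer> events = readAllEvents(4, clientFactory, "testReaderGroup", 4);
    System.out.println("Read " + events.size() + " events");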