Use of io.pravega.client.stream.Position in project pravega by pravega.
From the class ZKCheckpointStoreTests, method failingTests.
@Test(timeout = 30000)
public void failingTests() {
    final String process1 = UUID.randomUUID().toString();
    final String readerGroup1 = UUID.randomUUID().toString();
    final String readerGroup2 = UUID.randomUUID().toString();
    final String reader1 = UUID.randomUUID().toString();
    // Close the client so that every subsequent checkpoint store call fails.
    cli.close();
    Predicate<Throwable> predicate = e -> e instanceof CheckpointStoreException && e.getCause() instanceof IllegalStateException;
    AssertExtensions.assertThrows("failed getProcesses", () -> checkpointStore.getProcesses(), predicate);
    AssertExtensions.assertThrows("failed addReaderGroup", () -> checkpointStore.addReaderGroup(process1, readerGroup1), predicate);
    AssertExtensions.assertThrows("failed getReaderGroups", () -> checkpointStore.getReaderGroups(process1), predicate);
    AssertExtensions.assertThrows("failed addReader", () -> checkpointStore.addReader(process1, readerGroup1, reader1), predicate);
    Position position = new PositionImpl(Collections.emptyMap());
    AssertExtensions.assertThrows("failed setPosition", () -> checkpointStore.setPosition(process1, readerGroup1, reader1, position), predicate);
    AssertExtensions.assertThrows("failed getPositions", () -> checkpointStore.getPositions(process1, readerGroup1), predicate);
    AssertExtensions.assertThrows("failed sealReaderGroup", () -> checkpointStore.sealReaderGroup(process1, readerGroup2), predicate);
    AssertExtensions.assertThrows("failed removeReader", () -> checkpointStore.removeReader(process1, readerGroup1, reader1), predicate);
    AssertExtensions.assertThrows("failed removeReaderGroup", () -> checkpointStore.removeReaderGroup(process1, readerGroup1), predicate);
}
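For contrast, here is a minimal happy-path sketch of the same CheckpointStore calls on an open client. Method names are taken from the test above; the Map return type of getPositions and the process/group/reader naming are assumptions for illustration only.

// Happy-path sketch of the CheckpointStore calls exercised above, assuming an open client.
String process = UUID.randomUUID().toString();
String group = UUID.randomUUID().toString();
String reader = UUID.randomUUID().toString();
// Register the reader group under the process, then a reader within that group.
checkpointStore.addReaderGroup(process, group);
checkpointStore.addReader(process, group, reader);
// Persist the reader's position; an empty PositionImpl stands in for a real reader position.
checkpointStore.setPosition(process, group, reader, new PositionImpl(Collections.emptyMap()));
// Read back all reader positions for the group (assumed to be keyed by reader id).
Map<String, Position> positions = checkpointStore.getPositions(process, group);
// Tear down in the order the failover flow uses: seal the group, drop the reader, remove the group.
checkpointStore.sealReaderGroup(process, group);
checkpointStore.removeReader(process, group, reader);
checkpointStore.removeReaderGroup(process, group);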
Use of io.pravega.client.stream.Position in project pravega by pravega.
From the class EventProcessorGroupTest, method testNotifyProcessFailureSuccess.
@Test(timeout = 10000)
public void testNotifyProcessFailureSuccess() throws CheckpointStoreException {
    this.requestEventProcessors = system.createEventProcessorGroup(requestConfig, checkpointStore, rebalanceExecutor);
    requestEventProcessors.awaitRunning();
    assertTrue(requestEventProcessors.isRunning());
    Position mockReaderPosition = mock(Position.class);
    doReturn(ImmutableMap.of("reader1", mockReaderPosition)).when(checkpointStore).sealReaderGroup("host1", "scaleGroup");
    doNothing().when(mockReaderGroup).readerOffline(anyString(), any());
    doNothing().when(checkpointStore).removeReader(anyString(), anyString(), anyString());
    doNothing().when(checkpointStore).removeReaderGroup(anyString(), anyString());
    requestEventProcessors.notifyProcessFailure("host1");
    verify(checkpointStore, times(1)).sealReaderGroup("host1", "scaleGroup");
    verify(mockReaderGroup, times(1)).readerOffline(anyString(), any());
    verify(checkpointStore, times(1)).removeReader(anyString(), anyString(), anyString());
    verify(checkpointStore, times(1)).removeReaderGroup("host1", "scaleGroup");
}
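The mock stubbing above mirrors the handoff that notifyProcessFailure is expected to perform. A hand-written outline of that sequence, based only on the calls stubbed and verified in this test (not the actual EventProcessorGroupImpl code), looks like this:

// Sketch of the failover handoff verified above (not the production implementation).
void handOffReaders(CheckpointStore checkpointStore, ReaderGroup readerGroup, String failedProcess, String groupName) throws CheckpointStoreException {
    // Sealing the failed host's reader group returns the last known Position of each of its readers.
    Map<String, Position> lastPositions = checkpointStore.sealReaderGroup(failedProcess, groupName);
    for (Map.Entry<String, Position> entry : lastPositions.entrySet()) {
        // Declare the reader offline at its last checkpointed Position so another reader can take over its segments.
        readerGroup.readerOffline(entry.getKey(), entry.getValue());
        checkpointStore.removeReader(failedProcess, groupName, entry.getKey());
    }
    // Once the group has no readers left, remove it from the store.
    checkpointStore.removeReaderGroup(failedProcess, groupName);
}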
Use of io.pravega.client.stream.Position in project pravega by pravega.
From the class RestoreBackUpDataRecoveryTest, method testDurableDataLogFailRecoveryReadersPaused.
/**
 * Tests the data recovery scenario with readers pausing while reading. Readers read some events and are then
 * stopped. The durable data log is erased and restored, and it is validated that the readers can read the rest
 * of the unread events.
 * What the test does, step by step:
 * 1. Starts Pravega locally with just 4 segment containers.
 * 2. Writes {@link #TOTAL_NUM_EVENTS} events.
 * 3. Waits for all created segments to be flushed to Long Term Storage.
 * 4. Lets a reader read N events.
 * 5. Shuts down the controller, segment store and BookKeeper/ZooKeeper.
 * 6. Creates a backup of the container metadata segment and its attribute segment before deleting them from Long Term Storage.
 * 7. Starts 4 debug segment containers using a new BookKeeper/ZooKeeper and the Long Term Storage.
 * 8. Re-creates the container metadata segments in DurableLog and lets them be flushed to Long Term Storage.
 * 9. Updates core attributes of segments in the new container metadata segment using details from the backup of the old container metadata segment.
 * 10. Starts the segment store and controller.
 * 11. Lets the reader read the remaining (TOTAL_NUM_EVENTS - N) events.
 * @throws Exception If an exception occurred during execution.
 */
@Test(timeout = 180000)
public void testDurableDataLogFailRecoveryReadersPaused() throws Exception {
    int instanceId = 0;
    int bookieCount = 1;
    int containerCount = 4;
    int eventsReadCount = RANDOM.nextInt(TOTAL_NUM_EVENTS);
    String testReader = "readerDRIntegrationTest";
    String testReaderGroup = "readerGroupDRIntegrationTest";
    // Creating a long term storage only once here.
    this.storageFactory = new InMemoryStorageFactory(executorService());
    log.info("Created a long term storage.");
    // Start a new BK & ZK, segment store and controller.
    @Cleanup
    PravegaRunner pravegaRunner = new PravegaRunner(instanceId++, bookieCount, containerCount, this.storageFactory);
    // Create a stream for writing data.
    createScopeStream(pravegaRunner.controllerRunner.controller, SCOPE, STREAM1);
    log.info("Created stream '{}'", STREAM1);
    // Create a client to write events.
    try (val clientRunner = new ClientRunner(pravegaRunner.controllerRunner)) {
        // Write events.
        writeEventsToStream(clientRunner.clientFactory, true);
        // Create a reader for reading from the stream.
        EventStreamReader<String> reader = createReader(clientRunner.clientFactory, clientRunner.readerGroupManager, SCOPE, STREAM1, testReaderGroup, testReader);
        // Let the reader read N events and mark its position.
        Position p = readNEvents(reader, eventsReadCount);
        ReaderGroup readerGroup = clientRunner.readerGroupManager.getReaderGroup(testReaderGroup);
        readerGroup.readerOffline(testReader, p);
    }
    // Shut down the controller.
    pravegaRunner.controllerRunner.close();
    // Flush DurableLog to Long Term Storage.
    flushToStorage(pravegaRunner.segmentStoreRunner.serviceBuilder);
    // Shut down the SegmentStore.
    pravegaRunner.segmentStoreRunner.close();
    // Shut down BookKeeper & ZooKeeper.
    pravegaRunner.bookKeeperRunner.close();
    log.info("SegmentStore, BookKeeper & ZooKeeper shutdown");
    // Get the long term storage from the running Pravega instance.
    @Cleanup
    Storage storage = new AsyncStorageWrapper(new RollingStorage(this.storageFactory.createSyncStorage(), new SegmentRollingPolicy(DEFAULT_ROLLING_SIZE)), executorService());
    Map<Integer, String> backUpMetadataSegments = ContainerRecoveryUtils.createBackUpMetadataSegments(storage, containerCount, executorService(), TIMEOUT).join();
    // Start a new BookKeeper and ZooKeeper.
    pravegaRunner.bookKeeperRunner = new BookKeeperRunner(instanceId++, bookieCount);
    createBookKeeperLogFactory();
    log.info("Started a new BookKeeper and ZooKeeper.");
    // Recover segments.
    runRecovery(containerCount, storage, backUpMetadataSegments);
    // Start a new segment store and controller.
    pravegaRunner.restartControllerAndSegmentStore(this.storageFactory, this.dataLogFactory);
    log.info("Started segment store and controller again.");
    // Create the client with the new controller.
    try (val clientRunner = new ClientRunner(pravegaRunner.controllerRunner)) {
        // Get the reader group.
        ReaderGroup readerGroup = clientRunner.readerGroupManager.getReaderGroup(testReaderGroup);
        assertNotNull(readerGroup);
        EventStreamReader<String> reader = clientRunner.clientFactory.createReader(testReader, testReaderGroup, new UTF8StringSerializer(), ReaderConfig.builder().build());
        // Read the remaining number of events.
        readNEvents(reader, TOTAL_NUM_EVENTS - eventsReadCount);
        // Reading the next event should return null.
        assertNull(reader.readNextEvent(READ_TIMEOUT.toMillis()).getEvent());
        reader.close();
    }
}
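The Position hand-off at the heart of this test can be distilled into a short sketch. The clientFactory, readerGroupManager, reader id and group name below are stand-ins for the ClientRunner fields and test constants used above.

// Sketch: pause a reader at its current Position, then resume from that point later.
EventStreamReader<String> reader = clientFactory.createReader("testReader", "testReaderGroup", new UTF8StringSerializer(), ReaderConfig.builder().build());
EventRead<String> lastRead = reader.readNextEvent(READ_TIMEOUT.toMillis());
Position lastPosition = lastRead.getPosition();
reader.close();
// Declaring the reader offline at its last Position lets the group reassign its segments so that
// events past that Position remain available to whichever reader picks them up next.
readerGroupManager.getReaderGroup("testReaderGroup").readerOffline("testReader", lastPosition);
// A reader created later against the same group continues with the unread remainder of the stream.
EventStreamReader<String> resumed = clientFactory.createReader("testReader", "testReaderGroup", new UTF8StringSerializer(), ReaderConfig.builder().build());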
Use of io.pravega.client.stream.Position in project pravega by pravega.
From the class RestoreBackUpDataRecoveryTest, method readNEvents.
// Reads the given number of events using the given reader and returns the reader's position after the last read.
private Position readNEvents(EventStreamReader<String> reader, int num) {
    EventRead<String> eventRead = null;
    for (int q = 0; q < num; q++) {
        eventRead = reader.readNextEvent(READ_TIMEOUT.toMillis());
        Assert.assertEquals("Event written and read back don't match", EVENT, eventRead.getEvent());
    }
    // Note: eventRead stays null (and getPosition() throws a NullPointerException) if num is 0.
    return eventRead.getPosition();
}
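A slightly more defensive variant of this helper (a sketch, not part of the test class) fails fast when num is not positive instead of hitting a NullPointerException at getPosition(); it assumes Guava's Preconditions is available on the classpath.

// Defensive sketch of the same helper: rejects num <= 0 up front.
private Position readNEventsChecked(EventStreamReader<String> reader, int num) {
    Preconditions.checkArgument(num > 0, "num must be positive");
    EventRead<String> eventRead = null;
    for (int q = 0; q < num; q++) {
        eventRead = reader.readNextEvent(READ_TIMEOUT.toMillis());
        Assert.assertEquals("Event written and read back don't match", EVENT, eventRead.getEvent());
    }
    return eventRead.getPosition();
}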
Use of io.pravega.client.stream.Position in project pravega by pravega.
From the class EventProcessorTest, method testEventProcessorFailover.
@Test(timeout = 60000)
public void testEventProcessorFailover() throws Exception {
    final String scope = "controllerScope2";
    final String streamName = "stream2";
    final String readerGroup = "readerGroup2";
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    controller.createScope(scope).join();
    final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    controller.createStream(scope, streamName, config).join();
    eventSerializer = new EventSerializer<>(new TestSerializer());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    @Cleanup
    EventStreamWriter<TestEvent> producer = clientFactory.createEventWriter(streamName, eventSerializer, EventWriterConfig.builder().build());
    TestEvent event1 = new TestEvent(0);
    producer.writeEvent("key", event1).join();
    TestEvent event2 = new TestEvent(1);
    producer.writeEvent("key", event2).join();
    producer.flush();
    EventProcessorSystem system = new EventProcessorSystemImpl("Controller", host, scope, clientFactory, new ReaderGroupManagerImpl(scope, controller, clientFactory));
    CheckpointConfig checkpointConfig = CheckpointConfig.builder().type(CheckpointConfig.Type.None).build();
    EventProcessorGroupConfig eventProcessorGroupConfig = EventProcessorGroupConfigImpl.builder().eventProcessorCount(1).readerGroupName(readerGroup).streamName(streamName).checkpointConfig(checkpointConfig).build();
    LinkedBlockingQueue<TestEvent> eventsProcessed = new LinkedBlockingQueue<>();
    EventProcessorConfig<TestEvent> eventProcessorConfig = EventProcessorConfig.<TestEvent>builder().supplier(() -> new EventProcessor<TestEvent>() {

        @Override
        protected void process(TestEvent event, Position position) {
            try {
                eventsProcessed.offer(event);
                // Keep sending a null position.
                getCheckpointer().store(null);
            } catch (CheckpointStoreException e) {
                e.printStackTrace();
            }
        }
    }).serializer(eventSerializer).decider((Throwable e) -> ExceptionHandler.Directive.Stop).config(eventProcessorGroupConfig).build();
    @Cleanup
    EventProcessorGroup<TestEvent> eventProcessorGroup = system.createEventProcessorGroup(eventProcessorConfig, CheckpointStoreFactory.createInMemoryStore(), executorService());
    eventProcessorGroup.awaitRunning();
    // Wait until both events are read.
    assertEquals(event1, eventsProcessed.take());
    assertEquals(event2, eventsProcessed.take());
    assertTrue(eventsProcessed.isEmpty());
    // Shut down the event processor.
    // Upon shutdown, readerGroup.readerOffline and reader.close should have been called.
    eventProcessorGroup.stopAsync();
    eventProcessorGroup.awaitTerminated();
    @Cleanup
    ConnectionFactory connectionFactory2 = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup
    ClientFactoryImpl clientFactory2 = new ClientFactoryImpl(scope, controller, connectionFactory2);
    system = new EventProcessorSystemImpl("Controller2", host, scope, clientFactory2, new ReaderGroupManagerImpl(scope, controller, clientFactory2));
    EventProcessorConfig<TestEvent> eventProcessorConfig2 = EventProcessorConfig.<TestEvent>builder().supplier(() -> new EventProcessor<TestEvent>() {

        @Override
        protected void process(TestEvent event, Position position) {
            try {
                eventsProcessed.offer(event);
                getCheckpointer().store(null);
            } catch (CheckpointStoreException e) {
                e.printStackTrace();
            }
        }
    }).serializer(eventSerializer).decider((Throwable e) -> ExceptionHandler.Directive.Stop).config(eventProcessorGroupConfig).build();
    @Cleanup
    EventProcessorGroup<TestEvent> eventProcessorGroup2 = system.createEventProcessorGroup(eventProcessorConfig2, CheckpointStoreFactory.createInMemoryStore(), executorService());
    eventProcessorGroup2.awaitRunning();
    // Verify that both events are read again.
    assertEquals(event1, eventsProcessed.take());
    assertEquals(event2, eventsProcessed.take());
    assertTrue(eventsProcessed.isEmpty());
    eventProcessorGroup2.stopAsync();
    eventProcessorGroup2.awaitTerminated();
}
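Both processors above deliberately checkpoint a null position, which is why event1 and event2 are replayed after the failover. A variant that stores the Position handed to process would let a restarted group resume past already-processed events; this is a sketch that assumes Checkpointer.store accepts that Position (as its use above suggests) and that the group is backed by a shared, persistent checkpoint store rather than the in-memory store used here.

// Sketch: an EventProcessor that checkpoints the actual Position of each processed event.
EventProcessorConfig<TestEvent> checkpointingConfig = EventProcessorConfig.<TestEvent>builder().supplier(() -> new EventProcessor<TestEvent>() {

    @Override
    protected void process(TestEvent event, Position position) {
        try {
            eventsProcessed.offer(event);
            // Store the real Position instead of null so a restarted group resumes after this event.
            getCheckpointer().store(position);
        } catch (CheckpointStoreException e) {
            e.printStackTrace();
        }
    }
}).serializer(eventSerializer).decider((Throwable e) -> ExceptionHandler.Directive.Stop).config(eventProcessorGroupConfig).build();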