Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
In class AbstractSegmentStoreCommandsTest, method testReadSegmentRangeCommand:
@Test
public void testReadSegmentRangeCommand() throws Exception {
// Create a temporary directory.
Path tempDirPath = Files.createTempDirectory("readSegmentDir");
String filename = Paths.get(tempDirPath.toString(), "tmp" + System.currentTimeMillis(), "readSegmentTest.txt").toString();
TestUtils.createScopeStream(SETUP_UTILS.getController(), "segmentstore", "readsegment", StreamConfiguration.builder().build());
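// Write a few events so that the segment contains data to read.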
@Cleanup EventStreamClientFactory factory = EventStreamClientFactory.withScope("segmentstore", clientConfig);
@Cleanup EventStreamWriter<String> writer = factory.createEventWriter("readsegment", new JavaSerializer<>(), EventWriterConfig.builder().build());
writer.writeEvents("rk", Arrays.asList("a", "2", "3"));
writer.flush();
// Execute the read-segment command and verify that the segment data is written into the file.
String commandResult = TestUtils.executeCommand("segmentstore read-segment segmentstore/readsegment/0.#epoch.0 0 8 localhost " + filename, STATE.get());
Assert.assertTrue(commandResult.contains("The segment data has been successfully written into"));
File file = new File(filename);
Assert.assertTrue(file.exists());
Assert.assertNotEquals(0, file.length());
AssertExtensions.assertThrows(FileAlreadyExistsException.class, () -> TestUtils.executeCommand("segmentstore read-segment _system/_RGcommitStreamReaders/0.#epoch.0 0 8 localhost " + filename, STATE.get()));
// Delete file created during the test.
Files.deleteIfExists(Paths.get(filename));
AssertExtensions.assertThrows(WireCommandFailedException.class, () -> TestUtils.executeCommand("segmentstore read-segment not/exists/0 0 1 localhost " + filename, STATE.get()));
Assert.assertNotNull(ReadSegmentRangeCommand.descriptor());
// Delete file created during the test.
Files.deleteIfExists(Paths.get(filename));
// Register the temporary directory for deletion when the JVM exits.
tempDirPath.toFile().deleteOnExit();
}
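The test above references a clientConfig field that is defined elsewhere in AbstractSegmentStoreCommandsTest. As a minimal sketch of how such a config is typically built (the controller URI below is a placeholder, not taken from the source):
// Hypothetical setup; requires io.pravega.client.ClientConfig and java.net.URI.
ClientConfig clientConfig = ClientConfig.builder()
        .controllerURI(URI.create("tcp://localhost:9090"))
        .build();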
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
In class EventProcessorTest, method testEventProcessorGroupRebalance:
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void testEventProcessorGroupRebalance() throws CheckpointStoreException, ReinitializationRequiredException {
String systemName = "rebalance";
String readerGroupName = "rebalance";
CheckpointStore checkpointStore = spy(CheckpointStoreFactory.createInMemoryStore());
checkpointStore.addReaderGroup(PROCESS, readerGroupName);
EventProcessorGroupConfig config = createEventProcessorGroupConfig(2);
EventStreamClientFactory clientFactory = Mockito.mock(EventStreamClientFactory.class);
EventStreamReader<TestEvent> reader = Mockito.mock(EventStreamReader.class);
Mockito.when(reader.readNextEvent(anyLong())).thenReturn(Mockito.mock(EventReadImpl.class));
Mockito.when(clientFactory.createReader(anyString(), anyString(), any(), any())).thenAnswer(x -> reader);
Mockito.when(clientFactory.<String>createEventWriter(anyString(), any(), any())).thenReturn(new EventStreamWriterMock<>());
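// Mock the reader group and its manager so that no real Pravega cluster is needed.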
ReaderGroup readerGroup = Mockito.mock(ReaderGroup.class);
Mockito.when(readerGroup.getGroupName()).thenReturn(readerGroupName);
ReaderGroupManager readerGroupManager = Mockito.mock(ReaderGroupManager.class);
Mockito.when(readerGroupManager.getReaderGroup(anyString())).then(invocation -> readerGroup);
EventProcessorSystemImpl system = new EventProcessorSystemImpl(systemName, PROCESS, SCOPE, clientFactory, readerGroupManager);
EventProcessorConfig<TestEvent> eventProcessorConfig = EventProcessorConfig.<TestEvent>builder()
        .supplier(() -> new TestEventProcessor(false))
        .serializer(new EventSerializer<>())
        .decider((Throwable e) -> ExceptionHandler.Directive.Stop)
        .config(config)
        .minRebalanceIntervalMillis(0L)
        .build();
// Create EventProcessorGroup.
@Cleanup EventProcessorGroupImpl<TestEvent> group = (EventProcessorGroupImpl<TestEvent>) system.createEventProcessorGroup(eventProcessorConfig, checkpointStore, executor);
group.awaitRunning();
ConcurrentHashMap<String, EventProcessorCell<TestEvent>> eventProcessorMap = group.getEventProcessorMap();
assertEquals(2, eventProcessorMap.size());
List<String> readerIds = eventProcessorMap.entrySet().stream().map(Map.Entry::getKey).collect(Collectors.toList());
// region case 1: even distribution - 2 readers with 2 segments each
HashMap<String, Integer> distribution = new HashMap<>();
distribution.put(readerIds.get(0), 2);
distribution.put(readerIds.get(1), 2);
ReaderSegmentDistribution readerSegmentDistribution = ReaderSegmentDistribution.builder().readerSegmentDistribution(distribution).unassignedSegments(0).build();
Mockito.when(readerGroup.getReaderSegmentDistribution()).thenReturn(readerSegmentDistribution);
// call rebalance. no new readers should be added and no existing readers removed.
group.rebalance();
eventProcessorMap = group.getEventProcessorMap();
assertEquals(2, eventProcessorMap.size());
// the original readers should not have been replaced
assertTrue(eventProcessorMap.containsKey(readerIds.get(0)));
assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
// endregion
// region case 2: two external readers with 0 segments assigned and 2 overloaded readers in the
// reader group. unassigned = 0
String reader2 = "reader2";
String reader3 = "reader3";
distribution = new HashMap<>();
distribution.put(readerIds.get(0), 2);
distribution.put(readerIds.get(1), 2);
distribution.put(reader2, 0);
distribution.put(reader3, 0);
readerSegmentDistribution = ReaderSegmentDistribution.builder().readerSegmentDistribution(distribution).unassignedSegments(0).build();
Mockito.when(readerGroup.getReaderSegmentDistribution()).thenReturn(readerSegmentDistribution);
// call rebalance. this should replace existing overloaded readers
group.rebalance();
eventProcessorMap = group.getEventProcessorMap();
assertEquals(2, eventProcessorMap.size());
assertFalse(eventProcessorMap.containsKey(readerIds.get(0)));
assertFalse(eventProcessorMap.containsKey(readerIds.get(1)));
Enumeration<String> keys = eventProcessorMap.keys();
String firstReplacement = keys.nextElement();
String secondReplacement = keys.nextElement();
// verify that checkpointStore.addReader is called twice
verify(checkpointStore, times(2)).addReader(any(), any(), eq(firstReplacement));
verify(checkpointStore, times(2)).addReader(any(), any(), eq(secondReplacement));
// update the reader ids from the reader group
readerIds = eventProcessorMap.entrySet().stream().map(Map.Entry::getKey).collect(Collectors.toList());
// endregion
// region case 3: even distribution among 4 readers
distribution = new HashMap<>();
distribution.put(readerIds.get(0), 1);
distribution.put(readerIds.get(1), 1);
distribution.put(reader2, 1);
distribution.put(reader3, 1);
readerSegmentDistribution = ReaderSegmentDistribution.builder().readerSegmentDistribution(distribution).unassignedSegments(0).build();
Mockito.when(readerGroup.getReaderSegmentDistribution()).thenReturn(readerSegmentDistribution);
// call rebalance. nothing should happen
group.rebalance();
// no change to the group
eventProcessorMap = group.getEventProcessorMap();
assertEquals(2, eventProcessorMap.size());
assertTrue(eventProcessorMap.containsKey(readerIds.get(0)));
assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
// endregion
// region case 4: with 1 overloaded reader and 2 unassigned segments
distribution = new HashMap<>();
distribution.put(readerIds.get(0), 2);
distribution.put(readerIds.get(1), 0);
distribution.put(reader2, 0);
distribution.put(reader3, 0);
readerSegmentDistribution = ReaderSegmentDistribution.builder().readerSegmentDistribution(distribution).unassignedSegments(2).build();
Mockito.when(readerGroup.getReaderSegmentDistribution()).thenReturn(readerSegmentDistribution);
// call rebalance. overloaded reader should be replaced
group.rebalance();
// reader0 should have been replaced.
eventProcessorMap = group.getEventProcessorMap();
assertEquals(2, eventProcessorMap.size());
assertFalse(eventProcessorMap.containsKey(readerIds.get(0)));
assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
// endregion
readerIds = eventProcessorMap.entrySet().stream().map(Map.Entry::getKey).collect(Collectors.toList());
distribution = new HashMap<>();
distribution.put(readerIds.get(0), 2);
distribution.put(readerIds.get(1), 0);
distribution.put(reader2, 0);
distribution.put(reader3, 0);
readerSegmentDistribution = ReaderSegmentDistribution.builder().readerSegmentDistribution(distribution).unassignedSegments(2).build();
// region case 5: failure cases
doThrow(new RuntimeException("reader group throws")).when(readerGroup).getReaderSegmentDistribution();
// the exception should be handled, with no state change in the event processor group
group.rebalance();
eventProcessorMap = group.getEventProcessorMap();
assertEquals(2, eventProcessorMap.size());
assertTrue(eventProcessorMap.containsKey(readerIds.get(0)));
assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
// now reset the distribution
doReturn(readerSegmentDistribution).when(readerGroup).getReaderSegmentDistribution();
// throw from checkpoint store
doThrow(new CheckpointStoreException("checkpoint store exception")).when(checkpointStore).addReader(anyString(), anyString(), anyString());
// exception should have been thrown and handled
group.rebalance();
eventProcessorMap = group.getEventProcessorMap();
assertEquals(2, eventProcessorMap.size());
assertTrue(eventProcessorMap.containsKey(readerIds.get(0)));
assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
// endregion
// Stop the group, and await its termination.
group.stopAsync();
group.awaitTerminated();
// call rebalance after shutdown so that replace cell is invoked - this should fail a precondition check
readerIds = eventProcessorMap.entrySet().stream().map(Map.Entry::getKey).collect(Collectors.toList());
distribution = new HashMap<>();
distribution.put(readerIds.get(0), 2);
distribution.put(readerIds.get(1), 2);
distribution.put(reader2, 0);
distribution.put(reader3, 0);
readerSegmentDistribution = ReaderSegmentDistribution.builder().readerSegmentDistribution(distribution).unassignedSegments(0).build();
Mockito.when(readerGroup.getReaderSegmentDistribution()).thenReturn(readerSegmentDistribution);
// calling rebalance on a terminated group fails a precondition; the exception is logged and
// ignored, and no rebalance occurs.
group.rebalance();
eventProcessorMap = group.getEventProcessorMap();
assertEquals(2, eventProcessorMap.size());
assertTrue(eventProcessorMap.containsKey(readerIds.get(0)));
assertTrue(eventProcessorMap.containsKey(readerIds.get(1)));
// endregion
}
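The rebalance heuristic itself lives inside EventProcessorGroupImpl and is not shown here. As a rough standalone sketch of the kind of fair-share check the four cases above encode (my assumption, not Pravega's actual code):
// Hypothetical overload check: a reader counts as overloaded when it holds more
// than a ceiling fair share of all segments, assigned plus unassigned.
static boolean isOverloaded(Map<String, Integer> distribution, int unassignedSegments, String readerId) {
    int totalSegments = distribution.values().stream().mapToInt(Integer::intValue).sum() + unassignedSegments;
    int fairShare = (totalSegments + distribution.size() - 1) / distribution.size(); // ceiling division
    return distribution.getOrDefault(readerId, 0) > fairShare;
}
// Under this check, case 2 flags both original readers (2 segments against a fair share of 1),
// while cases 1 and 3 flag no one, which matches the assertions above.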
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
In class EventProcessorTest, method createMockSystem:
private EventProcessorSystemImpl createMockSystem(final String name, final String processId, final String scope, final SequenceAnswer<EventStreamReader<TestEvent>> readers, final EventStreamWriter<TestEvent> writer, final String readerGroupName) {
EventStreamClientFactory clientFactory = Mockito.mock(EventStreamClientFactory.class);
Mockito.when(clientFactory.createReader(anyString(), anyString(), any(), any())).thenAnswer(readers);
Mockito.when(clientFactory.<TestEvent>createEventWriter(anyString(), any(), any())).thenReturn(writer);
ReaderGroup readerGroup = Mockito.mock(ReaderGroup.class);
Mockito.when(readerGroup.getGroupName()).thenReturn(readerGroupName);
ReaderGroupManager readerGroupManager = Mockito.mock(ReaderGroupManager.class);
Mockito.when(readerGroupManager.getReaderGroup(anyString())).then(invocation -> readerGroup);
return new EventProcessorSystemImpl(name, processId, scope, clientFactory, readerGroupManager);
}
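A hypothetical call site for this helper (the names are placeholders, and I am assuming SequenceAnswer wraps the list of readers it hands out in order):
// Hypothetical usage; the unchecked assignment is the usual Mockito idiom for generic mocks.
EventStreamReader<TestEvent> reader = Mockito.mock(EventStreamReader.class);
SequenceAnswer<EventStreamReader<TestEvent>> readers =
        new SequenceAnswer<>(Collections.singletonList(reader));
EventProcessorSystemImpl system =
        createMockSystem("testSystem", "process1", "testScope", readers, new EventStreamWriterMock<>(), "testReaderGroup");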
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
In class StreamTransactionMetadataTasksTest, method writerInitializationTest:
@Test(timeout = 10000)
public void writerInitializationTest() throws Exception {
EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
StreamMetadataStore streamStoreMock = spy(StreamStoreFactory.createZKStore(zkClient, executor));
final long leasePeriod = 5000;
// region close before initialize
txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(), executor, "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
CompletableFuture<Void> future = txnTasks.writeCommitEvent(new CommitEvent("scope", "stream", 0));
assertFalse(future.isDone());
txnTasks.close();
AssertExtensions.assertFutureThrows("", future, e -> Exceptions.unwrap(e) instanceof CancellationException);
// endregion
// region test initialize writers with client factory
txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(), executor, "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
future = txnTasks.writeCommitEvent(new CommitEvent("scope", "stream", 0));
EventStreamClientFactory cfMock = mock(EventStreamClientFactory.class);
ControllerEventProcessorConfig eventProcConfigMock = mock(ControllerEventProcessorConfig.class);
String commitStream = "commitStream";
doAnswer(x -> commitStream).when(eventProcConfigMock).getCommitStreamName();
doAnswer(x -> commitWriter).when(cfMock).createEventWriter(eq(commitStream), any(), any());
String abortStream = "abortStream";
doAnswer(x -> abortStream).when(eventProcConfigMock).getAbortStreamName();
doAnswer(x -> abortWriter).when(cfMock).createEventWriter(eq(abortStream), any(), any());
// future should not have completed as we have not initialized the writers.
assertFalse(future.isDone());
// initialize the writers. write future should have completed now.
txnTasks.initializeStreamWriters(cfMock, eventProcConfigMock);
assertTrue(Futures.await(future));
txnTasks.close();
// endregion
// region test method calls and initialize writers with direct writer set up method call
txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(), executor, "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
streamStore.createScope(SCOPE, null, executor).join();
streamStore.createStream(SCOPE, STREAM, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), 1L, null, executor).join();
streamStore.setState(SCOPE, STREAM, State.ACTIVE, null, executor).join();
CompletableFuture<Pair<VersionedTransactionData, List<StreamSegmentRecord>>> createFuture = txnTasks.createTxn(SCOPE, STREAM, leasePeriod, 0L, 0L);
// create and ping transaction calls should not wait for writer initialization; they complete immediately.
createFuture.join();
assertTrue(Futures.await(createFuture));
UUID txnId = createFuture.join().getKey().getId();
CompletableFuture<PingTxnStatus> pingFuture = txnTasks.pingTxn(SCOPE, STREAM, txnId, leasePeriod, 0L);
assertTrue(Futures.await(pingFuture));
CompletableFuture<TxnStatus> commitFuture = txnTasks.commitTxn(SCOPE, STREAM, txnId, 0L);
assertFalse(commitFuture.isDone());
txnTasks.initializeStreamWriters(commitWriter, abortWriter);
assertTrue(Futures.await(commitFuture));
UUID txnId2 = txnTasks.createTxn(SCOPE, STREAM, leasePeriod, 0L, 1024 * 1024L).join().getKey().getId();
assertTrue(Futures.await(txnTasks.abortTxn(SCOPE, STREAM, txnId2, null, 0L)));
}
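The behavior under test - writeCommitEvent returning a future that stays incomplete until initializeStreamWriters is called - can be illustrated with a small standalone analog (an illustration of the pattern only, not Pravega's implementation):
// Standalone analog of the deferred-writer pattern: writes queue up until a writer
// is attached, after which every pending future completes.
// Needs java.util.AbstractMap, java.util.Queue, java.util.concurrent.*, java.util.function.Consumer.
class DeferredWriter<T> {
    private final Queue<AbstractMap.SimpleEntry<T, CompletableFuture<Void>>> pending = new ConcurrentLinkedQueue<>();
    private volatile Consumer<T> writer;

    CompletableFuture<Void> write(T event) {
        CompletableFuture<Void> result = new CompletableFuture<>();
        pending.add(new AbstractMap.SimpleEntry<>(event, result));
        drain();
        return result;
    }

    void initialize(Consumer<T> writer) {
        this.writer = writer;
        drain();
    }

    private void drain() {
        Consumer<T> w = writer;
        if (w == null) {
            return;
        }
        AbstractMap.SimpleEntry<T, CompletableFuture<Void>> entry;
        while ((entry = pending.poll()) != null) {
            w.accept(entry.getKey());
            entry.getValue().complete(null);
        }
    }
}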
Use of io.pravega.client.EventStreamClientFactory in project pravega by pravega.
In class ReaderGroupStreamCutUpdateTest, method testStreamcutsUpdateInReaderGroup:
@Test(timeout = 60000)
public void testStreamcutsUpdateInReaderGroup() throws Exception {
final String scope = "testStreamcutsUpdateInReaderGroup";
final String stream = "myStream";
final String readerGroupName = "testStreamcutsUpdateInReaderGroupRG";
final int checkpointingIntervalMs = 2000;
final int readerSleepInterval = 250;
final int numEvents = 100;
// First, create the stream.
@Cleanup StreamManager streamManager = StreamManager.create(controllerURI);
Assert.assertTrue(streamManager.createScope(scope));
StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build();
streamManager.createStream(scope, stream, streamConfiguration);
// Write some events in the stream.
@Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, ClientConfig.builder().controllerURI(controllerURI).build());
writeEvents(clientFactory, stream, numEvents);
// Read the events and test that positions are getting updated.
ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder().stream(Stream.of(scope, stream)).automaticCheckpointIntervalMillis(checkpointingIntervalMs).build();
@Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, controllerURI);
readerGroupManager.createReaderGroup(readerGroupName, readerGroupConfig);
ReaderGroup readerGroup = readerGroupManager.getReaderGroup(readerGroupName);
@Cleanup EventStreamReader<Double> reader = clientFactory.createReader("myReader", readerGroupName, new JavaSerializer<>(), ReaderConfig.builder().build());
Map<Stream, StreamCut> currentStreamcuts = readerGroup.getStreamCuts();
EventRead<Double> eventRead;
int lastIteration = 0, iteration = 0;
int assertionFrequency = checkpointingIntervalMs / readerSleepInterval;
do {
eventRead = reader.readNextEvent(5000);
// Check that the streamcuts are being updated periodically via automatic reader group checkpoints.
if (iteration != lastIteration && iteration % assertionFrequency == 0) {
log.info("Comparing streamcuts: {} / {} in iteration {}.", currentStreamcuts, readerGroup.getStreamCuts(), iteration);
Assert.assertNotEquals(currentStreamcuts, readerGroup.getStreamCuts());
currentStreamcuts = readerGroup.getStreamCuts();
lastIteration = iteration;
}
Thread.sleep(readerSleepInterval);
if (!eventRead.isCheckpoint()) {
iteration++;
}
} while ((eventRead.isCheckpoint() || eventRead.getEvent() != null) && iteration < numEvents);
}
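The writeEvents helper invoked above is not part of this snippet. A plausible sketch, inferring the Double payload type from the reader (the routing-key choice is an assumption):
// Hypothetical helper; writes numEvents Double events and waits for each ack.
private void writeEvents(EventStreamClientFactory clientFactory, String streamName, int numEvents) {
    @Cleanup
    EventStreamWriter<Double> writer = clientFactory.createEventWriter(streamName,
            new JavaSerializer<>(), EventWriterConfig.builder().build());
    for (int i = 0; i < numEvents; i++) {
        writer.writeEvent(String.valueOf(i), (double) i).join();
    }
}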