Use of io.pravega.client.stream.impl.PositionImpl in project pravega by pravega.
The class AbstractControllerMetadataCommandsTest, method testControllerMetadataViewReaderInfoCommand.
@Test
public void testControllerMetadataViewReaderInfoCommand() throws Exception {
final String process = UUID.randomUUID().toString();
final String readerGroup = UUID.randomUUID().toString();
final String reader = UUID.randomUUID().toString();
ZKHelper zkHelper = ZKHelper.create(SETUP_UTILS.getZkTestServer().getConnectString(), "pravega-cluster");
CheckpointStore checkpointStore = zkHelper.getCheckPointStore();
checkpointStore.addReaderGroup(process, readerGroup);
checkpointStore.addReader(process, readerGroup, reader);
Position position = new PositionImpl(ImmutableMap.of(
        new SegmentWithRange(Segment.fromScopedName("testScope/testStream/0"), 0, 0.5), 9999999L,
        new SegmentWithRange(Segment.fromScopedName("testScope/testStream/1"), 0.5, 1.0), -1L));
checkpointStore.setPosition(process, readerGroup, reader, position);
String commandResult = TestUtils.executeCommand("controller-metadata get-reader " + process + " " + readerGroup + " " + reader, STATE.get());
Assert.assertTrue(commandResult.contains("testScope/testStream"));
}
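For context, a minimal sketch (not part of the original test) of how the stored position could be read back and inspected, assuming the same fixtures as above and that CheckpointStore.getPositions returns the per-reader positions for the group:

// Sketch only, reusing the fixtures from testControllerMetadataViewReaderInfoCommand above.
Map<String, Position> positions = checkpointStore.getPositions(process, readerGroup);
PositionImpl stored = (PositionImpl) positions.get(reader);
// Expect two owned segments: testScope/testStream/0 at offset 9999999 and
// testScope/testStream/1 at offset -1 (nothing read yet).
stored.getOwnedSegmentsWithOffsets().forEach((segment, offset) ->
        System.out.println(segment.getScopedName() + " -> " + offset));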
Use of io.pravega.client.stream.impl.PositionImpl in project pravega by pravega.
The class ZkCheckpointStoreConnectivityTest, method connectivityFailureTests.
@Test
public void connectivityFailureTests() throws IOException {
final String process1 = UUID.randomUUID().toString();
final String readerGroup1 = UUID.randomUUID().toString();
final String reader1 = UUID.randomUUID().toString();
Predicate<Throwable> predicate = e -> e instanceof CheckpointStoreException
        && ((CheckpointStoreException) e).getType().equals(CheckpointStoreException.Type.Connectivity);
AssertExtensions.assertThrows("failed getProcesses", () -> checkpointStore.getProcesses(), predicate);
AssertExtensions.assertThrows("failed addReaderGroup", () -> checkpointStore.addReaderGroup(process1, readerGroup1), predicate);
AssertExtensions.assertThrows("failed addReader", () -> checkpointStore.addReader(process1, readerGroup1, reader1), predicate);
AssertExtensions.assertThrows("failed sealReaderGroup", () -> checkpointStore.sealReaderGroup(process1, readerGroup1), predicate);
AssertExtensions.assertThrows("failed removeReader", () -> checkpointStore.removeReader(process1, readerGroup1, reader1), predicate);
AssertExtensions.assertThrows("failed getPositions", () -> checkpointStore.getPositions(process1, readerGroup1), predicate);
Position position = new PositionImpl(Collections.emptyMap());
AssertExtensions.assertThrows("failed setPosition", () -> checkpointStore.setPosition(process1, readerGroup1, reader1, position), predicate);
AssertExtensions.assertThrows("failed removeReader", () -> checkpointStore.removeReader(process1, readerGroup1, reader1), predicate);
AssertExtensions.assertThrows("failed removeReaderGroup", () -> checkpointStore.removeReaderGroup(process1, readerGroup1), predicate);
}
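The same exception-type predicate recurs throughout these tests; a small hypothetical helper (not in the Pravega codebase) could factor it out:

// Hypothetical helper: matches a CheckpointStoreException of the expected type.
static Predicate<Throwable> checkpointFailureOfType(CheckpointStoreException.Type type) {
    return e -> e instanceof CheckpointStoreException
            && ((CheckpointStoreException) e).getType().equals(type);
}

With this helper, each assertion reduces to, e.g., AssertExtensions.assertThrows("failed getProcesses", () -> checkpointStore.getProcesses(), checkpointFailureOfType(CheckpointStoreException.Type.Connectivity)).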
Use of io.pravega.client.stream.impl.PositionImpl in project pravega by pravega.
The class ControllerEventProcessorsTest, method testTruncate.
@Test(timeout = 10000L)
public void testTruncate() throws CheckpointStoreException, InterruptedException {
LocalController controller = mock(LocalController.class);
CheckpointStore checkpointStore = mock(CheckpointStore.class);
StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
BucketStore bucketStore = mock(BucketStore.class);
ConnectionPool connectionPool = mock(ConnectionPool.class);
StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
KVTableMetadataStore kvtStore = mock(KVTableMetadataStore.class);
TableMetadataTasks kvtTasks = mock(TableMetadataTasks.class);
ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
EventProcessorSystem system = mock(EventProcessorSystem.class);
Map<SegmentWithRange, Long> map1 = new HashMap<>();
map1.put(new SegmentWithRange(new Segment("scope", "stream", 0L), 0.0, 0.33), 10L);
map1.put(new SegmentWithRange(new Segment("scope", "stream", 1L), 0.33, 0.66), 10L);
map1.put(new SegmentWithRange(new Segment("scope", "stream", 2L), 0.66, 1.0), 20L);
Map<SegmentWithRange, Long> map2 = new HashMap<>();
map2.put(new SegmentWithRange(new Segment("scope", "stream", 0L), 0.0, 0.33), 20L);
map2.put(new SegmentWithRange(new Segment("scope", "stream", 2L), 0.66, 1.0), 10L);
Map<SegmentWithRange, Long> map3 = new HashMap<>();
map3.put(new SegmentWithRange(new Segment("scope", "stream", 3L), 0.0, 0.33), 0L);
map3.put(new SegmentWithRange(new Segment("scope", "stream", 4L), 0.33, 0.66), 10L);
map3.put(new SegmentWithRange(new Segment("scope", "stream", 5L), 0.66, 1.0), 20L);
PositionImpl position1 = new PositionImpl(map1);
PositionImpl position2 = new PositionImpl(map2);
PositionImpl position3 = new PositionImpl(map3);
doReturn(getProcessor()).when(system).createEventProcessorGroup(any(), any(), any());
doReturn(CompletableFuture.completedFuture(null)).when(controller).createScope(anyString());
doReturn(CompletableFuture.completedFuture(null)).when(controller).createInternalStream(anyString(), anyString(), any());
doNothing().when(streamMetadataTasks).initializeStreamWriters(any(), anyString());
doNothing().when(streamTransactionMetadataTasks).initializeStreamWriters(any(EventStreamClientFactory.class), any(ControllerEventProcessorConfig.class));
AtomicBoolean requestCalled = new AtomicBoolean(false);
AtomicBoolean commitCalled = new AtomicBoolean(false);
CompletableFuture<Void> requestStreamTruncationFuture = new CompletableFuture<>();
CompletableFuture<Void> kvtStreamTruncationFuture = new CompletableFuture<>();
CompletableFuture<Void> abortStreamTruncationFuture = new CompletableFuture<>();
CompletableFuture<Void> commitStreamTruncationFuture = new CompletableFuture<>();
doAnswer(x -> {
    String argument = x.getArgument(1);
    if (argument.equals(config.getRequestStreamName())) {
        // Let this processor throw an exception on the first call; the truncation should be retried in the next cycle.
        if (!requestCalled.get()) {
            requestCalled.set(true);
            throw new RuntimeException("inducing sporadic failure");
        } else {
            requestStreamTruncationFuture.complete(null);
        }
    } else if (argument.equals(config.getCommitStreamName())) {
        // Let this processor report a failed truncation on the first call; it should also be retried in the next cycle.
        if (commitCalled.get()) {
            commitStreamTruncationFuture.complete(null);
        } else {
            commitCalled.set(true);
            return CompletableFuture.completedFuture(false);
        }
    } else if (argument.equals(config.getAbortStreamName())) {
        abortStreamTruncationFuture.complete(null);
    } else if (argument.equals(config.getKvtStreamName())) {
        kvtStreamTruncationFuture.complete(null);
    }
    return CompletableFuture.completedFuture(true);
}).when(streamMetadataTasks).startTruncation(anyString(), anyString(), any(), any());
Set<String> processes = Sets.newHashSet("p1", "p2", "p3");
// First, make getProcesses throw a CheckpointStoreException until the signal is set.
AtomicBoolean signal = new AtomicBoolean(false);
CountDownLatch cd = new CountDownLatch(4);
doAnswer(x -> {
    // This ensures that the call to truncate has been invoked for all 4 internal streams.
    cd.countDown();
    cd.await();
    if (!signal.get()) {
        throw new CheckpointStoreException("CheckpointStoreException");
    } else {
        return processes;
    }
}).when(checkpointStore).getProcesses();
Map<String, PositionImpl> r1 = Collections.singletonMap("r1", position1);
doReturn(r1).when(checkpointStore).getPositions(eq("p1"), anyString());
Map<String, PositionImpl> r2 = Collections.singletonMap("r2", position1);
doReturn(r2).when(checkpointStore).getPositions(eq("p2"), anyString());
Map<String, PositionImpl> r3 = Collections.singletonMap("r3", position1);
doReturn(r3).when(checkpointStore).getPositions(eq("p3"), anyString());
@Cleanup ControllerEventProcessors processors = new ControllerEventProcessors("host1", config, controller,
        checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks,
        streamTransactionMetadataTasks, kvtStore, kvtTasks, system, executorService());
// set truncation interval
processors.setTruncationInterval(100L);
processors.startAsync();
processors.awaitRunning();
ControllerEventProcessors processorsSpied = spy(processors);
processorsSpied.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtTasks);
// Wait until getProcesses has been invoked (and has thrown) for all 4 internal streams.
cd.await();
verify(processorsSpied, atLeast(4)).truncate(any(), any(), any());
verify(checkpointStore, atLeast(4)).getProcesses();
verify(checkpointStore, never()).getPositions(anyString(), anyString());
verify(streamMetadataTasks, never()).startTruncation(anyString(), anyString(), any(), any());
signal.set(true);
CompletableFuture.allOf(requestStreamTruncationFuture, commitStreamTruncationFuture, abortStreamTruncationFuture, kvtStreamTruncationFuture).join();
// verify that truncate method is being called periodically.
verify(processorsSpied, atLeastOnce()).truncate(config.getRequestStreamName(), config.getRequestReaderGroupName(), streamMetadataTasks);
verify(processorsSpied, atLeastOnce()).truncate(config.getCommitStreamName(), config.getCommitReaderGroupName(), streamMetadataTasks);
verify(processorsSpied, atLeastOnce()).truncate(config.getAbortStreamName(), config.getAbortReaderGroupName(), streamMetadataTasks);
verify(processorsSpied, atLeastOnce()).truncate(config.getKvtStreamName(), config.getKvtReaderGroupName(), streamMetadataTasks);
for (int i = 1; i <= 3; i++) {
    verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getRequestReaderGroupName());
    verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getCommitReaderGroupName());
    verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getAbortReaderGroupName());
    verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getKvtReaderGroupName());
}
}
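The positions built at the top of this test map segments to read offsets; truncation logic of this kind typically derives a stream-cut from the lowest offset per segment across all readers, since truncating past that point would drop events some reader has not yet consumed. A minimal illustrative sketch (not the actual ControllerEventProcessors implementation):

// Illustrative sketch only; the real truncation logic lives in ControllerEventProcessors.
// For each segment, keep the minimum offset observed across all reader positions.
Map<Segment, Long> truncationOffsets = new HashMap<>();
for (PositionImpl pos : Arrays.asList(position1, position2, position3)) {
    pos.getOwnedSegmentsWithOffsets().forEach((segment, offset) ->
            truncationOffsets.merge(segment, offset, Math::min));
}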
Use of io.pravega.client.stream.impl.PositionImpl in project pravega by pravega.
The class ZKCheckpointStoreTests, method failingTests.
@Test(timeout = 30000)
public void failingTests() {
final String process1 = UUID.randomUUID().toString();
final String readerGroup1 = UUID.randomUUID().toString();
final String readerGroup2 = UUID.randomUUID().toString();
final String reader1 = UUID.randomUUID().toString();
cli.close();
Predicate<Throwable> predicate = e -> e instanceof CheckpointStoreException && e.getCause() instanceof IllegalStateException;
AssertExtensions.assertThrows("failed getProcesses", () -> checkpointStore.getProcesses(), predicate);
AssertExtensions.assertThrows("failed addReaderGroup", () -> checkpointStore.addReaderGroup(process1, readerGroup1), predicate);
AssertExtensions.assertThrows("failed getReaderGroups", () -> checkpointStore.getReaderGroups(process1), predicate);
AssertExtensions.assertThrows("failed addReader", () -> checkpointStore.addReader(process1, readerGroup1, reader1), predicate);
Position position = new PositionImpl(Collections.emptyMap());
AssertExtensions.assertThrows("failed setPosition", () -> checkpointStore.setPosition(process1, readerGroup1, reader1, position), predicate);
AssertExtensions.assertThrows("failed getPositions", () -> checkpointStore.getPositions(process1, readerGroup1), predicate);
AssertExtensions.assertThrows("failed sealReaderGroup", () -> checkpointStore.sealReaderGroup(process1, readerGroup2), predicate);
AssertExtensions.assertThrows("failed removeReader", () -> checkpointStore.removeReader(process1, readerGroup1, reader1), predicate);
AssertExtensions.assertThrows("failed removeReaderGroup", () -> checkpointStore.removeReaderGroup(process1, readerGroup1), predicate);
}
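As used above, a PositionImpl built from an empty map models a reader that currently owns no segments. A quick sanity sketch:

// Sketch: an empty PositionImpl owns no segments, so its offset map is empty.
PositionImpl empty = new PositionImpl(Collections.emptyMap());
assert empty.getOwnedSegmentsWithOffsets().isEmpty();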
Use of io.pravega.client.stream.impl.PositionImpl in project pravega by pravega.
The class ReadTest, method testEventPositions.
/**
 * This test performs concurrent writes, reads and position checks on a Stream. Readers and checkers exercise the
 * lazy construction of PositionImpl objects while the internal segmentOffsetUpdates list in EventStreamReaderImpl
 * is being updated by newly read events. The test generates enough events for the segmentOffsetUpdates list in
 * EventStreamReaderImpl to be filled and cleaned at least once. It verifies the thread safety of the
 * optimization in EventStreamReaderImpl that avoids generating segmentOffset maps on every event read, as well as
 * the correctness of the segment offsets returned by PositionImpl.
 */
@Test(timeout = 60000)
public void testEventPositions() {
String endpoint = "localhost";
String streamName = "eventPositions";
String readerGroup = "groupPositions";
String scope = "scopePositions";
// Generate enough events for the internal segment offset update buffer in EventStreamReaderImpl to be
// emptied and filled again.
int eventsToWrite = 2000;
BlockingQueue<Entry<Integer, PositionImpl>> readEventsPositions = new ArrayBlockingQueue<>(eventsToWrite);
@Cleanup("shutdown") ScheduledExecutorService readersWritersAndCheckers = ExecutorServiceHelpers.newScheduledThreadPool(4, "readers-writers-checkers");
AtomicInteger finishedProcesses = new AtomicInteger(0);
int port = TestUtils.getAvailableListenPort();
StreamSegmentStore store = SERVICE_BUILDER.createStreamSegmentService();
TableStore tableStore = SERVICE_BUILDER.createTableStoreService();
@Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, NoOpScheduledExecutor.get());
server.startListening();
@Cleanup MockStreamManager streamManager = new MockStreamManager(scope, endpoint, port);
@Cleanup MockClientFactory clientFactory = streamManager.getClientFactory();
ReaderGroupConfig groupConfig = ReaderGroupConfig.builder().groupRefreshTimeMillis(1000).stream(Stream.of(scope, streamName)).build();
streamManager.createScope(scope);
// Create a Stream with 2 segments.
streamManager.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build());
streamManager.createReaderGroup(readerGroup, groupConfig);
JavaSerializer<String> serializer = new JavaSerializer<>();
@Cleanup EventStreamWriter<String> producer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
@Cleanup EventStreamReader<String> reader1 = clientFactory.createReader("reader1", readerGroup, serializer, ReaderConfig.builder().build());
@Cleanup EventStreamReader<String> reader2 = clientFactory.createReader("reader2", readerGroup, serializer, ReaderConfig.builder().build());
// Leave some time for readers to re-balance the segments and acquire one each.
Exceptions.handleInterrupted(() -> Thread.sleep(2000));
// Start writers and readers in parallel.
CompletableFuture<Void> reader1Future = CompletableFuture.runAsync(() -> {
    readAndQueueEvents(reader1, eventsToWrite, readEventsPositions);
    finishedProcesses.incrementAndGet();
}, readersWritersAndCheckers);
CompletableFuture<Void> reader2Future = CompletableFuture.runAsync(() -> {
    readAndQueueEvents(reader2, eventsToWrite, readEventsPositions);
    finishedProcesses.incrementAndGet();
}, readersWritersAndCheckers);
CompletableFuture<Void> writerFuture = CompletableFuture.runAsync(() -> {
    for (int i = 0; i < eventsToWrite; i++) {
        producer.writeEvent("segment1", "a");
        producer.writeEvent("segment2", "b");
        Exceptions.handleInterrupted(() -> Thread.sleep(1));
    }
    finishedProcesses.incrementAndGet();
}, readersWritersAndCheckers);
// This task accesses the positions read by the reader threads, which means that it is concurrently
// accessing the shared segmentOffsetUpdates list while the readers are appending data to it.
CompletableFuture<Void> checkOffsets = CompletableFuture.runAsync(() -> {
    // Each event carries a 1-character string payload, which the test assumes amounts to 16 bytes per event.
    int sizeOfEvent = 16;
    while (finishedProcesses.get() < 2) {
        Entry<Integer, PositionImpl> element;
        try {
            element = readEventsPositions.poll(1, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        int numberOfSegments = element.getValue().getOwnedSegmentsWithOffsets().size();
        assertEquals("Reader owning too many segments.", 1, numberOfSegments);
        // The segment offset should increase by sizeOfEvent with every event read.
        long segmentPositionOffset = element.getValue().getOwnedSegmentsWithOffsets().values().iterator().next();
        assertEquals("Wrong event position", sizeOfEvent * element.getKey(), segmentPositionOffset);
    }
    finishedProcesses.incrementAndGet();
}, readersWritersAndCheckers);
// Wait for all futures to complete.
CompletableFuture.allOf(writerFuture, reader1Future, reader2Future, checkOffsets).join();
// Any failure reading, writing or checking positions will make this assertion fail.
assertEquals(4, finishedProcesses.get());
ExecutorServiceHelpers.shutdown(readersWritersAndCheckers);
}
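The readAndQueueEvents helper invoked by both reader tasks is not included in this excerpt. A hypothetical reconstruction consistent with its usage above (reading events and queuing each per-reader event count together with the reader's current position) might look like this:

// Hypothetical reconstruction; the actual helper lives in ReadTest and is not shown here.
private void readAndQueueEvents(EventStreamReader<String> reader, int eventsToWrite,
                                BlockingQueue<Entry<Integer, PositionImpl>> queue) {
    int eventCount = 1;
    while (eventCount <= eventsToWrite) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null) {
            // getPosition() exercises the lazy construction of PositionImpl under test.
            queue.add(new AbstractMap.SimpleEntry<>(eventCount, (PositionImpl) event.getPosition()));
            eventCount++;
        }
    }
}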