Use of io.pravega.client.admin.StreamInfo in project pravega by pravega.
The class StreamManagerImplTest, method testSealedStream.
@Test(timeout = 10000)
public void testSealedStream() throws ConnectionFailedException {
    final String streamName = "stream";
    final Stream stream = new StreamImpl(defaultScope, streamName);
    // Set up mocks.
    ClientConnection connection = mock(ClientConnection.class);
    PravegaNodeUri location = new PravegaNodeUri("localhost", 0);
    Mockito.doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            WireCommands.CreateSegment request = (WireCommands.CreateSegment) invocation.getArgument(0);
            connectionFactory.getProcessor(location)
                             .process(new WireCommands.SegmentCreated(request.getRequestId(), request.getSegment()));
            return null;
        }
    }).when(connection).send(Mockito.any(WireCommands.CreateSegment.class));
    Mockito.doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            WireCommands.GetStreamSegmentInfo request = (WireCommands.GetStreamSegmentInfo) invocation.getArgument(0);
            connectionFactory.getProcessor(location)
                             .process(new WireCommands.StreamSegmentInfo(request.getRequestId(), request.getSegmentName(), true, false, false, 0, 0, 0));
            return null;
        }
    }).when(connection).send(Mockito.any(WireCommands.GetStreamSegmentInfo.class));
    connectionFactory.provideConnection(location, connection);
    MockController mockController = spy(new MockController(location.getEndpoint(), location.getPort(), connectionFactory, true));
    doReturn(CompletableFuture.completedFuture(true)).when(mockController).sealStream(defaultScope, streamName);
    StreamSegments empty = new StreamSegments(new TreeMap<>());
    doReturn(CompletableFuture.completedFuture(empty)).when(mockController).getCurrentSegments(defaultScope, streamName);
    ConnectionPoolImpl pool = new ConnectionPoolImpl(ClientConfig.builder().maxConnectionsPerSegmentStore(1).build(), connectionFactory);
    // Create a StreamManager.
    @Cleanup
    final StreamManager streamManager = new StreamManagerImpl(mockController, pool);
    // Create a scope and a stream, then seal the stream.
    streamManager.createScope(defaultScope);
    streamManager.createStream(defaultScope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).build());
    streamManager.sealStream(defaultScope, streamName);
    // Fetch StreamInfo.
    StreamInfo info = streamManager.getStreamInfo(defaultScope, streamName);
    // Validate results: a sealed stream has an empty tail stream cut but keeps its head stream cut.
    assertEquals(defaultScope, info.getScope());
    assertEquals(streamName, info.getStreamName());
    assertNotNull(info.getTailStreamCut());
    assertEquals(stream, info.getTailStreamCut().asImpl().getStream());
    assertEquals(0, info.getTailStreamCut().asImpl().getPositions().size());
    assertNotNull(info.getHeadStreamCut());
    assertEquals(stream, info.getHeadStreamCut().asImpl().getStream());
    assertEquals(3, info.getHeadStreamCut().asImpl().getPositions().size());
    assertTrue(info.isSealed());
}
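The test above checks that a sealed stream reports isSealed() == true and an empty tail stream cut, while the head stream cut still lists all three segments. Below is a minimal sketch of using the same StreamManager API outside a test, for example to delete a stream only once it is sealed. The scope, stream name, and controller URI are hypothetical, and getStreamInfo() is deprecated in recent client versions, which is why several tests on this page carry @SuppressWarnings("deprecation").

import io.pravega.client.ClientConfig;
import io.pravega.client.admin.StreamInfo;
import io.pravega.client.admin.StreamManager;
import java.net.URI;

public class SealedStreamCheck {

    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        ClientConfig config = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090")) // hypothetical controller endpoint
                .build();
        try (StreamManager streamManager = StreamManager.create(config)) {
            StreamInfo info = streamManager.getStreamInfo("myScope", "myStream");
            if (info.isSealed()) {
                // A sealed stream accepts no further writes and may safely be deleted.
                streamManager.deleteStream("myScope", "myStream");
            }
        }
    }
}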
Use of io.pravega.client.admin.StreamInfo in project pravega by pravega.
The class BatchClientTest, method testBatchClientWithStreamTruncation.
@Test(timeout = 50000)
@SuppressWarnings("deprecation")
public void testBatchClientWithStreamTruncation() throws InterruptedException, ExecutionException {
    @Cleanup
    StreamManager streamManager = StreamManager.create(clientConfig);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    createTestStreamWithEvents(clientFactory);
    log.info("Done creating a test stream with test events");
    @Cleanup
    BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);
    log.info("Done creating batch client factory");
    // 1. Create a StreamCut after 2 events (offset = 2 * 30 = 60).
    StreamCut streamCut60L = new StreamCutImpl(Stream.of(SCOPE, STREAM), ImmutableMap.of(new Segment(SCOPE, STREAM, 0), 60L));
    // 2. Truncate the stream.
    assertTrue("truncate stream", controllerWrapper.getController().truncateStream(SCOPE, STREAM, streamCut60L).join());
    // 3a. Fetch segments using StreamCut.UNBOUNDED.
    ArrayList<SegmentRange> segmentsPostTruncation1 = Lists.newArrayList(batchClient.getSegments(Stream.of(SCOPE, STREAM), StreamCut.UNBOUNDED, StreamCut.UNBOUNDED).getIterator());
    // 3b. Fetch segments using the getStreamInfo() API.
    StreamInfo streamInfo = streamManager.getStreamInfo(SCOPE, STREAM);
    ArrayList<SegmentRange> segmentsPostTruncation2 = Lists.newArrayList(batchClient.getSegments(Stream.of(SCOPE, STREAM), streamInfo.getHeadStreamCut(), streamInfo.getTailStreamCut()).getIterator());
    // Validate that both approaches return the same post-truncation segments and events.
    validateSegmentCountAndEventCount(batchClient, segmentsPostTruncation1);
    validateSegmentCountAndEventCount(batchClient, segmentsPostTruncation2);
}
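The validateSegmentCountAndEventCount() helper is defined elsewhere in the test class and is not shown here. Below is a minimal sketch of counting the events contained in the returned SegmentRanges via BatchClientFactory.readSegment(), assuming the test events were written with a JavaSerializer<String>; the class and method names are hypothetical.

import io.pravega.client.BatchClientFactory;
import io.pravega.client.batch.SegmentIterator;
import io.pravega.client.batch.SegmentRange;
import io.pravega.client.stream.impl.JavaSerializer;
import java.util.List;

public final class SegmentCounter {

    // Count every event reachable through the given segment ranges.
    public static int countEvents(BatchClientFactory batchClient, List<SegmentRange> ranges) {
        int count = 0;
        for (SegmentRange range : ranges) {
            // readSegment() iterates the events between the range's start and end offsets.
            try (SegmentIterator<String> iterator = batchClient.readSegment(range, new JavaSerializer<String>())) {
                while (iterator.hasNext()) {
                    iterator.next();
                    count++;
                }
            }
        }
        return count;
    }
}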
Use of io.pravega.client.admin.StreamInfo in project pravega by pravega.
The class StreamManagerImplTest, method testStreamInfo.
@Test(timeout = 15000)
public void testStreamInfo() throws Exception {
    final String streamName = "stream";
    final Stream stream = new StreamImpl(defaultScope, streamName);
    // Set up mocks.
    ClientConnection connection = mock(ClientConnection.class);
    PravegaNodeUri location = new PravegaNodeUri("localhost", 0);
    Mockito.doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            WireCommands.CreateSegment request = (WireCommands.CreateSegment) invocation.getArgument(0);
            connectionFactory.getProcessor(location)
                             .process(new WireCommands.SegmentCreated(request.getRequestId(), request.getSegment()));
            return null;
        }
    }).when(connection).send(Mockito.any(WireCommands.CreateSegment.class));
    Mockito.doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            WireCommands.GetStreamSegmentInfo request = (WireCommands.GetStreamSegmentInfo) invocation.getArgument(0);
            connectionFactory.getProcessor(location)
                             .process(new WireCommands.StreamSegmentInfo(request.getRequestId(), request.getSegmentName(), true, false, false, 0, 0, 0));
            return null;
        }
    }).when(connection).send(Mockito.any(WireCommands.GetStreamSegmentInfo.class));
    connectionFactory.provideConnection(location, connection);
    MockController mockController = new MockController(location.getEndpoint(), location.getPort(), connectionFactory, true);
    ConnectionPoolImpl pool = new ConnectionPoolImpl(ClientConfig.builder().maxConnectionsPerSegmentStore(1).build(), connectionFactory);
    @Cleanup
    final StreamManager streamManager = new StreamManagerImpl(mockController, pool);
    streamManager.createScope(defaultScope);
    streamManager.createStream(defaultScope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).build());
    // Fetch StreamInfo.
    StreamInfo info = streamManager.getStreamInfo(defaultScope, streamName);
    // Validate results: an open stream with three fixed segments has three positions in both stream cuts.
    assertEquals(defaultScope, info.getScope());
    assertEquals(streamName, info.getStreamName());
    assertNotNull(info.getTailStreamCut());
    assertEquals(stream, info.getTailStreamCut().asImpl().getStream());
    assertEquals(3, info.getTailStreamCut().asImpl().getPositions().size());
    assertNotNull(info.getHeadStreamCut());
    assertEquals(stream, info.getHeadStreamCut().asImpl().getStream());
    assertEquals(3, info.getHeadStreamCut().asImpl().getPositions().size());
    assertFalse(info.isSealed());
}
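Both stream cuts above carry one offset per open segment, matching ScalingPolicy.fixed(3). Below is a minimal, illustrative sketch of inspecting those per-segment positions through the same asImpl() view the assertions use; the class and method names are hypothetical.

import io.pravega.client.stream.Segment;
import io.pravega.client.stream.StreamCut;
import java.util.Map;

public final class StreamCutInspector {

    // Print each segment's offset recorded in a StreamCut.
    public static void printPositions(StreamCut cut) {
        // asImpl() exposes the client-internal representation of the stream cut.
        Map<Segment, Long> positions = cut.asImpl().getPositions();
        for (Map.Entry<Segment, Long> entry : positions.entrySet()) {
            System.out.printf("segment %s -> offset %d%n", entry.getKey().getScopedName(), entry.getValue());
        }
    }
}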
Use of io.pravega.client.admin.StreamInfo in project pravega by pravega.
The class BatchClientSimpleTest, method batchClientSimpleTest.
/**
* This test verifies the basic functionality of {@link BatchClientFactory}, including stream metadata checks, segment
* counts, parallel segment reads and reads with offsets using stream cuts.
*/
@Test
@SuppressWarnings("deprecation")
public void batchClientSimpleTest() {
    final int totalEvents = RG_PARALLELISM * 100;
    final int offsetEvents = RG_PARALLELISM * 20;
    final int batchIterations = 4;
    final Stream stream = Stream.of(SCOPE, STREAM);
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), connectionFactory.getInternalExecutor());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE, controller, connectionFactory);
    @Cleanup
    BatchClientFactory batchClient = BatchClientFactory.withScope(SCOPE, clientConfig);
    log.info("Invoking batchClientSimpleTest test with Controller URI: {}", controllerURI);
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(SCOPE + "/" + STREAM).build());
    ReaderGroup readerGroup = groupManager.getReaderGroup(READER_GROUP);
    log.info("Writing events to stream");
    // Write events to the Stream.
    writeEvents(clientFactory, STREAM, totalEvents);
    // Instantiate readers to consume from the Stream up to offsetEvents.
    List<CompletableFuture<Integer>> futures = readEventFutures(clientFactory, READER_GROUP, RG_PARALLELISM, offsetEvents);
    Futures.allOf(futures).join();
    // Create a stream cut at the specified offset position.
    Checkpoint cp = readerGroup.initiateCheckpoint("batchClientCheckpoint", executor).join();
    StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
    // Assert that getStreamInfo() provides correct stream metadata (streamManager is a field of the test class).
    log.debug("Creating batch client.");
    StreamInfo streamInfo = streamManager.getStreamInfo(SCOPE, stream.getStreamName());
    log.debug("Validating stream metadata fields.");
    assertEquals("Expected Stream name: ", STREAM, streamInfo.getStreamName());
    assertEquals("Expected Scope name: ", SCOPE, streamInfo.getScope());
    // Test that we can read events from parallel segments from an offset onwards.
    log.debug("Reading events from stream cut onwards in parallel.");
    List<SegmentRange> ranges = Lists.newArrayList(batchClient.getSegments(stream, streamCut, StreamCut.UNBOUNDED).getIterator());
    assertEquals("Expected events read: ", totalEvents - offsetEvents, readFromRanges(ranges, batchClient));
    // Emulate the behavior of a Hadoop client: i) get the tail of the Stream, ii) read from the current point up to the tail, iii) repeat.
    log.debug("Reading in batch iterations.");
    StreamCut currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
    int readEvents = 0;
    for (int i = 0; i < batchIterations; i++) {
        writeEvents(clientFactory, STREAM, totalEvents);
        // Read all the existing events in parallel segments from the previous tail to the current one.
        ranges = Lists.newArrayList(batchClient.getSegments(stream, currentTailStreamCut, StreamCut.UNBOUNDED).getIterator());
        assertEquals("Expected number of segments: ", RG_PARALLELISM, ranges.size());
        readEvents += readFromRanges(ranges, batchClient);
        log.debug("Events read in parallel so far: {}.", readEvents);
        currentTailStreamCut = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getTailStreamCut();
    }
    assertEquals("Expected events read: ", totalEvents * batchIterations, readEvents);
    // Truncate the stream.
    log.debug("Truncating stream at event {}.", offsetEvents);
    assertTrue(controller.truncateStream(SCOPE, STREAM, streamCut).join());
    // Test the batch client when reading a Stream starting from a truncation point.
    StreamCut initialPosition = streamManager.getStreamInfo(SCOPE, stream.getStreamName()).getHeadStreamCut();
    List<SegmentRange> newRanges = Lists.newArrayList(batchClient.getSegments(stream, initialPosition, StreamCut.UNBOUNDED).getIterator());
    assertEquals("Expected events read: ", (totalEvents - offsetEvents) + totalEvents * batchIterations, readFromRanges(newRanges, batchClient));
    log.debug("Events correctly read from Stream: simple batch client test passed.");
}
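readFromRanges() is another helper of the test class that is not shown above. Below is a minimal sketch of what such a helper might look like, reading all ranges in parallel and assuming String events; the parallelStream() strategy and the names are assumptions, and the real test may use an executor instead.

import io.pravega.client.BatchClientFactory;
import io.pravega.client.batch.SegmentIterator;
import io.pravega.client.batch.SegmentRange;
import io.pravega.client.stream.impl.JavaSerializer;
import java.util.List;

public final class RangeReader {

    // Read every segment range in parallel and return the total number of events seen.
    public static int readFromRanges(List<SegmentRange> ranges, BatchClientFactory batchClient) {
        return ranges.parallelStream().mapToInt(range -> {
            int events = 0;
            try (SegmentIterator<String> iterator = batchClient.readSegment(range, new JavaSerializer<String>())) {
                while (iterator.hasNext()) {
                    iterator.next();
                    events++;
                }
            }
            return events;
        }).sum();
    }
}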
Use of io.pravega.client.admin.StreamInfo in project pravega by pravega.
The class StreamRecreationTest, method testStreamRecreation.
@Test(timeout = 60000)
@SuppressWarnings("deprecation")
public void testStreamRecreation() throws Exception {
    final String myScope = "myScope";
    final String myStream = "myStream";
    final String myReaderGroup = "myReaderGroup";
    final int numIterations = 6;
    // Create the scope and the stream.
    @Cleanup
    StreamManager streamManager = StreamManager.create(controllerURI);
    streamManager.createScope(myScope);
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(myScope, controllerURI);
    final ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder().stream(Stream.of(myScope, myStream)).build();
    for (int i = 0; i < numIterations; i++) {
        log.info("Stream re-creation iteration {}.", i);
        final String eventContent = "myEvent" + String.valueOf(i);
        StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(i + 1)).build();
        EventWriterConfig eventWriterConfig = EventWriterConfig.builder().build();
        streamManager.createStream(myScope, myStream, streamConfiguration);
        // Write a single event.
        @Cleanup
        EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(myScope, ClientConfig.builder().controllerURI(controllerURI).build());
        @Cleanup
        EventStreamWriter<String> writer = clientFactory.createEventWriter(myStream, new JavaSerializer<>(), eventWriterConfig);
        TransactionalEventStreamWriter<String> txnWriter = clientFactory.createTransactionalEventWriter(myStream, new JavaSerializer<>(), eventWriterConfig);
        // Alternate between regular and transactional writes.
        if (i % 2 == 0) {
            writer.writeEvent(eventContent).join();
        } else {
            Transaction<String> myTransaction = txnWriter.beginTxn();
            myTransaction.writeEvent(eventContent);
            myTransaction.commit();
            while (myTransaction.checkStatus() != Transaction.Status.COMMITTED) {
                Exceptions.handleInterrupted(() -> Thread.sleep(100));
            }
        }
        writer.close();
        // Read the event back.
        readerGroupManager.createReaderGroup(myReaderGroup, readerGroupConfig);
        readerGroupManager.getReaderGroup(myReaderGroup).resetReaderGroup(readerGroupConfig);
        @Cleanup
        EventStreamReader<String> reader = clientFactory.createReader("myReader", myReaderGroup, new JavaSerializer<>(), ReaderConfig.builder().build());
        String readResult;
        do {
            readResult = reader.readNextEvent(1000).getEvent();
        } while (readResult == null);
        assertEquals("Wrong event read in re-created stream", eventContent, readResult);
        // Seal and delete the stream so it can be re-created in the next iteration.
        StreamInfo streamInfo = streamManager.getStreamInfo(myScope, myStream);
        assertFalse(streamInfo.isSealed());
        assertTrue("Unable to seal re-created stream.", streamManager.sealStream(myScope, myStream));
        streamInfo = streamManager.getStreamInfo(myScope, myStream);
        assertTrue(streamInfo.isSealed());
        assertTrue("Unable to delete re-created stream.", streamManager.deleteStream(myScope, myStream));
    }
}
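Each iteration seals the stream before deleting it because the controller rejects deletion of an unsealed stream. Below is a minimal sketch of that teardown as a reusable helper; the class and method names are hypothetical.

import io.pravega.client.admin.StreamManager;

public final class StreamTeardown {

    // Seal the stream if it is still open, then delete it.
    @SuppressWarnings("deprecation")
    public static boolean sealAndDelete(StreamManager streamManager, String scope, String stream) {
        if (!streamManager.getStreamInfo(scope, stream).isSealed()) {
            streamManager.sealStream(scope, stream);
        }
        // deleteStream() returns true when the stream was deleted.
        return streamManager.deleteStream(scope, stream);
    }
}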