use of io.pravega.client.admin.StreamManager in project pravega by pravega.
the class WriteBatchTest method readWriteTest.
@Test(timeout = 60000)
public void readWriteTest() throws InterruptedException, ExecutionException {
    String scope = "testBatchWrite";
    String readerGroupName = "testBatchWriteRG";
    // 20 readers -> 20 stream segments (to have max read parallelism)
    ScalingPolicy scalingPolicy = ScalingPolicy.fixed(20);
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
    ConcurrentLinkedQueue<Long> eventsReadFromPravega = new ConcurrentLinkedQueue<>();
    AtomicLong eventData = new AtomicLong();
    AtomicLong eventReadCount = new AtomicLong();
    AtomicBoolean stopReadFlag = new AtomicBoolean(false);
    ClientConfig clientConfig = ClientConfig.builder().build();
    try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
         StreamManager streamManager = new StreamManagerImpl(controller, cp)) {
        // create a scope
        Boolean createScopeStatus = streamManager.createScope(scope);
        log.info("Create scope status {}", createScopeStatus);
        // create a stream
        Boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
        log.info("Create stream status {}", createStreamStatus);
    }
    try (ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
         ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
         ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory)) {
        // start writing events to the stream
        log.info("Creating {} writers", NUM_WRITERS);
        List<CompletableFuture<Void>> writerList = new ArrayList<>();
        for (int i = 0; i < NUM_WRITERS; i++) {
            log.info("Starting writer {}", i);
            writerList.add(startNewWriter(eventData, clientFactory));
        }
        // create a reader group
        log.info("Creating reader group: {}", readerGroupName);
        readerGroupManager.createReaderGroup(readerGroupName,
                ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).build());
        log.info("Reader group name {}", readerGroupManager.getReaderGroup(readerGroupName).getGroupName());
        log.info("Reader group scope {}", readerGroupManager.getReaderGroup(readerGroupName).getScope());
        // create readers
        log.info("Creating {} readers", NUM_READERS);
        List<CompletableFuture<Void>> readerList = new ArrayList<>();
        String readerName = "reader" + RandomFactory.create().nextInt(Integer.MAX_VALUE);
        // start reading events
        for (int i = 0; i < NUM_READERS; i++) {
            log.info("Starting reader {}", i);
            readerList.add(startNewReader(readerName + i, clientFactory, readerGroupName,
                    eventsReadFromPravega, eventData, eventReadCount, stopReadFlag));
        }
        // wait for the writers to complete
        Futures.allOf(writerList).get();
        // set the stop-read flag to true
        stopReadFlag.set(true);
        // wait for the readers to complete
        Futures.allOf(readerList).get();
        ExecutorServiceHelpers.shutdown(writerPool);
        ExecutorServiceHelpers.shutdown(readerPool);
        // delete the reader group
        log.info("Deleting reader group {}", readerGroupName);
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count: {}, Event Read Count: {}",
            eventData.get(), eventsReadFromPravega.size());
    assertEquals(totalNumberOfEvents.get(), eventsReadFromPravega.size());
    // check that all read events are unique
    assertEquals(totalNumberOfEvents.get(), new TreeSet<>(eventsReadFromPravega).size());
    // seal the stream
    CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
    log.info("Sealing stream {}", STREAM_NAME);
    assertTrue(sealStreamStatus.get());
    // delete the stream
    CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
    log.info("Deleting stream {}", STREAM_NAME);
    assertTrue(deleteStreamStatus.get());
    // delete the scope
    CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
    log.info("Deleting scope {}", scope);
    assertTrue(deleteScopeStatus.get());
    log.info("Read write test succeeds");
}
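The startNewWriter and startNewReader helpers are defined elsewhere in the test class and are not part of this snippet. A minimal sketch of what startNewReader might look like, inferred purely from its call site above (the readerPool executor, the loop structure, and the exit condition are assumptions, not the actual Pravega test code):
    private CompletableFuture<Void> startNewReader(final String readerId, final ClientFactoryImpl clientFactory,
                                                   final String readerGroupName,
                                                   final ConcurrentLinkedQueue<Long> readResult,
                                                   final AtomicLong writeCount, final AtomicLong readCount,
                                                   final AtomicBoolean exitFlag) {
        return CompletableFuture.runAsync(() -> {
            @Cleanup
            EventStreamReader<Long> reader = clientFactory.createReader(readerId, readerGroupName,
                    new JavaSerializer<>(), ReaderConfig.builder().build());
            // Keep reading until the writers are done and every written event has been read.
            while (!(exitFlag.get() && readCount.get() == writeCount.get())) {
                Long event = reader.readNextEvent(1000).getEvent();
                if (event != null) {
                    readResult.add(event);
                    readCount.incrementAndGet();
                }
            }
        }, readerPool);
    }
Each reader future completes only once the stop flag is set and the read counter catches up with the write counter, which is why the test can simply join all reader futures after flipping stopReadFlag.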
use of io.pravega.client.admin.StreamManager in project pravega by pravega.
the class EndToEndTruncationTest method testTruncateOnSealedStream.
@Test(timeout = 50000)
public void testTruncateOnSealedStream() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(4)).build();
    String streamName = "testTruncateOnSealedStream";
    @Cleanup
    StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    String scope = "test";
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, config);
    LocalController controller = (LocalController) PRAVEGA.getLocalController();
    // Seal the stream.
    assertTrue(controller.sealStream(scope, streamName).get());
    Map<Long, Long> streamCutPositions = new HashMap<>();
    streamCutPositions.put(computeSegmentId(2, 1), 0L);
    streamCutPositions.put(computeSegmentId(3, 1), 0L);
    streamCutPositions.put(computeSegmentId(4, 1), 0L);
    // An attempt to truncate a sealed stream should complete exceptionally.
    assertFutureThrows("Should throw UnsupportedOperationException",
            controller.truncateStream(scope, streamName, streamCutPositions),
            e -> UnsupportedOperationException.class.isAssignableFrom(e.getClass()));
}
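The computeSegmentId(segmentNumber, epoch) calls above build the long segment ids that key the stream-cut map. As a rough sketch of that encoding (Pravega packs the creation epoch into the high bits of the id; the real helper lives in Pravega's NameUtils):
    // Sketch of the assumed segment-id encoding: creation epoch in the high
    // 32 bits, segment number in the low 32 bits.
    static long computeSegmentId(int segmentNumber, int epoch) {
        return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
    }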
use of io.pravega.client.admin.StreamManager in project pravega by pravega.
the class EndToEndTruncationTest method testSimpleOffsetTruncation.
/**
 * This test checks the basic operation of truncation with offsets. The test first writes two events to a Stream
 * (1 segment) and then truncates the Stream after the first event. We verify that a new reader first gets a
 * TruncatedDataException and then reads only the second event written, as the first has been truncated.
 *
 * @throws ReinitializationRequiredException If a checkpoint or reset is performed on the reader group.
 */
@Test(timeout = 30000)
public void testSimpleOffsetTruncation() throws ReinitializationRequiredException {
    final String scope = "truncationTests";
    final String streamName = "testSimpleOffsetTruncation";
    final String readerGroupName = "RGTestSimpleOffsetTruncation";
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    @Cleanup
    StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, streamConfiguration);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(readerGroupName,
            ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(scope + "/" + streamName).build());
    @Cleanup
    ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);
    // Write two events to the Stream.
    writeEvents(clientFactory, streamName, 2);
    // Read only the first one.
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader(readerGroupName + "1", readerGroupName,
            new UTF8StringSerializer(), ReaderConfig.builder().build());
    assertEquals("0", reader.readNextEvent(5000).getEvent());
    reader.close();
    // Create a Checkpoint, get a StreamCut from it and truncate the Stream at that point.
    Checkpoint cp = readerGroup.initiateCheckpoint("myCheckpoint", executorService()).join();
    StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
    assertTrue(streamManager.truncateStream(scope, streamName, streamCut));
    // Verify that a new reader reads from event 1 onwards.
    final String newReaderGroupName = readerGroupName + "new";
    groupManager.createReaderGroup(newReaderGroupName,
            ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
    @Cleanup
    final EventStreamReader<String> newReader = clientFactory.createReader(newReaderGroupName + "2",
            newReaderGroupName, new UTF8StringSerializer(), ReaderConfig.builder().build());
    assertEquals("Expected read event: ", "1", newReader.readNextEvent(5000).getEvent());
    assertNull(newReader.readNextEvent(5000).getEvent());
}
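writeEvents is a test helper that is not part of the snippet. Based on the assertions above (the readers see the decimal strings "0" and "1"), a plausible sketch:
    private void writeEvents(EventStreamClientFactory clientFactory, String streamName, int totalEvents) {
        @Cleanup
        EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
                new UTF8StringSerializer(), EventWriterConfig.builder().build());
        for (int i = 0; i < totalEvents; i++) {
            // Events are the decimal strings "0", "1", ..., matching the assertions above.
            writer.writeEvent(String.valueOf(i)).join();
        }
    }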
use of io.pravega.client.admin.StreamManager in project pravega by pravega.
the class EndToEndTruncationTest method testParallelSegmentOffsetTruncation.
/**
 * This test verifies that truncation works when specifying an offset that applies to multiple segments. To this end,
 * the test first writes a set of events to a Stream (with multiple segments) and truncates it at a specified offset
 * (truncatedEvents). The test asserts that readers get a TruncatedDataException after truncation and then read
 * (only) the remaining events that have not been truncated.
 */
@Test(timeout = 600000)
public void testParallelSegmentOffsetTruncation() {
    final String scope = "truncationTests";
    final String streamName = "testParallelSegmentOffsetTruncation";
    final int parallelism = 2;
    final int totalEvents = 100;
    final int truncatedEvents = 25;
    StreamConfiguration streamConf = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(parallelism)).build();
    @Cleanup
    StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    @Cleanup
    ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, PRAVEGA.getControllerURI());
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope,
            ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    streamManager.createScope(scope);
    // Test truncation on both a newly created stream and a re-created stream.
    for (int i = 0; i < 2; i++) {
        final String readerGroupName = "RGTestParallelSegmentOffsetTruncation" + i;
        streamManager.createStream(scope, streamName, streamConf);
        groupManager.createReaderGroup(readerGroupName,
                ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(scope, streamName)).build());
        @Cleanup
        ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);
        // Write events to the Stream.
        writeEvents(clientFactory, streamName, totalEvents);
        // Instantiate readers to consume from the Stream up to truncatedEvents.
        List<CompletableFuture<Integer>> futures = ReadWriteUtils.readEvents(clientFactory, readerGroupName, parallelism, truncatedEvents);
        Futures.allOf(futures).join();
        int eventsReadBeforeTruncation = futures.stream().map(CompletableFuture::join).reduce(Integer::sum).get();
        // Perform truncation on the stream.
        Checkpoint cp = readerGroup.initiateCheckpoint("myCheckpoint" + i, executorService()).join();
        StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
        assertTrue(streamManager.truncateStream(scope, streamName, streamCut));
        // Just after the truncation, trying to read the whole stream should raise a TruncatedDataException.
        final String newGroupName = readerGroupName + "new";
        groupManager.createReaderGroup(newGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
        futures = readEvents(clientFactory, newGroupName, parallelism);
        Futures.allOf(futures).join();
        assertEquals("Expected read events: ", totalEvents - eventsReadBeforeTruncation,
                (int) futures.stream().map(CompletableFuture::join).reduce(Integer::sum).get());
        assertTrue(streamManager.sealStream(scope, streamName));
        assertTrue(streamManager.deleteStream(scope, streamName));
    }
}
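ReadWriteUtils.readEvents is likewise a helper outside this snippet: it spawns one reader per future, each future completing with the number of events that reader consumed (the four-argument variant additionally caps how many events are read). A hedged sketch of the unbounded variant used after truncation; the TruncatedDataException handling and exit condition are assumptions based on the comments above, not the actual utility code:
    static List<CompletableFuture<Integer>> readEvents(EventStreamClientFactory clientFactory,
                                                       String readerGroup, int numReaders) {
        List<CompletableFuture<Integer>> futures = new ArrayList<>();
        for (int i = 0; i < numReaders; i++) {
            final String readerId = readerGroup + "-reader-" + i;
            futures.add(CompletableFuture.supplyAsync(() -> {
                int count = 0;
                try (EventStreamReader<String> reader = clientFactory.createReader(readerId, readerGroup,
                        new UTF8StringSerializer(), ReaderConfig.builder().build())) {
                    while (true) {
                        try {
                            EventRead<String> event = reader.readNextEvent(1000);
                            if (event.getEvent() != null) {
                                count++;
                            } else if (!event.isCheckpoint()) {
                                // No more events within the timeout: assume the stream is drained.
                                break;
                            }
                        } catch (TruncatedDataException e) {
                            // Expected right after truncation; the next read resumes past the truncation point.
                        }
                    }
                }
                return count;
            }));
        }
        return futures;
    }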
use of io.pravega.client.admin.StreamManager in project pravega by pravega.
the class InProcPravegaClusterTest method createTestStream.
/**
 * Create the test stream.
 *
 * @throws Exception on any errors.
 */
@Test
public void createTestStream() throws Exception {
    Assert.assertNotNull("Pravega not initialized", localPravega);
    String scope = "Scope";
    String streamName = "Stream";
    int numSegments = 10;
    ClientConfig clientConfig = ClientConfig.builder()
            .controllerURI(URI.create(localPravega.getInProcPravegaCluster().getControllerURI()))
            .credentials(new DefaultCredentials("1111_aaaa", "admin"))
            .trustStore("../config/cert.pem")
            .validateHostName(false)
            .build();
    @Cleanup
    StreamManager streamManager = StreamManager.create(clientConfig);
    streamManager.createScope(scope);
    Assert.assertTrue("Stream creation is not successful",
            streamManager.createStream(scope, streamName,
                    StreamConfiguration.builder()
                            .scope(scope)
                            .streamName(streamName)
                            .scalingPolicy(ScalingPolicy.fixed(numSegments))
                            .build()));
    log.info("Created stream: " + streamName);
    ClientFactory clientFactory = ClientFactory.withScope(scope, clientConfig);
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
            new JavaSerializer<String>(), EventWriterConfig.builder().build());
    log.info("Created writer for stream: " + streamName);
    writer.writeEvent("hello").get();
    log.info("Wrote data to the stream");
}
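One caveat worth flagging: this last snippet uses the older ClientFactory entry point, which newer Pravega client releases deprecate in favor of EventStreamClientFactory (as used in the truncation tests above). A sketch of the equivalent writer setup with the newer API, reusing the same clientConfig:
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName,
            new JavaSerializer<String>(), EventWriterConfig.builder().build());
    writer.writeEvent("hello").get();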