use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.
the class StreamsAndScopesManagementTest method testCreateUpdateDeleteStreamTag.
private void testCreateUpdateDeleteStreamTag(String scope) {
    final ImmutableSet<String> tagSet1 = ImmutableSet.of("t1", "t2", "t3");
    final ImmutableSet<String> tagSet2 = ImmutableSet.of("t3", "t4", "t5");
    // Create and update streams
    for (int j = 1; j <= TEST_MAX_STREAMS; j++) {
        StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(j)).build();
        final String stream = "stream" + j;
        log.info("creating a new stream in scope {}/{}", scope, stream);
        streamManager.createStream(scope, stream, config);
        log.info("updating the stream in scope {}/{}", scope, stream);
        streamManager.updateStream(scope, stream, config.toBuilder().tags(tagSet1).build());
        assertEquals(tagSet1, streamManager.getStreamTags(scope, stream));
    }
    // Check the number of streams with tag t1
    assertEquals(TEST_MAX_STREAMS, newArrayList(streamManager.listStreams(scope, "t1")).size());
    // Check that the stream lists for tags t3 and t1 are equal
    assertEquals(newArrayList(streamManager.listStreams(scope, "t3")), newArrayList(streamManager.listStreams(scope, "t1")));
    // Update the streams with the new tag set
    List<CompletableFuture<Void>> futures = new ArrayList<>();
    for (int j = 1; j <= TEST_MAX_STREAMS; j++) {
        StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(j)).build();
        final String stream = "stream" + j;
        log.info("updating the stream tags in scope {}/{}", scope, stream);
        futures.add(CompletableFuture.runAsync(() -> streamManager.updateStream(scope, stream, config.toBuilder().clearTags().tags(tagSet2).build())));
    }
    assertEquals(TEST_MAX_STREAMS, futures.size());
    CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
    // Check that the update was successful
    assertTrue(newArrayList(streamManager.listStreams(scope, "t1")).isEmpty());
    assertEquals(TEST_MAX_STREAMS, newArrayList(streamManager.listStreams(scope, "t4")).size());
    final int tagT3Size = newArrayList(streamManager.listStreams(scope, "t3")).size();
    final int tagT4Size = newArrayList(streamManager.listStreams(scope, "t4")).size();
    log.info("list sizes of tags t3 and t4 are {}/{}", tagT3Size, tagT4Size);
    assertEquals(tagT3Size, tagT4Size);
    // Seal and delete the streams
    for (int j = 1; j <= TEST_MAX_STREAMS; j++) {
        final String stream = "stream" + j;
        streamManager.sealStream(scope, stream);
        log.info("deleting the stream in scope {}/{}", scope, stream);
        streamManager.deleteStream(scope, stream);
    }
    // Check that the stream listing is now empty.
    assertTrue(newArrayList(streamManager.listStreams(scope)).isEmpty());
}
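Note that StreamManager#listStreams returns a lazy Iterator<Stream>, which is why the assertions above wrap every call in newArrayList (Guava's Lists.newArrayList, statically imported) to drain it into a concrete list. A minimal sketch of that pattern; the helper name streamsWithTag is illustrative, not part of the test:

import com.google.common.collect.Lists;
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.Stream;

import java.util.Iterator;
import java.util.List;

// Drain the lazy iterator returned by listStreams into a concrete list
// so that size() and equality assertions can be applied to it.
private List<Stream> streamsWithTag(StreamManager streamManager, String scope, String tag) {
    Iterator<Stream> streams = streamManager.listStreams(scope, tag);
    return Lists.newArrayList(streams);
}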
use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.
the class ControllerFailoverTest method createStream.
private void createStream(Controller controller, String scope, String stream, ScalingPolicy scalingPolicy) {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
    controller.createStream(scope, stream, config).join();
}
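A usage sketch for this helper; the scope, stream, and segment count here are illustrative values, not from the test:

// Synchronously create a stream with three fixed segments in an existing scope.
createStream(controller, "testScope", "testStream", ScalingPolicy.fixed(3));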
use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.
the class ReaderGroupStreamCutUpdateTest method testStreamcutsUpdateInReaderGroup.
@Test(timeout = 60000)
public void testStreamcutsUpdateInReaderGroup() throws Exception {
    final String scope = "testStreamcutsUpdateInReaderGroup";
    final String stream = "myStream";
    final String readerGroupName = "testStreamcutsUpdateInReaderGroupRG";
    final int checkpointingIntervalMs = 2000;
    final int readerSleepInterval = 250;
    final int numEvents = 100;
    // First, create the stream.
    @Cleanup
    StreamManager streamManager = StreamManager.create(controllerURI);
    Assert.assertTrue(streamManager.createScope(scope));
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build();
    streamManager.createStream(scope, stream, streamConfiguration);
    // Write some events to the stream.
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, ClientConfig.builder().controllerURI(controllerURI).build());
    writeEvents(clientFactory, stream, numEvents);
    // Read the events and verify that positions are getting updated.
    ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder().stream(Stream.of(scope, stream)).automaticCheckpointIntervalMillis(checkpointingIntervalMs).build();
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, controllerURI);
    readerGroupManager.createReaderGroup(readerGroupName, readerGroupConfig);
    ReaderGroup readerGroup = readerGroupManager.getReaderGroup(readerGroupName);
    @Cleanup
    EventStreamReader<Double> reader = clientFactory.createReader("myReader", readerGroupName, new JavaSerializer<>(), ReaderConfig.builder().build());
    Map<Stream, StreamCut> currentStreamcuts = readerGroup.getStreamCuts();
    EventRead<Double> eventRead;
    int lastIteration = 0, iteration = 0;
    int assertionFrequency = checkpointingIntervalMs / readerSleepInterval;
    do {
        eventRead = reader.readNextEvent(5000);
        // Check that the streamcuts are being updated periodically via automatic reader group checkpoints.
        if (iteration != lastIteration && iteration % assertionFrequency == 0) {
            log.info("Comparing streamcuts: {} / {} in iteration {}.", currentStreamcuts, readerGroup.getStreamCuts(), iteration);
            Assert.assertNotEquals(currentStreamcuts, readerGroup.getStreamCuts());
            currentStreamcuts = readerGroup.getStreamCuts();
            lastIteration = iteration;
        }
        Thread.sleep(readerSleepInterval);
        if (!eventRead.isCheckpoint()) {
            iteration++;
        }
    } while ((eventRead.isCheckpoint() || eventRead.getEvent() != null) && iteration < numEvents);
}
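The writeEvents(clientFactory, stream, numEvents) helper is not shown in this excerpt. A hypothetical sketch of what it might look like, inferred from the EventStreamReader<Double> and JavaSerializer used above:

import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.stream.EventStreamWriter;
import io.pravega.client.stream.EventWriterConfig;
import io.pravega.client.stream.impl.JavaSerializer;
import lombok.Cleanup;

// Hypothetical sketch, not the project's actual helper: write numEvents
// Double events so the reader above has something to consume.
private void writeEvents(EventStreamClientFactory clientFactory, String stream, int numEvents) {
    @Cleanup
    EventStreamWriter<Double> writer = clientFactory.createEventWriter(stream, new JavaSerializer<>(), EventWriterConfig.builder().build());
    for (int i = 0; i < numEvents; i++) {
        writer.writeEvent(String.valueOf(i), (double) i); // routing key, then payload
    }
    writer.flush(); // make all written events durable before readers start
}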
use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.
the class RestoreBackUpDataRecoveryTest method testDurableDataLogFailRecoveryWatermarking.
/**
 * Tests the data recovery scenario with watermarking events.
 * What the test does, step by step:
 * 1. Starts Pravega locally with just 4 segment containers.
 * 2. Writes {@link #TOTAL_NUM_EVENTS} events to a segment with watermarks.
 * 3. Waits for all created segments to be flushed to Long Term Storage.
 * 4. Shuts down the controller, segment store and BookKeeper/ZooKeeper.
 * 5. Creates a backup of the container metadata segment and its attribute segment before deleting them from Long Term Storage.
 * 6. Starts 4 debug segment containers using a new BookKeeper/ZooKeeper and the Long Term Storage.
 * 7. Re-creates the container metadata segments in DurableLog and lets them be flushed to Long Term Storage.
 * 8. Starts the segment store and controller.
 * 9. Reads all events and verifies that all events are below the bounds.
 * @throws Exception If an exception occurred during execution.
 */
@Test(timeout = 180000)
public void testDurableDataLogFailRecoveryWatermarking() throws Exception {
    int instanceId = 0;
    int bookieCount = 1;
    int containerCount = 4;
    String readerGroup = "rgTx";
    // Creating a long term storage only once here.
    this.storageFactory = new InMemoryStorageFactory(executorService());
    log.info("Created a long term storage.");
    // Start a new BK & ZK, segment store and controller.
    @Cleanup
    PravegaRunner pravegaRunner = new PravegaRunner(instanceId++, bookieCount, containerCount, this.storageFactory);
    // Create a scope and a stream.
    createScopeStream(pravegaRunner.controllerRunner.controller, SCOPE, STREAM1);
    // Create a client to write events.
    @Cleanup
    ClientRunner clientRunner = new ClientRunner(pravegaRunner.controllerRunner);
    // Create a writer.
    @Cleanup
    TransactionalEventStreamWriter<Long> writer = clientRunner.clientFactory.createTransactionalEventWriter("writer1", STREAM1, new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutTime(TRANSACTION_TIMEOUT.toMillis()).build());
    AtomicBoolean stopFlag = new AtomicBoolean(false);
    // Write events.
    CompletableFuture<Void> writerFuture = writeTxEvents(writer, stopFlag);
    // Scale the stream several times so that we get complex positions.
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(5)).build();
    Stream streamObj = Stream.of(SCOPE, STREAM1);
    scale(pravegaRunner.controllerRunner.controller, streamObj, config);
    // Get watermarks.
    LinkedBlockingQueue<Watermark> watermarks = getWatermarks(pravegaRunner, stopFlag, writerFuture);
    // Shut down the controller.
    pravegaRunner.controllerRunner.close();
    // Flush the DurableLog to Long Term Storage.
    flushToStorage(pravegaRunner.segmentStoreRunner.serviceBuilder);
    // Shut down the SegmentStore.
    pravegaRunner.segmentStoreRunner.close();
    // Shut down BookKeeper & ZooKeeper.
    pravegaRunner.bookKeeperRunner.close();
    log.info("SegmentStore, BookKeeper & ZooKeeper shutdown");
    // Get the long term storage from the running Pravega instance.
    @Cleanup
    Storage storage = new AsyncStorageWrapper(new RollingStorage(this.storageFactory.createSyncStorage(), new SegmentRollingPolicy(DEFAULT_ROLLING_SIZE)), executorService());
    Map<Integer, String> backUpMetadataSegments = ContainerRecoveryUtils.createBackUpMetadataSegments(storage, containerCount, executorService(), TIMEOUT).join();
    // Start a new BookKeeper and ZooKeeper.
    pravegaRunner.bookKeeperRunner = new BookKeeperRunner(instanceId++, bookieCount);
    createBookKeeperLogFactory();
    log.info("Started a new BookKeeper and ZooKeeper.");
    // Recover segments.
    runRecovery(containerCount, storage, backUpMetadataSegments);
    // Start a new segment store and controller.
    pravegaRunner.restartControllerAndSegmentStore(this.storageFactory, this.dataLogFactory);
    log.info("Started segment store and controller again.");
    // Create the client with the new controller.
    @Cleanup
    ClientRunner newClientRunner = new ClientRunner(pravegaRunner.controllerRunner);
    // Read events and verify.
    readVerifyEventsWithWatermarks(readerGroup, newClientRunner, streamObj, watermarks);
}
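Of the helper methods referenced above, writeTxEvents (step 2 of the javadoc) is not shown in this excerpt. A hypothetical sketch of a transactional write loop matching its call site; the one-event-per-transaction batching is an assumption:

import io.pravega.client.stream.Transaction;
import io.pravega.client.stream.TransactionalEventStreamWriter;
import io.pravega.client.stream.TxnFailedException;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical sketch, not the project's actual helper: commit one small
// transaction per iteration until stopFlag is set.
private CompletableFuture<Void> writeTxEvents(TransactionalEventStreamWriter<Long> writer, AtomicBoolean stopFlag) {
    return CompletableFuture.runAsync(() -> {
        long event = 0;
        while (!stopFlag.get()) {
            try {
                Transaction<Long> txn = writer.beginTxn();
                txn.writeEvent(String.valueOf(event), event); // routing key, then payload
                txn.commit();
                event++;
            } catch (TxnFailedException e) {
                throw new CompletionException(e);
            }
        }
    });
}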
use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.
the class WatermarkingTest method watermarkTest.
@Test(timeout = 120000)
public void watermarkTest() throws Exception {
    Controller controller = PRAVEGA.getLocalController();
    String scope = "scope";
    String stream = "watermarkTest";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(5)).build();
    ClientConfig clientConfig = ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build();
    @Cleanup
    StreamManager streamManager = StreamManager.create(clientConfig);
    streamManager.createScope(scope);
    streamManager.createStream(scope, stream, config);
    Stream streamObj = Stream.of(scope, stream);
    // Create two writers.
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
    JavaSerializer<Long> javaSerializer = new JavaSerializer<>();
    @Cleanup
    EventStreamWriter<Long> writer1 = clientFactory.createEventWriter(stream, javaSerializer, EventWriterConfig.builder().build());
    @Cleanup
    EventStreamWriter<Long> writer2 = clientFactory.createEventWriter(stream, javaSerializer, EventWriterConfig.builder().build());
    AtomicBoolean stopFlag = new AtomicBoolean(false);
    // Write events.
    CompletableFuture<Void> writer1Future = writeEvents(writer1, stopFlag);
    CompletableFuture<Void> writer2Future = writeEvents(writer2, stopFlag);
    // Scale the stream several times so that we get complex positions.
    scale(controller, streamObj, config);
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(clientConfig);
    @Cleanup
    ClientFactoryImpl syncClientFactory = new ClientFactoryImpl(scope, new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), connectionFactory.getInternalExecutor()), connectionFactory);
    String markStream = NameUtils.getMarkStreamForStream(stream);
    @Cleanup
    RevisionedStreamClient<Watermark> watermarkReader = syncClientFactory.createRevisionedStreamClient(markStream, new WatermarkSerializer(), SynchronizerConfig.builder().build());
    LinkedBlockingQueue<Watermark> watermarks = new LinkedBlockingQueue<>();
    fetchWatermarks(watermarkReader, watermarks, stopFlag);
    AssertExtensions.assertEventuallyEquals(true, () -> watermarks.size() >= 2, 100000);
    stopFlag.set(true);
    writer1Future.join();
    writer2Future.join();
    // Read events from the stream.
    @Cleanup
    ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, syncClientFactory);
    Watermark watermark0 = watermarks.take();
    Watermark watermark1 = watermarks.take();
    assertTrue(watermark0.getLowerTimeBound() <= watermark0.getUpperTimeBound());
    assertTrue(watermark1.getLowerTimeBound() <= watermark1.getUpperTimeBound());
    assertTrue(watermark0.getLowerTimeBound() < watermark1.getLowerTimeBound());
    Map<Segment, Long> positionMap0 = watermark0.getStreamCut().entrySet().stream().collect(Collectors.toMap(x -> new Segment(scope, stream, x.getKey().getSegmentId()), Map.Entry::getValue));
    Map<Segment, Long> positionMap1 = watermark1.getStreamCut().entrySet().stream().collect(Collectors.toMap(x -> new Segment(scope, stream, x.getKey().getSegmentId()), Map.Entry::getValue));
    StreamCut streamCutFirst = new StreamCutImpl(streamObj, positionMap0);
    StreamCut streamCutSecond = new StreamCutImpl(streamObj, positionMap1);
    Map<Stream, StreamCut> firstMarkStreamCut = Collections.singletonMap(streamObj, streamCutFirst);
    Map<Stream, StreamCut> secondMarkStreamCut = Collections.singletonMap(streamObj, streamCutSecond);
    // Read from the stream cut of the first watermark up to the second watermark.
    String readerGroup = "watermarkTest-group";
    readerGroupManager.createReaderGroup(readerGroup, ReaderGroupConfig.builder().stream(streamObj).startingStreamCuts(firstMarkStreamCut).endingStreamCuts(secondMarkStreamCut).disableAutomaticCheckpoints().build());
    @Cleanup
    final EventStreamReader<Long> reader = clientFactory.createReader("myreader", readerGroup, javaSerializer, ReaderConfig.builder().build());
    EventRead<Long> event = reader.readNextEvent(10000L);
    TimeWindow currentTimeWindow = reader.getCurrentTimeWindow(streamObj);
    while (event.getEvent() != null && currentTimeWindow.getLowerTimeBound() == null && currentTimeWindow.getUpperTimeBound() == null) {
        event = reader.readNextEvent(10000L);
        currentTimeWindow = reader.getCurrentTimeWindow(streamObj);
    }
    assertNotNull(currentTimeWindow.getUpperTimeBound());
    // Read all events and verify that all events are below the bounds.
    while (event.getEvent() != null) {
        Long time = event.getEvent();
        log.info("timewindow = {} event = {}", currentTimeWindow, time);
        assertTrue(currentTimeWindow.getLowerTimeBound() == null || time >= currentTimeWindow.getLowerTimeBound());
        assertTrue(currentTimeWindow.getUpperTimeBound() == null || time <= currentTimeWindow.getUpperTimeBound());
        TimeWindow nextTimeWindow = reader.getCurrentTimeWindow(streamObj);
        assertTrue(currentTimeWindow.getLowerTimeBound() == null || nextTimeWindow.getLowerTimeBound() >= currentTimeWindow.getLowerTimeBound());
        assertTrue(currentTimeWindow.getUpperTimeBound() == null || nextTimeWindow.getUpperTimeBound() >= currentTimeWindow.getUpperTimeBound());
        currentTimeWindow = nextTimeWindow;
        event = reader.readNextEvent(10000L);
        if (event.isCheckpoint()) {
            event = reader.readNextEvent(10000L);
        }
    }
    assertNotNull(currentTimeWindow.getLowerTimeBound());
}
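The fetchWatermarks(watermarkReader, watermarks, stopFlag) helper is not shown in this excerpt. A hypothetical sketch of it; the background-polling structure and the poll interval are assumptions, while the RevisionedStreamClient calls (fetchOldestRevision, readFrom) are the real API:

import io.pravega.client.state.Revision;
import io.pravega.client.state.RevisionedStreamClient;
import io.pravega.shared.watermarks.Watermark;

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical sketch, not the project's actual helper: poll the revisioned
// mark stream in the background and queue every new watermark until stopFlag is set.
private void fetchWatermarks(RevisionedStreamClient<Watermark> watermarkReader,
                             LinkedBlockingQueue<Watermark> watermarks,
                             AtomicBoolean stopFlag) {
    CompletableFuture.runAsync(() -> {
        Revision revision = watermarkReader.fetchOldestRevision();
        while (!stopFlag.get()) {
            Iterator<Map.Entry<Revision, Watermark>> entries = watermarkReader.readFrom(revision);
            while (entries.hasNext()) {
                Map.Entry<Revision, Watermark> entry = entries.next();
                revision = entry.getKey(); // resume from the last revision seen
                watermarks.add(entry.getValue());
            }
            try {
                Thread.sleep(500); // poll interval: an assumption, not from the test
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
        }
    });
}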