Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From class SingleSubscriberUpdateRetentionStreamCutTest, method singleSubscriberCBRTest:
@Test
public void singleSubscriberCBRTest() throws Exception {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup
    EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, new JavaSerializer<>(),
            EventWriterConfig.builder().build());
    // Write a single event.
    log.info("Writing event e1 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e1", SIZE_30_EVENT).join();
    @Cleanup
    ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    readerGroupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder()
            .retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT)
            .disableAutomaticCheckpoints()
            .stream(Stream.of(SCOPE, STREAM))
            .build());
    ReaderGroup readerGroup = readerGroupManager.getReaderGroup(READER_GROUP);
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader(READER_GROUP + "-" + 1, READER_GROUP,
            new JavaSerializer<>(), readerConfig);
    // Read one event.
    log.info("Reading event e1 from {}/{}", SCOPE, STREAM);
    EventRead<String> read = reader.readNextEvent(READ_TIMEOUT);
    assertFalse(read.isCheckpoint());
    assertEquals("data of size 30", read.getEvent());
    // Update the retention stream-cut.
    log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts = readerGroup.generateStreamCuts(streamCutExecutor);
    // Wait for 5 seconds to force a reader group state update. This allows the silent
    // checkpoint event generated by generateStreamCuts to be picked up and processed.
    Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
    // This read returns no event; it gives the reader a chance to process the silent checkpoint.
    EventRead<String> emptyEvent = reader.readNextEvent(READ_TIMEOUT);
    assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts, 10_000));
    Map<Stream, StreamCut> streamCuts = futureCuts.join();
    log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts);
    readerGroup.updateRetentionStreamCut(streamCuts);
    // Write two more events.
    log.info("Writing event e2 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e2", SIZE_30_EVENT).join();
    log.info("Writing event e3 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e3", SIZE_30_EVENT).join();
    // Check that truncation happened after the first event.
    // The timeout is 5 minutes because the retention period is set to 2 minutes: we allow
    // two full retention cycles, plus a margin, to confirm that truncation has taken place.
    AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 30.", true,
            () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join().values().stream()
                    .anyMatch(off -> off >= 30),
            1000, 5 * 60 * 1000L);
    // Read the next event.
    log.info("Reading event e2 from {}/{}", SCOPE, STREAM);
    read = reader.readNextEvent(READ_TIMEOUT);
    assertFalse(read.isCheckpoint());
    assertEquals("data of size 30", read.getEvent());
    // Update the retention stream-cut again.
    log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts2 = readerGroup.generateStreamCuts(streamCutExecutor);
    // As above, wait for the silent checkpoint to be picked up and processed.
    Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
    EventRead<String> emptyEvent2 = reader.readNextEvent(READ_TIMEOUT);
    assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts2, 10_000));
    Map<Stream, StreamCut> streamCuts2 = futureCuts2.join();
    log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts2);
    readerGroup.updateRetentionStreamCut(streamCuts2);
    // Check that truncation happened after the second event, with the same 5-minute allowance.
    AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 60.", true,
            () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join().values().stream()
                    .anyMatch(off -> off >= 60),
            1000, 5 * 60 * 1000L);
}
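A note on the pattern above: StreamImpl is simply the concrete value type behind the Stream interface, pairing a scope with a stream name, and Stream.of(SCOPE, STREAM) in the ReaderGroupConfig constructs the same StreamImpl. The truncation assertion uses it only to address Controller calls. A minimal sketch of that polling idiom, assuming an existing Controller instance; the scope/stream names are placeholders, not part of the test above:

// Hedged sketch: poll the current head of a stream to observe truncation.
Stream stream = new StreamImpl("myScope", "myStream");
Map<Segment, Long> headOffsets = controller.getSegmentsAtTime(stream, 0L).join();
// Once retention truncates the stream, the head offsets move past the removed bytes.
boolean truncatedPast30 = headOffsets.values().stream().anyMatch(off -> off >= 30);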
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From class MetadataScalabilityTest, method scale:
List<List<Segment>> scale(ControllerImpl controller) {
    int numSegments = getStreamConfig().getScalingPolicy().getMinNumSegments();
    int scalesToPerform = getScalesToPerform();
    // Manually scale the stream SCALES_TO_PERFORM times.
    Stream stream = new StreamImpl(SCOPE, getStreamName());
    AtomicInteger counter = new AtomicInteger(0);
    List<List<Segment>> listOfEpochs = new LinkedList<>();
    CompletableFuture<Void> scaleFuture = Futures.loop(() -> counter.incrementAndGet() <= scalesToPerform,
            () -> controller.getCurrentSegments(SCOPE, streamName).thenCompose(segments -> {
                ArrayList<Segment> sorted = Lists.newArrayList(segments.getSegments().stream()
                        .sorted(Comparator.comparingInt(x -> NameUtils.getSegmentNumber(x.getSegmentId()) % numSegments))
                        .collect(Collectors.toList()));
                listOfEpochs.add(sorted);
                // Note: with SCALES_TO_PERFORM < numSegments, we can use the segment number
                // as the index into the range map.
                Pair<List<Long>, Map<Double, Double>> scaleInput = getScaleInput(sorted);
                List<Long> segmentsToSeal = scaleInput.getKey();
                Map<Double, Double> newRanges = scaleInput.getValue();
                return controller.scaleStream(stream, segmentsToSeal, newRanges, executorService).getFuture()
                        .thenAccept(scaleStatus -> {
                            log.info("scale stream for epoch {} completed with status {}", counter.get(), scaleStatus);
                            assert scaleStatus;
                        });
            }), executorService);
    scaleFuture.join();
    return listOfEpochs;
}
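The driver of this method is the Futures.loop combinator from io.pravega.common.concurrent.Futures, which re-runs an async body while a condition holds and completes its future when the condition first fails. A minimal sketch of the idiom in isolation, assuming only a ScheduledExecutorService named executorService:

// Hedged sketch of Futures.loop: run an async body five times.
AtomicInteger counter = new AtomicInteger(0);
CompletableFuture<Void> loop = Futures.loop(
        () -> counter.incrementAndGet() <= 5,          // condition, checked before each iteration
        () -> CompletableFuture.runAsync(
                () -> System.out.println("iteration " + counter.get()), executorService),
        executorService);
loop.join();                                           // completes after the fifth iteration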
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From class ControllerFailoverTest, method failoverTest:
@Test
public void failoverTest() throws InterruptedException, ExecutionException {
    String scope = "testFailoverScope" + RandomStringUtils.randomAlphabetic(5);
    String stream = "testFailoverStream" + RandomStringUtils.randomAlphabetic(5);
    int initialSegments = 1;
    List<Long> segmentsToSeal = Collections.singletonList(0L);
    Map<Double, Double> newRangesToCreate = new HashMap<>();
    newRangesToCreate.put(0.0, 1.0);
    ClientConfig clientConfig = Utils.buildClientConfig(controllerURIDirect);
    // Connect with the first controller instance.
    final Controller controller1 = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), executorService);
    // Create the scope and the stream.
    controller1.createScope(scope).join();
    log.info("Scope {} created successfully", scope);
    createStream(controller1, scope, stream, ScalingPolicy.fixed(initialSegments));
    log.info("Stream {}/{} created successfully", scope, stream);
    StreamImpl stream1 = new StreamImpl(scope, stream);
    // Initiate the scale operation.
    controller1.startScale(stream1, segmentsToSeal, newRangesToCreate).join();
    // Now stop the controller instance executing the scale operation.
    Futures.getAndHandleExceptions(controllerService.scaleService(0), ExecutionException::new);
    log.info("Successfully stopped one instance of controller service");
    // Restart the controller service.
    Futures.getAndHandleExceptions(controllerService.scaleService(1), ExecutionException::new);
    log.info("Successfully restarted one instance of controller service");
    List<URI> controllerUris = controllerService.getServiceDetails();
    // Fetch all the RPC endpoints and construct the client URIs.
    final List<String> uris = controllerUris.stream().filter(ISGRPC).map(URI::getAuthority).collect(Collectors.toList());
    controllerURIDirect = URI.create((Utils.TLS_AND_AUTH_ENABLED ? TLS : TCP) + String.join(",", uris));
    log.info("Controller Service direct URI: {}", controllerURIDirect);
    ClientConfig clientConf = Utils.buildClientConfig(controllerURIDirect);
    // Connect to another controller instance.
    @Cleanup
    final Controller controller2 = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConf).build(), executorService);
    // Note: if the scale does not complete within the desired time, the test will time out.
    boolean scaleStatus = controller2.checkScaleStatus(stream1, 0).join();
    while (!scaleStatus) {
        Thread.sleep(30000);
        scaleStatus = controller2.checkScaleStatus(stream1, 0).join();
    }
    // Scale again: seal the single epoch-1 segment and split it into two.
    segmentsToSeal = Collections.singletonList(NameUtils.computeSegmentId(1, 1));
    newRangesToCreate = new HashMap<>();
    newRangesToCreate.put(0.0, 0.5);
    newRangesToCreate.put(0.5, 1.0);
    controller2.scaleStream(stream1, segmentsToSeal, newRangesToCreate, executorService).getFuture().join();
    log.info("Checking whether scale operation succeeded by fetching current segments");
    StreamSegments streamSegments = controller2.getCurrentSegments(scope, stream).join();
    log.info("Current segment count = {}", streamSegments.getSegments().size());
    Assert.assertEquals(2, streamSegments.getSegments().size());
}
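The busy-wait on checkScaleStatus works here because the test framework enforces an overall timeout. Reused outside a test, the same polling would want an explicit deadline; a hedged sketch of that variant (the helper name and timings are placeholders):

// Hedged sketch: poll Controller.checkScaleStatus with a deadline.
static boolean waitForScale(Controller controller, Stream stream, int scaleEpoch, long timeoutMillis)
        throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < deadline) {
        if (controller.checkScaleStatus(stream, scaleEpoch).join()) {
            return true;                  // scale completed
        }
        Thread.sleep(5000);               // back off before the next status check
    }
    return false;                         // deadline expired without the scale completing
}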
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From class EndToEndAutoScaleDownTest, method main:
public static void main(String[] args) throws Exception {
    try {
        @Cleanup
        TestingServer zkTestServer = new TestingServerStarter().start();
        int port = Config.SERVICE_PORT;
        @Cleanup
        ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port, false);
        Controller controller = controllerWrapper.getController();
        controllerWrapper.getControllerService().createScope(NameUtils.INTERNAL_SCOPE_NAME, 0L).get();
        ClientFactoryImpl internalCF = new ClientFactoryImpl(NameUtils.INTERNAL_SCOPE_NAME, controller,
                new SocketConnectionFactoryImpl(ClientConfig.builder().build()));
        ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
        serviceBuilder.initialize();
        StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
        TableStore tableStore = serviceBuilder.createTableStoreService();
        // Configure the auto-scaler with near-zero mute/cooldown so scale-down triggers quickly.
        @Cleanup
        AutoScaleMonitor autoScaleMonitor = new AutoScaleMonitor(store, internalCF, AutoScalerConfig.builder()
                .with(AutoScalerConfig.MUTE_IN_SECONDS, 0)
                .with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0)
                .with(AutoScalerConfig.CACHE_CLEANUP_IN_SECONDS, 5)
                .with(AutoScalerConfig.CACHE_EXPIRY_IN_SECONDS, 30)
                .build());
        @Cleanup
        PravegaConnectionListener server = new PravegaConnectionListener(false, false, "localhost", 12345, store,
                tableStore, autoScaleMonitor.getStatsRecorder(), autoScaleMonitor.getTableSegmentStatsRecorder(),
                null, null, null, true, serviceBuilder.getLowPriorityExecutor(),
                Config.TLS_PROTOCOL_VERSION.toArray(new String[Config.TLS_PROTOCOL_VERSION.size()]));
        server.startListening();
        controllerWrapper.awaitRunning();
        controllerWrapper.getControllerService().createScope("test", 0L).get();
        controller.createStream("test", "test", CONFIG).get();
        Stream stream = new StreamImpl("test", "test");
        // Manually scale the stream from one segment into three.
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 0.33);
        map.put(0.33, 0.66);
        map.put(0.66, 1.0);
        @Cleanup("shutdownNow")
        ScheduledExecutorService executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "test");
        controller.scaleStream(stream, Collections.singletonList(0L), map, executor).getFuture().get();
        // Retry with exponential backoff until the auto-scaler merges the stream back below 3 segments.
        Retry.withExpBackoff(10, 10, 100, 10000)
             .retryingOn(NotDoneException.class)
             .throwingOn(RuntimeException.class)
             .runAsync(() -> controller.getCurrentSegments("test", "test").thenAccept(streamSegments -> {
                 if (streamSegments.getSegments().size() < 3) {
                     System.err.println("Success");
                     log.info("Success");
                     System.exit(0);
                 } else {
                     throw new NotDoneException();
                 }
             }), executor).exceptionally(e -> {
                 System.err.println("Failure");
                 log.error("Failure");
                 System.exit(1);
                 return null;
             }).get();
    } catch (Throwable e) {
        System.err.println("Test failed with exception: " + e.getMessage());
        System.exit(-1);
    }
    System.exit(0);
}
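The Retry chain deserves a gloss: assuming the io.pravega.common.util.Retry semantics, withExpBackoff(10, 10, 100, 10000) starts with a 10 ms delay, multiplies it by 10 after each failed attempt, caps the delay at 10000 ms, and gives up after 100 attempts; retryingOn(NotDoneException.class) names the exception that triggers another attempt, while throwingOn(RuntimeException.class) propagates immediately. A stripped-down sketch of the same idiom, where readyToProceed() is a hypothetical predicate, not a real API:

// Hedged sketch of the Retry idiom used above, with a placeholder condition.
CompletableFuture<Void> done = Retry.withExpBackoff(10, 10, 100, 10000)
        .retryingOn(NotDoneException.class)
        .throwingOn(RuntimeException.class)
        .runAsync(() -> CompletableFuture.runAsync(() -> {
            if (!readyToProceed()) {          // hypothetical check
                throw new NotDoneException(); // triggers backoff and another attempt
            }
        }, executor), executor);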
Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.
From class StreamMetricsTest, method testSegmentSplitMerge:
@Test(timeout = 30000)
public void testSegmentSplitMerge() throws Exception {
    String scaleScopeName = "scaleScope";
    String scaleStreamName = "scaleStream";
    controllerWrapper.getControllerService().createScope(scaleScopeName, 0L).get();
    if (!controller.createStream(scaleScopeName, scaleStreamName, config).get()) {
        log.error("Stream {} for scale testing already existed, exiting", scaleScopeName + "/" + scaleStreamName);
        return;
    }
    Stream scaleStream = new StreamImpl(scaleScopeName, scaleStreamName);
    // Split into 3 segments.
    Map<Double, Double> keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.33);
    keyRanges.put(0.33, 0.66);
    keyRanges.put(0.66, 1.0);
    if (!controller.scaleStream(scaleStream, Collections.singletonList(0L), keyRanges, executor).getFuture().get()) {
        log.error("Scale stream: splitting segment into three failed, exiting");
        return;
    }
    assertEquals(3, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_COUNT, streamTags(scaleScopeName, scaleStreamName)).value());
    assertEquals(1, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_SPLITS, streamTags(scaleScopeName, scaleStreamName)).value());
    assertEquals(0, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_MERGES, streamTags(scaleScopeName, scaleStreamName)).value());
    // Merge back into 2 segments. The three segments created by the split belong to
    // epoch 1, so their IDs must be computed with the epoch included.
    keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.5);
    keyRanges.put(0.5, 1.0);
    if (!controller.scaleStream(scaleStream,
            Arrays.asList(NameUtils.computeSegmentId(1, 1), NameUtils.computeSegmentId(2, 1), NameUtils.computeSegmentId(3, 1)),
            keyRanges, executor).getFuture().get()) {
        log.error("Scale stream: merging segments into two failed, exiting");
        return;
    }
    assertEquals(2, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_COUNT, streamTags(scaleScopeName, scaleStreamName)).value());
    assertEquals(1, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_SPLITS, streamTags(scaleScopeName, scaleStreamName)).value());
    assertEquals(1, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_MERGES, streamTags(scaleScopeName, scaleStreamName)).value());
}
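Both scaleStream calls pass a map from low key to high key for the replacement segments, and a scale request is only valid if the new ranges exactly tile the key space covered by the sealed segments (here the full [0.0, 1.0) range). A hedged sketch of a sanity check for such a map; the helper is illustrative, not part of the test or the Pravega API:

// Hedged sketch: verify that proposed key ranges tile [0.0, 1.0) with no gaps or overlaps.
static boolean coversFullKeySpace(Map<Double, Double> newRanges) {
    double cursor = 0.0;
    for (Map.Entry<Double, Double> range : new TreeMap<>(newRanges).entrySet()) {
        if (Math.abs(range.getKey() - cursor) > 1e-9) {
            return false;                 // gap or overlap at the cursor
        }
        cursor = range.getValue();        // advance to this range's high key
    }
    return Math.abs(cursor - 1.0) < 1e-9; // must end exactly at 1.0
}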