Example 66 with StreamImpl

Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.

The class SingleSubscriberUpdateRetentionStreamCutTest, method singleSubscriberCBRTest.

@Test
public void singleSubscriberCBRTest() throws Exception {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(STREAM, new JavaSerializer<>(), EventWriterConfig.builder().build());
    // Write a single event.
    log.info("Writing event e1 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e1", SIZE_30_EVENT).join();
    @Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
    readerGroupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().retentionType(ReaderGroupConfig.StreamDataRetention.MANUAL_RELEASE_AT_USER_STREAMCUT).disableAutomaticCheckpoints().stream(Stream.of(SCOPE, STREAM)).build());
    ReaderGroup readerGroup = readerGroupManager.getReaderGroup(READER_GROUP);
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader(READER_GROUP + "-" + 1, READER_GROUP, new JavaSerializer<>(), readerConfig);
    // Read one event.
    log.info("Reading event e1 from {}/{}", SCOPE, STREAM);
    EventRead<String> read = reader.readNextEvent(READ_TIMEOUT);
    assertFalse(read.isCheckpoint());
    assertEquals("data of size 30", read.getEvent());
    // Update the retention stream-cut.
    log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts = readerGroup.generateStreamCuts(streamCutExecutor);
    // Wait for 5 seconds to force reader group state update. This will allow for the silent
    // checkpoint event generated as part of generateStreamCuts to be picked and processed.
    Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
    EventRead<String> emptyEvent = reader.readNextEvent(READ_TIMEOUT);
    assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts, 10_000));
    Map<Stream, StreamCut> streamCuts = futureCuts.join();
    log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts);
    readerGroup.updateRetentionStreamCut(streamCuts);
    // Write two more events.
    log.info("Writing event e2 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e2", SIZE_30_EVENT).join();
    log.info("Writing event e3 to {}/{}", SCOPE, STREAM);
    writer.writeEvent("e3", SIZE_30_EVENT).join();
    // Check to make sure truncation happened after the first event.
    // The timeout is 5 minutes as the retention period is set to 2 minutes. We allow for 2 cycles to fully complete
    // and a little longer in order to confirm that the retention has taken place.
    AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 30.", true, () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join().values().stream().anyMatch(off -> off >= 30), 1000, 5 * 60 * 1000L);
    // Read next event.
    log.info("Reading event e2 from {}/{}", SCOPE, STREAM);
    read = reader.readNextEvent(READ_TIMEOUT);
    assertFalse(read.isCheckpoint());
    assertEquals("data of size 30", read.getEvent());
    // Update the retention stream-cut.
    log.info("{} generating stream-cuts for {}/{}", READER_GROUP, SCOPE, STREAM);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts2 = readerGroup.generateStreamCuts(streamCutExecutor);
    // Wait for 5 seconds to force reader group state update. This will allow for the silent
    // checkpoint event generated as part of generateStreamCuts to be picked and processed.
    Exceptions.handleInterrupted(() -> TimeUnit.SECONDS.sleep(5));
    EventRead<String> emptyEvent2 = reader.readNextEvent(READ_TIMEOUT);
    assertTrue("Stream-cut generation did not complete", Futures.await(futureCuts2, 10_000));
    Map<Stream, StreamCut> streamCuts2 = futureCuts2.join();
    log.info("{} updating its retention stream-cut to {}", READER_GROUP, streamCuts2);
    readerGroup.updateRetentionStreamCut(streamCuts2);
    // Check to make sure truncation happened after the second event.
    // The timeout is 5 minutes as the retention period is set to 2 minutes. We allow for 2 cycles to fully complete
    // and a little longer in order to confirm that the retention has taken place.
    AssertExtensions.assertEventuallyEquals("Truncation did not take place at offset 60", true, () -> controller.getSegmentsAtTime(new StreamImpl(SCOPE, STREAM), 0L).join().values().stream().anyMatch(off -> off >= 60), 1000, 5 * 60 * 1000L);
}
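The truncation assertions above reduce to one StreamImpl call: Controller.getSegmentsAtTime with timestamp 0L returns the earliest readable offset per segment, so any offset at or past the expected boundary means retention has truncated the stream that far. A minimal sketch of that probe as a hypothetical helper (the class and method names are ours, not the test's):

import io.pravega.client.control.impl.Controller;
import io.pravega.client.segment.impl.Segment;
import io.pravega.client.stream.impl.StreamImpl;
import java.util.Map;

final class TruncationProbe {
    // Returns true once any segment's head offset has moved to or past the given
    // byte offset, i.e. the stream was truncated at least that far.
    static boolean truncatedPast(Controller controller, String scope, String stream, long offset) {
        Map<Segment, Long> heads = controller.getSegmentsAtTime(new StreamImpl(scope, stream), 0L).join();
        return heads.values().stream().anyMatch(off -> off >= offset);
    }
}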

Example 67 with StreamImpl

Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.

The class MetadataScalabilityTest, method scale.

List<List<Segment>> scale(ControllerImpl controller) {
    int numSegments = getStreamConfig().getScalingPolicy().getMinNumSegments();
    int scalesToPerform = getScalesToPerform();
    // manually scale the stream SCALES_TO_PERFORM times
    Stream stream = new StreamImpl(SCOPE, getStreamName());
    AtomicInteger counter = new AtomicInteger(0);
    List<List<Segment>> listOfEpochs = new LinkedList<>();
    CompletableFuture<Void> scaleFuture = Futures.loop(() -> counter.incrementAndGet() <= scalesToPerform, () -> controller.getCurrentSegments(SCOPE, getStreamName()).thenCompose(segments -> {
        ArrayList<Segment> sorted = Lists.newArrayList(segments.getSegments().stream().sorted(Comparator.comparingInt(x -> NameUtils.getSegmentNumber(x.getSegmentId()) % numSegments)).collect(Collectors.toList()));
        listOfEpochs.add(sorted);
        // note: with SCALES_TO_PERFORM < numSegments, we can use the segment number as the index
        // into the range map
        Pair<List<Long>, Map<Double, Double>> scaleInput = getScaleInput(sorted);
        List<Long> segmentsToSeal = scaleInput.getKey();
        Map<Double, Double> newRanges = scaleInput.getValue();
        return controller.scaleStream(stream, segmentsToSeal, newRanges, executorService).getFuture().thenAccept(scaleStatus -> {
            log.info("scale stream for epoch {} completed with status {}", counter.get(), scaleStatus);
            assert scaleStatus;
        });
    }), executorService);
    scaleFuture.join();
    return listOfEpochs;
}
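getScaleInput is defined elsewhere in the test class; what Controller.scaleStream requires of its output is that the new key ranges cover exactly the ranges of the segments being sealed. A hypothetical stand-in that seals one segment and splits its own range in half (names and policy are illustrative, not the test's actual logic):

import io.pravega.client.segment.impl.Segment;
import org.apache.commons.lang3.tuple.Pair;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class ScaleInputs {
    // Seals a single segment covering [low, high) and replaces it with two
    // segments covering [low, mid) and [mid, high); the new ranges must cover
    // the sealed range exactly or the controller rejects the scale request.
    static Pair<List<Long>, Map<Double, Double>> splitInHalf(Segment toSeal, double low, double high) {
        double mid = (low + high) / 2;
        Map<Double, Double> newRanges = new HashMap<>();
        newRanges.put(low, mid);
        newRanges.put(mid, high);
        return Pair.of(Collections.singletonList(toSeal.getSegmentId()), newRanges);
    }
}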

Example 68 with StreamImpl

Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.

The class ControllerFailoverTest, method failoverTest.

@Test
public void failoverTest() throws InterruptedException, ExecutionException {
    String scope = "testFailoverScope" + RandomStringUtils.randomAlphabetic(5);
    String stream = "testFailoverStream" + RandomStringUtils.randomAlphabetic(5);
    int initialSegments = 1;
    List<Long> segmentsToSeal = Collections.singletonList(0L);
    Map<Double, Double> newRangesToCreate = new HashMap<>();
    newRangesToCreate.put(0.0, 1.0);
    ClientConfig clientConfig = Utils.buildClientConfig(controllerURIDirect);
    // Connect with first controller instance.
    final Controller controller1 = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConfig).build(), executorService);
    // Create the scope and the stream.
    controller1.createScope(scope).join();
    log.info("Scope {} created successfully", scope);
    createStream(controller1, scope, stream, ScalingPolicy.fixed(initialSegments));
    log.info("Stream {}/{} created successfully", scope, stream);
    long txnCreationTimestamp = System.nanoTime();
    StreamImpl stream1 = new StreamImpl(scope, stream);
    // Initiate the scale operation.
    controller1.startScale(stream1, segmentsToSeal, newRangesToCreate).join();
    // Now stop the controller instance executing scale operation.
    Futures.getAndHandleExceptions(controllerService.scaleService(0), ExecutionException::new);
    log.info("Successfully stopped one instance of controller service");
    // Restart the controller service.
    Futures.getAndHandleExceptions(controllerService.scaleService(1), ExecutionException::new);
    log.info("Successfully started one instance of controller service");
    List<URI> controllerUris = controllerService.getServiceDetails();
    // Fetch all the RPC endpoints and construct the client URIs.
    final List<String> uris = controllerUris.stream().filter(ISGRPC).map(URI::getAuthority).collect(Collectors.toList());
    controllerURIDirect = URI.create((Utils.TLS_AND_AUTH_ENABLED ? TLS : TCP) + String.join(",", uris));
    log.info("Controller Service direct URI: {}", controllerURIDirect);
    ClientConfig clientConf = Utils.buildClientConfig(controllerURIDirect);
    // Connect to another controller instance.
    @Cleanup final Controller controller2 = new ControllerImpl(ControllerImplConfig.builder().clientConfig(clientConf).build(), executorService);
    // Note: if the scale does not complete within the desired time, the test will time out.
    boolean scaleStatus = controller2.checkScaleStatus(stream1, 0).join();
    while (!scaleStatus) {
        Thread.sleep(30000);
        scaleStatus = controller2.checkScaleStatus(stream1, 0).join();
    }
    segmentsToSeal = Collections.singletonList(NameUtils.computeSegmentId(1, 1));
    newRangesToCreate = new HashMap<>();
    newRangesToCreate.put(0.0, 0.5);
    newRangesToCreate.put(0.5, 1.0);
    controller2.scaleStream(stream1, segmentsToSeal, newRangesToCreate, executorService).getFuture().join();
    log.info("Checking whether scale operation succeeded by fetching current segments");
    StreamSegments streamSegments = controller2.getCurrentSegments(scope, stream).join();
    log.info("Current segment count= {}", streamSegments.getSegments().size());
    Assert.assertEquals(2, streamSegments.getSegments().size());
}
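The busy-wait loop above polls checkScaleStatus until the interrupted scale completes. A bounded variant, sketched as a hypothetical helper, gives up after a fixed number of attempts instead of relying solely on the test timeout:

import io.pravega.client.control.impl.Controller;
import io.pravega.client.stream.Stream;
import java.util.concurrent.TimeUnit;

final class ScaleAwait {
    // Polls the scale status of the given epoch every 30 seconds, up to
    // maxAttempts times; returns whether the scale completed in that window.
    static boolean awaitScale(Controller controller, Stream stream, int epoch, int maxAttempts)
            throws InterruptedException {
        for (int i = 0; i < maxAttempts; i++) {
            if (controller.checkScaleStatus(stream, epoch).join()) {
                return true;
            }
            TimeUnit.SECONDS.sleep(30);
        }
        return false;
    }
}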

Example 69 with StreamImpl

Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.

The class EndToEndAutoScaleDownTest, method main.

public static void main(String[] args) throws Exception {
    try {
        @Cleanup TestingServer zkTestServer = new TestingServerStarter().start();
        int port = Config.SERVICE_PORT;
        @Cleanup ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port, false);
        Controller controller = controllerWrapper.getController();
        controllerWrapper.getControllerService().createScope(NameUtils.INTERNAL_SCOPE_NAME, 0L).get();
        ClientFactoryImpl internalCF = new ClientFactoryImpl(NameUtils.INTERNAL_SCOPE_NAME, controller, new SocketConnectionFactoryImpl(ClientConfig.builder().build()));
        ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
        serviceBuilder.initialize();
        StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
        TableStore tableStore = serviceBuilder.createTableStoreService();
        @Cleanup AutoScaleMonitor autoScaleMonitor = new AutoScaleMonitor(store, internalCF, AutoScalerConfig.builder().with(AutoScalerConfig.MUTE_IN_SECONDS, 0).with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0).with(AutoScalerConfig.CACHE_CLEANUP_IN_SECONDS, 5).with(AutoScalerConfig.CACHE_EXPIRY_IN_SECONDS, 30).build());
        @Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, false, "localhost", 12345, store, tableStore, autoScaleMonitor.getStatsRecorder(), autoScaleMonitor.getTableSegmentStatsRecorder(), null, null, null, true, serviceBuilder.getLowPriorityExecutor(), Config.TLS_PROTOCOL_VERSION.toArray(new String[Config.TLS_PROTOCOL_VERSION.size()]));
        server.startListening();
        controllerWrapper.awaitRunning();
        controllerWrapper.getControllerService().createScope("test", 0L).get();
        controller.createStream("test", "test", CONFIG).get();
        Stream stream = new StreamImpl("test", "test");
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 0.33);
        map.put(0.33, 0.66);
        map.put(0.66, 1.0);
        @Cleanup("shutdownNow") ScheduledExecutorService executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "test");
        controller.scaleStream(stream, Collections.singletonList(0L), map, executor).getFuture().get();
        Retry.withExpBackoff(10, 10, 100, 10000).retryingOn(NotDoneException.class).throwingOn(RuntimeException.class).runAsync(() -> controller.getCurrentSegments("test", "test").thenAccept(streamSegments -> {
            if (streamSegments.getSegments().size() < 3) {
                System.err.println("Success");
                log.info("Success");
                System.exit(0);
            } else {
                throw new NotDoneException();
            }
        }), executor).exceptionally(e -> {
            System.err.println("Failure");
            log.error("Failure");
            System.exit(1);
            return null;
        }).get();
    } catch (Throwable e) {
        System.err.print("Test failed with exception: " + e.getMessage());
        System.exit(-1);
    }
    System.exit(0);
}
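The Retry block is what does the waiting here: it re-runs an asynchronous check with exponential backoff until the segment count drops. The same pattern as a self-contained sketch, with a local stand-in for the test's NotDoneException (all names below are illustrative):

import io.pravega.common.util.Retry;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.function.Supplier;

final class AwaitCondition {
    // Local stand-in for the test's NotDoneException; any RuntimeException type
    // can serve as the "not done yet, retry" signal for Retry.retryingOn.
    static final class NotDoneException extends RuntimeException { }

    // Retries the supplied async check with exponential backoff (10 ms initial
    // delay, x10 multiplier, up to 100 attempts, 10 s max delay, mirroring the
    // example above) until it reports true.
    static CompletableFuture<Void> await(Supplier<CompletableFuture<Boolean>> check,
                                         ScheduledExecutorService executor) {
        return Retry.withExpBackoff(10, 10, 100, 10000)
                    .retryingOn(NotDoneException.class)
                    .throwingOn(RuntimeException.class)
                    .runAsync(() -> check.get().thenAccept(done -> {
                        if (!done) {
                            throw new NotDoneException();
                        }
                    }), executor);
    }
}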

Example 70 with StreamImpl

Use of io.pravega.client.stream.impl.StreamImpl in project pravega by pravega.

The class StreamMetricsTest, method testSegmentSplitMerge.

@Test(timeout = 30000)
public void testSegmentSplitMerge() throws Exception {
    String scaleScopeName = "scaleScope";
    String scaleStreamName = "scaleStream";
    controllerWrapper.getControllerService().createScope(scaleScopeName, 0L).get();
    if (!controller.createStream(scaleScopeName, scaleStreamName, config).get()) {
        log.error("Stream {} for scale testing already existed, exiting", scaleScopeName + "/" + scaleStreamName);
        return;
    }
    Stream scaleStream = new StreamImpl(scaleScopeName, scaleStreamName);
    // split to 3 segments
    Map<Double, Double> keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.33);
    keyRanges.put(0.33, 0.66);
    keyRanges.put(0.66, 1.0);
    if (!controller.scaleStream(scaleStream, Collections.singletonList(0L), keyRanges, executor).getFuture().get()) {
        log.error("Scale stream: splitting segment into three failed, exiting");
        return;
    }
    assertEquals(3, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_COUNT, streamTags(scaleScopeName, scaleStreamName)).value());
    assertEquals(1, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_SPLITS, streamTags(scaleScopeName, scaleStreamName)).value());
    assertEquals(0, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_MERGES, streamTags(scaleScopeName, scaleStreamName)).value());
    // merge back to 2 segments
    keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.5);
    keyRanges.put(0.5, 1.0);
    if (!controller.scaleStream(scaleStream, Arrays.asList(1L, 2L, 3L), keyRanges, executor).getFuture().get()) {
        log.error("Scale stream: merging segments into two failed, exiting");
        return;
    }
    assertEquals(2, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_COUNT, streamTags(scaleScopeName, scaleStreamName)).value());
    assertEquals(1, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_SPLITS, streamTags(scaleScopeName, scaleStreamName)).value());
    assertEquals(1, (long) MetricRegistryUtils.getGauge(MetricsNames.SEGMENTS_MERGES, streamTags(scaleScopeName, scaleStreamName)).value());
}
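The scale requests above hard-code their key-range maps; the only structural requirement when rescaling the whole keyspace is that the new ranges tile [0.0, 1.0) exactly. A hypothetical helper that generates an even n-way split (the test itself uses rounded boundaries such as 0.33/0.66, which still tile because adjacent entries share the identical boundary value):

import java.util.HashMap;
import java.util.Map;

final class KeyRanges {
    // Splits the whole keyspace [0.0, 1.0) into n equal ranges; adjacent ranges
    // share exactly the same computed boundary value, so the map tiles the keyspace.
    static Map<Double, Double> evenSplit(int n) {
        Map<Double, Double> ranges = new HashMap<>();
        for (int i = 0; i < n; i++) {
            ranges.put((double) i / n, (double) (i + 1) / n);
        }
        return ranges;
    }
}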
