
Example 16 with ScalingPolicy

Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega.

From the class StreamMetadataStoreTest, method deleteTest:

@Test
public void deleteTest() throws Exception {
    final String scope = "ScopeDelete";
    final String stream = "StreamDelete";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    assertTrue(store.checkStreamExists(scope, stream).join());
    store.deleteStream(scope, stream, null, executor).get();
    assertFalse(store.checkStreamExists(scope, stream).join());
    DeleteScopeStatus status = store.deleteScope(scope).join();
    assertEquals(DeleteScopeStatus.Status.SUCCESS, status.getStatus());
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) DeleteScopeStatus(io.pravega.controller.stream.api.grpc.v1.Controller.DeleteScopeStatus) Test(org.junit.Test)
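
For comparison with the store-level calls above, here is a minimal client-side sketch of the same lifecycle (create a scope and a stream with a fixed ScalingPolicy, then seal and delete it), assuming the StreamManager API of the same client version used throughout these examples; the controller URI is a placeholder, and a stream must be sealed before deleteStream will succeed.

// Minimal client-side sketch of the same lifecycle, assuming the StreamManager API of this
// client version. The controller URI is a placeholder, not a value from the test.
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;
import java.net.URI;

public class StreamLifecycleSketch {
    public static void main(String[] args) {
        URI controllerUri = URI.create("tcp://localhost:9090"); // placeholder controller endpoint
        try (StreamManager streamManager = StreamManager.create(controllerUri)) {
            StreamConfiguration configuration = StreamConfiguration.builder()
                    .scope("ScopeDelete")
                    .streamName("StreamDelete")
                    .scalingPolicy(ScalingPolicy.fixed(2)) // two fixed segments, as in the test
                    .build();
            streamManager.createScope("ScopeDelete");
            streamManager.createStream("ScopeDelete", "StreamDelete", configuration);
            // A stream must be sealed before it can be deleted.
            streamManager.sealStream("ScopeDelete", "StreamDelete");
            streamManager.deleteStream("ScopeDelete", "StreamDelete");
            streamManager.deleteScope("ScopeDelete");
        }
    }
}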

Example 17 with ScalingPolicy

Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega.

From the class ZKStreamMetadataStoreTest, method testSplitsMerges:

@Test
public void testSplitsMerges() throws Exception {
    String scope = "testScopeScale";
    String stream = "testStreamScale";
    ScalingPolicy policy = ScalingPolicy.fixed(2);
    StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, System.currentTimeMillis(), null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // Case: Initial state, splits = 0, merges = 0
    // time t0, total segments 2, S0 {0.0 - 0.5} S1 {0.5 - 1.0}
    List<ScaleMetadata> scaleRecords = store.getScaleMetadata(scope, stream, null, executor).get();
    assertTrue(scaleRecords.size() == 1);
    SimpleEntry<Long, Long> simpleEntrySplitsMerges = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits ", new Long(0), simpleEntrySplitsMerges.getKey());
    assertEquals("Number of merges", new Long(0), simpleEntrySplitsMerges.getValue());
    // Case: Only splits, S0 split into S2, S3, S4 and S1 split into S5, S6
    // total splits = 2, total merges = 0
    // time t1, total segments 5, S2 {0.0, 0.2}, S3 {0.2, 0.4}, S4 {0.4, 0.5}, S5 {0.5, 0.7}, S6 {0.7, 1.0}
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 0.2);
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.2, 0.4);
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.4, 0.5);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.5, 0.7);
    SimpleEntry<Double, Double> segment6 = new SimpleEntry<>(0.7, 1.0);
    List<SimpleEntry<Double, Double>> newRanges1 = Arrays.asList(segment2, segment3, segment4, segment5, segment6);
    scale(scope, stream, scaleRecords.get(0).getSegments(), newRanges1);
    scaleRecords = store.getScaleMetadata(scope, stream, null, executor).get();
    assertTrue(scaleRecords.size() == 2);
    SimpleEntry<Long, Long> simpleEntrySplitsMerges1 = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits ", new Long(2), simpleEntrySplitsMerges1.getKey());
    assertEquals("Number of merges", new Long(0), simpleEntrySplitsMerges1.getValue());
    // Case: Splits and merges both, S2 and S3 merged to S7,  S4 and S5 merged to S8,  S6 split to S9 and S10
    // total splits = 3, total merges = 2
    // time t2, total segments 4, S7 {0.0, 0.4}, S8 {0.4, 0.7}, S9 {0.7, 0.8}, S10 {0.8, 1.0}
    SimpleEntry<Double, Double> segment7 = new SimpleEntry<>(0.0, 0.4);
    SimpleEntry<Double, Double> segment8 = new SimpleEntry<>(0.4, 0.7);
    SimpleEntry<Double, Double> segment9 = new SimpleEntry<>(0.7, 0.8);
    SimpleEntry<Double, Double> segment10 = new SimpleEntry<>(0.8, 1.0);
    List<SimpleEntry<Double, Double>> newRanges2 = Arrays.asList(segment7, segment8, segment9, segment10);
    scale(scope, stream, scaleRecords.get(0).getSegments(), newRanges2);
    scaleRecords = store.getScaleMetadata(scope, stream, null, executor).get();
    SimpleEntry<Long, Long> simpleEntrySplitsMerges2 = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits ", new Long(3), simpleEntrySplitsMerges2.getKey());
    assertEquals("Number of merges", new Long(2), simpleEntrySplitsMerges2.getValue());
    // Case: Only merges , S7 and S8 merged to S11,  S9 and S10 merged to S12
    // total splits = 3, total merges = 4
    // time t3, total segments 2, S11 {0.0, 0.7}, S12 {0.7, 1.0}
    SimpleEntry<Double, Double> segment11 = new SimpleEntry<>(0.0, 0.7);
    SimpleEntry<Double, Double> segment12 = new SimpleEntry<>(0.7, 1.0);
    List<SimpleEntry<Double, Double>> newRanges3 = Arrays.asList(segment11, segment12);
    scale(scope, stream, scaleRecords.get(0).getSegments(), newRanges3);
    SimpleEntry<Long, Long> simpleEntrySplitsMerges3 = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits ", new Long(3), simpleEntrySplitsMerges3.getKey());
    assertEquals("Number of merges", new Long(4), simpleEntrySplitsMerges3.getValue());
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) SimpleEntry(java.util.AbstractMap.SimpleEntry) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Test(org.junit.Test)
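
The assertions rely on how consecutive scale epochs are interpreted: a split is counted when one segment's key range is covered by several successors, and a merge when several segments feed a single successor. The routine below is an illustrative simplification of that counting over key ranges; it is not the store's actual findNumSplitsMerges implementation, and the class and method names are hypothetical.

// Hypothetical helper illustrating one way to count splits and merges between two
// consecutive scale epochs. Ranges are [low, high) pairs over the keyspace [0.0, 1.0).
// This is a simplified sketch, not Pravega's findNumSplitsMerges implementation.
import java.util.AbstractMap.SimpleEntry;
import java.util.Arrays;
import java.util.List;

public class SplitMergeSketch {

    static boolean overlaps(SimpleEntry<Double, Double> a, SimpleEntry<Double, Double> b) {
        return a.getKey() < b.getValue() && b.getKey() < a.getValue();
    }

    // A split: one old range is covered by more than one new range.
    static long countSplits(List<SimpleEntry<Double, Double>> oldRanges,
                            List<SimpleEntry<Double, Double>> newRanges) {
        return oldRanges.stream()
                .filter(o -> newRanges.stream().filter(n -> overlaps(o, n)).count() > 1)
                .count();
    }

    // A merge: one new range covers more than one old range.
    static long countMerges(List<SimpleEntry<Double, Double>> oldRanges,
                            List<SimpleEntry<Double, Double>> newRanges) {
        return countSplits(newRanges, oldRanges);
    }

    public static void main(String[] args) {
        List<SimpleEntry<Double, Double>> t0 = Arrays.asList(
                new SimpleEntry<>(0.0, 0.5), new SimpleEntry<>(0.5, 1.0));
        List<SimpleEntry<Double, Double>> t1 = Arrays.asList(
                new SimpleEntry<>(0.0, 0.2), new SimpleEntry<>(0.2, 0.4), new SimpleEntry<>(0.4, 0.5),
                new SimpleEntry<>(0.5, 0.7), new SimpleEntry<>(0.7, 1.0));
        // Matches the test's first scale operation: 2 splits, 0 merges.
        System.out.println(countSplits(t0, t1) + " splits, " + countMerges(t0, t1) + " merges");
    }
}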

Example 18 with ScalingPolicy

Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega.

From the class ModelHelperTest, method encodeStreamConfig:

@Test
public void encodeStreamConfig() {
    StreamConfiguration config = ModelHelper.encode(ModelHelper.decode(StreamConfiguration.builder()
            .scope("scope")
            .streamName("test")
            .scalingPolicy(ScalingPolicy.byEventRate(100, 2, 3))
            .retentionPolicy(RetentionPolicy.bySizeBytes(1000L))
            .build()));
    assertEquals("test", config.getStreamName());
    ScalingPolicy policy = config.getScalingPolicy();
    assertEquals(ScalingPolicy.ScaleType.BY_RATE_IN_EVENTS_PER_SEC, policy.getScaleType());
    assertEquals(100L, policy.getTargetRate());
    assertEquals(2, policy.getScaleFactor());
    assertEquals(3, policy.getMinNumSegments());
    RetentionPolicy retentionPolicy = config.getRetentionPolicy();
    assertEquals(RetentionPolicy.RetentionType.SIZE, retentionPolicy.getRetentionType());
    assertEquals(1000L, (long) retentionPolicy.getRetentionParam());
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Test(org.junit.Test)
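
For context on the values being round-tripped above, this sketch shows the ScalingPolicy factory methods (fixed, byEventRate, byDataRate) and a size-based RetentionPolicy in a client-side StreamConfiguration. It assumes the same client API version as these examples; the parameter interpretations in the comments are my reading and should be checked against the version in use.

// Sketch of the ScalingPolicy and RetentionPolicy factory methods used in client code,
// assuming the same client API version as these examples.
import io.pravega.client.stream.RetentionPolicy;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;

public class PolicySketch {
    public static void main(String[] args) {
        // Fixed number of segments; the stream never auto-scales.
        ScalingPolicy fixed = ScalingPolicy.fixed(2);
        // Auto-scale by event rate: target rate 100, scale factor 2, at least 3 segments
        // (the values used in the test above).
        ScalingPolicy byEvents = ScalingPolicy.byEventRate(100, 2, 3);
        // Auto-scale by incoming data rate (target rate, scale factor, minimum segments).
        ScalingPolicy byData = ScalingPolicy.byDataRate(1024, 2, 3);

        StreamConfiguration configuration = StreamConfiguration.builder()
                .scope("scope")
                .streamName("test")
                .scalingPolicy(byEvents)
                // Size-based retention, as in the test; RetentionPolicy.byTime(...) is the time-based alternative.
                .retentionPolicy(RetentionPolicy.bySizeBytes(1000L))
                .build();
        System.out.println(configuration.getScalingPolicy().getScaleType());
    }
}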

Example 19 with ScalingPolicy

Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega.

From the class MockController, method getSegmentsForStream:

@Synchronized
List<Segment> getSegmentsForStream(Stream stream) {
    StreamConfiguration config = createdStreams.get(stream);
    Preconditions.checkArgument(config != null, "Stream must be created first");
    ScalingPolicy scalingPolicy = config.getScalingPolicy();
    if (scalingPolicy.getScaleType() != ScalingPolicy.ScaleType.FIXED_NUM_SEGMENTS) {
        throw new IllegalArgumentException("Dynamic scaling not supported with a mock controller");
    }
    List<Segment> result = new ArrayList<>(scalingPolicy.getMinNumSegments());
    for (int i = 0; i < scalingPolicy.getMinNumSegments(); i++) {
        result.add(new Segment(config.getScope(), config.getStreamName(), i));
    }
    return result;
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) ArrayList(java.util.ArrayList) CreateSegment(io.pravega.shared.protocol.netty.WireCommands.CreateSegment) Segment(io.pravega.client.segment.impl.Segment) DeleteSegment(io.pravega.shared.protocol.netty.WireCommands.DeleteSegment) Synchronized(lombok.Synchronized)
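
Because the mock only supports fixed scaling, the segment set is fully determined by the policy's minimum segment count. The sketch below mirrors that enumeration and adds, purely for illustration, the even key-range split that a fixed policy implies; the key-range arithmetic is an assumption for exposition and is not taken from MockController.

// Illustrative sketch mirroring the mock's enumeration of segments for a fixed policy.
// The even key-range split printed below is an assumption for exposition only.
import io.pravega.client.segment.impl.Segment;
import io.pravega.client.stream.ScalingPolicy;
import java.util.ArrayList;
import java.util.List;

public class FixedSegmentsSketch {

    static List<Segment> fixedSegments(String scope, String streamName, ScalingPolicy policy) {
        if (policy.getScaleType() != ScalingPolicy.ScaleType.FIXED_NUM_SEGMENTS) {
            throw new IllegalArgumentException("Only fixed scaling is handled in this sketch");
        }
        List<Segment> segments = new ArrayList<>(policy.getMinNumSegments());
        for (int i = 0; i < policy.getMinNumSegments(); i++) {
            segments.add(new Segment(scope, streamName, i));
        }
        return segments;
    }

    public static void main(String[] args) {
        int numSegments = 4;
        List<Segment> segments = fixedSegments("scope", "stream", ScalingPolicy.fixed(numSegments));
        for (int i = 0; i < segments.size(); i++) {
            // With a fixed policy the keyspace [0.0, 1.0) is split evenly across the segments.
            double low = (double) i / numSegments;
            double high = (double) (i + 1) / numSegments;
            System.out.println(segments.get(i) + " covers [" + low + ", " + high + ")");
        }
    }
}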

Example 20 with ScalingPolicy

Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega.

From the class ReadWriteTest, method readWriteTest:

@Test(timeout = 60000)
public void readWriteTest() throws InterruptedException, ExecutionException {
    String scope = "testMultiReaderWriterScope";
    String readerGroupName = "testMultiReaderWriterReaderGroup";
    // 20 readers -> 20 stream segments (to have max read parallelism)
    ScalingPolicy scalingPolicy = ScalingPolicy.fixed(20);
    StreamConfiguration config = StreamConfiguration.builder().scope(scope).streamName(STREAM_NAME).scalingPolicy(scalingPolicy).build();
    eventsReadFromPravega = new ConcurrentLinkedQueue<>();
    // data used by each of the writers.
    eventData = new AtomicLong();
    // used by readers to maintain a count of events.
    eventReadCount = new AtomicLong();
    stopReadFlag = new AtomicBoolean(false);
    try (StreamManager streamManager = new StreamManagerImpl(controller)) {
        // create a scope
        Boolean createScopeStatus = streamManager.createScope(scope);
        log.info("Create scope status {}", createScopeStatus);
        // create a stream
        Boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
        log.info("Create stream status {}", createStreamStatus);
    }
    try (ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
        ClientFactory clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
        ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory, connectionFactory)) {
        // start writing events to the stream
        log.info("Creating {} writers", NUM_WRITERS);
        List<CompletableFuture<Void>> writerList = new ArrayList<>();
        for (int i = 0; i < NUM_WRITERS; i++) {
            log.info("Starting writer{}", i);
            writerList.add(startNewWriter(eventData, clientFactory));
        }
        // create a reader group
        log.info("Creating Reader group : {}", readerGroupName);
        readerGroupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).build());
        log.info("Reader group name {} ", readerGroupManager.getReaderGroup(readerGroupName).getGroupName());
        log.info("Reader group scope {}", readerGroupManager.getReaderGroup(readerGroupName).getScope());
        // create readers
        log.info("Creating {} readers", NUM_READERS);
        List<CompletableFuture<Void>> readerList = new ArrayList<>();
        String readerName = "reader" + new Random().nextInt(Integer.MAX_VALUE);
        // start reading events
        for (int i = 0; i < NUM_READERS; i++) {
            log.info("Starting reader{}", i);
            readerList.add(startNewReader(readerName + i, clientFactory, readerGroupName, eventsReadFromPravega, eventData, eventReadCount, stopReadFlag));
        }
        // wait for writers completion
        Futures.allOf(writerList).get();
        // set stop read flag to true
        stopReadFlag.set(true);
        // wait for readers completion
        Futures.allOf(readerList).get();
        // delete readergroup
        log.info("Deleting readergroup {}", readerGroupName);
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count:{}, Event Read " + "Count: {}", eventData.get(), eventsReadFromPravega.size());
    assertEquals(TOTAL_NUM_EVENTS, eventsReadFromPravega.size());
    // check unique events.
    assertEquals(TOTAL_NUM_EVENTS, new TreeSet<>(eventsReadFromPravega).size());
    // seal the stream
    CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
    log.info("Sealing stream {}", STREAM_NAME);
    assertTrue(sealStreamStatus.get());
    // delete the stream
    CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
    log.info("Deleting stream {}", STREAM_NAME);
    assertTrue(deleteStreamStatus.get());
    // delete the  scope
    CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
    log.info("Deleting scope {}", scope);
    assertTrue(deleteScopeStatus.get());
    log.info("Read write test succeeds");
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) ClientFactory(io.pravega.client.ClientFactory) ArrayList(java.util.ArrayList) StreamManagerImpl(io.pravega.client.admin.impl.StreamManagerImpl) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicLong(java.util.concurrent.atomic.AtomicLong) ConnectionFactory(io.pravega.client.netty.impl.ConnectionFactory) ClientFactoryImpl(io.pravega.client.stream.impl.ClientFactoryImpl) CompletableFuture(java.util.concurrent.CompletableFuture) Random(java.util.Random) StreamManager(io.pravega.client.admin.StreamManager) TreeSet(java.util.TreeSet) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ReaderGroupManagerImpl(io.pravega.client.admin.impl.ReaderGroupManagerImpl) ConnectionFactoryImpl(io.pravega.client.netty.impl.ConnectionFactoryImpl) Test(org.junit.Test)
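
The test delegates to startNewWriter and startNewReader, which are not shown in this excerpt. The following is a hedged sketch of what such helpers commonly look like with this client API; the stream name, the NUM_EVENTS_PER_WRITER constant, the serializer choice, and the stop condition are assumptions, not taken from ReadWriteTest.

// Hedged sketch of writer/reader helpers similar to those the test delegates to.
// STREAM_NAME, NUM_EVENTS_PER_WRITER, the serializer, and the stop condition are assumptions.
import io.pravega.client.ClientFactory;
import io.pravega.client.stream.EventStreamReader;
import io.pravega.client.stream.EventStreamWriter;
import io.pravega.client.stream.EventWriterConfig;
import io.pravega.client.stream.ReaderConfig;
import io.pravega.client.stream.ReinitializationRequiredException;
import io.pravega.client.stream.impl.JavaSerializer;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

public class ReadWriteHelpersSketch {

    static final String STREAM_NAME = "testStream"; // placeholder, not the test's constant
    static final int NUM_EVENTS_PER_WRITER = 100;   // assumed constant

    // Writes a fixed number of monotonically increasing Long events.
    static CompletableFuture<Void> startNewWriter(AtomicLong data, ClientFactory clientFactory) {
        return CompletableFuture.runAsync(() -> {
            try (EventStreamWriter<Long> writer = clientFactory.createEventWriter(
                    STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().build())) {
                for (int i = 0; i < NUM_EVENTS_PER_WRITER; i++) {
                    long value = data.incrementAndGet();
                    writer.writeEvent(String.valueOf(value), value); // routing key + payload
                }
                writer.flush();
            }
        });
    }

    // Reads events until the stop flag is set and everything written has been read.
    static CompletableFuture<Void> startNewReader(String readerId, ClientFactory clientFactory,
            String readerGroup, Queue<Long> readResult, AtomicLong writeCount,
            AtomicLong readCount, AtomicBoolean stopFlag) {
        return CompletableFuture.runAsync(() -> {
            try (EventStreamReader<Long> reader = clientFactory.createReader(
                    readerId, readerGroup, new JavaSerializer<>(), ReaderConfig.builder().build())) {
                while (!(stopFlag.get() && readCount.get() >= writeCount.get())) {
                    try {
                        Long event = reader.readNextEvent(1000).getEvent(); // 1-second read timeout
                        if (event != null) {
                            readResult.add(event);
                            readCount.incrementAndGet();
                        }
                    } catch (ReinitializationRequiredException e) {
                        // The reader group was reset; a real helper would re-acquire its position here.
                        break;
                    }
                }
            }
        });
    }
}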

Aggregations

ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 47
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 44
Test (org.junit.Test): 42
Before (org.junit.Before): 16
TestingServerStarter (io.pravega.test.common.TestingServerStarter): 15
Executors (java.util.concurrent.Executors): 15
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 15
After (org.junit.After): 15
Assert.assertEquals (org.junit.Assert.assertEquals): 15
TestingServer (org.apache.curator.test.TestingServer): 14
Assert.assertTrue (org.junit.Assert.assertTrue): 14
ArrayList (java.util.ArrayList): 13
Collections (java.util.Collections): 13
HashMap (java.util.HashMap): 13
List (java.util.List): 13
RetentionPolicy (io.pravega.client.stream.RetentionPolicy): 12
Map (java.util.Map): 12
UUID (java.util.UUID): 12
AbstractMap (java.util.AbstractMap): 11
ExecutionException (java.util.concurrent.ExecutionException): 11