
Example 21 with StreamConfiguration

Use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.

From the class StreamMetadataStoreTest, method sizeTest:

@Test
public void sizeTest() throws Exception {
    final String scope = "ScopeSize";
    final String stream = "StreamSize";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.SIZE).retentionParam(100L).build();
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    store.addUpdateStreamForAutoStreamCut(scope, stream, retentionPolicy, null, executor).get();
    List<String> streams = store.getStreamsForBucket(0, executor).get();
    assertTrue(streams.contains(String.format("%s/%s", scope, stream)));
    // region Size Computation on stream cuts on epoch 0
    Map<Integer, Long> map1 = new HashMap<>();
    map1.put(0, 10L);
    map1.put(1, 10L);
    Long size = store.getSizeTillStreamCut(scope, stream, map1, null, executor).join();
    assertTrue(size == 20L);
    long recordingTime = System.currentTimeMillis();
    StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime, size, map1);
    store.addStreamCutToRetentionSet(scope, stream, streamCut1, null, executor).get();
    Map<Integer, Long> map2 = new HashMap<>();
    map2.put(0, 20L);
    map2.put(1, 20L);
    size = store.getSizeTillStreamCut(scope, stream, map2, null, executor).join();
    assertTrue(size == 40L);
    StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime + 10, size, map2);
    store.addStreamCutToRetentionSet(scope, stream, streamCut2, null, executor).get();
    Map<Integer, Long> map3 = new HashMap<>();
    map3.put(0, 30L);
    map3.put(1, 30L);
    size = store.getSizeTillStreamCut(scope, stream, map3, null, executor).join();
    assertTrue(size == 60L);
    StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime + 20, 60L, map3);
    store.addStreamCutToRetentionSet(scope, stream, streamCut3, null, executor).get();
    // endregion
    // region Size Computation on multiple epochs
    long scaleTs = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 0.5);
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.5, 1.0);
    List<Integer> scale1SealedSegments = Lists.newArrayList(0, 1);
    StartScaleResponse response = store.startScale(scope, stream, scale1SealedSegments, Arrays.asList(segment2, segment3), scaleTs, false, null, executor).join();
    final List<Segment> scale1SegmentsCreated = response.getSegmentsCreated();
    store.setState(scope, stream, State.SCALING, null, executor).get();
    store.scaleNewSegmentsCreated(scope, stream, scale1SealedSegments, scale1SegmentsCreated, response.getActiveEpoch(), scaleTs, null, executor).join();
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 40L)), scale1SegmentsCreated, response.getActiveEpoch(), scaleTs, null, executor).join();
    // complex stream cut - across two epochs
    Map<Integer, Long> map4 = new HashMap<>();
    map4.put(0, 40L);
    map4.put(3, 10L);
    size = store.getSizeTillStreamCut(scope, stream, map4, null, executor).join();
    assertTrue(size == 90L);
    StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime + 30, size, map4);
    store.addStreamCutToRetentionSet(scope, stream, streamCut4, null, executor).get();
    // simple stream cut on epoch 2
    Map<Integer, Long> map5 = new HashMap<>();
    map5.put(2, 10L);
    map5.put(3, 10L);
    size = store.getSizeTillStreamCut(scope, stream, map5, null, executor).join();
    assertTrue(size == 100L);
    StreamCutRecord streamCut5 = new StreamCutRecord(recordingTime + 30, size, map5);
    store.addStreamCutToRetentionSet(scope, stream, streamCut5, null, executor).get();
// endregion
}
Also used : ScalingPolicy (io.pravega.client.stream.ScalingPolicy) RetentionPolicy (io.pravega.client.stream.RetentionPolicy) StreamConfiguration (io.pravega.client.stream.StreamConfiguration) HashMap (java.util.HashMap) SimpleEntry (java.util.AbstractMap.SimpleEntry) Test (org.junit.Test)
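The size assertions in sizeTest follow a simple rule: the size up to a stream cut is the sum of the cut's per-segment offsets plus the full sealed sizes of any predecessor segments that lie entirely behind the cut. The helper below is a hypothetical, self-contained sketch of that arithmetic (the class, method, and parameter names are illustrative and not the store's internal implementation); it reproduces the 90-byte result asserted for map4.

// Hypothetical sketch only: illustrates the arithmetic behind getSizeTillStreamCut,
// not the store's actual implementation.
import java.util.HashMap;
import java.util.Map;

public class StreamCutSizeSketch {

    // Size up to a cut = offsets at the cut + full sizes of sealed segments behind the cut.
    static long sizeTillStreamCut(Map<Integer, Long> cutOffsets, Map<Integer, Long> sealedSegmentsBehindCut) {
        long size = 0L;
        for (long offset : cutOffsets.values()) {
            size += offset;
        }
        for (long sealedSize : sealedSegmentsBehindCut.values()) {
            size += sealedSize;
        }
        return size;
    }

    public static void main(String[] args) {
        // Mirrors map4 above: segment 0 at offset 40, segment 3 at offset 10.
        Map<Integer, Long> cut = new HashMap<>();
        cut.put(0, 40L);
        cut.put(3, 10L);
        // Segment 1 was sealed at 40 bytes during the scale and sits entirely behind the cut.
        Map<Integer, Long> sealedBehindCut = new HashMap<>();
        sealedBehindCut.put(1, 40L);
        // 40 + 10 + 40 = 90, matching assertTrue(size == 90L) in the test.
        System.out.println(sizeTillStreamCut(cut, sealedBehindCut));
    }
}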

Example 22 with StreamConfiguration

Use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.

From the class StreamMetadataStoreTest, method scaleTest:

@Test
public void scaleTest() throws Exception {
    final String scope = "ScopeScale";
    final String stream = "StreamScale";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // region idempotent
    long scaleTs = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.75, 1.0);
    List<Integer> scale1SealedSegments = Collections.singletonList(1);
    // test run only if started
    AssertExtensions.assertThrows("", () -> store.startScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, true, null, executor).join(), e -> Exceptions.unwrap(e) instanceof TaskExceptions.StartException);
    // 1. start scale
    StartScaleResponse response = store.startScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, false, null, executor).join();
    final List<Segment> scale1SegmentsCreated = response.getSegmentsCreated();
    final int scale1ActiveEpoch = response.getActiveEpoch();
    // rerun start scale
    response = store.startScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, false, null, executor).join();
    assertEquals(response.getSegmentsCreated(), scale1SegmentsCreated);
    store.setState(scope, stream, State.SCALING, null, executor).get();
    // 2. scale new segments created
    store.scaleNewSegmentsCreated(scope, stream, scale1SealedSegments, scale1SegmentsCreated, response.getActiveEpoch(), scaleTs, null, executor).join();
    // rerun start scale and new segments created
    response = store.startScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, false, null, executor).join();
    assertEquals(response.getSegmentsCreated(), scale1SegmentsCreated);
    store.scaleNewSegmentsCreated(scope, stream, scale1SealedSegments, scale1SegmentsCreated, response.getActiveEpoch(), scaleTs, null, executor).join();
    // 3. scale segments sealed -- this will complete scale
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), scale1SegmentsCreated, response.getActiveEpoch(), scaleTs, null, executor).join();
    // rerun -- illegal state exception
    AssertExtensions.assertThrows("", () -> store.scaleNewSegmentsCreated(scope, stream, scale1SealedSegments, scale1SegmentsCreated, scale1ActiveEpoch, scaleTs, null, executor).join(), e -> Exceptions.unwrap(e) instanceof StoreException.IllegalStateException);
    // rerun  -- illegal state exception
    AssertExtensions.assertThrows("", () -> store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), scale1SegmentsCreated, scale1ActiveEpoch, scaleTs, null, executor).join(), e -> Exceptions.unwrap(e) instanceof StoreException.IllegalStateException);
    // rerun start scale -- should fail with precondition failure
    AssertExtensions.assertThrows("", () -> store.startScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, false, null, executor).join(), e -> Exceptions.unwrap(e) instanceof ScaleOperationExceptions.ScalePreConditionFailureException);
    // endregion
    // 2 different conflicting scale operations
    // region run concurrent conflicting scale
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.0, 0.5);
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.75, 1.0);
    List<Integer> scale2SealedSegments = Arrays.asList(0, 2, 3);
    long scaleTs2 = System.currentTimeMillis();
    response = store.startScale(scope, stream, scale2SealedSegments, Arrays.asList(segment3, segment4, segment5), scaleTs2, false, null, executor).get();
    final List<Segment> scale2SegmentsCreated = response.getSegmentsCreated();
    final int scale2ActiveEpoch = response.getActiveEpoch();
    store.setState(scope, stream, State.SCALING, null, executor).get();
    // rerun of scale 1 -- should fail with precondition failure
    AssertExtensions.assertThrows("", () -> store.startScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2), scaleTs, false, null, executor).join(), e -> Exceptions.unwrap(e) instanceof ScaleOperationExceptions.ScaleConflictException);
    // rerun of scale 1's new segments created method
    AssertExtensions.assertThrows("", () -> store.scaleNewSegmentsCreated(scope, stream, scale1SealedSegments, scale1SegmentsCreated, scale1ActiveEpoch, scaleTs, null, executor).join(), e -> Exceptions.unwrap(e) instanceof ScaleOperationExceptions.ScaleConditionInvalidException);
    store.scaleNewSegmentsCreated(scope, stream, scale2SealedSegments, scale2SegmentsCreated, scale2ActiveEpoch, scaleTs2, null, executor).get();
    // rerun of scale 1's new segments created method
    AssertExtensions.assertThrows("", () -> store.scaleNewSegmentsCreated(scope, stream, scale1SealedSegments, scale1SegmentsCreated, scale1ActiveEpoch, scaleTs, null, executor).join(), e -> Exceptions.unwrap(e) instanceof ScaleOperationExceptions.ScaleConditionInvalidException);
    store.scaleSegmentsSealed(scope, stream, scale2SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), scale2SegmentsCreated, scale2ActiveEpoch, scaleTs2, null, executor).get();
    store.setState(scope, stream, State.SCALING, null, executor).get();
    // rerun of scale 1's new segments created method
    AssertExtensions.assertThrows("", () -> store.scaleNewSegmentsCreated(scope, stream, scale1SealedSegments, scale1SegmentsCreated, scale1ActiveEpoch, scaleTs, null, executor).join(), e -> Exceptions.unwrap(e) instanceof ScaleOperationExceptions.ScaleConditionInvalidException);
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
// endregion
}
Also used : ScalingPolicy (io.pravega.client.stream.ScalingPolicy) StreamConfiguration (io.pravega.client.stream.StreamConfiguration) TaskExceptions (io.pravega.controller.server.eventProcessor.requesthandlers.TaskExceptions) SimpleEntry (java.util.AbstractMap.SimpleEntry) Test (org.junit.Test)
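Stripped of the idempotency and conflict checks, the happy path in scaleTest is a three-step protocol against the metadata store: startScale, scaleNewSegmentsCreated, and scaleSegmentsSealed, with the stream moved into State.SCALING in between. The sketch below condenses those steps into a single helper. It assumes the same StreamMetadataStore API exercised in the test and that the controller-store types resolve from io.pravega.controller.store.stream; the helper name and parameters are illustrative.

// Illustrative condensation of the scale happy path exercised in scaleTest above.
// Assumes the controller-store types live in io.pravega.controller.store.stream;
// adjust imports to your Pravega version if they differ.
import io.pravega.controller.store.stream.Segment;
import io.pravega.controller.store.stream.StartScaleResponse;
import io.pravega.controller.store.stream.StreamMetadataStore;
import io.pravega.controller.store.stream.tables.State;
import java.util.AbstractMap.SimpleEntry;
import java.util.List;
import java.util.concurrent.ScheduledExecutorService;
import java.util.stream.Collectors;

public final class ScaleWorkflowSketch {

    // Runs the three metadata-store steps of a scale operation in order.
    static void runScale(StreamMetadataStore store, ScheduledExecutorService executor,
                         String scope, String stream,
                         List<Integer> sealedSegments,
                         List<SimpleEntry<Double, Double>> newRanges,
                         long scaleTs) {
        // 1. Record the intent to scale and create the new segment records.
        StartScaleResponse response =
                store.startScale(scope, stream, sealedSegments, newRanges, scaleTs, false, null, executor).join();
        List<Segment> created = response.getSegmentsCreated();

        // The test moves the stream into SCALING before completing the remaining steps.
        store.setState(scope, stream, State.SCALING, null, executor).join();

        // 2. Acknowledge that the new segments exist.
        store.scaleNewSegmentsCreated(scope, stream, sealedSegments, created,
                response.getActiveEpoch(), scaleTs, null, executor).join();

        // 3. Report the sealed sizes of the old segments; this completes the scale.
        store.scaleSegmentsSealed(scope, stream,
                sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)),
                created, response.getActiveEpoch(), scaleTs, null, executor).join();
    }

    private ScaleWorkflowSketch() {
    }
}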

Example 23 with StreamConfiguration

Use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.

From the class StreamMetadataStoreTest, method deleteTest:

@Test
public void deleteTest() throws Exception {
    final String scope = "ScopeDelete";
    final String stream = "StreamDelete";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    assertTrue(store.checkStreamExists(scope, stream).join());
    store.deleteStream(scope, stream, null, executor).get();
    assertFalse(store.checkStreamExists(scope, stream).join());
    DeleteScopeStatus status = store.deleteScope(scope).join();
    assertEquals(status.getStatus(), DeleteScopeStatus.Status.SUCCESS);
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) DeleteScopeStatus(io.pravega.controller.stream.api.grpc.v1.Controller.DeleteScopeStatus) Test(org.junit.Test)

Example 24 with StreamConfiguration

Use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.

From the class ZKStreamMetadataStoreTest, method testSplitsMerges:

@Test
public void testSplitsMerges() throws Exception {
    String scope = "testScopeScale";
    String stream = "testStreamScale";
    ScalingPolicy policy = ScalingPolicy.fixed(2);
    StreamConfiguration configuration = StreamConfiguration.builder().scope(scope).streamName(stream).scalingPolicy(policy).build();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, System.currentTimeMillis(), null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // Case: Initial state, splits = 0, merges = 0
    // time t0, total segments 2, S0 {0.0 - 0.5} S1 {0.5 - 1.0}
    List<ScaleMetadata> scaleRecords = store.getScaleMetadata(scope, stream, null, executor).get();
    assertTrue(scaleRecords.size() == 1);
    SimpleEntry<Long, Long> simpleEntrySplitsMerges = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits ", new Long(0), simpleEntrySplitsMerges.getKey());
    assertEquals("Number of merges", new Long(0), simpleEntrySplitsMerges.getValue());
    // Case: Only splits, S0 split into S2, S3, S4 and S1 split into S5, S6,
    // total splits = 2, total merges = 0
    // time t1, total segments 5, S2 {0.0, 0.2}, S3 {0.2, 0.4}, S4 {0.4, 0.5}, S5 {0.5, 0.7}, S6 {0.7, 1.0}
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 0.2);
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.2, 0.4);
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.4, 0.5);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.5, 0.7);
    SimpleEntry<Double, Double> segment6 = new SimpleEntry<>(0.7, 1.0);
    List<SimpleEntry<Double, Double>> newRanges1 = Arrays.asList(segment2, segment3, segment4, segment5, segment6);
    scale(scope, stream, scaleRecords.get(0).getSegments(), newRanges1);
    scaleRecords = store.getScaleMetadata(scope, stream, null, executor).get();
    assertTrue(scaleRecords.size() == 2);
    SimpleEntry<Long, Long> simpleEntrySplitsMerges1 = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits ", new Long(2), simpleEntrySplitsMerges1.getKey());
    assertEquals("Number of merges", new Long(0), simpleEntrySplitsMerges1.getValue());
    // Case: Splits and merges both, S2 and S3 merged to S7,  S4 and S5 merged to S8,  S6 split to S9 and S10
    // total splits = 3, total merges = 2
    // time t2, total segments 4, S7 {0.0, 0.4}, S8 {0.4, 0.7}, S9 {0.7, 0.8}, S10 {0.8, 1.0}
    SimpleEntry<Double, Double> segment7 = new SimpleEntry<>(0.0, 0.4);
    SimpleEntry<Double, Double> segment8 = new SimpleEntry<>(0.4, 0.7);
    SimpleEntry<Double, Double> segment9 = new SimpleEntry<>(0.7, 0.8);
    SimpleEntry<Double, Double> segment10 = new SimpleEntry<>(0.8, 1.0);
    List<SimpleEntry<Double, Double>> newRanges2 = Arrays.asList(segment7, segment8, segment9, segment10);
    scale(scope, stream, scaleRecords.get(0).getSegments(), newRanges2);
    scaleRecords = store.getScaleMetadata(scope, stream, null, executor).get();
    SimpleEntry<Long, Long> simpleEntrySplitsMerges2 = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits ", new Long(3), simpleEntrySplitsMerges2.getKey());
    assertEquals("Number of merges", new Long(2), simpleEntrySplitsMerges2.getValue());
    // Case: Only merges , S7 and S8 merged to S11,  S9 and S10 merged to S12
    // total splits = 3, total merges = 4
    // time t3, total segments 2, S11 {0.0, 0.7}, S12 {0.7, 1.0}
    SimpleEntry<Double, Double> segment11 = new SimpleEntry<>(0.0, 0.7);
    SimpleEntry<Double, Double> segment12 = new SimpleEntry<>(0.7, 1.0);
    List<SimpleEntry<Double, Double>> newRanges3 = Arrays.asList(segment11, segment12);
    scale(scope, stream, scaleRecords.get(0).getSegments(), newRanges3);
    SimpleEntry<Long, Long> simpleEntrySplitsMerges3 = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits ", new Long(3), simpleEntrySplitsMerges3.getKey());
    assertEquals("Number of merges", new Long(4), simpleEntrySplitsMerges3.getValue());
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) SimpleEntry(java.util.AbstractMap.SimpleEntry) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Test(org.junit.Test)
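The expected counts in testSplitsMerges follow from how the key-space ranges overlap between consecutive epochs: an old segment whose range is covered by two or more new segments counts as one split, and a new segment that covers two or more old segments counts as one merge. The code below is a hypothetical, self-contained illustration of that counting over the ranges used in the test (it is not the store's findNumSplitsMerges implementation); it reproduces the per-scale deltas and hence the running totals asserted above.

// Hypothetical sketch of split/merge counting over key ranges; not Pravega's implementation.
import java.util.AbstractMap.SimpleEntry;
import java.util.Arrays;
import java.util.List;

public class SplitsMergesSketch {

    static boolean overlaps(SimpleEntry<Double, Double> a, SimpleEntry<Double, Double> b) {
        // Half-open ranges: adjacent segments such as [0.0, 0.5) and [0.5, 1.0) do not overlap.
        return a.getKey() < b.getValue() && b.getKey() < a.getValue();
    }

    // An old segment counts as one split when two or more new segments cover its range.
    static long countSplits(List<SimpleEntry<Double, Double>> oldRanges, List<SimpleEntry<Double, Double>> newRanges) {
        return oldRanges.stream()
                .filter(o -> newRanges.stream().filter(n -> overlaps(o, n)).count() > 1)
                .count();
    }

    // A merge is the same relation viewed from the new segment's side.
    static long countMerges(List<SimpleEntry<Double, Double>> oldRanges, List<SimpleEntry<Double, Double>> newRanges) {
        return countSplits(newRanges, oldRanges);
    }

    public static void main(String[] args) {
        List<SimpleEntry<Double, Double>> epoch0 = Arrays.asList(r(0.0, 0.5), r(0.5, 1.0));
        List<SimpleEntry<Double, Double>> epoch1 = Arrays.asList(r(0.0, 0.2), r(0.2, 0.4), r(0.4, 0.5), r(0.5, 0.7), r(0.7, 1.0));
        List<SimpleEntry<Double, Double>> epoch2 = Arrays.asList(r(0.0, 0.4), r(0.4, 0.7), r(0.7, 0.8), r(0.8, 1.0));
        List<SimpleEntry<Double, Double>> epoch3 = Arrays.asList(r(0.0, 0.7), r(0.7, 1.0));

        // Scale 1: 2 splits, 0 merges -> running totals (2, 0)
        System.out.println(countSplits(epoch0, epoch1) + " " + countMerges(epoch0, epoch1));
        // Scale 2: 1 split, 2 merges -> running totals (3, 2)
        System.out.println(countSplits(epoch1, epoch2) + " " + countMerges(epoch1, epoch2));
        // Scale 3: 0 splits, 2 merges -> running totals (3, 4)
        System.out.println(countSplits(epoch2, epoch3) + " " + countMerges(epoch2, epoch3));
    }

    private static SimpleEntry<Double, Double> r(double low, double high) {
        return new SimpleEntry<>(low, high);
    }
}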

Example 25 with StreamConfiguration

Use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.

From the class ModelHelperTest, method encodeStreamConfig:

@Test
public void encodeStreamConfig() {
    StreamConfiguration config = ModelHelper.encode(ModelHelper.decode(StreamConfiguration.builder().scope("scope").streamName("test").scalingPolicy(ScalingPolicy.byEventRate(100, 2, 3)).retentionPolicy(RetentionPolicy.bySizeBytes(1000L)).build()));
    assertEquals("test", config.getStreamName());
    ScalingPolicy policy = config.getScalingPolicy();
    assertEquals(ScalingPolicy.ScaleType.BY_RATE_IN_EVENTS_PER_SEC, policy.getScaleType());
    assertEquals(100L, policy.getTargetRate());
    assertEquals(2, policy.getScaleFactor());
    assertEquals(3, policy.getMinNumSegments());
    RetentionPolicy retentionPolicy = config.getRetentionPolicy();
    assertEquals(RetentionPolicy.RetentionType.SIZE, retentionPolicy.getRetentionType());
    assertEquals(1000L, (long) retentionPolicy.getRetentionParam());
}
Also used : ScalingPolicy(io.pravega.client.stream.ScalingPolicy) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) Test(org.junit.Test)
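For reference, the policies round-tripped in encodeStreamConfig come from plain client-side factory methods: ScalingPolicy.byEventRate(targetRate, scaleFactor, minNumSegments) scales by event throughput, and RetentionPolicy.bySizeBytes(bytes) retains data by size. The snippet below is a small usage sketch of that builder API without the ModelHelper round trip; the class name is illustrative.

// Usage sketch of the client-side builders exercised in encodeStreamConfig above.
import io.pravega.client.stream.RetentionPolicy;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;

public class StreamConfigBuilderSketch {

    public static void main(String[] args) {
        StreamConfiguration config = StreamConfiguration.builder()
                .scope("scope")
                .streamName("test")
                // Target rate of 100 events/sec, scale factor 2, minimum of 3 segments.
                .scalingPolicy(ScalingPolicy.byEventRate(100, 2, 3))
                // Retain data by size: roughly the last 1000 bytes of the stream.
                .retentionPolicy(RetentionPolicy.bySizeBytes(1000L))
                .build();

        System.out.println(config.getScalingPolicy().getScaleType());        // BY_RATE_IN_EVENTS_PER_SEC
        System.out.println(config.getRetentionPolicy().getRetentionParam()); // 1000
    }
}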

Aggregations

StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 80
Test (org.junit.Test): 67
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 44
HashMap (java.util.HashMap): 21
ArrayList (java.util.ArrayList): 20
Controller (io.pravega.client.stream.impl.Controller): 19
TestingServerStarter (io.pravega.test.common.TestingServerStarter): 17
List (java.util.List): 17
Cleanup (lombok.Cleanup): 17
ConnectionFactoryImpl (io.pravega.client.netty.impl.ConnectionFactoryImpl): 16
CompletableFuture (java.util.concurrent.CompletableFuture): 16
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 16
TestingServer (org.apache.curator.test.TestingServer): 16
Before (org.junit.Before): 16
ClientFactory (io.pravega.client.ClientFactory): 15
ClientFactoryImpl (io.pravega.client.stream.impl.ClientFactoryImpl): 15
Executors (java.util.concurrent.Executors): 15
After (org.junit.After): 15
Assert.assertEquals (org.junit.Assert.assertEquals): 15
Map (java.util.Map): 14