Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega.
The class StreamMetadataStoreTest, method deleteTest:
@Test
public void deleteTest() throws Exception {
    final String scope = "ScopeDelete";
    final String stream = "StreamDelete";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder()
            .scope(scope)
            .streamName(stream)
            .scalingPolicy(policy)
            .build();
    long start = System.currentTimeMillis();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    assertTrue(store.checkStreamExists(scope, stream).join());
    store.deleteStream(scope, stream, null, executor).get();
    assertFalse(store.checkStreamExists(scope, stream).join());
    DeleteScopeStatus status = store.deleteScope(scope).join();
    // JUnit convention: expected value first, actual second.
    assertEquals(DeleteScopeStatus.Status.SUCCESS, status.getStatus());
}
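For context, ScalingPolicy.fixed(2) above is one of three factory methods on io.pravega.client.stream.ScalingPolicy. A minimal sketch of the variants; the numeric arguments are arbitrary illustrations, not recommended values:

// Fixed number of segments: the stream never auto-scales.
ScalingPolicy fixed = ScalingPolicy.fixed(2);

// Auto-scale by event rate: target events/sec per segment, scale factor, minimum segment count.
ScalingPolicy byEvents = ScalingPolicy.byEventRate(100, 2, 3);

// Auto-scale by data rate: target KB/sec per segment, scale factor, minimum segment count.
ScalingPolicy byData = ScalingPolicy.byDataRate(1024, 2, 3);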
Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega.
The class ZKStreamMetadataStoreTest, method testSplitsMerges:
@Test
public void testSplitsMerges() throws Exception {
    String scope = "testScopeScale";
    String stream = "testStreamScale";
    ScalingPolicy policy = ScalingPolicy.fixed(2);
    StreamConfiguration configuration = StreamConfiguration.builder()
            .scope(scope)
            .streamName(stream)
            .scalingPolicy(policy)
            .build();
    store.createScope(scope).get();
    store.createStream(scope, stream, configuration, System.currentTimeMillis(), null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();

    // Case: initial state, splits = 0, merges = 0.
    // Time t0, total segments 2: S0 {0.0, 0.5}, S1 {0.5, 1.0}.
    List<ScaleMetadata> scaleRecords = store.getScaleMetadata(scope, stream, null, executor).get();
    assertEquals(1, scaleRecords.size());
    SimpleEntry<Long, Long> simpleEntrySplitsMerges = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits", Long.valueOf(0), simpleEntrySplitsMerges.getKey());
    assertEquals("Number of merges", Long.valueOf(0), simpleEntrySplitsMerges.getValue());

    // Case: only splits. S0 splits into S2, S3, S4 and S1 splits into S5, S6:
    // total splits = 2, total merges = 0.
    // Time t1, total segments 5: S2 {0.0, 0.2}, S3 {0.2, 0.4}, S4 {0.4, 0.5}, S5 {0.5, 0.7}, S6 {0.7, 1.0}.
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 0.2);
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.2, 0.4);
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.4, 0.5);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.5, 0.7);
    SimpleEntry<Double, Double> segment6 = new SimpleEntry<>(0.7, 1.0);
    List<SimpleEntry<Double, Double>> newRanges1 = Arrays.asList(segment2, segment3, segment4, segment5, segment6);
    scale(scope, stream, scaleRecords.get(0).getSegments(), newRanges1);
    scaleRecords = store.getScaleMetadata(scope, stream, null, executor).get();
    assertEquals(2, scaleRecords.size());
    SimpleEntry<Long, Long> simpleEntrySplitsMerges1 = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits", Long.valueOf(2), simpleEntrySplitsMerges1.getKey());
    assertEquals("Number of merges", Long.valueOf(0), simpleEntrySplitsMerges1.getValue());

    // Case: splits and merges. S2 and S3 merge into S7, S4 and S5 merge into S8, S6 splits into S9 and S10:
    // total splits = 3, total merges = 2.
    // Time t2, total segments 4: S7 {0.0, 0.4}, S8 {0.4, 0.7}, S9 {0.7, 0.8}, S10 {0.8, 1.0}.
    SimpleEntry<Double, Double> segment7 = new SimpleEntry<>(0.0, 0.4);
    SimpleEntry<Double, Double> segment8 = new SimpleEntry<>(0.4, 0.7);
    SimpleEntry<Double, Double> segment9 = new SimpleEntry<>(0.7, 0.8);
    SimpleEntry<Double, Double> segment10 = new SimpleEntry<>(0.8, 1.0);
    List<SimpleEntry<Double, Double>> newRanges2 = Arrays.asList(segment7, segment8, segment9, segment10);
    scale(scope, stream, scaleRecords.get(0).getSegments(), newRanges2);
    scaleRecords = store.getScaleMetadata(scope, stream, null, executor).get();
    SimpleEntry<Long, Long> simpleEntrySplitsMerges2 = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits", Long.valueOf(3), simpleEntrySplitsMerges2.getKey());
    assertEquals("Number of merges", Long.valueOf(2), simpleEntrySplitsMerges2.getValue());

    // Case: only merges. S7 and S8 merge into S11, S9 and S10 merge into S12:
    // total splits = 3, total merges = 4.
    // Time t3, total segments 2: S11 {0.0, 0.7}, S12 {0.7, 1.0}.
    SimpleEntry<Double, Double> segment11 = new SimpleEntry<>(0.0, 0.7);
    SimpleEntry<Double, Double> segment12 = new SimpleEntry<>(0.7, 1.0);
    List<SimpleEntry<Double, Double>> newRanges3 = Arrays.asList(segment11, segment12);
    scale(scope, stream, scaleRecords.get(0).getSegments(), newRanges3);
    SimpleEntry<Long, Long> simpleEntrySplitsMerges3 = store.findNumSplitsMerges(scope, stream, executor).get();
    assertEquals("Number of splits", Long.valueOf(3), simpleEntrySplitsMerges3.getKey());
    assertEquals("Number of merges", Long.valueOf(4), simpleEntrySplitsMerges3.getValue());
}
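The counting rule this test exercises can be stated directly: a scale event records a split when one sealed segment's key-space range overlaps more than one new segment, and a merge when one new segment's range overlaps more than one sealed segment. Below is a minimal, self-contained sketch of that rule over (low, high) range pairs; the SplitMergeCounter class and countSplitsMerges helper are hypothetical illustrations, not Pravega's actual findNumSplitsMerges implementation:

import java.util.AbstractMap.SimpleEntry;
import java.util.Arrays;
import java.util.List;

public final class SplitMergeCounter {

    /** Returns (splits, merges) for one scale event, given sealed and newly created key ranges. */
    static SimpleEntry<Long, Long> countSplitsMerges(List<SimpleEntry<Double, Double>> sealed,
                                                     List<SimpleEntry<Double, Double>> created) {
        long splits = 0;
        long merges = 0;
        for (SimpleEntry<Double, Double> s : sealed) {
            // A sealed segment whose range maps onto more than one new segment was split.
            if (countOverlaps(s, created) > 1) {
                splits++;
            }
        }
        for (SimpleEntry<Double, Double> c : created) {
            // A new segment whose range covers more than one sealed segment is a merge.
            if (countOverlaps(c, sealed) > 1) {
                merges++;
            }
        }
        return new SimpleEntry<>(splits, merges);
    }

    private static long countOverlaps(SimpleEntry<Double, Double> range,
                                      List<SimpleEntry<Double, Double>> others) {
        // Two half-open ranges overlap when each starts before the other ends.
        return others.stream()
                     .filter(o -> o.getValue() > range.getKey() && o.getKey() < range.getValue())
                     .count();
    }

    public static void main(String[] args) {
        // The t0 -> t1 transition from the test: S0 {0.0, 0.5}, S1 {0.5, 1.0} become five segments.
        List<SimpleEntry<Double, Double>> sealed = Arrays.asList(
                new SimpleEntry<>(0.0, 0.5), new SimpleEntry<>(0.5, 1.0));
        List<SimpleEntry<Double, Double>> created = Arrays.asList(
                new SimpleEntry<>(0.0, 0.2), new SimpleEntry<>(0.2, 0.4), new SimpleEntry<>(0.4, 0.5),
                new SimpleEntry<>(0.5, 0.7), new SimpleEntry<>(0.7, 1.0));
        System.out.println(countSplitsMerges(sealed, created)); // prints "2=0": 2 splits, 0 merges
    }
}

Applying the same rule to the t1 -> t2 transition yields 1 split and 2 merges for that event, which matches the cumulative totals (3, 2) the test asserts.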
Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega.
The class ModelHelperTest, method encodeStreamConfig:
@Test
public void encodeStreamConfig() {
    StreamConfiguration config = ModelHelper.encode(ModelHelper.decode(StreamConfiguration.builder()
            .scope("scope")
            .streamName("test")
            .scalingPolicy(ScalingPolicy.byEventRate(100, 2, 3))
            .retentionPolicy(RetentionPolicy.bySizeBytes(1000L))
            .build()));
    assertEquals("test", config.getStreamName());
    ScalingPolicy policy = config.getScalingPolicy();
    assertEquals(ScalingPolicy.ScaleType.BY_RATE_IN_EVENTS_PER_SEC, policy.getScaleType());
    assertEquals(100L, policy.getTargetRate());
    assertEquals(2, policy.getScaleFactor());
    assertEquals(3, policy.getMinNumSegments());
    RetentionPolicy retentionPolicy = config.getRetentionPolicy();
    assertEquals(RetentionPolicy.RetentionType.SIZE, retentionPolicy.getRetentionType());
    assertEquals(1000L, (long) retentionPolicy.getRetentionParam());
}
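The test round-trips a client-side StreamConfiguration through the Controller's wire representation (decode to the gRPC model, encode back) and asserts that the scaling and retention settings survive intact. For reference, RetentionPolicy offers two factory methods; a short sketch, where the size and Duration values are arbitrary illustrations (java.time.Duration assumed imported):

// Retain by size: truncate once the stream exceeds the given number of bytes.
RetentionPolicy bySize = RetentionPolicy.bySizeBytes(1000L);

// Retain by time: truncate data older than the given duration.
RetentionPolicy byTime = RetentionPolicy.byTime(Duration.ofDays(7));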
Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega.
The class MockController, method getSegmentsForStream:
@Synchronized
List<Segment> getSegmentsForStream(Stream stream) {
    StreamConfiguration config = createdStreams.get(stream);
    Preconditions.checkArgument(config != null, "Stream must be created first");
    ScalingPolicy scalingPolicy = config.getScalingPolicy();
    if (scalingPolicy.getScaleType() != ScalingPolicy.ScaleType.FIXED_NUM_SEGMENTS) {
        throw new IllegalArgumentException("Dynamic scaling not supported with a mock controller");
    }
    List<Segment> result = new ArrayList<>(scalingPolicy.getMinNumSegments());
    for (int i = 0; i < scalingPolicy.getMinNumSegments(); i++) {
        result.add(new Segment(config.getScope(), config.getStreamName(), i));
    }
    return result;
}
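Because the policy is FIXED_NUM_SEGMENTS, getMinNumSegments() is also the exact segment count, so the mock can enumerate segments 0..n-1 directly without consulting a real controller. A small sketch of what one enumerated segment looks like; the scope and stream names are illustrative, and the getScopedName() output format shown is an assumption about the Segment class:

// For a fixed policy of 3, the mock would enumerate segments 0, 1, and 2 of the stream.
ScalingPolicy policy = ScalingPolicy.fixed(3);
Segment segment = new Segment("myScope", "myStream", 0);
// getScopedName() should yield the fully qualified name, e.g. "myScope/myStream/0".
System.out.println(segment.getScopedName());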
Use of io.pravega.client.stream.ScalingPolicy in project pravega by pravega.
The class ReadWriteTest, method readWriteTest:
@Test(timeout = 60000)
public void readWriteTest() throws InterruptedException, ExecutionException {
    String scope = "testMultiReaderWriterScope";
    String readerGroupName = "testMultiReaderWriterReaderGroup";
    // 20 readers -> 20 stream segments (to have max read parallelism).
    ScalingPolicy scalingPolicy = ScalingPolicy.fixed(20);
    StreamConfiguration config = StreamConfiguration.builder()
            .scope(scope)
            .streamName(STREAM_NAME)
            .scalingPolicy(scalingPolicy)
            .build();
    eventsReadFromPravega = new ConcurrentLinkedQueue<>();
    // Data used by each of the writers.
    eventData = new AtomicLong();
    // Used by readers to maintain a count of events.
    eventReadCount = new AtomicLong();
    stopReadFlag = new AtomicBoolean(false);
    try (StreamManager streamManager = new StreamManagerImpl(controller)) {
        // Create a scope.
        Boolean createScopeStatus = streamManager.createScope(scope);
        log.info("Create scope status {}", createScopeStatus);
        // Create a stream.
        Boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
        log.info("Create stream status {}", createStreamStatus);
    }
    try (ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
         ClientFactory clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
         ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory, connectionFactory)) {
        // Start writing events to the stream.
        log.info("Creating {} writers", NUM_WRITERS);
        List<CompletableFuture<Void>> writerList = new ArrayList<>();
        for (int i = 0; i < NUM_WRITERS; i++) {
            log.info("Starting writer{}", i);
            writerList.add(startNewWriter(eventData, clientFactory));
        }
        // Create a reader group.
        log.info("Creating Reader group: {}", readerGroupName);
        readerGroupManager.createReaderGroup(readerGroupName,
                ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).build());
        log.info("Reader group name {}", readerGroupManager.getReaderGroup(readerGroupName).getGroupName());
        log.info("Reader group scope {}", readerGroupManager.getReaderGroup(readerGroupName).getScope());
        // Create readers.
        log.info("Creating {} readers", NUM_READERS);
        List<CompletableFuture<Void>> readerList = new ArrayList<>();
        String readerName = "reader" + new Random().nextInt(Integer.MAX_VALUE);
        // Start reading events.
        for (int i = 0; i < NUM_READERS; i++) {
            log.info("Starting reader{}", i);
            readerList.add(startNewReader(readerName + i, clientFactory, readerGroupName,
                    eventsReadFromPravega, eventData, eventReadCount, stopReadFlag));
        }
        // Wait for the writers to complete.
        Futures.allOf(writerList).get();
        // Set the stop-read flag so the readers drain and exit.
        stopReadFlag.set(true);
        // Wait for the readers to complete.
        Futures.allOf(readerList).get();
        // Delete the reader group.
        log.info("Deleting readergroup {}", readerGroupName);
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count:{}, Event Read Count: {}",
            eventData.get(), eventsReadFromPravega.size());
    assertEquals(TOTAL_NUM_EVENTS, eventsReadFromPravega.size());
    // Check that all events read are unique.
    assertEquals(TOTAL_NUM_EVENTS, new TreeSet<>(eventsReadFromPravega).size());
    // Seal the stream.
    CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
    log.info("Sealing stream {}", STREAM_NAME);
    assertTrue(sealStreamStatus.get());
    // Delete the stream.
    CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
    log.info("Deleting stream {}", STREAM_NAME);
    assertTrue(deleteStreamStatus.get());
    // Delete the scope.
    CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
    log.info("Deleting scope {}", scope);
    assertTrue(deleteScopeStatus.get());
    log.info("Read write test succeeds");
}
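The test delegates to startNewWriter and startNewReader helpers that this listing does not show. A hedged sketch of what such helpers commonly look like against the ClientFactory API of this Pravega generation; NUM_EVENTS_PER_WRITER and executorService are assumed fields of the test class, and the real helpers' bodies may differ:

private CompletableFuture<Void> startNewWriter(final AtomicLong data, final ClientFactory clientFactory) {
    return CompletableFuture.runAsync(() -> {
        try (EventStreamWriter<Long> writer = clientFactory.createEventWriter(STREAM_NAME,
                new JavaSerializer<Long>(), EventWriterConfig.builder().build())) {
            for (int i = 0; i < NUM_EVENTS_PER_WRITER; i++) { // NUM_EVENTS_PER_WRITER is assumed
                long value = data.incrementAndGet();
                // Route by the event value itself; block until the write is acknowledged.
                writer.writeEvent(String.valueOf(value), value).join();
            }
            writer.flush();
        }
    }, executorService);
}

private CompletableFuture<Void> startNewReader(final String readerId, final ClientFactory clientFactory,
        final String readerGroupName, final ConcurrentLinkedQueue<Long> readResult, final AtomicLong writeCount,
        final AtomicLong readCount, final AtomicBoolean exitFlag) {
    return CompletableFuture.runAsync(() -> {
        try (EventStreamReader<Long> reader = clientFactory.createReader(readerId, readerGroupName,
                new JavaSerializer<Long>(), ReaderConfig.builder().build())) {
            // Keep reading until the stop flag is set and every written event has been read.
            while (!(exitFlag.get() && readCount.get() == writeCount.get())) {
                Long event = reader.readNextEvent(1000).getEvent(); // 1s poll timeout
                if (event != null) {
                    readResult.add(event);
                    readCount.incrementAndGet();
                }
            }
        }
    }, executorService);
}

The exit condition explains why the test sets stopReadFlag only after Futures.allOf(writerList) completes: a reader may not stop until the flag is raised and the read count has caught up with the write count, which guarantees no event is left behind.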