Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
The class SegmentSelectorTest, method testSameRoutingKey.
@Test
public void testSameRoutingKey() {
    Controller controller = Mockito.mock(Controller.class);
    SegmentOutputStreamFactory factory = Mockito.mock(SegmentOutputStreamFactory.class);
    SegmentSelector selector = new SegmentSelector(new StreamImpl(scope, streamName), controller, factory, config,
            DelegationTokenProviderFactory.createWithEmptyToken());
    TreeMap<Double, SegmentWithRange> segments = new TreeMap<>();
    addNewSegment(segments, 0, 0.0, 0.25);
    addNewSegment(segments, 1, 0.25, 0.5);
    addNewSegment(segments, 2, 0.5, 0.75);
    addNewSegment(segments, 3, 0.75, 1.0);
    StreamSegments streamSegments = new StreamSegments(segments);
    when(controller.getCurrentSegments(scope, streamName)).thenReturn(CompletableFuture.completedFuture(streamSegments));
    selector.refreshSegmentEventWriters(segmentSealedCallback);
    int[] counts = new int[4];
    Arrays.fill(counts, 0);
    for (int i = 0; i < 20; i++) {
        Segment segment = selector.getSegmentForEvent("Foo");
        assertNotNull(segment);
        counts[NameUtils.getSegmentNumber(segment.getSegmentId())]++;
    }
    assertArrayEquals(new int[] { 20, 0, 0, 0 }, counts);
}
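The addNewSegment helper sits outside this excerpt. A minimal sketch of a plausible implementation, assuming StreamSegments wants the map keyed by the upper bound of each segment's key-space range (the exact shape is an assumption, not the project's confirmed code):

    // Hypothetical sketch: register a segment covering [low, high), keyed by its upper bound.
    private void addNewSegment(TreeMap<Double, SegmentWithRange> segments, long number, double low, double high) {
        segments.put(high, new SegmentWithRange(new Segment(scope, streamName, number), low, high));
    }

Since a fixed routing key always hashes to the same point of the [0, 1) key space, every call to getSegmentForEvent("Foo") resolves to the segment owning that point; the assertion pins all 20 events to segment 0.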
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
The class EventStreamReaderTest, method testTimeWindow.
@Test
public void testTimeWindow() throws SegmentSealedException {
    String scope = "scope";
    String streamName = "stream";
    Stream stream = Stream.of(scope, streamName);
    String groupName = "readerGroup";
    String readerGroupStream = NameUtils.getStreamForReaderGroup(groupName);
    String markStream = NameUtils.getMarkStreamForStream(streamName);
    // Create factories
    MockSegmentStreamFactory segmentStreamFactory = new MockSegmentStreamFactory();
    @Cleanup MockClientFactory clientFactory = new MockClientFactory(scope, segmentStreamFactory);
    MockController controller = (MockController) clientFactory.getController();
    @Cleanup InlineExecutor executor = new InlineExecutor();
    // Create streams
    controller.createScope(scope).join();
    controller.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build());
    controller.createStream(scope, readerGroupStream, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build());
    // Reader group state synchronizer
    ReaderGroupConfig config = ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(stream).build();
    StateSynchronizer<ReaderGroupState> sync = clientFactory.createStateSynchronizer(readerGroupStream,
            new ReaderGroupStateUpdatesSerializer(), new ReaderGroupStateInitSerializer(), SynchronizerConfig.builder().build());
    // Watermark reader/writer
    @Cleanup RevisionedStreamClient<Watermark> markWriter = clientFactory.createRevisionedStreamClient(markStream,
            new WatermarkSerializer(), SynchronizerConfig.builder().build());
    @Cleanup WatermarkReaderImpl markReader = new WatermarkReaderImpl(stream, markWriter, executor);
    // Initialize reader group state
    Map<SegmentWithRange, Long> segments = ReaderGroupImpl.getSegmentsForStreams(controller, config);
    sync.initialize(new ReaderGroupState.ReaderGroupStateInit(config, segments, getEndSegmentsForStreams(config), false));
    // Data segment writers
    Segment segment1 = new Segment(scope, streamName, 0);
    Segment segment2 = new Segment(scope, streamName, 1);
    @Cleanup SegmentOutputStream stream1 = segmentStreamFactory.createOutputStreamForSegment(segment1,
            segmentSealedCallback, writerConfig, DelegationTokenProviderFactory.createWithEmptyToken());
    @Cleanup SegmentOutputStream stream2 = segmentStreamFactory.createOutputStreamForSegment(segment2,
            segmentSealedCallback, writerConfig, DelegationTokenProviderFactory.createWithEmptyToken());
    // Write stream data
    writeInt(stream1, 1);
    writeInt(stream2, 2);
    writeInt(stream2, 2);
    writeInt(stream2, 2);
    // Write mark data
    val r1 = new SegmentWithRange(segment1, 0, 0.5).convert();
    val r2 = new SegmentWithRange(segment2, 0.5, 1).convert();
    markWriter.writeUnconditionally(new Watermark(0L, 99L, ImmutableMap.of(r1, 0L, r2, 0L)));
    markWriter.writeUnconditionally(new Watermark(100L, 199L, ImmutableMap.of(r1, 12L, r2, 0L)));
    markWriter.writeUnconditionally(new Watermark(200L, 299L, ImmutableMap.of(r1, 12L, r2, 12L)));
    markWriter.writeUnconditionally(new Watermark(300L, 399L, ImmutableMap.of(r1, 12L, r2, 24L)));
    markWriter.writeUnconditionally(new Watermark(400L, 499L, ImmutableMap.of(r1, 12L, r2, 36L)));
    // Create reader
    AtomicLong clock = new AtomicLong();
    ReaderGroupStateManager groupState = new ReaderGroupStateManager(scope, groupName, "reader1", sync, controller, clock::get);
    groupState.initializeReader(0);
    @Cleanup EventStreamReaderImpl<byte[]> reader = new EventStreamReaderImpl<>(segmentStreamFactory, segmentStreamFactory,
            new ByteArraySerializer(), groupState, new Orderer(), clock::get, ReaderConfig.builder().build(),
            ImmutableMap.of(stream, markReader), Mockito.mock(Controller.class));
    clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
    EventRead<byte[]> event = reader.readNextEvent(100);
    assertEquals(2, readInt(event));
    TimeWindow timeWindow = reader.getCurrentTimeWindow(Stream.of(scope, streamName));
    assertEquals(0, timeWindow.getLowerTimeBound().longValue());
    assertEquals(199, timeWindow.getUpperTimeBound().longValue());
    clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
    event = reader.readNextEvent(100);
    assertEquals(1, readInt(event));
    timeWindow = reader.getCurrentTimeWindow(Stream.of(scope, streamName));
    assertEquals(0, timeWindow.getLowerTimeBound().longValue());
    assertEquals(299, timeWindow.getUpperTimeBound().longValue());
    clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
    event = reader.readNextEvent(100);
    assertEquals(2, readInt(event));
    timeWindow = reader.getCurrentTimeWindow(Stream.of(scope, streamName));
    assertEquals(200, timeWindow.getLowerTimeBound().longValue());
    assertEquals(399, timeWindow.getUpperTimeBound().longValue());
    clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
    event = reader.readNextEvent(100);
    assertEquals(2, readInt(event));
    timeWindow = reader.getCurrentTimeWindow(Stream.of(scope, streamName));
    assertEquals(300, timeWindow.getLowerTimeBound().longValue());
    assertEquals(499, timeWindow.getUpperTimeBound().longValue());
    clock.addAndGet(ReaderGroupStateManager.UPDATE_WINDOW.toNanos());
    event = reader.readNextEvent(100);
    assertEquals(null, event.getEvent());
    timeWindow = reader.getCurrentTimeWindow(Stream.of(scope, streamName));
    assertEquals(400, timeWindow.getLowerTimeBound().longValue());
    assertEquals(null, timeWindow.getUpperTimeBound());
}
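The writeInt and readInt helpers are likewise defined elsewhere in the test class. A plausible sketch, assuming each event payload is a single 4-byte integer and that the mock segment stream accepts PendingEvent.withHeader (both assumptions):

    // Hypothetical sketches: write/read one big-endian int per event.
    private void writeInt(SegmentOutputStream stream, int value) throws SegmentSealedException {
        ByteBuffer buffer = ByteBuffer.allocate(4).putInt(0, value);
        stream.write(PendingEvent.withHeader(null, buffer, new CompletableFuture<>()));
    }

    private int readInt(EventRead<byte[]> eventRead) {
        byte[] event = eventRead.getEvent();
        assertNotNull(event);
        return ByteBuffer.wrap(event).getInt();
    }

Each assertion pair then tracks how the reader's time window advances: the bounds come from the watermarks straddling the reader's current positions, and once the reader has consumed past the last watermark the upper bound is reported as null (unknown).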
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
The class MetadataScalabilityTest, method scale.
List<List<Segment>> scale(ControllerImpl controller) {
    int numSegments = getStreamConfig().getScalingPolicy().getMinNumSegments();
    int scalesToPerform = getScalesToPerform();
    // manually scale the stream SCALES_TO_PERFORM times
    Stream stream = new StreamImpl(SCOPE, getStreamName());
    AtomicInteger counter = new AtomicInteger(0);
    List<List<Segment>> listOfEpochs = new LinkedList<>();
    CompletableFuture<Void> scaleFuture = Futures.loop(
            () -> counter.incrementAndGet() <= scalesToPerform,
            () -> controller.getCurrentSegments(SCOPE, streamName).thenCompose(segments -> {
                ArrayList<Segment> sorted = Lists.newArrayList(segments.getSegments().stream()
                        .sorted(Comparator.comparingInt(x -> NameUtils.getSegmentNumber(x.getSegmentId()) % numSegments))
                        .collect(Collectors.toList()));
                listOfEpochs.add(sorted);
                // note: with SCALES_TO_PERFORM < numSegments, we can use the segment number as the index
                // into the range map
                Pair<List<Long>, Map<Double, Double>> scaleInput = getScaleInput(sorted);
                List<Long> segmentsToSeal = scaleInput.getKey();
                Map<Double, Double> newRanges = scaleInput.getValue();
                return controller.scaleStream(stream, segmentsToSeal, newRanges, executorService).getFuture()
                        .thenAccept(scaleStatus -> {
                            log.info("scale stream for epoch {} completed with status {}", counter.get(), scaleStatus);
                            assert scaleStatus;
                        });
            }), executorService);
    scaleFuture.join();
    return listOfEpochs;
}
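getScaleInput is supplied by the concrete scalability subclasses and is not part of this excerpt. One plausible shape, sealing a single segment per epoch and re-creating the same key-space slice so the segment count stays constant (Pair/ImmutablePair here are org.apache.commons.lang3.tuple types; the index arithmetic assumes numSegments equal-width ranges, matching the note in the loop above):

    // Hypothetical sketch: seal the first segment of the sorted epoch and re-create
    // its key-space range [idx/n, (idx+1)/n) as one new segment.
    Pair<List<Long>, Map<Double, Double>> getScaleInput(ArrayList<Segment> sorted) {
        int numSegments = getStreamConfig().getScalingPolicy().getMinNumSegments();
        Segment toSeal = sorted.get(0);
        int idx = NameUtils.getSegmentNumber(toSeal.getSegmentId()) % numSegments;
        double low = (double) idx / numSegments;
        double high = (double) (idx + 1) / numSegments;
        return ImmutablePair.of(Collections.singletonList(toSeal.getSegmentId()),
                Collections.singletonMap(low, high));
    }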
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
The class MetadataScalabilityTest, method setup.
/**
 * Invoke the createStream method and ensure we are able to create a stream.
 *
 * @throws InterruptedException if interrupted
 * @throws ExecutionException   if there is an error creating the stream
 */
@Before
public void setup() throws InterruptedException, ExecutionException {
    // create a scope
    Controller controller = getController();
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(5, "Scalability-main");
    Boolean createScopeStatus = controller.createScope(SCOPE).get();
    log.debug("create scope status {}", createScopeStatus);
    // create a stream
    Boolean createStreamStatus = controller.createStream(SCOPE, getStreamName(), getStreamConfig()).get();
    log.debug("create stream status for scale up stream {}", createStreamStatus);
}
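The pool created in setup needs a matching shutdown; a minimal teardown sketch, assuming Pravega's ExecutorServiceHelpers utility:

    @After
    public void tearDown() {
        // release the threads created in setup
        ExecutorServiceHelpers.shutdown(executorService);
    }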
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
The class AutoScaleTest, method scaleUpTxnTest.
/**
 * Invoke the scale-up test with transactional writes. Produce traffic from multiple writers in parallel, each
 * writing through transactions. The test periodically checks whether a scale event has occurred by querying the
 * controller via the controller client.
 */
private CompletableFuture<Void> scaleUpTxnTest() {
    ControllerImpl controller = getController();
    final AtomicBoolean exit = new AtomicBoolean(false);
    ClientFactoryImpl clientFactory = getClientFactory();
    startWritingIntoTxn(clientFactory.createTransactionalEventWriter("writer", SCALE_UP_TXN_STREAM_NAME,
            new JavaSerializer<>(), EventWriterConfig.builder().build()), exit);
    // overall wait for the test to complete in roughly 260 seconds (about 4.3 minutes) or for scale-up,
    // whichever happens first.
    return Retry.withExpBackoff(10, 10, 30, Duration.ofSeconds(10).toMillis())
            .retryingOn(ScaleOperationNotDoneException.class)
            .throwingOn(RuntimeException.class)
            .runAsync(() -> controller.getCurrentSegments(SCOPE, SCALE_UP_TXN_STREAM_NAME).thenAccept(x -> {
                if (x.getSegments().size() == 1) {
                    throw new ScaleOperationNotDoneException();
                } else {
                    log.info("txn test scale up done successfully");
                    exit.set(true);
                }
            }), scaleExecutorService);
}
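startWritingIntoTxn is defined elsewhere in AutoScaleTest. A plausible sketch of its shape, assuming String events and one commit per batch (the batch size and routing key are invented for illustration):

    // Hypothetical sketch: keep opening, filling, and committing transactions until the
    // scale check above flips the exit flag.
    private CompletableFuture<Void> startWritingIntoTxn(TransactionalEventStreamWriter<String> writer, AtomicBoolean exit) {
        return CompletableFuture.runAsync(() -> {
            while (!exit.get()) {
                try {
                    Transaction<String> txn = writer.beginTxn();
                    for (int i = 0; i < 100; i++) {
                        txn.writeEvent("0", "data");
                    }
                    txn.commit();
                } catch (TxnFailedException e) {
                    log.warn("transaction failed, retrying with a new one", e);
                }
            }
        });
    }

The steady transactional load is what drives the stream's auto-scaling policy to split the single initial segment; the retry loop above simply polls getCurrentSegments until it sees more than one.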