Usage example of io.pravega.shared.controller.event.ScaleOpEvent from the pravega project.
Taken from the class ScaleRequestHandlerTest, method testScaleRange.
/**
 * Verifies that an auto-scale UP event with a single split on a segment whose key
 * range is taken from issue #2543 yields a ScaleOpEvent whose two new ranges exactly
 * tile the sealed segment's range, with no floating-point gap at the split point.
 */
@Test(timeout = 30000)
public void testScaleRange() throws ExecutionException, InterruptedException {
    // key range values taken from issue #2543
    StreamSegmentRecord segment = new StreamSegmentRecord(2, 1, 100L, 0.1706574888245243, 0.7085170563088633);
    doReturn(CompletableFuture.completedFuture(segment)).when(streamStore).getSegment(any(), any(), anyLong(), any(), any());
    AutoScaleTask requestHandler = new AutoScaleTask(streamMetadataTasks, streamStore, executor);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler multiplexer = new StreamRequestHandler(requestHandler, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
    // Send number of splits = 1
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    AutoScaleEvent scaleUpEvent = new AutoScaleEvent(scope, stream, NameUtils.computeSegmentId(2, 1), AutoScaleEvent.UP, System.currentTimeMillis(), 1, false, System.currentTimeMillis());
    assertTrue(Futures.await(multiplexer.process(scaleUpEvent, () -> false)));
    reset(streamStore);
    // verify that one scaleOp event is written into the stream
    assertEquals(1, writer.queue.size());
    ControllerEvent event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    ScaleOpEvent scaleOpEvent = (ScaleOpEvent) event;
    assertEquals(2, scaleOpEvent.getNewRanges().size());
    assertEquals(0.1706574888245243, scaleOpEvent.getNewRanges().get(0).getKey(), 0.0);
    assertEquals(0.7085170563088633, scaleOpEvent.getNewRanges().get(1).getValue(), 0.0);
    // Use assertEquals with an exact (0.0) delta instead of boxed-Double '==' inside
    // assertTrue: a mismatch now reports the two boundary values in the failure message
    // rather than a bare AssertionError.
    assertEquals(scaleOpEvent.getNewRanges().get(0).getValue().doubleValue(),
            scaleOpEvent.getNewRanges().get(1).getKey().doubleValue(), 0.0);
}
Usage example of io.pravega.shared.controller.event.ScaleOpEvent from the pravega project.
Taken from the class ScaleRequestHandlerTest, method testScaleRequestWithMinimumSegment.
/**
 * Verifies that auto-scale-down requests respect the stream's configured minimum
 * segment count. With min = 4, two pairwise merges are proposed: the first merge
 * (segments 0,1) is allowed (5 -> 4 segments), but the second merge (segments 3,4)
 * must be rejected because it would drop the stream below the minimum.
 */
@Test(timeout = 30000)
public void testScaleRequestWithMinimumSegment() throws ExecutionException, InterruptedException {
    AutoScaleTask requestHandler = new AutoScaleTask(streamMetadataTasks, streamStore, executor);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler multiplexer = new StreamRequestHandler(requestHandler, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    String stream = "mystream";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 5)).build();
    streamMetadataTasks.createStream(scope, stream, config, System.currentTimeMillis(), 0L).get();
    // change stream configuration to min segment count = 4
    config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 4)).build();
    streamStore.startUpdateConfiguration(scope, stream, config, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = streamStore.getConfigurationRecord(scope, stream, null, executor).join();
    streamStore.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
    // process first auto scale down event. it should only mark the segment as cold
    multiplexer.process(new AutoScaleEvent(scope, stream, 1L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(writer.queue.isEmpty());
    assertTrue(streamStore.isCold(scope, stream, 1L, null, executor).join());
    // process second auto scale down event. since its not for an immediate neighbour so it should only mark the segment as cold
    multiplexer.process(new AutoScaleEvent(scope, stream, 3L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(streamStore.isCold(scope, stream, 3L, null, executor).join());
    // no scale event should be posted
    assertTrue(writer.queue.isEmpty());
    // process third auto scale down event. This should result in a scale op event being posted to merge segments 0, 1
    multiplexer.process(new AutoScaleEvent(scope, stream, 0L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(streamStore.isCold(scope, stream, 0L, null, executor).join());
    // verify that a new event has been posted
    assertEquals(1, writer.queue.size());
    ControllerEvent event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    ScaleOpEvent scaleDownEvent1 = (ScaleOpEvent) event;
    assertEquals(1, scaleDownEvent1.getNewRanges().size());
    assertEquals(2, scaleDownEvent1.getSegmentsToSeal().size());
    assertTrue(scaleDownEvent1.getSegmentsToSeal().contains(0L));
    assertTrue(scaleDownEvent1.getSegmentsToSeal().contains(1L));
    // process fourth auto scale down event. This should result in a scale op event being posted to merge segments 3, 4
    multiplexer.process(new AutoScaleEvent(scope, stream, 4L, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis()), () -> false).join();
    assertTrue(streamStore.isCold(scope, stream, 4L, null, executor).join());
    // verify that a new event has been posted
    assertEquals(1, writer.queue.size());
    event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    ScaleOpEvent scaleDownEvent2 = (ScaleOpEvent) event;
    assertEquals(1, scaleDownEvent2.getNewRanges().size());
    assertEquals(2, scaleDownEvent2.getSegmentsToSeal().size());
    assertTrue(scaleDownEvent2.getSegmentsToSeal().contains(3L));
    assertTrue(scaleDownEvent2.getSegmentsToSeal().contains(4L));
    // process first scale down event, this should submit scale and scale the stream down to 4 segments
    multiplexer.process(scaleDownEvent1, () -> false).join();
    EpochRecord activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    List<StreamSegmentRecord> segments = activeEpoch.getSegments();
    assertEquals(1, activeEpoch.getEpoch());
    assertEquals(4, segments.size());
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 2));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 3));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 4));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 5));
    // process second scale down event. it would drop the stream below min segment count, so no scale should happen
    multiplexer.process(scaleDownEvent2, () -> false).join();
    activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    // BUGFIX: refresh the segment list from the re-fetched epoch. Previously the stale
    // list captured after the first scale was re-asserted, so these checks could never
    // detect an unexpected second scale.
    segments = activeEpoch.getSegments();
    // verify that no scale has happened: still epoch 1 with the same four segments
    assertEquals(1, activeEpoch.getEpoch());
    assertEquals(4, segments.size());
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 2));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 3));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 4));
    assertTrue(segments.stream().anyMatch(x -> x.getSegmentNumber() == 5));
}
Usage example of io.pravega.shared.controller.event.ScaleOpEvent from the pravega project.
Taken from the class ScaleRequestHandlerTest, method concurrentDistinctScaleRun.
// concurrent run of scale 1 intermixed with scale 2
/**
 * Drives two ScaleOperationTask instances against the same stream to simulate a
 * concurrent scale: the first task (backed by a spied store) is paused inside
 * {@code funcToWaitOn} via a latch, while the second task runs two scales to
 * completion. When the first task is released it must fail with an exception
 * matching {@code firstExceptionPredicate}.
 *
 * @param stream                  stream to create and scale
 * @param funcToWaitOn            store method name at which the spied store blocks (see setMockLatch)
 * @param isManual                if true, the scale is pre-submitted as a manual scale request
 * @param firstExceptionPredicate expected failure mode of the stalled first scale
 * @param invocationCount         expected number of invocations per spied store method
 */
private void concurrentDistinctScaleRun(String stream, String funcToWaitOn, boolean isManual, Predicate<Throwable> firstExceptionPredicate, Map<String, Integer> invocationCount) throws Exception {
StreamMetadataStore streamStore1 = getStore();
// spied copy used only by the first handler so individual store calls can be latched and counted
StreamMetadataStore streamStore1Spied = spy(getStore());
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
streamStore1.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
streamStore1.setState(scope, stream, State.ACTIVE, null, executor).join();
// 'signal' completes when the spied store reaches funcToWaitOn; 'wait' holds it there until released
CompletableFuture<Void> wait = new CompletableFuture<>();
CompletableFuture<Void> signal = new CompletableFuture<>();
ScaleOpEvent event = new ScaleOpEvent(scope, stream, Lists.newArrayList(0L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), isManual, System.currentTimeMillis(), System.currentTimeMillis());
if (isManual) {
streamStore1.submitScale(scope, stream, Lists.newArrayList(0L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), System.currentTimeMillis(), null, null, executor).join();
}
StreamMetadataStore streamStore2 = getStore();
ScaleOperationTask scaleRequestHandler1 = new ScaleOperationTask(streamMetadataTasks, streamStore1Spied, executor);
ScaleOperationTask scaleRequestHandler2 = new ScaleOperationTask(streamMetadataTasks, streamStore2, executor);
setMockLatch(streamStore1, streamStore1Spied, funcToWaitOn, signal, wait);
// start the first scale asynchronously; it will park inside funcToWaitOn
CompletableFuture<Void> future1 = CompletableFuture.completedFuture(null).thenComposeAsync(v -> scaleRequestHandler1.execute(event), executor);
signal.join();
// let this run to completion. this should succeed
scaleRequestHandler2.execute(event).join();
long one = NameUtils.computeSegmentId(1, 1);
// a second, distinct scale on the segment created by the first one (epoch 1)
ScaleOpEvent event2 = new ScaleOpEvent(scope, stream, Lists.newArrayList(one), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), isManual, System.currentTimeMillis(), System.currentTimeMillis());
if (isManual) {
streamStore1.submitScale(scope, stream, Lists.newArrayList(one), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), System.currentTimeMillis(), null, null, executor).join();
}
scaleRequestHandler2.execute(event2).join();
// now complete wait latch.
wait.complete(null);
// the stalled first scale wakes up against already-advanced metadata and must fail
AssertExtensions.assertSuppliedFutureThrows("first scale should fail", () -> future1, firstExceptionPredicate);
// verify each spied store method was hit exactly as the caller predicted
verify(streamStore1Spied, times(invocationCount.get("startScale"))).startScale(anyString(), anyString(), anyBoolean(), any(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("scaleCreateNewEpochs"))).scaleCreateNewEpochs(anyString(), anyString(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("scaleSegmentsSealed"))).scaleSegmentsSealed(anyString(), anyString(), any(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("completeScale"))).completeScale(anyString(), anyString(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("updateVersionedState"))).updateVersionedState(anyString(), anyString(), any(), any(), any(), any());
// validate scale done
VersionedMetadata<EpochTransitionRecord> versioned = streamStore1.getEpochTransition(scope, stream, null, executor).join();
assertEquals(EpochTransitionRecord.EMPTY, versioned.getObject());
assertEquals(4, getVersionNumber(versioned));
// both successful scales applied: active epoch is 2, stream back to ACTIVE
assertEquals(2, streamStore1.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
assertEquals(State.ACTIVE, streamStore1.getState(scope, stream, true, null, executor).join());
streamStore1.close();
streamStore2.close();
}
Usage example of io.pravega.shared.controller.event.ScaleOpEvent from the pravega project.
Taken from the class ScaleRequestHandlerTest, method testWithExistingEpochTransition.
/**
 * Verifies behavior when a scale event conflicts with an epoch transition record
 * (ETR) already present in the store: a manual scale event completes as a no-op,
 * while an auto-scale event fails with a ConflictException. In both cases the
 * stream must remain at epoch 0 in ACTIVE state.
 */
@Test(timeout = 30000)
public void testWithExistingEpochTransition() {
    ScaleOperationTask task = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    String stream = "testETR";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    streamStore.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
    streamStore.setState(scope, stream, State.ACTIVE, null, executor).join();
    // Submit a scale request covering the full key space as one range; this records an ETR.
    ArrayList<Map.Entry<Double, Double>> pendingRange = new ArrayList<>();
    pendingRange.add(new AbstractMap.SimpleEntry<>(0.0, 1.0));
    streamStore.submitScale(scope, stream, Lists.newArrayList(0L), new ArrayList<>(pendingRange), System.currentTimeMillis(), null, null, executor).join();
    // Prepare a conflicting input: the same sealed segment, but split into two halves.
    ArrayList<Map.Entry<Double, Double>> conflictingRanges = new ArrayList<>();
    conflictingRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.5));
    conflictingRanges.add(new AbstractMap.SimpleEntry<>(0.5, 1.0));
    // Case 1: a manual scale event against the existing ETR should do nothing and complete.
    ScaleOpEvent manualEvent = new ScaleOpEvent(scope, stream, Lists.newArrayList(0L), conflictingRanges, true, System.currentTimeMillis(), System.currentTimeMillis());
    task.execute(manualEvent).join();
    // Case 2: an auto-scale event against the existing ETR must surface a ConflictException.
    ScaleOpEvent autoEvent = new ScaleOpEvent(scope, stream, Lists.newArrayList(0L), conflictingRanges, false, System.currentTimeMillis(), System.currentTimeMillis());
    AssertExtensions.assertSuppliedFutureThrows("", () -> task.execute(autoEvent), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.ConflictException);
    // Neither event should have advanced the stream: still ACTIVE at epoch 0.
    assertEquals(State.ACTIVE, streamStore.getState(scope, stream, true, null, executor).join());
    assertEquals(0, streamStore.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
}
Aggregations