Use of io.pravega.shared.controller.event.ScaleOpEvent in project pravega by pravega.
The class ScaleRequestHandlerTest, method testScaleStateReset.
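The test below exercises state recovery in ScaleOperationTask: after a manual scale completes, the stream state is forced back to SCALING and the same event is re-executed, which should merely reset the state to ACTIVE without creating a new epoch. The exercise is then repeated for auto-scale events, and finally a fresh scale is shown to succeed even when it starts from the SCALING state.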
@Test(timeout = 30000)
public void testScaleStateReset() {
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    String stream = "testResetState";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    streamStore.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
    streamStore.setState(scope, stream, State.ACTIVE, null, executor).join();
    ArrayList<Map.Entry<Double, Double>> newRange = new ArrayList<>();
    newRange.add(new AbstractMap.SimpleEntry<>(0.0, 1.0));
    // start with a manual scale
    ScaleOpEvent event = new ScaleOpEvent(scope, stream, Lists.newArrayList(0L), newRange, true, System.currentTimeMillis(), System.currentTimeMillis());
    streamStore.submitScale(scope, stream, Lists.newArrayList(0L), new ArrayList<>(newRange), System.currentTimeMillis(), null, null, executor).join();
    // perform the scaling
    scaleRequestHandler.execute(event).join();
    long one = NameUtils.computeSegmentId(1, 1);
    assertEquals(State.ACTIVE, streamStore.getState(scope, stream, true, null, executor).join());
    assertEquals(1, streamStore.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
    // now set the state to SCALING
    this.streamStore.setState(scope, stream, State.SCALING, null, executor).join();
    // rerun the same manual scaling job. It should succeed after simply resetting the state back to ACTIVE.
    scaleRequestHandler.execute(event).join();
    // verify that the state is reset
    assertEquals(State.ACTIVE, streamStore.getState(scope, stream, true, null, executor).join());
    assertEquals(1, streamStore.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
    // rerun the same manual scaling job. This time it should not do anything at all.
    scaleRequestHandler.execute(event).join();
    // run scale 2: this time an auto scale
    ScaleOpEvent event2 = new ScaleOpEvent(scope, stream, Lists.newArrayList(one), newRange, false, System.currentTimeMillis(), System.currentTimeMillis());
    scaleRequestHandler.execute(event2).join();
    this.streamStore.setState(scope, stream, State.SCALING, null, executor).join();
    // rerun the same auto scaling job.
    scaleRequestHandler.execute(event2).join();
    assertEquals(State.ACTIVE, streamStore.getState(scope, stream, true, null, executor).join());
    assertEquals(2, streamStore.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
    // now set the state to SCALING and run a new scaling job. This should succeed.
    this.streamStore.setState(scope, stream, State.SCALING, null, executor).join();
    long two = NameUtils.computeSegmentId(2, 2);
    ScaleOpEvent event3 = new ScaleOpEvent(scope, stream, Lists.newArrayList(two), newRange, false, System.currentTimeMillis(), System.currentTimeMillis());
    scaleRequestHandler.execute(event3).join();
    assertEquals(State.ACTIVE, streamStore.getState(scope, stream, true, null, executor).join());
    assertEquals(3, streamStore.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
}
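Every example in this section constructs a ScaleOpEvent with the same seven-argument shape. A minimal annotated sketch, reusing the variables from the test above (the role of each argument is an assumption inferred from the getters getSegmentsToSeal() and getNewRanges() and from how these tests pair the boolean with manual versus auto scale; only the argument order comes from the calls themselves):

ScaleOpEvent sketch = new ScaleOpEvent(
        scope,                       // scope name
        stream,                      // stream name
        Lists.newArrayList(0L),      // segments to seal, exposed via getSegmentsToSeal()
        newRange,                    // replacement key ranges, exposed via getNewRanges()
        true,                        // true for the manual-scale events here (the scale is submitted to the store separately), false for auto scale
        System.currentTimeMillis(),  // scale timestamp
        System.currentTimeMillis()); // request id (assumption)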
Use of io.pravega.shared.controller.event.ScaleOpEvent in project pravega by pravega.
The class ScaleRequestHandlerTest, method testMigrateManualScaleRequestAfterRollingTxn.
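The interleaving under test: a manual scale is submitted to the store but not run, a transaction from an older epoch then commits and rolls the stream over, and only afterwards is the pending scale event processed, forcing the handler to migrate the request to the latest epoch.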
@Test(timeout = 30000)
public void testMigrateManualScaleRequestAfterRollingTxn() throws Exception {
    // This test checks the scenario where, after a rolling txn, an outstanding scale request's
    // epoch-consistency check fails, so the request has to be migrated before it can be processed.
    String stream = "newStream";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 2)).build();
    streamMetadataTasks.createStream(scope, stream, config, System.currentTimeMillis(), 0L).get();
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler requestHandler = new StreamRequestHandler(null, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
    CommitRequestHandler commitRequestHandler = new CommitRequestHandler(streamStore, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
    // 1. create transactions on the old epoch and set them to committing
    UUID txnIdOldEpoch = streamStore.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnData = streamStore.createTransaction(scope, stream, txnIdOldEpoch, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, stream, txnData.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    UUID txnIdOldEpoch2 = streamStore.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnData2 = streamStore.createTransaction(scope, stream, txnIdOldEpoch2, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, stream, txnData2.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    EpochRecord epochZero = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(0, epochZero.getEpoch());
    // 2. start scale
    requestHandler.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(0L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.25), new AbstractMap.SimpleEntry<>(0.25, 0.5)), false, System.currentTimeMillis(), System.currentTimeMillis()), () -> false).join();
    // 3. verify that scale is complete
    State state = streamStore.getState(scope, stream, true, null, executor).join();
    assertEquals(State.ACTIVE, state);
    // 4. just submit a new scale without letting it run. This should create an epoch transition; the state should still be ACTIVE.
    streamStore.submitScale(scope, stream, Lists.newArrayList(1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), System.currentTimeMillis(), null, null, executor).join();
    // 5. commit on the old epoch. This should roll over.
    assertTrue(Futures.await(commitRequestHandler.processEvent(new CommitEvent(scope, stream, txnData.getEpoch()))));
    TxnStatus txnStatus = streamStore.transactionStatus(scope, stream, txnIdOldEpoch, null, executor).join();
    assertEquals(TxnStatus.COMMITTED, txnStatus);
    // 6. run the scale against the old record but with the manual scale flag set to true. It should be migrated to the new epoch and processed.
    requestHandler.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), true, System.currentTimeMillis(), System.currentTimeMillis()), () -> false).join();
    state = streamStore.getState(scope, stream, true, null, executor).join();
    assertEquals(State.ACTIVE, state);
    EpochRecord epoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(4, epoch.getEpoch());
}
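The final active epoch is 4 because the rolling transaction stacks two duplicate epochs (2 and 3) on top of epoch 1, and the migrated manual scale then creates epoch 4; the duplicate-epoch bookkeeping itself is asserted explicitly in testScaleWithTransactionRequest below.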
Use of io.pravega.shared.controller.event.ScaleOpEvent in project pravega by pravega.
The class ScaleRequestHandlerTest, method testScaleRequest.
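This test drives the full auto-scale pipeline through the StreamRequestHandler multiplexer: a scale-up AutoScaleEvent yields a ScaleOpEvent that splits the hot segment; scale-down events initially only mark segments as cold; and a merging ScaleOpEvent is posted once both neighbouring segments are cold. It ends with two requests that must fail fast rather than retry.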
@SuppressWarnings("unchecked")
@Test(timeout = 30000)
public void testScaleRequest() throws ExecutionException, InterruptedException {
    AutoScaleTask requestHandler = new AutoScaleTask(streamMetadataTasks, streamStore, executor);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler multiplexer = new StreamRequestHandler(requestHandler, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    // send a scale-up event with number of splits = 1
    AutoScaleEvent scaleUpEvent = new AutoScaleEvent(scope, stream, 2, AutoScaleEvent.UP, System.currentTimeMillis(), 1, false, System.currentTimeMillis());
    assertTrue(Futures.await(multiplexer.process(scaleUpEvent, () -> false)));
    // verify that one ScaleOpEvent is written into the stream
    assertEquals(1, writer.queue.size());
    ControllerEvent event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    ScaleOpEvent scaleOpEvent = (ScaleOpEvent) event;
    double start = 2.0 / 3.0;
    double end = 1.0;
    double middle = (start + end) / 2;
    assertEquals(2, scaleOpEvent.getNewRanges().size());
    double delta = 0.0000000000001;
    assertEquals(start, scaleOpEvent.getNewRanges().get(0).getKey(), delta);
    assertEquals(middle, scaleOpEvent.getNewRanges().get(0).getValue(), delta);
    assertEquals(middle, scaleOpEvent.getNewRanges().get(1).getKey(), delta);
    assertEquals(end, scaleOpEvent.getNewRanges().get(1).getValue(), delta);
    assertEquals(1, scaleOpEvent.getSegmentsToSeal().size());
    assertTrue(scaleOpEvent.getSegmentsToSeal().contains(2L));
    assertTrue(Futures.await(multiplexer.process(scaleOpEvent, () -> false)));
    // verify that the event is processed successfully
    List<StreamSegmentRecord> activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == 2L));
    // verify that two splits are created even though we sent numOfSplits = 1 in the AutoScaleEvent
    long three = computeSegmentId(3, 1);
    long four = computeSegmentId(4, 1);
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == three));
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == four));
    assertEquals(4, activeSegments.size());
    // process the first scale-down event; it should only mark the segment as cold
    AutoScaleEvent scaleDownEvent = new AutoScaleEvent(scope, stream, four, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis());
    assertTrue(Futures.await(multiplexer.process(scaleDownEvent, () -> false)));
    assertTrue(writer.queue.isEmpty());
    activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == four));
    assertEquals(4, activeSegments.size());
    assertTrue(streamStore.isCold(scope, stream, four, null, executor).join());
    AutoScaleEvent scaleDownEvent2 = new AutoScaleEvent(scope, stream, three, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis());
    assertTrue(Futures.await(multiplexer.process(scaleDownEvent2, () -> false)));
    assertTrue(streamStore.isCold(scope, stream, three, null, executor).join());
    // verify that a new event has been posted
    assertEquals(1, writer.queue.size());
    event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    scaleOpEvent = (ScaleOpEvent) event;
    assertEquals(1, scaleOpEvent.getNewRanges().size());
    assertEquals(start, scaleOpEvent.getNewRanges().get(0).getKey(), delta);
    assertEquals(end, scaleOpEvent.getNewRanges().get(0).getValue(), delta);
    assertEquals(2, scaleOpEvent.getSegmentsToSeal().size());
    assertTrue(scaleOpEvent.getSegmentsToSeal().contains(three));
    assertTrue(scaleOpEvent.getSegmentsToSeal().contains(four));
    // process the scale-down event
    assertTrue(Futures.await(multiplexer.process(scaleOpEvent, () -> false)));
    long five = computeSegmentId(5, 2);
    activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == three));
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == four));
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == five));
    assertEquals(3, activeSegments.size());
    // Make it throw a non-retryable failure so that the test does not wait out the retries.
    // This brings the test duration down drastically, because a retryable failure can keep retrying for a few seconds,
    // and if someone changed the retry durations or number of attempts in the retry helper, it would impact this test's running time.
    // Hence we send an incorrect segmentsToSeal list, which results in a non-retryable failure that fails immediately.
    assertFalse(Futures.await(multiplexer.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(five), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.5, 1.0)), false, System.currentTimeMillis(), System.currentTimeMillis()), () -> false)));
    activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == three));
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == four));
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == five));
    assertEquals(3, activeSegments.size());
    // processing an AbortEvent through this handler is likewise expected to fail
    assertFalse(Futures.await(multiplexer.process(new AbortEvent(scope, stream, 0, UUID.randomUUID(), 11L), () -> false)));
}
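These tests predict segment ids with computeSegmentId(segmentNumber, epoch), e.g. three = computeSegmentId(3, 1) for segment number 3 created in epoch 1. A sketch of the packing this implies (the bit layout is an assumption, consistent with NameUtils.getSegmentNumber() recovering the segment number in testScaleWithTransactionRequest below; the authoritative implementation is Pravega's NameUtils):

static long computeSegmentId(int segmentNumber, int epoch) {
    // assumed layout: creation epoch in the high 32 bits, segment number in the low 32 bits
    return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
}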
Use of io.pravega.shared.controller.event.ScaleOpEvent in project pravega by pravega.
The class ScaleRequestHandlerTest, method testScaleWithTransactionRequest.
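This test pins down the epoch bookkeeping of a rolling transaction: committing a transaction created before a scale produces duplicate epochs (2 mirroring epoch 0, 3 mirroring epoch 1, linked through getReferenceEpoch()), after which a commit of the post-scale transaction lands on the duplicate epoch without any further rollover.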
@Test(timeout = 30000)
public void testScaleWithTransactionRequest() throws InterruptedException {
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler requestHandler = new StreamRequestHandler(null, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
    CommitRequestHandler commitRequestHandler = new CommitRequestHandler(streamStore, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
    // 1. create a transaction on the old epoch and set it to committing
    UUID txnIdOldEpoch = streamStore.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnData = streamStore.createTransaction(scope, stream, txnIdOldEpoch, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, stream, txnData.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    EpochRecord epochZero = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(0, epochZero.getEpoch());
    // 2. start scale
    requestHandler.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(0L, 1L, 2L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false, System.currentTimeMillis(), System.currentTimeMillis()), () -> false).join();
    // 3. verify that scale is complete
    State state = streamStore.getState(scope, stream, true, null, executor).join();
    assertEquals(State.ACTIVE, state);
    EpochRecord epochOne = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(1, epochOne.getEpoch());
    // 4. create a transaction and verify that it is created on the new epoch
    UUID txnIdNewEpoch = streamStore.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnDataNew = streamStore.createTransaction(scope, stream, txnIdNewEpoch, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, stream, txnDataNew.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    // 5. commit on the old epoch. This should roll over.
    assertTrue(Futures.await(commitRequestHandler.processEvent(new CommitEvent(scope, stream, txnData.getEpoch()))));
    TxnStatus txnStatus = streamStore.transactionStatus(scope, stream, txnIdOldEpoch, null, executor).join();
    assertEquals(TxnStatus.COMMITTED, txnStatus);
    // the rollover creates epochs 2 and 3 as duplicates of epochs 0 and 1 respectively
    EpochRecord epochTwo = streamStore.getEpoch(scope, stream, 2, null, executor).join();
    EpochRecord epochThree = streamStore.getEpoch(scope, stream, 3, null, executor).join();
    assertEquals(0, epochTwo.getReferenceEpoch());
    assertEquals(epochZero.getSegments().size(), epochTwo.getSegments().size());
    assertEquals(epochZero.getSegments().stream().map(x -> NameUtils.getSegmentNumber(x.segmentId())).collect(Collectors.toSet()), epochTwo.getSegments().stream().map(x -> NameUtils.getSegmentNumber(x.segmentId())).collect(Collectors.toSet()));
    assertEquals(1, epochThree.getReferenceEpoch());
    assertEquals(epochOne.getSegments().size(), epochThree.getSegments().size());
    assertEquals(epochOne.getSegments().stream().map(x -> NameUtils.getSegmentNumber(x.segmentId())).collect(Collectors.toSet()), epochThree.getSegments().stream().map(x -> NameUtils.getSegmentNumber(x.segmentId())).collect(Collectors.toSet()));
    EpochRecord activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(epochThree, activeEpoch);
    // 6. commit on the new epoch. This should happen on the duplicate of the new epoch successfully.
    assertTrue(Futures.await(commitRequestHandler.processEvent(new CommitEvent(scope, stream, txnDataNew.getEpoch()))));
    txnStatus = streamStore.transactionStatus(scope, stream, txnIdNewEpoch, null, executor).join();
    assertEquals(TxnStatus.COMMITTED, txnStatus);
    activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(epochThree, activeEpoch);
}
Use of io.pravega.shared.controller.event.ScaleOpEvent in project pravega by pravega.
The class RequestSweeperTest, method testRequestSweeper.
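This test, from RequestSweeperTest, blocks a manual scale mid-flight by stubbing the request event writer, then verifies that RequestSweeper.handleFailedProcess re-posts the ScaleOpEvent indexed against the failed host and cleans up the host index afterwards.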
@Test(timeout = 30000)
public void testRequestSweeper() throws ExecutionException, InterruptedException {
    AbstractMap.SimpleEntry<Double, Double> segment1 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
    AbstractMap.SimpleEntry<Double, Double> segment2 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
    List<Long> sealedSegments = Collections.singletonList(1L);
    CompletableFuture<Void> wait1 = new CompletableFuture<>();
    CompletableFuture<Void> wait2 = new CompletableFuture<>();
    LinkedBlockingQueue<CompletableFuture<Void>> waitQueue = new LinkedBlockingQueue<>();
    waitQueue.put(wait1);
    waitQueue.put(wait2);
    CompletableFuture<Void> signal1 = new CompletableFuture<>();
    CompletableFuture<Void> signal2 = new CompletableFuture<>();
    LinkedBlockingQueue<CompletableFuture<Void>> signalQueue = new LinkedBlockingQueue<>();
    signalQueue.put(signal1);
    signalQueue.put(signal2);
    // each stubbed writeEvent call signals the test and then blocks on the next "wait" future
    doAnswer(x -> {
        signalQueue.take().complete(null);
        return waitQueue.take();
    }).when(requestEventWriter).writeEvent(any(), any());
    streamMetadataTasks.manualScale(SCOPE, stream1, sealedSegments, Arrays.asList(segment1, segment2), System.currentTimeMillis(), 0L);
    signal1.join();
    // since we don't complete the writeEvent future, the manual scale will not complete and the index entry is not removed.
    // verify that the index has the entry.
    HostIndex hostIndex = getHostIndex();
    List<String> entities = hostIndex.getEntities(HOSTNAME).join();
    assertEquals(1, entities.size());
    byte[] data = hostIndex.getEntityData(HOSTNAME, entities.get(0)).join();
    ControllerEventSerializer serializer = new ControllerEventSerializer();
    ControllerEvent event = serializer.fromByteBuffer(ByteBuffer.wrap(data));
    assertTrue(event instanceof ScaleOpEvent);
    RequestSweeper requestSweeper = new RequestSweeper(streamStore, executor, streamMetadataTasks);
    CompletableFuture<Void> failoverFuture = requestSweeper.handleFailedProcess(HOSTNAME);
    // verify that the event is posted: the signal2 future should complete.
    signal2.join();
    // let wait2 complete as well.
    wait2.complete(null);
    // wait for failover to complete
    failoverFuture.join();
    // verify that the entity is removed.
    entities = hostIndex.getEntities(HOSTNAME).join();
    assertTrue(entities.isEmpty());
    // verify that the host is removed.
    Set<String> hosts = hostIndex.getHosts().join();
    assertTrue(hosts.isEmpty());
}
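The future hand-off above encodes the expected sweep sequence: signal2 completing proves the sweeper re-posted the event, wait2 releases that in-flight write, and only then does failoverFuture complete with the index entry and host registration gone.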