use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
the class StreamMetadataTasks method checkScale.
/**
 * Helper method to check whether a scale operation against the given epoch has completed.
 *
 * @param scope     scope.
 * @param stream    stream name.
 * @param epoch     stream epoch against which the scale was performed.
 * @param requestId request id.
 * @return a future containing the scale status response.
 */
public CompletableFuture<ScaleStatusResponse> checkScale(String scope, String stream, int epoch, long requestId) {
    OperationContext context = streamMetadataStore.createStreamContext(scope, stream, requestId);
    CompletableFuture<EpochRecord> activeEpochFuture =
            streamMetadataStore.getActiveEpoch(scope, stream, context, true, executor);
    CompletableFuture<State> stateFuture =
            streamMetadataStore.getState(scope, stream, true, context, executor);
    CompletableFuture<EpochTransitionRecord> etrFuture =
            streamMetadataStore.getEpochTransition(scope, stream, context, executor)
                               .thenApply(VersionedMetadata::getObject);
    return CompletableFuture.allOf(stateFuture, activeEpochFuture, etrFuture).handle((r, ex) -> {
        ScaleStatusResponse.Builder response = ScaleStatusResponse.newBuilder();
        if (ex != null) {
            Throwable e = Exceptions.unwrap(ex);
            if (e instanceof StoreException.DataNotFoundException) {
                response.setStatus(ScaleStatusResponse.ScaleStatus.INVALID_INPUT);
            } else {
                response.setStatus(ScaleStatusResponse.ScaleStatus.INTERNAL_ERROR);
            }
        } else {
            EpochRecord activeEpoch = activeEpochFuture.join();
            State state = stateFuture.join();
            EpochTransitionRecord etr = etrFuture.join();
            if (epoch > activeEpoch.getEpoch()) {
                response.setStatus(ScaleStatusResponse.ScaleStatus.INVALID_INPUT);
            } else if (activeEpoch.getEpoch() == epoch || activeEpoch.getReferenceEpoch() == epoch) {
                response.setStatus(ScaleStatusResponse.ScaleStatus.IN_PROGRESS);
            } else {
                // If the active epoch is exactly one ahead of the requested epoch while the stream
                // is still in SCALING state (and the epoch transition record is either empty or
                // targets the active epoch), the scale workflow has not yet completed.
                if (epoch + 1 == activeEpoch.getReferenceEpoch() && state.equals(State.SCALING)
                        && (etr.equals(EpochTransitionRecord.EMPTY) || etr.getNewEpoch() == activeEpoch.getEpoch())) {
                    response.setStatus(ScaleStatusResponse.ScaleStatus.IN_PROGRESS);
                } else {
                    response.setStatus(ScaleStatusResponse.ScaleStatus.SUCCESS);
                }
            }
        }
        return response.build();
    });
}
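For illustration, here is a minimal polling loop over this API. The helper awaitScaleOutcome, its fixed retry interval, and the import path are assumptions for the sketch, not part of Pravega; it only relies on the checkScale signature and the ScaleStatus values shown above.

import io.pravega.controller.stream.api.grpc.v1.Controller.ScaleStatusResponse;

// Hypothetical helper (not part of Pravega): polls checkScale until the scale
// against `epoch` is reported as something other than IN_PROGRESS.
static ScaleStatusResponse.ScaleStatus awaitScaleOutcome(StreamMetadataTasks tasks, String scope,
                                                         String stream, int epoch, long requestId)
        throws InterruptedException {
    while (true) {
        ScaleStatusResponse response = tasks.checkScale(scope, stream, epoch, requestId).join();
        if (response.getStatus() != ScaleStatusResponse.ScaleStatus.IN_PROGRESS) {
            return response.getStatus(); // SUCCESS, INVALID_INPUT, or INTERNAL_ERROR
        }
        Thread.sleep(100); // simple fixed back-off between polls
    }
}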
use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
the class PersistentStreamBase method scaleOldSegmentsSealed.
@Override
public CompletableFuture<Void> scaleOldSegmentsSealed(Map<Long, Long> sealedSegmentSizes,
                                                      VersionedMetadata<EpochTransitionRecord> record,
                                                      OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    EpochTransitionRecord epochTransition = record.getObject();
    // Clear the markers for the segments being sealed, persist their final sizes,
    // and then update the current epoch record to the transition's new epoch.
    return Futures.toVoid(clearMarkers(epochTransition.getSegmentsToSeal(), context)
            .thenCompose(x -> updateSealedSegmentSizes(sealedSegmentSizes, context))
            .thenCompose(x -> updateCurrentEpochRecord(epochTransition.getNewEpoch(), context)));
}
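To show where this fits in the workflow, a hedged sketch of a caller follows. The variables streamObj, record, and context are assumed to be an in-scope PersistentStreamBase, the in-flight versioned EpochTransitionRecord, and an operation context; the 0L sizes are placeholders.

import java.util.HashMap;
import java.util.Map;

// Illustrative only: build the final-size map for the segments being sealed,
// then let scaleOldSegmentsSealed flip the stream to the new epoch.
Map<Long, Long> sealedSizes = new HashMap<>();
for (long segmentId : record.getObject().getSegmentsToSeal()) {
    sealedSizes.put(segmentId, 0L); // 0L stands in for each segment's real final size
}
streamObj.scaleOldSegmentsSealed(sealedSizes, record, context).join();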
use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
the class ScaleRequestHandlerTest method concurrentIdenticalScaleRun.
private void concurrentIdenticalScaleRun(String stream, String func, boolean isManual,
                                         Predicate<Throwable> firstExceptionPredicate,
                                         boolean expectFailureOnSecondJob,
                                         Predicate<Throwable> secondExceptionPredicate,
                                         Map<String, Integer> invocationCount) throws Exception {
    StreamMetadataStore streamStore1 = getStore();
    StreamMetadataStore streamStore1Spied = spy(getStore());
    StreamConfiguration config = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    streamStore1.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
    streamStore1.setState(scope, stream, State.ACTIVE, null, executor).join();
    CompletableFuture<Void> wait = new CompletableFuture<>();
    CompletableFuture<Void> signal = new CompletableFuture<>();
    ScaleOpEvent event = new ScaleOpEvent(scope, stream, Lists.newArrayList(0L),
            Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), isManual,
            System.currentTimeMillis(), System.currentTimeMillis());
    if (isManual) {
        streamStore1.submitScale(scope, stream, Lists.newArrayList(0L),
                Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)),
                System.currentTimeMillis(), null, null, executor).join();
    }
    StreamMetadataStore streamStore2 = getStore();
    ScaleOperationTask scaleRequestHandler1 = new ScaleOperationTask(streamMetadataTasks, streamStore1Spied, executor);
    ScaleOperationTask scaleRequestHandler2 = new ScaleOperationTask(streamMetadataTasks, streamStore2, executor);
    setMockLatch(streamStore1, streamStore1Spied, func, signal, wait);
    // the first job's processing stalls inside the mocked store method (`func`) until `wait` completes
    CompletableFuture<Void> future1 = CompletableFuture.completedFuture(null)
            .thenComposeAsync(v -> scaleRequestHandler1.execute(event), executor);
    signal.join();
    // run the second, identical job to completion; depending on the scenario it either succeeds or fails
    if (!expectFailureOnSecondJob) {
        scaleRequestHandler2.execute(event).join();
    } else {
        AssertExtensions.assertSuppliedFutureThrows("second job should fail",
                () -> scaleRequestHandler2.execute(event), secondExceptionPredicate);
    }
    // release the latch so the first job resumes; it should then fail per firstExceptionPredicate
    wait.complete(null);
    AssertExtensions.assertSuppliedFutureThrows("first scale should fail", () -> future1, firstExceptionPredicate);
    verify(streamStore1Spied, times(invocationCount.get("startScale")))
            .startScale(anyString(), anyString(), anyBoolean(), any(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("scaleCreateNewEpochs")))
            .scaleCreateNewEpochs(anyString(), anyString(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("scaleSegmentsSealed")))
            .scaleSegmentsSealed(anyString(), anyString(), any(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("completeScale")))
            .completeScale(anyString(), anyString(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("updateVersionedState")))
            .updateVersionedState(anyString(), anyString(), any(), any(), any(), any());
    // validate that the scale completed: the epoch transition is reset and the stream is ACTIVE in epoch 1
    VersionedMetadata<EpochTransitionRecord> versioned = streamStore1.getEpochTransition(scope, stream, null, executor).join();
    assertEquals(EpochTransitionRecord.EMPTY, versioned.getObject());
    assertEquals(2, getVersionNumber(versioned));
    assertEquals(1, streamStore1.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
    assertEquals(State.ACTIVE, streamStore1.getState(scope, stream, true, null, executor).join());
    streamStore1.close();
    streamStore2.close();
}
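A hypothetical invocation of this helper is sketched below. The stream name, the mocked method, the predicates, and in particular the invocation counts are illustrative assumptions, not values asserted from an actual scenario; they only demonstrate how the invocationCount map keys line up with the verify(...) calls above.

import com.google.common.collect.ImmutableMap;
import io.pravega.controller.store.stream.StoreException;

// Illustrative scenario: the first job stalls at startScale, the second identical
// job completes, and the resumed first job fails with a write conflict.
Map<String, Integer> counts = ImmutableMap.of(
        "startScale", 1,            // hypothetical counts; a real scenario would
        "scaleCreateNewEpochs", 0,  // derive these from the interleaving under test
        "scaleSegmentsSealed", 0,
        "completeScale", 0,
        "updateVersionedState", 1);
concurrentIdenticalScaleRun("testStream", "startScale", false,
        e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException,
        false, e -> false, counts);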
use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
the class StreamMetadataTasksTest method updateStreamSegmentCountScalingPolicyTest.
@Test(timeout = 30000)
public void updateStreamSegmentCountScalingPolicyTest() throws Exception {
    int initialSegments = streamStorePartialMock.getActiveSegments(SCOPE, stream1, null, executor).join().size();
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    // scale up by raising the minimum segment count above the current number of segments
    StreamConfiguration streamConfiguration = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.byEventRate(1, 2, initialSegments + 1)).build();
    updateConfigVerifyScale(requestEventWriter, streamConfiguration, initialSegments + 1);
    // now reduce the minimum number of segments to 1; no scale should happen since the
    // stream already has more segments than that.
    streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    updateConfigVerifyScale(requestEventWriter, streamConfiguration, initialSegments + 1);
    EpochRecord activeEpoch = streamStorePartialMock.getActiveEpoch(SCOPE, stream1, null, true, executor).join();
    // now create an epoch transition record (store.submitScale)
    VersionedMetadata<EpochTransitionRecord> etr = streamStorePartialMock.submitScale(SCOPE, stream1,
            new ArrayList<>(activeEpoch.getSegmentIds()),
            Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0)),
            System.currentTimeMillis(), null, null, executor).join();
    // update the stream; the pending epoch transition should be reset and have no effect
    streamConfiguration = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.byEventRate(1, 2, initialSegments + 5)).build();
    updateConfigVerifyScale(requestEventWriter, streamConfiguration, initialSegments + 5);
    assertEquals(Controller.ScaleStatusResponse.ScaleStatus.SUCCESS,
            streamMetadataTasks.checkScale(SCOPE, stream1, etr.getObject().getActiveEpoch(), 0L).join().getStatus());
}
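For reference, the scaling policy used throughout these tests can be built on its own. A minimal sketch: the three arguments to ScalingPolicy.byEventRate are the target event rate, the scale factor, and the minimum number of segments, which is why raising the minimum above the current segment count forces the scale-up seen above.

import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;

// byEventRate(targetRate, scaleFactor, minNumSegments): the stream auto-scales
// around the target event rate but never shrinks below minNumSegments.
StreamConfiguration cfg = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.byEventRate(1, 2, 3))
        .build();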
use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
the class StreamMetadataStoreTest method scaleTest.
@Test(timeout = 30000)
public void scaleTest() throws Exception {
    final String scope = "ScopeScale";
    final String stream = "StreamScale";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // set the minimum number of segments to 1 so that we can also test scale-downs
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    store.startUpdateConfiguration(scope, stream, config, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(scope, stream, null, executor).join();
    store.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
    // region idempotent
    long scaleTs = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.75, 1.0);
    List<Long> scale1SealedSegments = Collections.singletonList(computeSegmentId(1, 0));
    // 1. submit scale
    VersionedMetadata<EpochTransitionRecord> empty = store.getEpochTransition(scope, stream, null, executor).join();
    VersionedMetadata<EpochTransitionRecord> response = store.submitScale(scope, stream, scale1SealedSegments,
            Arrays.asList(segment1, segment2), scaleTs, null, null, executor).join();
    Map<Long, Map.Entry<Double, Double>> scale1SegmentsCreated = response.getObject().getNewSegmentsWithRange();
    final int scale1ActiveEpoch = response.getObject().getActiveEpoch();
    assertEquals(0, scale1ActiveEpoch);
    // rerun submitScale with the stale epoch transition; should throw a write conflict
    AssertExtensions.assertSuppliedFutureThrows("rerun with stale epoch transition should throw write conflict",
            () -> store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2),
                    scaleTs, empty, null, executor),
            e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
    // rerun submitScale with a null epoch transition; should be idempotent
    response = store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2),
            scaleTs, null, null, executor).join();
    assertEquals(response.getObject().getNewSegmentsWithRange(), scale1SegmentsCreated);
    VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).get();
    response = store.startScale(scope, stream, false, response, state, null, executor).join();
    // 2. create the new epoch's segments
    store.scaleCreateNewEpochs(scope, stream, response, null, executor).join();
    // rerun submitScale after the new segments are created; still idempotent
    response = store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2),
            scaleTs, null, null, executor).join();
    assertEquals(response.getObject().getNewSegmentsWithRange(), scale1SegmentsCreated);
    response = store.startScale(scope, stream, false, response, state, null, executor).join();
    store.scaleCreateNewEpochs(scope, stream, response, null, executor).join();
    // 3. seal the old segments, then complete the scale
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)),
            response, null, executor).join();
    store.completeScale(scope, stream, response, null, executor).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // rerun -- idempotent
    store.scaleCreateNewEpochs(scope, stream, response, null, executor).join();
    EpochRecord activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(1, activeEpoch.getEpoch());
    store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)),
            response, null, executor).join();
    activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(1, activeEpoch.getEpoch());
    // rerun submitScale -- should fail with a precondition failure
    VersionedMetadata<EpochTransitionRecord> etr = store.getEpochTransition(scope, stream, null, executor).join();
    assertEquals(EpochTransitionRecord.EMPTY, empty.getObject());
    AssertExtensions.assertThrows("Submit scale with old data with old etr",
            () -> store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2),
                    scaleTs, empty, null, executor).join(),
            e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
    AssertExtensions.assertThrows("Submit scale with old data with latest etr",
            () -> store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2),
                    scaleTs, etr, null, executor).join(),
            e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.PreConditionFailureException);
    AssertExtensions.assertThrows("Submit scale with null etr",
            () -> store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2),
                    scaleTs, null, null, executor).join(),
            e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.PreConditionFailureException);
    // endregion
    // 2 different conflicting scale operations
    // region run concurrent conflicting scale
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.0, 0.5);
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.75, 1.0);
    List<Long> scale2SealedSegments = Arrays.asList(computeSegmentId(0, 0), computeSegmentId(2, 1), computeSegmentId(3, 1));
    long scaleTs2 = System.currentTimeMillis();
    response = store.submitScale(scope, stream, scale2SealedSegments, Arrays.asList(segment3, segment4, segment5),
            scaleTs2, null, null, executor).get();
    Map<Long, Map.Entry<Double, Double>> scale2SegmentsCreated = response.getObject().getNewSegmentsWithRange();
    final int scale2ActiveEpoch = response.getObject().getActiveEpoch();
    store.setState(scope, stream, State.SCALING, null, executor).get();
    // rerun of scale 1 -- should fail with a conflict since scale 2 is in progress
    AssertExtensions.assertThrows("Concurrent conflicting scale",
            () -> store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment1, segment2),
                    scaleTs, null, null, executor).join(),
            e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.ConflictException);
    store.scaleCreateNewEpochs(scope, stream, response, null, executor).get();
    store.scaleSegmentsSealed(scope, stream, scale2SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)),
            response, null, executor).get();
    store.completeScale(scope, stream, response, null, executor).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    // endregion
    // region concurrent submit scale requests
    // run two concurrent submitScale operations such that after one reads the epoch transition,
    // the other writes a new epoch transition node. The stalled request should then fail with a
    // write conflict.
    // mock updateEpochTransitionNode
    SimpleEntry<Double, Double> segment6 = new SimpleEntry<>(0.0, 1.0);
    List<Long> scale3SealedSegments = Arrays.asList(computeSegmentId(4, 2), computeSegmentId(5, 2), computeSegmentId(6, 2));
    long scaleTs3 = System.currentTimeMillis();
    @SuppressWarnings("unchecked")
    PersistentStreamBase streamObj = (PersistentStreamBase) ((AbstractStreamMetadataStore) store).getStream(scope, stream, null);
    PersistentStreamBase streamObjSpied = spy(streamObj);
    CompletableFuture<Void> latch = new CompletableFuture<>();
    CompletableFuture<Void> updateEpochTransitionCalled = new CompletableFuture<>();
    doAnswer(x -> CompletableFuture.runAsync(() -> {
        // stall until we update the epoch transition outside of this method
        updateEpochTransitionCalled.complete(null);
        latch.join();
    }).thenCompose(v -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1))))
            .when(streamObjSpied).updateEpochTransitionNode(any(), any());
    doAnswer(x -> streamObj.getEpochTransitionNode(x.getArgument(0))).when(streamObjSpied).getEpochTransitionNode(any());
    OperationContext context = new StreamOperationContext(((AbstractStreamMetadataStore) store).getScope(scope, null),
            streamObjSpied, 0L);
    // the following should be stuck at updateEpochTransitionNode
    CompletableFuture<VersionedMetadata<EpochTransitionRecord>> resp = store.submitScale(scope, stream,
            scale3SealedSegments, Collections.singletonList(segment6), scaleTs3, null, context, executor);
    updateEpochTransitionCalled.join();
    VersionedMetadata<EpochTransitionRecord> epochRecord = streamObj.getEpochTransition(context).join();
    streamObj.updateEpochTransitionNode(new VersionedMetadata<>(EpochTransitionRecord.EMPTY, epochRecord.getVersion()),
            context).join();
    latch.complete(null);
    AssertExtensions.assertFutureThrows("stalled submitScale should fail with write conflict", resp,
            e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
    // endregion
}
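Pulling the happy path out of the test above, the ordered steps of a single scale operation look like this. This is a condensed sketch using the same store calls as the test; sealedSegments, newRanges, and sealedSizes are placeholders for the segments to seal, the new key ranges, and the sealed-size map.

// 1. propose the scale: records an EpochTransitionRecord
VersionedMetadata<EpochTransitionRecord> etr =
        store.submitScale(scope, stream, sealedSegments, newRanges, scaleTs, null, null, executor).join();
// 2. move the stream into SCALING state and start the scale
VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).join();
state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
etr = store.startScale(scope, stream, false, etr, state, null, executor).join();
// 3. create the new epoch's segments
store.scaleCreateNewEpochs(scope, stream, etr, null, executor).join();
// 4. seal the old segments with their final sizes and complete the scale
store.scaleSegmentsSealed(scope, stream, sealedSizes, etr, null, executor).join();
store.completeScale(scope, stream, etr, null, executor).join();
// 5. return the stream to ACTIVE
store.setState(scope, stream, State.ACTIVE, null, executor).join();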