Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
In class StreamMetadataTasksTest, method manualScaleTest:
@Test(timeout = 30000)
public void manualScaleTest() throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(1);
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
List<Map.Entry<Double, Double>> newRanges = new ArrayList<>();
newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.5));
newRanges.add(new AbstractMap.SimpleEntry<>(0.5, 1.0));
ScaleResponse scaleOpResult = streamMetadataTasks.manualScale(SCOPE, "test", Collections.singletonList(0L), newRanges, 30, 0L).get();
assertEquals(ScaleStreamStatus.STARTED, scaleOpResult.getStatus());
OperationContext context = streamStorePartialMock.createStreamContext(SCOPE, "test", 0L);
assertEquals(streamStorePartialMock.getState(SCOPE, "test", false, context, executor).get(), State.ACTIVE);
// Even when runScale processes the event after this point, the state should still be ACTIVE.
VersionedMetadata<EpochTransitionRecord> response = streamStorePartialMock.submitScale(SCOPE, "test", Collections.singletonList(0L), new LinkedList<>(newRanges), 30, null, null, executor).get();
assertEquals(response.getObject().getActiveEpoch(), 0);
VersionedMetadata<State> versionedState = streamStorePartialMock.getVersionedState(SCOPE, "test", context, executor).get();
assertEquals(versionedState.getObject(), State.ACTIVE);
// Calling startScale without the state having been set to SCALING should throw an IllegalArgumentException.
AssertExtensions.assertThrows("", () -> streamStorePartialMock.startScale(SCOPE, "test", true, response, versionedState, context, executor).get(), ex -> Exceptions.unwrap(ex) instanceof IllegalArgumentException);
ScaleOperationTask task = new ScaleOperationTask(streamMetadataTasks, streamStorePartialMock, executor);
task.runScale((ScaleOpEvent) requestEventWriter.getEventQueue().take(), true, context).get();
Map<Long, Map.Entry<Double, Double>> segments = response.getObject().getNewSegmentsWithRange();
assertTrue(segments.entrySet().stream().anyMatch(x -> x.getKey() == computeSegmentId(1, 1) && AssertExtensions.nearlyEquals(x.getValue().getKey(), 0.0, 0) && AssertExtensions.nearlyEquals(x.getValue().getValue(), 0.5, 0)));
assertTrue(segments.entrySet().stream().anyMatch(x -> x.getKey() == computeSegmentId(2, 1) && AssertExtensions.nearlyEquals(x.getValue().getKey(), 0.5, 0) && AssertExtensions.nearlyEquals(x.getValue().getValue(), 1.0, 0)));
}
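The two assertions above depend on how Pravega encodes segment ids. A minimal sketch, assuming the usual encoding of the creation epoch in the upper 32 bits and the segment number in the lower 32 bits (the production helper is NameUtils.computeSegmentId; the class below is illustrative only):

public class SegmentIdSketch {
    // Hypothetical re-implementation for illustration; not the production helper.
    static long computeSegmentId(int segmentNumber, int epoch) {
        return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
    }

    public static void main(String[] args) {
        // Scaling the single segment 0 of epoch 0 into two ranges creates segment numbers 1 and 2 in epoch 1.
        System.out.println(computeSegmentId(1, 1)); // 4294967297
        System.out.println(computeSegmentId(2, 1)); // 4294967298
    }
}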
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
In class StreamMetadataTasksTest, method checkScaleCompleteTest:
@Test(timeout = 10000)
public void checkScaleCompleteTest() throws ExecutionException, InterruptedException {
final ScalingPolicy policy = ScalingPolicy.fixed(1);
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
String test = "testCheckScale";
streamStorePartialMock.createStream(SCOPE, test, configuration, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, test, State.ACTIVE, null, executor).get();
List<Map.Entry<Double, Double>> newRanges = Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0));
streamMetadataTasks.setRequestEventWriter(new EventStreamWriterMock<>());
// region scale
ScaleResponse scaleOpResult = streamMetadataTasks.manualScale(SCOPE, test, Collections.singletonList(0L), newRanges, 30, 0L).get();
assertEquals(ScaleStreamStatus.STARTED, scaleOpResult.getStatus());
streamStorePartialMock.setState(SCOPE, test, State.SCALING, null, executor).join();
Controller.ScaleStatusResponse scaleStatusResult = streamMetadataTasks.checkScale(SCOPE, test, 0, 0L).get();
assertEquals(Controller.ScaleStatusResponse.ScaleStatus.IN_PROGRESS, scaleStatusResult.getStatus());
// perform scale steps and check scale after each step
VersionedMetadata<EpochTransitionRecord> etr = streamStorePartialMock.getEpochTransition(SCOPE, test, null, executor).join();
streamStorePartialMock.scaleCreateNewEpochs(SCOPE, test, etr, null, executor).join();
scaleStatusResult = streamMetadataTasks.checkScale(SCOPE, test, 0, 0L).get();
assertEquals(Controller.ScaleStatusResponse.ScaleStatus.IN_PROGRESS, scaleStatusResult.getStatus());
streamStorePartialMock.scaleSegmentsSealed(SCOPE, test, Collections.singletonMap(0L, 0L), etr, null, executor).join();
scaleStatusResult = streamMetadataTasks.checkScale(SCOPE, test, 0, 0L).get();
assertEquals(Controller.ScaleStatusResponse.ScaleStatus.IN_PROGRESS, scaleStatusResult.getStatus());
streamStorePartialMock.completeScale(SCOPE, test, etr, null, executor).join();
scaleStatusResult = streamMetadataTasks.checkScale(SCOPE, test, 0, 0L).get();
assertEquals(Controller.ScaleStatusResponse.ScaleStatus.IN_PROGRESS, scaleStatusResult.getStatus());
streamStorePartialMock.setState(SCOPE, test, State.ACTIVE, null, executor).join();
scaleStatusResult = streamMetadataTasks.checkScale(SCOPE, test, 0, 0L).get();
assertEquals(Controller.ScaleStatusResponse.ScaleStatus.SUCCESS, scaleStatusResult.getStatus());
// start another scale
scaleOpResult = streamMetadataTasks.manualScale(SCOPE, test, Collections.singletonList(NameUtils.computeSegmentId(1, 1)), newRanges, 30, 0L).get();
assertEquals(ScaleStreamStatus.STARTED, scaleOpResult.getStatus());
streamStorePartialMock.setState(SCOPE, test, State.SCALING, null, executor).join();
// even now we should get success for epoch 0
scaleStatusResult = streamMetadataTasks.checkScale(SCOPE, test, 0, 0L).get();
assertEquals(Controller.ScaleStatusResponse.ScaleStatus.SUCCESS, scaleStatusResult.getStatus());
scaleStatusResult = streamMetadataTasks.checkScale(SCOPE, test, 1, 0L).get();
assertEquals(Controller.ScaleStatusResponse.ScaleStatus.IN_PROGRESS, scaleStatusResult.getStatus());
// endregion
}
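A hedged sketch of how a caller could poll checkScale until the operation for a given epoch completes, reusing the call exercised above; the epoch value, delay, and retry cap are illustrative choices and are not part of the test:

int epoch = 0; // epoch whose scale we are waiting on (illustrative)
Controller.ScaleStatusResponse status;
int attempts = 0;
do {
    status = streamMetadataTasks.checkScale(SCOPE, test, epoch, 0L).get();
    if (status.getStatus() == Controller.ScaleStatusResponse.ScaleStatus.IN_PROGRESS) {
        Thread.sleep(100); // back off before the next check
    }
} while (status.getStatus() == Controller.ScaleStatusResponse.ScaleStatus.IN_PROGRESS && ++attempts < 50);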
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
In class StreamMetadataTasksTest, method setup:
@Before
public void setup() throws Exception {
zkServer = new TestingServerStarter().start();
zkServer.start();
zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
zkClient.start();
StreamMetrics.initialize();
TransactionMetrics.initialize();
StreamMetadataStore streamStore = getStore();
// create a partial mock.
streamStorePartialMock = spy(streamStore);
ImmutableMap<BucketStore.ServiceType, Integer> map = ImmutableMap.of(BucketStore.ServiceType.RetentionService, 1, BucketStore.ServiceType.WatermarkingService, 1);
bucketStore = StreamStoreFactory.createInMemoryBucketStore(map);
kvtStore = spy(getKvtStore());
TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(zkClient, executor);
SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
EventHelper helper = EventHelperMock.getEventHelperMock(executor, "host", ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
streamMetadataTasks = spy(new StreamMetadataTasks(streamStorePartialMock, bucketStore, taskMetadataStore, segmentHelperMock, executor, "host", new GrpcAuthHelper(authEnabled, "key", 300), helper));
EventHelper helperMock = EventHelperMock.getEventHelperMock(executor, "host", ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
kvtMetadataTasks = spy(new TableMetadataTasks(kvtStore, segmentHelperMock, executor, executor, "host", GrpcAuthHelper.getDisabledAuthHelper(), helperMock));
streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStorePartialMock, segmentHelperMock, executor, "host", new GrpcAuthHelper(authEnabled, "key", 300));
this.streamRequestHandler = new StreamRequestHandler(
        new AutoScaleTask(streamMetadataTasks, streamStorePartialMock, executor),
        new ScaleOperationTask(streamMetadataTasks, streamStorePartialMock, executor),
        new UpdateStreamTask(streamMetadataTasks, streamStorePartialMock, bucketStore, executor),
        new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStorePartialMock, executor),
        new DeleteStreamTask(streamMetadataTasks, streamStorePartialMock, bucketStore, executor),
        new TruncateStreamTask(streamMetadataTasks, streamStorePartialMock, executor),
        new CreateReaderGroupTask(streamMetadataTasks, streamStorePartialMock, executor),
        new DeleteReaderGroupTask(streamMetadataTasks, streamStorePartialMock, executor),
        new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
        streamStorePartialMock,
        new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executor),
        executor);
consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStorePartialMock, bucketStore, streamMetadataTasks, streamTransactionMetadataTasks, segmentHelperMock, executor, null, requestTracker);
commitWriter = new EventStreamWriterMock<>();
abortWriter = new EventStreamWriterMock<>();
streamTransactionMetadataTasks.initializeStreamWriters(commitWriter, abortWriter);
final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
streamStorePartialMock.createScope(SCOPE, null, executor).join();
// stream1
long start = System.currentTimeMillis();
streamStorePartialMock.createStream(SCOPE, stream1, configuration1, start, null, executor).get();
streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
AbstractMap.SimpleEntry<Double, Double> segment1 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
AbstractMap.SimpleEntry<Double, Double> segment2 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
List<Long> sealedSegments = Collections.singletonList(1L);
VersionedMetadata<EpochTransitionRecord> response = streamStorePartialMock.submitScale(SCOPE, stream1, sealedSegments, Arrays.asList(segment1, segment2), start + 20, null, null, executor).get();
VersionedMetadata<State> state = streamStorePartialMock.getVersionedState(SCOPE, stream1, null, executor).join();
state = streamStorePartialMock.updateVersionedState(SCOPE, stream1, State.SCALING, state, null, executor).join();
streamStorePartialMock.startScale(SCOPE, stream1, false, response, state, null, executor).join();
streamStorePartialMock.scaleCreateNewEpochs(SCOPE, stream1, response, null, executor).get();
streamStorePartialMock.scaleSegmentsSealed(SCOPE, stream1, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), response, null, executor).get();
streamStorePartialMock.completeScale(SCOPE, stream1, response, null, executor).join();
streamStorePartialMock.updateVersionedState(SCOPE, stream1, State.ACTIVE, state, null, executor).get();
// stream2
streamStorePartialMock.createStream(SCOPE, stream2, configuration1, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
streamStorePartialMock.createStream(SCOPE, stream3, configuration1, System.currentTimeMillis(), null, executor).get();
streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
}
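For readability, the store-level sequence the setup drives for stream1 (submitScale, startScale, scaleCreateNewEpochs, scaleSegmentsSealed, completeScale) can be pictured as a helper. This is a sketch only: it mirrors the calls above and reuses the same fields (streamStorePartialMock, executor), but the helper itself does not exist in the test.

private void scaleStream(String scope, String stream, List<Long> sealedSegments,
                         List<Map.Entry<Double, Double>> newRanges, long timestamp) throws Exception {
    // Submit the scale request and record the resulting epoch transition.
    VersionedMetadata<EpochTransitionRecord> record =
            streamStorePartialMock.submitScale(scope, stream, sealedSegments, newRanges, timestamp, null, null, executor).get();
    // Move the stream into SCALING before starting the workflow.
    VersionedMetadata<State> state = streamStorePartialMock.getVersionedState(scope, stream, null, executor).join();
    state = streamStorePartialMock.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
    streamStorePartialMock.startScale(scope, stream, false, record, state, null, executor).join();
    // Create the new epoch, seal the old segments, and complete the scale.
    streamStorePartialMock.scaleCreateNewEpochs(scope, stream, record, null, executor).get();
    streamStorePartialMock.scaleSegmentsSealed(scope, stream,
            sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
    streamStorePartialMock.completeScale(scope, stream, record, null, executor).join();
    // Return the stream to ACTIVE.
    streamStorePartialMock.updateVersionedState(scope, stream, State.ACTIVE, state, null, executor).get();
}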
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
In class PersistentStreamBase, method submitScale:
/**
* This method attempts to start a new scale workflow. To do so it first computes the epoch transition and stores it
* in the metadata store.
* This method can be called for a manual scale or while processing an auto-scale event, which means there can be
* concurrent calls to this method.
*
* @param segmentsToSeal segments that will be sealed at the end of this scale operation.
* @param newRanges      key ranges of the new segments to be created.
* @param scaleTimestamp scaling timestamp.
* @param existing       previously computed epoch transition record, or null to load it from the store.
* @param context        operation context.
* @return a CompletableFuture that, on completion, holds the versioned epoch transition record for this scale request.
*/
@Override
public CompletableFuture<VersionedMetadata<EpochTransitionRecord>> submitScale(final List<Long> segmentsToSeal, final List<Map.Entry<Double, Double>> newRanges, final long scaleTimestamp, final VersionedMetadata<EpochTransitionRecord> existing, OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    return verifyNotSealed(context).thenCompose(v -> {
        if (existing == null) {
            return getEpochTransition(context);
        } else {
            return CompletableFuture.completedFuture(existing);
        }
    }).thenCompose(record -> getActiveEpochRecord(true, context).thenCompose(currentEpoch -> getConfiguration(context).thenCompose(config -> {
        if (!record.getObject().equals(EpochTransitionRecord.EMPTY)) {
            // An epoch transition is already in progress: verify that it matches this request (segments to seal
            // and new ranges are identical), else throw a scale conflict exception.
            if (!RecordHelper.verifyRecordMatchesInput(segmentsToSeal, newRanges, false, record.getObject())) {
                log.debug(context.getRequestId(), "scale conflict, another scale operation is ongoing");
                throw new EpochTransitionOperationExceptions.ConflictException();
            }
            return CompletableFuture.completedFuture(record);
        } else {
            // check input is valid and satisfies preconditions
            if (!RecordHelper.canScaleFor(segmentsToSeal, currentEpoch)) {
                return updateEpochTransitionNode(new VersionedMetadata<>(EpochTransitionRecord.EMPTY, record.getVersion()), context).thenApply(x -> {
                    log.warn(context.getRequestId(), "scale precondition failed {}", segmentsToSeal);
                    throw new EpochTransitionOperationExceptions.PreConditionFailureException();
                });
            }
            if (!RecordHelper.validateInputRange(segmentsToSeal, newRanges, currentEpoch)) {
                log.error(context.getRequestId(), "scale input invalid {} {}", segmentsToSeal, newRanges);
                throw new EpochTransitionOperationExceptions.InputInvalidException();
            }
            int numberOfSegmentsPostScale = currentEpoch.getSegments().size() - segmentsToSeal.size() + newRanges.size();
            if (numberOfSegmentsPostScale < config.getScalingPolicy().getMinNumSegments()) {
                log.warn(context.getRequestId(), "Scale cannot be performed as Min Segment Count will not hold {} {}", segmentsToSeal, newRanges);
                throw new EpochTransitionOperationExceptions.PreConditionFailureException();
            }
            EpochTransitionRecord epochTransition = RecordHelper.computeEpochTransition(currentEpoch, segmentsToSeal, newRanges, scaleTimestamp);
            return updateEpochTransitionNode(new VersionedMetadata<>(epochTransition, record.getVersion()), context).thenApply(version -> {
                log.info(context.getRequestId(), "scale for stream {}/{} accepted. Segments to seal = {}", scope, name, epochTransition.getSegmentsToSeal());
                return new VersionedMetadata<>(epochTransition, version);
            });
        }
    })));
}
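A hedged usage sketch for the method above (the names stream, ctx, toSeal, ranges, and now are illustrative, not from the source): a first call passes null for existing so the stored epoch transition is loaded, and a retry can pass back the record returned earlier, which the method accepts as long as it matches the requested segments and ranges.

// First submission: existing == null, so the epoch transition is read from the store.
CompletableFuture<VersionedMetadata<EpochTransitionRecord>> first =
        stream.submitScale(toSeal, ranges, now, null, ctx);
// Retry with the previously returned record; a matching in-progress transition is reused rather than re-read.
CompletableFuture<VersionedMetadata<EpochTransitionRecord>> retried =
        first.thenCompose(record -> stream.submitScale(toSeal, ranges, now, record, ctx));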
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
In class PersistentStreamBase, method scaleCreateNewEpoch:
@Override
public CompletableFuture<VersionedMetadata<EpochTransitionRecord>> scaleCreateNewEpoch(VersionedMetadata<EpochTransitionRecord> versionedMetadata, OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    return getActiveEpochRecord(true, context).thenCompose(currentEpoch -> {
        // only perform idempotent update. If update is already completed, do nothing.
        if (currentEpoch.getEpoch() < versionedMetadata.getObject().getNewEpoch()) {
            EpochTransitionRecord epochTransition = versionedMetadata.getObject();
            // time
            long time = Math.max(epochTransition.getTime(), currentEpoch.getCreationTime() + 1);
            // new segments
            ImmutableList.Builder<StreamSegmentRecord> newSegmentsBuilder = ImmutableList.builder();
            epochTransition.getNewSegmentsWithRange().forEach((key, value) -> newSegmentsBuilder.add(newSegmentRecord(key, time, value.getKey(), value.getValue())));
            // sealed segments
            ImmutableList.Builder<StreamSegmentRecord> sealedSegmentsBuilder = ImmutableList.builder();
            epochTransition.getSegmentsToSeal().forEach(x -> sealedSegmentsBuilder.add(currentEpoch.getSegment(x)));
            // overall segments in epoch
            ImmutableList.Builder<StreamSegmentRecord> builder = ImmutableList.builder();
            currentEpoch.getSegments().forEach(x -> {
                if (!epochTransition.getSegmentsToSeal().contains(x.segmentId())) {
                    builder.add(x);
                }
            });
            ImmutableList<StreamSegmentRecord> newSegments = newSegmentsBuilder.build();
            builder.addAll(newSegments);
            ImmutableList<StreamSegmentRecord> newEpochSegments = builder.build();
            // epoch record
            return getSplitMergeCountsTillEpoch(currentEpoch, context).thenCompose(cumulativeSplitMergeCount -> {
                EpochRecord epochRecord = new EpochRecord(epochTransition.getNewEpoch(), epochTransition.getNewEpoch(), newEpochSegments, time,
                        getNewEpochSplitCount(cumulativeSplitMergeCount.getKey(), currentEpoch.getSegments(), newEpochSegments),
                        getNewEpochMergeCount(cumulativeSplitMergeCount.getValue(), currentEpoch.getSegments(), newEpochSegments));
                HistoryTimeSeriesRecord timeSeriesRecord = new HistoryTimeSeriesRecord(epochTransition.getNewEpoch(), epochTransition.getNewEpoch(),
                        sealedSegmentsBuilder.build(), newSegments, epochRecord.getCreationTime());
                return createEpochRecord(epochRecord, context)
                        .thenCompose(x -> updateHistoryTimeSeries(timeSeriesRecord, context))
                        .thenCompose(x -> createSegmentSealedEpochRecords(epochTransition.getSegmentsToSeal(), epochTransition.getNewEpoch(), context))
                        .thenApply(x -> versionedMetadata);
            });
        } else {
            return CompletableFuture.completedFuture(versionedMetadata);
        }
    });
}
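A standalone illustration (with hypothetical segment ids and ranges) of the segment-set composition performed above: the new epoch keeps every current segment that is not being sealed and then appends the newly created segments.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

public class NewEpochCompositionSketch {
    public static void main(String[] args) {
        Map<Long, double[]> currentSegments = Map.of(0L, new double[]{ 0.0, 1.0 }); // epoch 0: one segment covering the whole key space
        Set<Long> segmentsToSeal = Set.of(0L);                                      // segment 0 is sealed by the scale
        Map<Long, double[]> newSegments = Map.of(                                   // two segments created in epoch 1 (hypothetical ids)
                4294967297L, new double[]{ 0.0, 0.5 },
                4294967298L, new double[]{ 0.5, 1.0 });

        Map<Long, double[]> newEpochSegments = new LinkedHashMap<>();
        currentSegments.forEach((id, range) -> {
            if (!segmentsToSeal.contains(id)) {
                newEpochSegments.put(id, range); // surviving segments carry over unchanged
            }
        });
        newEpochSegments.putAll(newSegments);    // followed by the newly created segments
        System.out.println(newEpochSegments.keySet()); // the two new segment ids
    }
}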