use of io.pravega.shared.controller.event.ScaleOpEvent in project pravega by pravega.
the class StreamMetadataTasks method manualScale.
/**
 * Helper method to perform a scale operation against a scale request.
 * This method posts a request in the request stream and then starts the scale operation while
 * tracking its progress. Eventually, after scale completion, it sends a response to the caller.
 *
 * @param scope          scope.
 * @param stream         stream name.
 * @param segmentsToSeal segments to be sealed.
 * @param newRanges      key ranges for new segments.
 * @param scaleTimestamp scaling timestamp.
 * @param requestId      request id.
 * @return a future that completes with the scale response carrying the newly created segments.
 */
public CompletableFuture<ScaleResponse> manualScale(String scope, String stream, List<Long> segmentsToSeal,
                                                    List<Map.Entry<Double, Double>> newRanges,
                                                    long scaleTimestamp, long requestId) {
    final OperationContext context = streamMetadataStore.createStreamContext(scope, stream, requestId);
    ScaleOpEvent event = new ScaleOpEvent(scope, stream, segmentsToSeal, newRanges, true, scaleTimestamp, requestId);
    return eventHelperFuture.thenCompose(eventHelper -> eventHelper.addIndexAndSubmitTask(event,
            () -> streamMetadataStore.submitScale(scope, stream, segmentsToSeal, new ArrayList<>(newRanges),
                    scaleTimestamp, null, context, executor))
            .handle((startScaleResponse, e) -> {
                ScaleResponse.Builder response = ScaleResponse.newBuilder();
                if (e != null) {
                    Throwable cause = Exceptions.unwrap(e);
                    if (cause instanceof EpochTransitionOperationExceptions.PreConditionFailureException) {
                        response.setStatus(ScaleResponse.ScaleStreamStatus.PRECONDITION_FAILED);
                    } else {
                        log.error(requestId, "Scale for stream {}/{} failed with exception {}", scope, stream, cause);
                        response.setStatus(ScaleResponse.ScaleStreamStatus.FAILURE);
                    }
                } else {
                    log.info(requestId, "scale for stream {}/{} started successfully", scope, stream);
                    response.setStatus(ScaleResponse.ScaleStreamStatus.STARTED);
                    response.addAllSegments(startScaleResponse.getObject().getNewSegmentsWithRange().entrySet().stream()
                            .map(segment -> convert(scope, stream, segment))
                            .collect(Collectors.toList()));
                    response.setEpoch(startScaleResponse.getObject().getActiveEpoch());
                }
                return response.build();
            }));
}
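A minimal caller-side sketch of how manualScale might be invoked (illustrative only: the streamMetadataTasks handle and requestId are assumed to come from the surrounding controller wiring and are not defined in the snippet above):

// seal segment 0 and replace it with two segments covering [0.0, 0.5) and [0.5, 1.0)
List<Long> toSeal = Collections.singletonList(0L);
List<Map.Entry<Double, Double>> ranges = Arrays.asList(
        new AbstractMap.SimpleEntry<>(0.0, 0.5),
        new AbstractMap.SimpleEntry<>(0.5, 1.0));
streamMetadataTasks.manualScale("myScope", "myStream", toSeal, ranges, System.currentTimeMillis(), requestId)
        .thenAccept(resp -> {
            if (resp.getStatus() == ScaleResponse.ScaleStreamStatus.STARTED) {
                // the scale was accepted; the posted ScaleOpEvent drives it to completion asynchronously
            }
        });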
use of io.pravega.shared.controller.event.ScaleOpEvent in project pravega by pravega.
the class ControllerEventProcessorTest method testCommitAndStreamProcessorFairness.
@Test(timeout = 30000)
public void testCommitAndStreamProcessorFairness() {
    List<VersionedTransactionData> txnDataList1 = createAndCommitTransactions(3);
    int epoch = txnDataList1.get(0).getEpoch();
    streamStore.startCommitTransactions(SCOPE, STREAM, 100, null, executor).join();
    EventStreamWriterMock<ControllerEvent> requestEventWriter = new EventStreamWriterMock<>();
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    CommitRequestHandler commitEventProcessor = new CommitRequestHandler(streamStore, streamMetadataTasks,
            streamTransactionMetadataTasks, bucketStore, executor);
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            null, null, null, null, null, null, null, streamStore, null, executor);
    // set some processor name so that the processing gets postponed
    streamStore.createWaitingRequestIfAbsent(SCOPE, STREAM, "test", null, executor).join();
    AssertExtensions.assertFutureThrows("Operation should be disallowed",
            commitEventProcessor.processEvent(new CommitEvent(SCOPE, STREAM, epoch)),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    // a conditional delete with a non-matching processor name is a no-op: "test" remains the waiting processor
    streamStore.deleteWaitingRequestConditionally(SCOPE, STREAM, "test1", null, executor).join();
    assertEquals("test", streamStore.getWaitingRequestProcessor(SCOPE, STREAM, null, executor).join());
    // now remove the barrier but change the state so that processing cannot happen.
    streamStore.deleteWaitingRequestConditionally(SCOPE, STREAM, "test", null, executor).join();
    assertNull(streamStore.getWaitingRequestProcessor(SCOPE, STREAM, null, executor).join());
    streamStore.setState(SCOPE, STREAM, State.SCALING, null, executor).join();
    AssertExtensions.assertFutureThrows("Operation should be disallowed",
            commitEventProcessor.processEvent(new CommitEvent(SCOPE, STREAM, epoch)),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    assertEquals(commitEventProcessor.getProcessorName(),
            streamStore.getWaitingRequestProcessor(SCOPE, STREAM, null, executor).join());
    streamStore.setState(SCOPE, STREAM, State.ACTIVE, null, executor).join();
    // verify that we are able to process if the waiting processor name is the same as ours.
    commitEventProcessor.processEvent(new CommitEvent(SCOPE, STREAM, epoch)).join();
    // verify that the waiting processor is cleaned up.
    assertNull(streamStore.getWaitingRequestProcessor(SCOPE, STREAM, null, executor).join());
    // now set the state to COMMITTING_TXN and try the same with scaling
    streamStore.setState(SCOPE, STREAM, State.COMMITTING_TXN, null, executor).join();
    // verify that an event that does not use `processor.withCompletion` runs without contention
    assertTrue(Futures.await(streamRequestHandler.processEvent(
            new AutoScaleEvent(SCOPE, STREAM, 0L, AutoScaleEvent.UP, 0L, 2, true, 0L))));
    // now the same event's processing in the face of a barrier should get postponed
    streamStore.createWaitingRequestIfAbsent(SCOPE, STREAM, commitEventProcessor.getProcessorName(), null, executor).join();
    assertTrue(Futures.await(streamRequestHandler.processEvent(
            new AutoScaleEvent(SCOPE, STREAM, 0L, AutoScaleEvent.UP, 0L, 2, true, 0L))));
    AssertExtensions.assertFutureThrows("Operation should be disallowed",
            streamRequestHandler.processEvent(new ScaleOpEvent(SCOPE, STREAM, Collections.emptyList(),
                    Collections.emptyList(), false, 0L, 0L)),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    assertEquals(commitEventProcessor.getProcessorName(),
            streamStore.getWaitingRequestProcessor(SCOPE, STREAM, null, executor).join());
}
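A simplified sketch of the fairness barrier protocol these assertions exercise (the real logic lives in the request processors' withCompletion wrapper; myProcessorName and processEventBody are illustrative stand-ins, not Pravega APIs):

String waiting = streamStore.getWaitingRequestProcessor(SCOPE, STREAM, null, executor).join();
if (waiting != null && !waiting.equals(myProcessorName)) {
    // another processor is queued ahead of us: postpone our event
    throw StoreException.create(StoreException.Type.OPERATION_NOT_ALLOWED, "waiting processor ahead of us");
}
processEventBody(event).join();
// conditionally remove our own barrier entry so that postponed processors can proceed
streamStore.deleteWaitingRequestConditionally(SCOPE, STREAM, myProcessorName, null, executor).join();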
use of io.pravega.shared.controller.event.ScaleOpEvent in project pravega by pravega.
the class StreamMetadataTasksTest method truncateStreamTest.
@Test(timeout = 30000)
public void truncateStreamTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, "test", 0L).get().size());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    List<Map.Entry<Double, Double>> newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.5, 0.75));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.75, 1.0));
    ScaleResponse scaleOpResult = streamMetadataTasks.manualScale(SCOPE, "test", Collections.singletonList(1L),
            newRanges, 30, 0L).get();
    assertEquals(ScaleStreamStatus.STARTED, scaleOpResult.getStatus());
    ScaleOperationTask scaleTask = new ScaleOperationTask(streamMetadataTasks, streamStorePartialMock, executor);
    assertTrue(Futures.await(scaleTask.execute((ScaleOpEvent) requestEventWriter.eventQueue.take())));
    // start truncation
    StreamTruncationRecord truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor)
            .join().getObject();
    assertFalse(truncProp.isUpdating());
    // 1. happy day test
    // update.. should succeed
    Map<Long, Long> streamCut = new HashMap<>();
    streamCut.put(0L, 1L);
    streamCut.put(1L, 11L);
    CompletableFuture<UpdateStreamStatus.Status> truncateFuture = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut, 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(UpdateStreamStatus.Status.SUCCESS, truncateFuture.join());
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
    assertEquals(streamCut, truncProp.getStreamCut());
    // 2. change state to scaling
    streamStorePartialMock.setState(SCOPE, "test", State.SCALING, null, executor).get();
    // call update should fail without posting the event
    long two = NameUtils.computeSegmentId(2, 1);
    long three = NameUtils.computeSegmentId(3, 1);
    Map<Long, Long> streamCut2 = new HashMap<>();
    streamCut2.put(0L, 1L);
    streamCut2.put(two, 1L);
    streamCut2.put(three, 1L);
    streamMetadataTasks.truncateStream(SCOPE, "test", streamCut2, 0L);
    AtomicBoolean loop = new AtomicBoolean(false);
    Futures.loop(() -> !loop.get(),
            () -> Futures.delayedFuture(() -> streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor),
                    1000, executor)
                    .thenApply(x -> x.getObject().isUpdating())
                    .thenAccept(loop::set), executor).join();
    // event posted, first step performed. now pick the event for processing
    TruncateStreamTask truncateStreamTask = new TruncateStreamTask(streamMetadataTasks, streamStorePartialMock, executor);
    TruncateStreamEvent taken = (TruncateStreamEvent) requestEventWriter.eventQueue.take();
    AssertExtensions.assertFutureThrows("", truncateStreamTask.execute(taken),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    // now with state = active, process the same event. it should succeed now.
    assertTrue(Futures.await(truncateStreamTask.execute(taken)));
    // 3. multiple back to back updates.
    Map<Long, Long> streamCut3 = new HashMap<>();
    streamCut3.put(0L, 12L);
    streamCut3.put(two, 12L);
    streamCut3.put(three, 12L);
    CompletableFuture<UpdateStreamStatus.Status> truncateOp1 = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut3, 0L);
    // ensure that the previous truncateStream has posted the event and set the status to updating,
    // only then call the second truncateStream
    AtomicBoolean loop2 = new AtomicBoolean(false);
    Futures.loop(() -> !loop2.get(),
            () -> streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor)
                    .thenApply(x -> x.getObject().isUpdating())
                    .thenAccept(loop2::set), executor).join();
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
    assertTrue(truncProp.getStreamCut().equals(streamCut3) && truncProp.isUpdating());
    // post the second update request. This should fail here itself as the previous one has started.
    Map<Long, Long> streamCut4 = new HashMap<>();
    streamCut4.put(0L, 14L);
    streamCut4.put(two, 14L);
    streamCut4.put(three, 14L);
    CompletableFuture<UpdateStreamStatus.Status> truncateOpFuture2 = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut4, 0L);
    assertEquals(UpdateStreamStatus.Status.FAILURE, truncateOpFuture2.join());
    // process event
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    // verify that the first request for update also completes with success.
    assertEquals(UpdateStreamStatus.Status.SUCCESS, truncateOp1.join());
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
    assertTrue(truncProp.getStreamCut().equals(streamCut3) && !truncProp.isUpdating());
    streamStorePartialMock.setState(SCOPE, "test", State.TRUNCATING, null, executor).join();
    TruncateStreamEvent event = new TruncateStreamEvent(SCOPE, "test", System.nanoTime());
    assertTrue(Futures.await(truncateStreamTask.execute(event)));
    // execute the event again. It should complete without doing anything.
    truncateStreamTask.execute(event).join();
    assertEquals(State.ACTIVE, streamStorePartialMock.getState(SCOPE, "test", true, null, executor).join());
    doReturn(CompletableFuture.completedFuture(true)).when(streamStorePartialMock).isScopeSealed(anyString(), any(), any());
    CompletableFuture<CreateStreamResponse> streamResponse = streamStorePartialMock.createStream(SCOPE, "test",
            configuration, System.currentTimeMillis(), null, executor);
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_ACTIVE, streamResponse.get().getStatus());
}
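For reference, the composite segment ids built with NameUtils.computeSegmentId above pack the creation epoch and the segment number into a single long; a sketch of that packing, assuming the usual Pravega layout of epoch in the high 32 bits:

// epoch 1, segment number 2 -> high word 1, low word 2
long two = ((long) 1 << 32) | 2L;   // what computeSegmentId(2, 1) is expected to return
long three = ((long) 1 << 32) | 3L; // what computeSegmentId(3, 1) is expected to return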
use of io.pravega.shared.controller.event.ScaleOpEvent in project pravega by pravega.
the class PravegaTablesScaleRequestHandlerTest method testEpochMigration.
@Test(timeout = 30000)
public void testEpochMigration() throws ExecutionException, InterruptedException {
    final String scope = "scopeEpoch";
    streamStore.createScope(scope, null, executor).get();
    final String testStream = "streamEpoch";
    final String epoch0Key = "epochRecord-0";
    long creationTime = System.currentTimeMillis();
    StreamSegmentRecord segRecord = new StreamSegmentRecord(0, 0, creationTime, 0.0, 1.0);
    // simulate an epoch record persisted in the old serialization format, with the
    // splits/merges counts absent (represented by the DEFAULT_COUNT_VALUE sentinels)
    EpochRecord firstEpochInOldFormat = new EpochRecord(0, 0, ImmutableList.of(segRecord), creationTime,
            EpochRecord.DEFAULT_COUNT_VALUE, EpochRecord.DEFAULT_COUNT_VALUE);
    VersionedMetadata<EpochRecord> expectedEpochRecord = new VersionedMetadata<>(firstEpochInOldFormat, new Version.IntVersion(0));
    doReturn(CompletableFuture.completedFuture(expectedEpochRecord)).when(storeHelper)
            .getCachedOrLoad(anyString(), eq(epoch0Key), any(), anyLong(), anyLong());
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    streamStore.createStream(scope, testStream, config, System.currentTimeMillis(), null, executor).join();
    streamStore.setState(scope, testStream, State.ACTIVE, null, executor).join();
    assertEquals(firstEpochInOldFormat, streamStore.getEpoch(scope, testStream, 0, null, executor).join());
    ArrayList<Map.Entry<Double, Double>> newRange = new ArrayList<>();
    newRange.add(new AbstractMap.SimpleEntry<>(0.0, 1.0));
    // start with manual scale
    ScaleOpEvent event = new ScaleOpEvent(scope, testStream, Lists.newArrayList(0L), newRange, true,
            System.currentTimeMillis(), System.currentTimeMillis());
    streamStore.submitScale(scope, testStream, Lists.newArrayList(0L), new ArrayList<>(newRange),
            System.currentTimeMillis(), null, null, executor).join();
    // perform scaling
    scaleRequestHandler.execute(event).join();
    assertEquals(State.ACTIVE, streamStore.getState(scope, testStream, true, null, executor).join());
    assertEquals(1, streamStore.getActiveEpoch(scope, testStream, null, true, executor).join().getEpoch());
}
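The two-step drive used above is the general shape of a manual scale: the transition is first submitted to the metadata store, and only then is the ScaleOpEvent processed. A condensed sketch, assuming a store, handler, and executor wired as in this test:

// 1. record the requested epoch transition in the metadata store (manual scale only)
streamStore.submitScale(scope, stream, segmentsToSeal, newRanges, scaleTime, null, null, executor).join();
// 2. process the corresponding event; the handler drives the scale workflow to completion,
//    with the `true` flag indicating the scale was submitted ahead of time (as manualScale does above)
scaleRequestHandler.execute(new ScaleOpEvent(scope, stream, segmentsToSeal, newRanges, true, scaleTime, requestId)).join();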
use of io.pravega.shared.controller.event.ScaleOpEvent in project pravega by pravega.
the class ScaleRequestHandlerTest method concurrentIdenticalScaleRun.
private void concurrentIdenticalScaleRun(String stream, String func, boolean isManual,
                                         Predicate<Throwable> firstExceptionPredicate,
                                         boolean expectFailureOnSecondJob,
                                         Predicate<Throwable> secondExceptionPredicate,
                                         Map<String, Integer> invocationCount) throws Exception {
    StreamMetadataStore streamStore1 = getStore();
    StreamMetadataStore streamStore1Spied = spy(getStore());
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    streamStore1.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
    streamStore1.setState(scope, stream, State.ACTIVE, null, executor).join();
    CompletableFuture<Void> wait = new CompletableFuture<>();
    CompletableFuture<Void> signal = new CompletableFuture<>();
    ScaleOpEvent event = new ScaleOpEvent(scope, stream, Lists.newArrayList(0L),
            Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), isManual,
            System.currentTimeMillis(), System.currentTimeMillis());
    if (isManual) {
        streamStore1.submitScale(scope, stream, Lists.newArrayList(0L),
                Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)),
                System.currentTimeMillis(), null, null, executor).join();
    }
    StreamMetadataStore streamStore2 = getStore();
    ScaleOperationTask scaleRequestHandler1 = new ScaleOperationTask(streamMetadataTasks, streamStore1Spied, executor);
    ScaleOperationTask scaleRequestHandler2 = new ScaleOperationTask(streamMetadataTasks, streamStore2, executor);
    setMockLatch(streamStore1, streamStore1Spied, func, signal, wait);
    // the first job's processing will stall at the store method named by `func`
    CompletableFuture<Void> future1 = CompletableFuture.completedFuture(null)
            .thenComposeAsync(v -> scaleRequestHandler1.execute(event), executor);
    signal.join();
    // run the identical second job while the first is stalled; depending on the
    // scenario it either runs to completion or fails with the expected exception
    if (!expectFailureOnSecondJob) {
        scaleRequestHandler2.execute(event).join();
    } else {
        AssertExtensions.assertSuppliedFutureThrows("second job should fail",
                () -> scaleRequestHandler2.execute(event), secondExceptionPredicate);
    }
    // release the first job by completing the wait latch, then verify it fails as expected
    wait.complete(null);
    AssertExtensions.assertSuppliedFutureThrows("first scale should fail", () -> future1, firstExceptionPredicate);
    verify(streamStore1Spied, times(invocationCount.get("startScale")))
            .startScale(anyString(), anyString(), anyBoolean(), any(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("scaleCreateNewEpochs")))
            .scaleCreateNewEpochs(anyString(), anyString(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("scaleSegmentsSealed")))
            .scaleSegmentsSealed(anyString(), anyString(), any(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("completeScale")))
            .completeScale(anyString(), anyString(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("updateVersionedState")))
            .updateVersionedState(anyString(), anyString(), any(), any(), any(), any());
    // validate scale done
    VersionedMetadata<EpochTransitionRecord> versioned = streamStore1.getEpochTransition(scope, stream, null, executor).join();
    assertEquals(EpochTransitionRecord.EMPTY, versioned.getObject());
    assertEquals(2, getVersionNumber(versioned));
    assertEquals(1, streamStore1.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
    assertEquals(State.ACTIVE, streamStore1.getState(scope, stream, true, null, executor).join());
    streamStore1.close();
    streamStore2.close();
}
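setMockLatch is not shown in this snippet; a plausible minimal sketch of such a helper for the startScale case, stalling the spied method between a signal and a wait latch (illustrative only, not the test class's actual implementation):

private void setMockLatch(StreamMetadataStore store, StreamMetadataStore spied, String func,
                          CompletableFuture<Void> signal, CompletableFuture<Void> waitLatch) {
    if ("startScale".equals(func)) {
        doAnswer(invocation -> {
            // tell the test we are inside the call, then block until it releases us
            signal.complete(null);
            return waitLatch.thenCompose(v -> store.startScale(
                    invocation.getArgument(0), invocation.getArgument(1), invocation.getArgument(2),
                    invocation.getArgument(3), invocation.getArgument(4), invocation.getArgument(5),
                    invocation.getArgument(6)));
        }).when(spied).startScale(anyString(), anyString(), anyBoolean(), any(), any(), any(), any());
    }
}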