Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
From the class StreamMetadataStoreTest, method concurrentStartScaleTest:
@Test(timeout = 30000)
public void concurrentStartScaleTest() throws Exception {
final String scope = "ScopeScale";
final String stream = "StreamScale1";
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
long start = System.currentTimeMillis();
store.createScope(scope, null, executor).get();
store.createStream(scope, stream, configuration, start, null, executor).get();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
// set minimum number of segments to 1
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
store.startUpdateConfiguration(scope, stream, config, null, executor).join();
VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(scope, stream, null, executor).join();
store.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
// region concurrent start scale
// Test scenario where one request starts and completes as the other is waiting on StartScale.createEpochTransition
SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 1.0);
List<Long> segmentsToSeal = Arrays.asList(0L, 1L);
long scaleTs = System.currentTimeMillis();
@SuppressWarnings("unchecked") PersistentStreamBase streamObj = (PersistentStreamBase) ((AbstractStreamMetadataStore) store).getStream(scope, stream, null);
PersistentStreamBase streamObjSpied = spy(streamObj);
CompletableFuture<Void> latch = new CompletableFuture<>();
CompletableFuture<Void> updateEpochTransitionCalled = new CompletableFuture<>();
doAnswer(x -> streamObj.getEpochTransitionNode(x.getArgument(0))).when(streamObjSpied).getEpochTransitionNode(any());
doAnswer(x -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1))).when(streamObjSpied).updateEpochTransitionNode(any(), any());
doAnswer(x -> CompletableFuture.runAsync(() -> {
    VersionedMetadata<EpochTransitionRecord> argument = x.getArgument(0);
    EpochTransitionRecord record = argument.getObject();
    if (record.getSegmentsToSeal().containsAll(segmentsToSeal)) {
        // wait until we create the epoch transition outside of this method
        updateEpochTransitionCalled.complete(null);
        latch.join();
    }
}).thenCompose(v -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1))))
        .when(streamObjSpied).updateEpochTransitionNode(any(), any());
StreamOperationContext context = new StreamOperationContext(((AbstractStreamMetadataStore) store).getScope(scope, null), streamObjSpied, 0L);
// the following should be stuck at createEpochTransition
CompletableFuture<VersionedMetadata<EpochTransitionRecord>> response = store.submitScale(
        scope, stream, segmentsToSeal, Collections.singletonList(segment2), scaleTs, null, context, executor);
updateEpochTransitionCalled.join();
// create new epochs corresponding to new scale as the previous scale waits to create epoch transition record
SimpleEntry<Double, Double> segment2p = new SimpleEntry<>(0.0, 0.5);
List<Long> segmentsToSeal2 = Collections.singletonList(0L);
long scaleTs2 = System.currentTimeMillis();
streamObjSpied.getEpochRecord(0, context).thenCompose(epochRecord -> {
    EpochTransitionRecord record = RecordHelper.computeEpochTransition(
            epochRecord, segmentsToSeal2, Collections.singletonList(segment2p), scaleTs2);
    return streamObjSpied.getEpochTransition(context)
            .thenCompose(existing -> streamObjSpied.updateEpochTransitionNode(
                    new VersionedMetadata<>(record, existing.getVersion()), context))
            .thenApply(v -> new VersionedMetadata<>(record, v));
}).thenCompose(epochRecord -> store.getVersionedState(scope, stream, context, executor)
        .thenCompose(state -> store.updateVersionedState(scope, stream, State.SCALING, state, context, executor)
                .thenCompose(updatedState -> store.startScale(scope, stream, false, epochRecord, updatedState, context, executor))
                .thenCompose(x -> store.scaleCreateNewEpochs(scope, stream, epochRecord, context, executor))
                .thenCompose(x -> store.scaleSegmentsSealed(scope, stream,
                        segmentsToSeal2.stream().collect(Collectors.toMap(r -> r, r -> 0L)), epochRecord, context, executor))
                .thenCompose(x -> store.completeScale(scope, stream, epochRecord, context, executor))))
        .thenCompose(y -> store.setState(scope, stream, State.ACTIVE, context, executor))
        .join();
latch.complete(null);
// first scale should fail in attempting to update epoch transition record.
AssertExtensions.assertSuppliedFutureThrows("WriteConflict in start scale", () -> response, e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
VersionedMetadata<EpochTransitionRecord> versioned = streamObj.getEpochTransition(context).join();
EpochTransitionRecord epochTransitionRecord = versioned.getObject();
assertEquals(EpochTransitionRecord.EMPTY, epochTransitionRecord);
// the competing scale has completed; set the state to SCALING to try to continue the first (conflicted) scale.
VersionedMetadata<State> state = store.getVersionedState(scope, stream, context, executor).join();
state = store.updateVersionedState(scope, stream, State.SCALING, state, context, executor).join();
// now call the next step of scaling -- startScale. This should throw an exception because the epoch transition record is now empty.
AssertExtensions.assertFutureThrows("epoch transition was supposed to be invalid",
        store.startScale(scope, stream, false, versioned, state, context, executor),
        e -> Exceptions.unwrap(e) instanceof IllegalStateException);
// verify that state is reset to ACTIVE
assertEquals(State.ACTIVE, store.getState(scope, stream, true, context, executor).join());
// endregion
}
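The takeaway from this test is that the losing submitScale call surfaces a StoreException.WriteConflictException when a concurrent scale updates the epoch transition record first. As a rough illustration, a caller could retry on that conflict. The sketch below is hypothetical (the helper name, single-retry policy, and parameter types are assumptions inferred from the snippets on this page) and reuses only the store calls and exception check visible above, assuming access to the same store and executor fields.

private CompletableFuture<VersionedMetadata<EpochTransitionRecord>> submitScaleWithOneRetry(
        String scope, String stream, List<Long> segmentsToSeal,
        List<SimpleEntry<Double, Double>> newRanges, long scaleTs, OperationContext context) {
    return store.submitScale(scope, stream, segmentsToSeal, newRanges, scaleTs, null, context, executor)
            .handle((result, ex) -> {
                if (ex == null) {
                    return CompletableFuture.completedFuture(result);
                }
                if (Exceptions.unwrap(ex) instanceof StoreException.WriteConflictException) {
                    // a concurrent scale updated the epoch transition record first; retry once
                    return store.submitScale(scope, stream, segmentsToSeal, newRanges, scaleTs, null, context, executor);
                }
                return Futures.<VersionedMetadata<EpochTransitionRecord>>failedFuture(ex);
            })
            .thenCompose(f -> f);
}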
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
From the class StreamTestBase, method testCreateStream:
@Test(timeout = 30000L)
public void testCreateStream() {
OperationContext context = getContext();
PersistentStreamBase stream = createStream("scope", "stream", System.currentTimeMillis(), 2, 0);
assertEquals(State.ACTIVE, stream.getState(true, context).join());
EpochRecord activeEpoch = stream.getActiveEpoch(true, context).join();
assertEquals(0, activeEpoch.getEpoch());
assertEquals(2, activeEpoch.getSegments().size());
VersionedMetadata<StreamTruncationRecord> truncationRecord = stream.getTruncationRecord(context).join();
assertEquals(StreamTruncationRecord.EMPTY, truncationRecord.getObject());
VersionedMetadata<EpochTransitionRecord> etr = stream.getEpochTransition(context).join();
assertEquals(EpochTransitionRecord.EMPTY, etr.getObject());
VersionedMetadata<CommittingTransactionsRecord> ctr = stream.getVersionedCommitTransactionsRecord(context).join();
assertEquals(CommittingTransactionsRecord.EMPTY, ctr.getObject());
assertEquals(activeEpoch, stream.getEpochRecord(0, context).join());
AssertExtensions.assertFutureThrows("", stream.getEpochRecord(1, context), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
}
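Since a fixed(2) stream starts in epoch 0 with two segments, their key ranges together cover the whole key space. The small check below is illustrative only, not part of the test; it assumes, based on the other snippets on this page, that getSegments() yields StreamSegmentRecord instances exposing getKeyStart()/getKeyEnd().

EpochRecord epoch0 = stream.getEpochRecord(0, context).join();
double covered = 0.0;
for (StreamSegmentRecord segment : epoch0.getSegments()) {
    // each segment owns a contiguous key range [keyStart, keyEnd)
    covered += segment.getKeyEnd() - segment.getKeyStart();
}
// the two ranges of epoch 0 should add up to the full [0.0, 1.0) key space
assertEquals(1.0, covered, 1e-9);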
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
From the class StreamTestBase, method createScaleAndRollStreamForMultiChunkTests:
// region multiple chunks test
private PersistentStreamBase createScaleAndRollStreamForMultiChunkTests(String name, String scope, int startingSegmentNumber, Supplier<Long> time) {
OperationContext context = getContext();
createScope(scope, context);
PersistentStreamBase stream = createStream(scope, name, time.get(), 5, startingSegmentNumber, 2, 2);
UUID txnId = createAndCommitTransaction(stream, 0, 0L);
// scale the stream 5 times so that overall we have 6 epochs and hence 3 chunks.
for (int i = 0; i < 5; i++) {
StreamSegmentRecord first = stream.getActiveSegments(context).join().get(0);
ArrayList<Long> sealedSegments = Lists.newArrayList(first.segmentId());
List<Map.Entry<Double, Double>> newRanges = new LinkedList<>();
newRanges.add(new AbstractMap.SimpleEntry<>(first.getKeyStart(), first.getKeyEnd()));
Map<Long, Long> sealedSizeMap = new HashMap<>();
sealedSizeMap.put(first.segmentId(), 100L);
scaleStream(stream, time.get(), sealedSegments, newRanges, sealedSizeMap);
}
EpochRecord activeEpoch = stream.getActiveEpoch(true, context).join();
// now roll transaction so that we have 2 more epochs added for overall 8 epochs and 4 chunks
Map<Long, Long> map1 = stream.getEpochRecord(0, context).join().getSegmentIds().stream()
        .collect(Collectors.toMap(x -> computeSegmentId(NameUtils.getSegmentNumber(x), activeEpoch.getEpoch() + 1),
                x -> 100L));
Map<Long, Long> map2 = activeEpoch.getSegmentIds().stream().collect(Collectors.toMap(x -> x, x -> 100L));
rollTransactions(stream, time.get(), 0, activeEpoch.getEpoch(), map1, map2);
// scale the stream 5 times so that overall we have 13 epochs and hence 7 chunks.
for (int i = 0; i < 5; i++) {
StreamSegmentRecord first = stream.getActiveSegments(context).join().get(0);
ArrayList<Long> sealedSegments = Lists.newArrayList(first.segmentId());
List<Map.Entry<Double, Double>> newRanges = new LinkedList<>();
newRanges.add(new AbstractMap.SimpleEntry<>(first.getKeyStart(), first.getKeyEnd()));
Map<Long, Long> sealedSizeMap = new HashMap<>();
sealedSizeMap.put(first.segmentId(), 100L);
scaleStream(stream, time.get(), sealedSegments, newRanges, sealedSizeMap);
}
return stream;
}
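Both loops above repeat the same seal-and-recreate step. A helper along the following lines (the name scaleFirstSegmentInPlace is an assumption, not part of the source) could express it once, using only calls already shown on this page: it seals the first active segment and re-creates its exact key range, so each invocation adds one epoch without changing the segment count.

private void scaleFirstSegmentInPlace(PersistentStreamBase stream, long timestamp, OperationContext context) {
    StreamSegmentRecord first = stream.getActiveSegments(context).join().get(0);
    ArrayList<Long> sealedSegments = Lists.newArrayList(first.segmentId());
    List<Map.Entry<Double, Double>> newRanges = new LinkedList<>();
    // recreate the sealed segment's exact key range so the epoch shape stays the same
    newRanges.add(new AbstractMap.SimpleEntry<>(first.getKeyStart(), first.getKeyEnd()));
    Map<Long, Long> sealedSizeMap = new HashMap<>();
    sealedSizeMap.put(first.segmentId(), 100L);
    scaleStream(stream, timestamp, sealedSegments, newRanges, sealedSizeMap);
}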
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
From the class RequestHandlersTest, method testScaleIgnoreFairness:
@Test
public void testScaleIgnoreFairness() {
StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
        new AutoScaleTask(streamMetadataTasks, streamStore, executor),
        new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
        new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
        new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
        new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
        new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
        new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
        new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
        new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
        streamStore,
        new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
        executor);
String fairness = "fairness";
streamStore.createScope(fairness, null, executor).join();
streamMetadataTasks.createStream(fairness, fairness, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), System.currentTimeMillis(), 0L).join();
// 1. set segment helper mock to throw exception
doAnswer(x -> Futures.failedFuture(new RuntimeException())).when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
// 2. start scale --> this should fail with a retryable exception while talking to segment store!
ScaleOpEvent scaleEvent = new ScaleOpEvent(fairness, fairness, Collections.singletonList(0L),
        Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false, System.currentTimeMillis(), 0L);
AssertExtensions.assertFutureThrows("", streamRequestHandler.process(scaleEvent, () -> false), e -> Exceptions.unwrap(e) instanceof RuntimeException);
// verify that scale was started
assertEquals(State.SCALING, streamStore.getState(fairness, fairness, true, null, executor).join());
// 3. set waiting processor to "myProcessor"
streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();
// 4. reset segment helper to return success
doAnswer(x -> CompletableFuture.completedFuture(true)).when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
// 5. process again. it should succeed while ignoring waiting processor
streamRequestHandler.process(scaleEvent, () -> false).join();
EpochRecord activeEpoch = streamStore.getActiveEpoch(fairness, fairness, null, true, executor).join();
assertEquals(1, activeEpoch.getEpoch());
assertEquals(State.ACTIVE, streamStore.getState(fairness, fairness, true, null, executor).join());
// 6. run a new scale. it should fail because of waiting processor.
ScaleOpEvent scaleEvent2 = new ScaleOpEvent(fairness, fairness,
        Collections.singletonList(NameUtils.computeSegmentId(1, 1)),
        Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false, System.currentTimeMillis(), 0L);
AssertExtensions.assertFutureThrows("", streamRequestHandler.process(scaleEvent2, () -> false), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
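The test flips the behaviour of the mocked segmentHelper by stubbing sealSegment twice (steps 1 and 4). Assuming the failing run invokes sealSegment exactly once, the same setup could be written as a single chained stubbing, sketched below with standard Mockito chaining; whether that assumption holds for the scale task is not something this snippet shows.

// fail on the first sealSegment call, succeed on every later one (chained Mockito answers)
doAnswer(x -> Futures.failedFuture(new RuntimeException()))
        .doAnswer(x -> CompletableFuture.completedFuture(true))
        .when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());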
Use of io.pravega.controller.store.stream.records.EpochRecord in project pravega by pravega.
From the class ControllerEventProcessorTest, method testCommitEventForSealingStream:
@Test(timeout = 60000)
public void testCommitEventForSealingStream() {
ScaleOperationTask scaleTask = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
SealStreamTask sealStreamTask = new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor);
String stream = "commitWithSeal";
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
streamStore.createStream(SCOPE, stream, config, System.currentTimeMillis(), null, executor).join();
streamStore.setState(SCOPE, stream, State.ACTIVE, null, executor).join();
UUID txnOnEpoch0 = streamStore.generateTransactionId(SCOPE, stream, null, executor).join();
VersionedTransactionData txnData0 = streamStore.createTransaction(SCOPE, stream, txnOnEpoch0, 10000, 10000, null, executor).join();
Assert.assertNotNull(txnData0);
checkTransactionState(SCOPE, stream, txnOnEpoch0, TxnStatus.OPEN);
streamStore.sealTransaction(SCOPE, stream, txnData0.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
checkTransactionState(SCOPE, stream, txnData0.getId(), TxnStatus.COMMITTING);
// scale stream
List<Map.Entry<Double, Double>> newRange = new LinkedList<>();
newRange.add(new AbstractMap.SimpleEntry<>(0.0, 1.0));
scaleTask.execute(new ScaleOpEvent(SCOPE, stream, Collections.singletonList(0L), newRange, false, System.currentTimeMillis(), 0L)).join();
UUID txnOnEpoch1 = streamStore.generateTransactionId(SCOPE, stream, null, executor).join();
VersionedTransactionData txnData1 = streamStore.createTransaction(SCOPE, stream, txnOnEpoch1, 10000, 10000, null, executor).join();
Assert.assertNotNull(txnData1);
checkTransactionState(SCOPE, stream, txnOnEpoch1, TxnStatus.OPEN);
streamStore.sealTransaction(SCOPE, stream, txnData1.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
checkTransactionState(SCOPE, stream, txnData1.getId(), TxnStatus.COMMITTING);
// set the stream to SEALING
streamStore.setState(SCOPE, stream, State.SEALING, null, executor).join();
// attempt to seal the stream. This should fail with postponement.
AssertExtensions.assertFutureThrows("Seal stream should fail with operation not allowed as their are outstanding transactions", sealStreamTask.execute(new SealStreamEvent(SCOPE, stream, 0L)), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
// now attempt to commit the transaction on epoch 1. epoch in commit event is ignored and transactions on lowest epoch
// should be committed first.
CommitRequestHandler commitEventProcessor = new CommitRequestHandler(streamStore, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
commitEventProcessor.processEvent(new CommitEvent(SCOPE, stream, txnData1.getEpoch())).join();
checkTransactionState(SCOPE, stream, txnData0.getId(), TxnStatus.COMMITTED);
checkTransactionState(SCOPE, stream, txnData1.getId(), TxnStatus.COMMITTING);
EpochRecord activeEpoch = streamStore.getActiveEpoch(SCOPE, stream, null, true, executor).join();
assertEquals(3, activeEpoch.getEpoch());
assertEquals(1, activeEpoch.getReferenceEpoch());
// attempt to seal the stream. This should still fail with postponement.
AssertExtensions.assertFutureThrows("Seal stream should fail with operation not allowed as their are outstanding transactions", sealStreamTask.execute(new SealStreamEvent(SCOPE, stream, 0L)), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
// now attempt to commit the transaction on epoch 1.
commitEventProcessor.processEvent(new CommitEvent(SCOPE, stream, txnData1.getEpoch())).join();
checkTransactionState(SCOPE, stream, txnData1.getId(), TxnStatus.COMMITTED);
// verify transaction has rolled over
activeEpoch = streamStore.getActiveEpoch(SCOPE, stream, null, true, executor).join();
assertEquals(3, activeEpoch.getEpoch());
assertEquals(1, activeEpoch.getReferenceEpoch());
// now attempt to seal the stream. it should complete.
sealStreamTask.execute(new SealStreamEvent(SCOPE, stream, 0L)).join();
}
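The create-then-mark-committing sequence appears twice in this test (for txnOnEpoch0 and txnOnEpoch1). A helper along these lines (name and placement are assumptions, not part of the source) captures that pattern, reusing only the store calls shown above.

private UUID createCommittingTxn(String scope, String stream) {
    // open a transaction with the same lease/maxExecution values used above, then seal it for commit
    UUID txnId = streamStore.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnData =
            streamStore.createTransaction(scope, stream, txnId, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, stream, txnData.getId(), true, Optional.empty(),
            "", Long.MIN_VALUE, null, executor).join();
    return txnData.getId();
}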