Use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
From class ScaleRequestHandlerTest, method testScaleRequest:
@SuppressWarnings("unchecked")
@Test(timeout = 30000)
public void testScaleRequest() throws ExecutionException, InterruptedException {
    AutoScaleTask requestHandler = new AutoScaleTask(streamMetadataTasks, streamStore, executor);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler multiplexer = new StreamRequestHandler(requestHandler, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
    // send a scale-up event with number of splits = 1
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    AutoScaleEvent scaleUpEvent = new AutoScaleEvent(scope, stream, 2, AutoScaleEvent.UP, System.currentTimeMillis(), 1, false, System.currentTimeMillis());
    assertTrue(Futures.await(multiplexer.process(scaleUpEvent, () -> false)));
    // verify that one scale-op event is written into the stream
    assertEquals(1, writer.queue.size());
    ControllerEvent event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    ScaleOpEvent scaleOpEvent = (ScaleOpEvent) event;
    double start = 2.0 / 3.0;
    double end = 1.0;
    double middle = (start + end) / 2;
    assertEquals(2, scaleOpEvent.getNewRanges().size());
    double delta = 0.0000000000001;
    assertEquals(start, scaleOpEvent.getNewRanges().get(0).getKey(), delta);
    assertEquals(middle, scaleOpEvent.getNewRanges().get(0).getValue(), delta);
    assertEquals(middle, scaleOpEvent.getNewRanges().get(1).getKey(), delta);
    assertEquals(end, scaleOpEvent.getNewRanges().get(1).getValue(), delta);
    assertEquals(1, scaleOpEvent.getSegmentsToSeal().size());
    assertTrue(scaleOpEvent.getSegmentsToSeal().contains(2L));
    assertTrue(Futures.await(multiplexer.process(scaleOpEvent, () -> false)));
    // verify that the event is processed successfully
    List<StreamSegmentRecord> activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == 2L));
    // verify that two splits are created even though we sent numOfSplits = 1 in the AutoScaleEvent
    long three = computeSegmentId(3, 1);
    long four = computeSegmentId(4, 1);
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == three));
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == four));
    assertTrue(activeSegments.size() == 4);
    // process the first scale-down event; it should only mark the segment as cold
    AutoScaleEvent scaleDownEvent = new AutoScaleEvent(scope, stream, four, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis());
    assertTrue(Futures.await(multiplexer.process(scaleDownEvent, () -> false)));
    assertTrue(writer.queue.isEmpty());
    activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == four));
    assertTrue(activeSegments.size() == 4);
    assertTrue(streamStore.isCold(scope, stream, four, null, executor).join());
    AutoScaleEvent scaleDownEvent2 = new AutoScaleEvent(scope, stream, three, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false, System.currentTimeMillis());
    assertTrue(Futures.await(multiplexer.process(scaleDownEvent2, () -> false)));
    assertTrue(streamStore.isCold(scope, stream, three, null, executor).join());
    // verify that a new event has been posted
    assertEquals(1, writer.queue.size());
    event = writer.queue.take();
    assertTrue(event instanceof ScaleOpEvent);
    scaleOpEvent = (ScaleOpEvent) event;
    assertEquals(1, scaleOpEvent.getNewRanges().size());
    assertEquals(start, scaleOpEvent.getNewRanges().get(0).getKey(), delta);
    assertEquals(end, scaleOpEvent.getNewRanges().get(0).getValue(), delta);
    assertEquals(2, scaleOpEvent.getSegmentsToSeal().size());
    assertTrue(scaleOpEvent.getSegmentsToSeal().contains(three));
    assertTrue(scaleOpEvent.getSegmentsToSeal().contains(four));
    // process the scale-down event
    assertTrue(Futures.await(multiplexer.process(scaleOpEvent, () -> false)));
    long five = computeSegmentId(5, 2);
    activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == three));
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == four));
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == five));
    assertTrue(activeSegments.size() == 3);
    // Make the next operation throw a non-retryable failure so that the test does not wait through retries.
    // A retryable failure could keep retrying for a few seconds, which would inflate the test's duration,
    // and any change to the retry durations or attempt counts in the retry helper would change this test's
    // running time. Hence we send an incorrect segmentsToSeal list, which results in a non-retryable failure
    // and fails immediately.
    assertFalse(Futures.await(multiplexer.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(five), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.5, 1.0)), false, System.currentTimeMillis(), System.currentTimeMillis()), () -> false)));
    activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == three));
    assertTrue(activeSegments.stream().noneMatch(z -> z.segmentId() == four));
    assertTrue(activeSegments.stream().anyMatch(z -> z.segmentId() == five));
    assertTrue(activeSegments.size() == 3);
    assertFalse(Futures.await(multiplexer.process(new AbortEvent(scope, stream, 0, UUID.randomUUID(), 11L), () -> false)));
}
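The EventWriterMock used by this test is defined elsewhere in the Pravega test sources and is not shown on this page. Below is a minimal sketch of the shape the test relies on, assuming the mock only needs to capture posted controller events in a blocking queue; the writeEvent signature is an assumption inferred from the test's usage, not the real helper's interface.

import io.pravega.shared.controller.event.ControllerEvent;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.LinkedBlockingQueue;

// Hypothetical stand-in for the EventWriterMock referenced above.
class EventWriterMock {
    // Events "written" by the controller tasks land here; the test drains
    // them with queue.take() and feeds them back into the multiplexer.
    final BlockingQueue<ControllerEvent> queue = new LinkedBlockingQueue<>();

    CompletableFuture<Void> writeEvent(ControllerEvent event) {
        queue.add(event);
        return CompletableFuture.completedFuture(null);
    }
}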
Use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
From class StreamTransactionMetadataTasks, method sealTxnBody:
/**
* Seals a txn and transitions it to COMMITTING (resp. ABORTING) state if commit param is true (resp. false).
*
* Post-condition:
* 1. If seal completes successfully, then
* (a) txn state is COMMITTING/ABORTING,
* (b) CommitEvent/AbortEvent is present in the commit stream/abort stream,
* (c) txn is removed from host-txn index,
* (d) txn is removed from the timeout service.
*
* 2. If the process fails after transitioning the txn to COMMITTING/ABORTING state, but before responding to the
* client, then, since the txn is present in the host-txn index, some other controller process shall put a
* CommitEvent/AbortEvent into the commit stream/abort stream.
*
* @param host host id. It is different from hostId iff invoked from TxnSweeper for aborting orphaned txn.
* @param scope scope name.
* @param stream stream name.
* @param commit boolean indicating whether to commit txn.
* @param txnId txn id.
* @param version expected version of txn node in store.
* @param ctx context.
* @return Txn status after sealing it.
*/
CompletableFuture<TxnStatus> sealTxnBody(final String host, final String scope, final String stream, final boolean commit, final UUID txnId, final Integer version, final OperationContext ctx) {
    TxnResource resource = new TxnResource(scope, stream, txnId);
    Optional<Integer> versionOpt = Optional.ofNullable(version);
    // Step 1. Add txn to current host's index, if it is not already present.
    // The version recorded in the index entry does not matter (Integer.MAX_VALUE is a placeholder),
    // because once sealing succeeds the txn would no longer be open.
    CompletableFuture<Void> addIndex = host.equals(hostId) && !timeoutService.containsTxn(scope, stream, txnId) ?
            streamMetadataStore.addTxnToIndex(hostId, resource, Integer.MAX_VALUE) :
            CompletableFuture.completedFuture(null);
    addIndex.whenComplete((v, e) -> {
        if (e != null) {
            log.debug("Txn={}, failed adding txn to host-txn index of host={}", txnId, hostId);
        } else {
            log.debug("Txn={}, added txn to host-txn index of host={}", txnId, hostId);
        }
    });
    // Step 2. Seal txn
    CompletableFuture<AbstractMap.SimpleEntry<TxnStatus, Integer>> sealFuture = addIndex
            .thenComposeAsync(x -> streamMetadataStore.sealTransaction(scope, stream, txnId, commit, versionOpt, ctx, executor), executor)
            .whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed sealing txn", txnId);
                } else {
                    log.debug("Txn={}, sealed successfully, commit={}", txnId, commit);
                }
            });
    // Step 3. Write event to the corresponding stream.
    return sealFuture.thenComposeAsync(pair -> {
        TxnStatus status = pair.getKey();
        switch (status) {
            case COMMITTING:
                return writeCommitEvent(scope, stream, pair.getValue(), txnId, status);
            case ABORTING:
                return writeAbortEvent(scope, stream, pair.getValue(), txnId, status);
            case ABORTED:
            case COMMITTED:
                return CompletableFuture.completedFuture(status);
            case OPEN:
            case UNKNOWN:
            default:
                // Not reachable after a successful sealTransaction call; otherwise an exception would be thrown.
                return CompletableFuture.completedFuture(status);
        }
    }, executor).thenComposeAsync(status -> {
        // Step 4. Remove txn from timeoutService, and from the index.
        timeoutService.removeTxn(scope, stream, txnId);
        log.debug("Txn={}, removed from timeout service", txnId);
        return streamMetadataStore.removeTxnFromIndex(host, resource, true).whenComplete((v, e) -> {
            if (e != null) {
                log.debug("Txn={}, failed removing txn from host-txn index of host={}", txnId, hostId);
            } else {
                log.debug("Txn={}, removed txn from host-txn index of host={}", txnId, hostId);
            }
        }).thenApply(x -> status);
    }, executor);
}
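The method above is the shared body behind the commit and abort paths. A plausible sketch of that delegation is shown below; the exact parameter lists of the real entry points may differ, but the essential difference is the commit flag and the expected-version argument (commit passes no version, abort passes one).

// Sketch only: hypothetical simplified entry points delegating to sealTxnBody.
public CompletableFuture<TxnStatus> commitTxn(String scope, String stream, UUID txnId, OperationContext ctx) {
    // commit = true, no expected version
    return sealTxnBody(hostId, scope, stream, true, txnId, null, ctx);
}

public CompletableFuture<TxnStatus> abortTxn(String scope, String stream, UUID txnId, Integer version, OperationContext ctx) {
    // commit = false; the expected version guards against racing lease renewals
    return sealTxnBody(hostId, scope, stream, false, txnId, version, ctx);
}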
Use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
From class StreamTransactionMetadataTasks, method writeAbortEvent:
CompletableFuture<TxnStatus> writeAbortEvent(String scope, String stream, int epoch, UUID txnId, TxnStatus status) {
    String key = txnId.toString();
    AbortEvent event = new AbortEvent(scope, stream, epoch, txnId);
    return TaskStepsRetryHelper.withRetries(() -> writeEvent(abortEventEventStreamWriter, abortStreamName, key, event, txnId, status), executor);
}
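TaskStepsRetryHelper.withRetries wraps the write in the controller's retry loop. A minimal sketch of such a helper, built on Pravega's io.pravega.common.util.Retry utility, is shown below; the backoff parameters and the retry predicate are illustrative assumptions, not the values used by the real helper.

import io.pravega.common.util.Retry;
import io.pravega.controller.retryable.RetryableException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.function.Supplier;

public final class TaskStepsRetryHelper {
    // Assumed policy: exponential backoff starting at 100 ms, doubling per
    // attempt, at most 5 attempts, delay capped at 10 s.
    public static <T> CompletableFuture<T> withRetries(Supplier<CompletableFuture<T>> futureSupplier,
                                                       ScheduledExecutorService executor) {
        return Retry.withExpBackoff(100, 2, 5, 10000)
                    // assumed predicate: retry only controller-defined retryable exceptions
                    .retryWhen(RetryableException::isRetryable)
                    .runAsync(futureSupplier, executor);
    }
}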
Use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
From class StreamMetadataTasksTest, method sealStreamWithTxnTest:
@Test(timeout = 30000)
public void sealStreamWithTxnTest() throws Exception {
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    String streamWithTxn = "streamWithTxn";
    // region seal a stream with transactions
    long start = System.currentTimeMillis();
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(policy).build();
    streamStorePartialMock.createStream(SCOPE, streamWithTxn, config, start, null, executor).get();
    streamStorePartialMock.setState(SCOPE, streamWithTxn, State.ACTIVE, null, executor).get();
    // create txns
    VersionedTransactionData openTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();
    VersionedTransactionData committingTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();
    VersionedTransactionData abortingTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();
    // set one transaction to committing
    streamStorePartialMock.sealTransaction(SCOPE, streamWithTxn, committingTxn.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    // set one transaction to aborting
    streamStorePartialMock.sealTransaction(SCOPE, streamWithTxn, abortingTxn.getId(), false, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    // mock the getActiveTransactions call such that the committing txn is returned as an OPEN txn
    Map<UUID, ActiveTxnRecord> activeTxns = streamStorePartialMock.getActiveTxns(SCOPE, streamWithTxn, null, executor).join();
    Map<UUID, ActiveTxnRecord> retVal = activeTxns.entrySet().stream().map(tx -> {
        if (!tx.getValue().getTxnStatus().equals(TxnStatus.OPEN) && !tx.getValue().getTxnStatus().equals(TxnStatus.ABORTING)) {
            ActiveTxnRecord txRecord = tx.getValue();
            return new AbstractMap.SimpleEntry<>(tx.getKey(), new ActiveTxnRecord(txRecord.getTxCreationTimestamp(), txRecord.getLeaseExpiryTime(), txRecord.getMaxExecutionExpiryTime(), TxnStatus.OPEN));
        } else {
            return tx;
        }
    }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    doReturn(CompletableFuture.completedFuture(retVal)).when(streamStorePartialMock).getActiveTxns(eq(SCOPE), eq(streamWithTxn), any(), any());
    List<AbortEvent> abortListBefore = abortWriter.getEventList();
    streamMetadataTasks.sealStream(SCOPE, streamWithTxn, 0L);
    AssertExtensions.assertFutureThrows("seal stream did not fail processing with correct exception", processEvent(requestEventWriter), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    requestEventWriter.eventQueue.take();
    reset(streamStorePartialMock);
    // verify that the open txn's status is set to aborting
    VersionedTransactionData txnData = streamStorePartialMock.getTransactionData(SCOPE, streamWithTxn, openTxn.getId(), null, executor).join();
    assertEquals(txnData.getStatus(), TxnStatus.ABORTING);
    assertEquals(requestEventWriter.getEventQueue().size(), 1);
    // verify that abort events are posted for the aborted txns
    List<AbortEvent> abortListAfter = abortWriter.getEventList();
    assertEquals(abortListAfter.size(), abortListBefore.size() + 2);
    assertTrue(abortListAfter.stream().anyMatch(x -> x.getTxid().equals(openTxn.getId())));
    assertTrue(abortListAfter.stream().anyMatch(x -> x.getTxid().equals(abortingTxn.getId())));
    txnData = streamStorePartialMock.getTransactionData(SCOPE, streamWithTxn, committingTxn.getId(), null, executor).join();
    assertEquals(txnData.getStatus(), TxnStatus.COMMITTING);
    // mock the getActiveTransactions call to return a random, non-existent transaction id, so that
    // attempting to abort it throws a DataNotFound exception
    retVal = new HashMap<>();
    retVal.put(UUID.randomUUID(), new ActiveTxnRecord(1L, 1L, 1L, TxnStatus.OPEN));
    doReturn(CompletableFuture.completedFuture(retVal)).when(streamStorePartialMock).getActiveTxns(eq(SCOPE), eq(streamWithTxn), any(), any());
    AssertExtensions.assertFutureThrows("seal stream did not fail processing with correct exception", processEvent(requestEventWriter), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    reset(streamStorePartialMock);
    // now complete all existing transactions and verify that the seal completes
    streamStorePartialMock.abortTransaction(SCOPE, streamWithTxn, openTxn.getId(), null, executor).join();
    streamStorePartialMock.abortTransaction(SCOPE, streamWithTxn, abortingTxn.getId(), null, executor).join();
    ((AbstractStreamMetadataStore) streamStorePartialMock).commitTransaction(SCOPE, streamWithTxn, committingTxn.getId(), null, executor).join();
    activeTxns = streamStorePartialMock.getActiveTxns(SCOPE, streamWithTxn, null, executor).join();
    assertTrue(activeTxns.isEmpty());
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    // endregion
}
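The processEvent helper used above is defined elsewhere in the test class. A plausible sketch follows, assuming it takes one event from the WriterMock's queue, runs it through the request handler (using the same process(event, supplier) shape seen in the first snippet on this page), and re-queues the event on failure so a later call can retry it; the field and method names here are assumptions inferred from the test flow.

// Hypothetical helper: pull the next request event and process it once.
// Assumes: java.util.concurrent.CompletionException and io.pravega.common.Exceptions are imported,
// and streamRequestHandler is the test's StreamRequestHandler instance.
private CompletableFuture<Void> processEvent(WriterMock requestEventWriter) throws InterruptedException {
    ControllerEvent event = requestEventWriter.getEventQueue().take();
    return streamRequestHandler.process(event, () -> false)
            .exceptionally(e -> {
                // put the event back so the next processEvent call can retry it
                requestEventWriter.getEventQueue().add(event);
                throw new CompletionException(Exceptions.unwrap(e));
            });
}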
Use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
From class StreamTransactionMetadataTasksTest, method writerRoutingKeyTest:
@Test(timeout = 10000)
public void writerRoutingKeyTest() throws InterruptedException {
    StreamMetadataStore streamStoreMock = StreamStoreFactory.createZKStore(zkClient, executor);
    txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(), executor, "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
    streamStore.createScope(SCOPE, null, executor).join();
    streamStore.createStream(SCOPE, STREAM, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), 1L, null, executor).join();
    streamStore.setState(SCOPE, STREAM, State.ACTIVE, null, executor).join();
    TestEventStreamWriter<CommitEvent> commitWriter = new TestEventStreamWriter<>();
    TestEventStreamWriter<AbortEvent> abortWriter = new TestEventStreamWriter<>();
    txnTasks.initializeStreamWriters(commitWriter, abortWriter);
    UUID txnId = UUID.randomUUID();
    txnTasks.writeAbortEvent(SCOPE, STREAM, 0, txnId, TxnStatus.ABORTING, 0L).join();
    Pair<String, AbortEvent> request = abortWriter.requestsReceived.take();
    assertEquals(request.getKey(), request.getValue().getKey());
    txnTasks.writeAbortEvent(new AbortEvent(SCOPE, STREAM, 0, txnId, 10L)).join();
    Pair<String, AbortEvent> request2 = abortWriter.requestsReceived.take();
    assertEquals(request2.getKey(), request2.getValue().getKey());
    // verify that both abort events use the same routing key
    assertEquals(request.getKey(), request2.getKey());
    txnTasks.writeCommitEvent(SCOPE, STREAM, 0, txnId, TxnStatus.COMMITTING, 0L).join();
    Pair<String, CommitEvent> request3 = commitWriter.requestsReceived.take();
    assertEquals(request3.getKey(), request3.getValue().getKey());
    txnTasks.writeCommitEvent(new CommitEvent(SCOPE, STREAM, 0)).join();
    Pair<String, CommitEvent> request4 = commitWriter.requestsReceived.take();
    assertEquals(request4.getKey(), request4.getValue().getKey());
    // verify that both commit events use the same routing key
    assertEquals(request3.getKey(), request4.getKey());
}
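TestEventStreamWriter is likewise a local test helper. Below is a minimal sketch of the part this test depends on, assuming it records (routing key, event) pairs in a blocking queue; the real helper would implement Pravega's full EventStreamWriter interface, whose remaining methods are omitted here as they are not exercised by the test.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;

// Hypothetical reduction of the TestEventStreamWriter used above.
class TestEventStreamWriter<T> {
    // Each write is captured as a (routing key, event) pair for the test to inspect.
    final BlockingQueue<Pair<String, T>> requestsReceived = new LinkedBlockingQueue<>();

    CompletableFuture<Void> writeEvent(String routingKey, T event) {
        requestsReceived.add(new ImmutablePair<>(routingKey, event));
        return CompletableFuture.completedFuture(null);
    }
}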