Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
From the class RequestHandlersTest, method scopeDeleteTest:
@Test
public void scopeDeleteTest() {
    final String testScope = "testScope";
    final String testStream = "testStream";
    final String testRG = "_RGTestRG";
    final String testKVT = "testKVT";
    StreamMetadataStore streamStoreSpied = spy(getStore());
    KVTableMetadataStore kvtStoreSpied = spy(getKvtStore());
    StreamMetadataTasks streamMetadataTasks1 = mock(StreamMetadataTasks.class);
    TableMetadataTasks kvtTasksMocked = mock(TableMetadataTasks.class);
    streamStoreSpied.createScope(testScope, null, executor).join();
    OperationContext ctx = new OperationContext() {
        @Override
        public long getOperationStartTime() {
            return 0;
        }

        @Override
        public long getRequestId() {
            return 0;
        }
    };
    UUID scopeId = streamStoreSpied.getScopeId(testScope, ctx, executor).join();
    doAnswer(x -> {
        CompletableFuture<UUID> cf = new CompletableFuture<>();
        cf.complete(scopeId);
        return cf;
    }).when(streamStoreSpied).getScopeId(eq(testScope), eq(ctx), eq(executor));
    doAnswer(invocation -> {
        CompletableFuture<Boolean> cf = new CompletableFuture<>();
        cf.complete(true);
        return cf;
    }).when(streamStoreSpied).isScopeSealed(eq(testScope), any(), any());
    createStreamInStore(testStream, testScope);
    createStreamInStore(testRG, testScope);
    assertTrue(streamStore.checkStreamExists(testScope, testStream, ctx, executor).join());
    doAnswer(invocation -> {
        CompletableFuture<Controller.UpdateStreamStatus.Status> future = new CompletableFuture<>();
        future.complete(Controller.UpdateStreamStatus.Status.SUCCESS);
        return future;
    }).when(streamMetadataTasks1).sealStream(anyString(), anyString(), anyLong());
    doAnswer(invocation -> {
        CompletableFuture<Controller.DeleteStreamStatus.Status> future = new CompletableFuture<>();
        future.complete(Controller.DeleteStreamStatus.Status.SUCCESS);
        return future;
    }).when(streamMetadataTasks1).deleteStream(anyString(), anyString(), anyLong());
    // Create Reader Group
    ReaderGroupConfig rgConfig = ReaderGroupConfig.builder()
            .stream(NameUtils.getScopedStreamName(testScope, testStream))
            .build();
    final ReaderGroupConfig config = ReaderGroupConfig.cloneConfig(rgConfig, UUID.randomUUID(), 123L);
    Controller.ReaderGroupConfiguration expectedConfig = ModelHelper.decode(testScope, testRG, config);
    doAnswer(invocationOnMock -> {
        CompletableFuture<Controller.CreateReaderGroupResponse.Status> createRG = new CompletableFuture<>();
        createRG.complete(Controller.CreateReaderGroupResponse.Status.SUCCESS);
        return createRG;
    }).when(streamMetadataTasks1).createReaderGroup(anyString(), any(), any(), anyLong(), anyLong());
    doAnswer(invocation -> CompletableFuture.completedFuture(
            Controller.ReaderGroupConfigResponse.newBuilder()
                    .setStatus(Controller.ReaderGroupConfigResponse.Status.SUCCESS)
                    .setConfig(expectedConfig)
                    .build()))
            .when(streamMetadataTasks1).getReaderGroupConfig(eq(testScope), anyString(), anyLong());
    doAnswer(invocationOnMock -> {
        CompletableFuture<Controller.DeleteReaderGroupStatus.Status> future = new CompletableFuture<>();
        future.complete(Controller.DeleteReaderGroupStatus.Status.SUCCESS);
        return future;
    }).when(streamMetadataTasks1).deleteReaderGroup(anyString(), anyString(), anyString(), anyLong());
    // Create KVT
    KeyValueTableConfiguration kvtConfig = KeyValueTableConfiguration.builder()
            .partitionCount(1)
            .primaryKeyLength(1)
            .secondaryKeyLength(1)
            .build();
    doAnswer(invocationOnMock -> {
        CompletableFuture<Controller.CreateKeyValueTableStatus.Status> fut = new CompletableFuture<>();
        fut.complete(Controller.CreateKeyValueTableStatus.Status.SUCCESS);
        return fut;
    }).when(kvtTasksMocked).createKeyValueTable(anyString(), anyString(), any(), anyLong(), anyLong());
    List<String> tableList = new ArrayList<>();
    tableList.add(testKVT);
    Pair<List<String>, String> listOfKVTables = new ImmutablePair<>(tableList, "");
    doAnswer(invocationOnMock -> CompletableFuture.completedFuture(listOfKVTables))
            .doAnswer(invocationOnMock -> CompletableFuture.completedFuture(
                    new ImmutablePair<>(Collections.emptyList(), invocationOnMock.getArgument(0))))
            .when(kvtStoreSpied).listKeyValueTables(anyString(), any(), anyInt(), any(), any());
    doAnswer(invocationOnMock -> {
        CompletableFuture<Controller.DeleteKVTableStatus.Status> future = new CompletableFuture<>();
        future.complete(Controller.DeleteKVTableStatus.Status.SUCCESS);
        return future;
    }).when(kvtTasksMocked).deleteKeyValueTable(anyString(), anyString(), anyLong());
    Controller.CreateKeyValueTableStatus.Status status = kvtTasksMocked.createKeyValueTable(testScope,
            testKVT, kvtConfig, System.currentTimeMillis(), 123L).join();
    assertEquals(status, Controller.CreateKeyValueTableStatus.Status.SUCCESS);
    DeleteScopeTask requestHandler = new DeleteScopeTask(streamMetadataTasks1, streamStoreSpied,
            kvtStoreSpied, kvtTasksMocked, executor);
    DeleteScopeEvent event = new DeleteScopeEvent(testScope, 123L, scopeId);
    CompletableFuture<Void> future = requestHandler.execute(event);
    future.join();
}
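The anonymous OperationContext above exists only so the test has a stable instance for Mockito's eq(ctx) matching; it overrides just the two accessors exercised by the spied calls. A minimal sketch of how such a stub could be factored into a reusable test helper (the helper name fixedContext and its fixed values are illustrative assumptions, not part of the Pravega test):

private OperationContext fixedContext(long requestId) {
    return new OperationContext() {
        @Override
        public long getOperationStartTime() {
            // start time is irrelevant for these stubbed interactions
            return 0L;
        }

        @Override
        public long getRequestId() {
            return requestId;
        }
    };
}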
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
From the class AbortRequestHandler, method processEvent:
@Override
public CompletableFuture<Void> processEvent(AbortEvent event) {
    String scope = event.getScope();
    String stream = event.getStream();
    int epoch = event.getEpoch();
    UUID txId = event.getTxid();
    long requestId = event.getRequestId();
    if (requestId == 0L) {
        requestId = ControllerService.nextRequestId();
    }
    Timer timer = new Timer();
    OperationContext context = streamMetadataStore.createStreamContext(scope, stream, requestId);
    log.info(requestId, "Aborting transaction {} on stream {}/{}", event.getTxid(), event.getScope(), event.getStream());
    return Futures.toVoid(streamMetadataStore.getSegmentsInEpoch(event.getScope(), event.getStream(), epoch, context, executor)
            .thenApply(segments -> segments.stream().map(StreamSegmentRecord::segmentId).collect(Collectors.toList()))
            .thenCompose(segments -> streamMetadataTasks.notifyTxnAbort(scope, stream, segments, txId, context.getRequestId()))
            .thenCompose(x -> streamMetadataStore.abortTransaction(scope, stream, txId, context, executor))
            .whenComplete((result, error) -> {
                if (error != null) {
                    log.warn(context.getRequestId(), "Failed aborting transaction {} on stream {}/{}",
                            event.getTxid(), event.getScope(), event.getStream());
                    TransactionMetrics.getInstance().abortTransactionFailed(scope, stream);
                } else {
                    log.info(context.getRequestId(), "Successfully aborted transaction {} on stream {}/{}",
                            event.getTxid(), event.getScope(), event.getStream());
                    if (processedEvents != null) {
                        processedEvents.offer(event);
                    }
                    TransactionMetrics.getInstance().abortTransaction(scope, stream, timer.getElapsed());
                }
            }));
}
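The handler creates a stream-scoped OperationContext up front and then threads context.getRequestId() through every log line and downstream call, so the whole abort is traceable as one operation. A compressed sketch of that request-id defaulting and timing idiom, with doAbortChain() as a hypothetical stand-in for the future chain shown above:

long requestId = event.getRequestId();
if (requestId == 0L) {
    // events without a propagated request id get a fresh one for tracing
    requestId = ControllerService.nextRequestId();
}
OperationContext context = streamMetadataStore.createStreamContext(scope, stream, requestId);
Timer timer = new Timer();
return doAbortChain(context)    // hypothetical placeholder for the abort chain above
        .whenComplete((result, error) -> {
            if (error != null) {
                TransactionMetrics.getInstance().abortTransactionFailed(scope, stream);
            } else {
                // latency is reported only for successful aborts
                TransactionMetrics.getInstance().abortTransaction(scope, stream, timer.getElapsed());
            }
        });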
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
From the class CommitRequestHandler, method execute:
/**
 * This method attempts to collect all transactions in the epoch that are marked for commit and decides whether they
 * can be committed in the active epoch or whether the transactions need to be rolled.
 *
 * @param event event to process
 * @return Completable future which indicates completion of processing of commit event.
 */
@Override
public CompletableFuture<Void> execute(CommitEvent event) {
    String scope = event.getScope();
    String stream = event.getStream();
    long requestId = streamMetadataTasks.getRequestId(null);
    OperationContext context = streamMetadataStore.createStreamContext(scope, stream, requestId);
    log.debug(requestId, "Attempting to commit available transactions on stream {}/{}", event.getScope(), event.getStream());
    CompletableFuture<Void> future = new CompletableFuture<>();
    // Note: we will ignore the epoch in the event. It has been deprecated.
    // The logic now finds the smallest epoch with transactions and commits them.
    tryCommitTransactions(scope, stream, context).whenComplete((r, e) -> {
        if (e != null) {
            Throwable cause = Exceptions.unwrap(e);
            // for operation not allowed, we will report the event
            if (cause instanceof StoreException.OperationNotAllowedException) {
                log.debug(requestId, "Cannot commit transaction on stream {}/{}. Postponing", scope, stream);
            } else {
                log.warn(requestId, "Exception while attempting to commit transaction on stream {}/{}", scope, stream, e);
                TransactionMetrics.getInstance().commitTransactionFailed(scope, stream);
            }
            future.completeExceptionally(cause);
        } else {
            if (r >= 0) {
                log.info(requestId, "Successfully committed transactions on epoch {} on stream {}/{}", r, scope, stream);
            } else {
                log.info(requestId, "No transactions found in committing state on stream {}/{}", scope, stream);
            }
            if (processedEvents != null) {
                try {
                    processedEvents.offer(event);
                } catch (Exception ex) {
                    // ignore; processedEvents is only populated to enable unit testing of this class
                }
            }
            future.complete(null);
        }
    });
    return future;
}
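The method body implies a result contract for tryCommitTransactions: a non-negative value is the epoch whose transactions were committed, a negative value means nothing was waiting in the committing state, and an OperationNotAllowedException is a signal to postpone rather than a hard failure. A hedged sketch of how that contract reads at a call site (the comments spell out the branches already visible above):

tryCommitTransactions(scope, stream, context).whenComplete((epoch, error) -> {
    if (error != null && Exceptions.unwrap(error) instanceof StoreException.OperationNotAllowedException) {
        // commit is temporarily blocked; the event completes exceptionally and is retried later
    } else if (error == null && epoch >= 0) {
        // the transactions of that epoch were committed
    } else if (error == null) {
        // negative result: no transactions in committing state, event is still treated as processed
    }
});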
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
From the class CreateReaderGroupTask, method execute:
@Override
public CompletableFuture<Void> execute(final CreateReaderGroupEvent request) {
    String scope = request.getScope();
    String readerGroup = request.getRgName();
    UUID readerGroupId = request.getReaderGroupId();
    ReaderGroupConfig config = getConfigFromEvent(request);
    long requestId = request.getRequestId();
    OperationContext context = streamMetadataStore.createRGContext(scope, readerGroup, requestId);
    return streamMetadataStore.isScopeSealed(scope, context, executor).thenCompose(exists -> {
        if (exists) {
            log.warn(requestId, "Scope {} already in sealed state", scope);
            return CompletableFuture.completedFuture(null);
        }
        return RetryHelper.withRetriesAsync(() -> streamMetadataStore.getReaderGroupId(scope, readerGroup, context, executor)
                .thenCompose(rgId -> {
                    if (!rgId.equals(readerGroupId)) {
                        log.warn(requestId, "Skipping processing of CreateReaderGroupEvent with stale UUID.");
                        return CompletableFuture.completedFuture(null);
                    }
                    return streamMetadataTasks.isRGCreationComplete(scope, readerGroup, context).thenCompose(complete -> {
                        if (!complete) {
                            return Futures.toVoid(streamMetadataTasks.createReaderGroupTasks(scope, readerGroup, config,
                                    request.getCreateTimeStamp(), context));
                        }
                        return CompletableFuture.completedFuture(null);
                    });
                }), e -> Exceptions.unwrap(e) instanceof RetryableException, Integer.MAX_VALUE, executor);
    });
}
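Creation is wrapped in RetryHelper.withRetriesAsync so transient failures are retried indefinitely while non-retryable ones propagate. A minimal sketch of that retry idiom in isolation, reusing the predicate and argument shape shown above (createStepOnce() is a hypothetical idempotent async step, not a Pravega API):

CompletableFuture<Void> result = RetryHelper.withRetriesAsync(
        () -> createStepOnce(),                                      // hypothetical idempotent async step
        e -> Exceptions.unwrap(e) instanceof RetryableException,     // retry only transient failures
        Integer.MAX_VALUE,                                           // effectively unbounded retries
        executor);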
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
From the class DeleteReaderGroupTask, method execute:
@Override
public CompletableFuture<Void> execute(final DeleteReaderGroupEvent request) {
    String scope = request.getScope();
    String readerGroup = request.getRgName();
    long requestId = request.getRequestId();
    UUID readerGroupId = request.getReaderGroupId();
    final OperationContext context = streamMetadataStore.createRGContext(scope, readerGroup, requestId);
    return streamMetadataStore.getReaderGroupId(scope, readerGroup, context, executor).thenCompose(id -> {
        if (!id.equals(readerGroupId)) {
            log.warn(requestId, "Skipping processing of Reader Group delete request {} as UUIDs did not match.", requestId);
            return CompletableFuture.completedFuture(null);
        }
        return streamMetadataStore.getReaderGroupConfigRecord(scope, readerGroup, context, executor).thenCompose(configRecord -> {
            if (!ReaderGroupConfig.StreamDataRetention.values()[configRecord.getObject().getRetentionTypeOrdinal()]
                    .equals(ReaderGroupConfig.StreamDataRetention.NONE)) {
                String scopedRGName = NameUtils.getScopedReaderGroupName(scope, readerGroup);
                // update Stream metadata tables, if RG is a Subscriber
                Iterator<String> streamIter = configRecord.getObject().getStartingStreamCuts().keySet().iterator();
                return Futures.loop(streamIter::hasNext, () -> {
                    Stream stream = Stream.of(streamIter.next());
                    OperationContext streamContext = streamMetadataStore.createStreamContext(stream.getScope(),
                            stream.getStreamName(), requestId);
                    return streamMetadataStore.deleteSubscriber(stream.getScope(), stream.getStreamName(), scopedRGName,
                            configRecord.getObject().getGeneration(), streamContext, executor);
                }, executor);
            }
            return CompletableFuture.completedFuture(null);
        }).thenCompose(v -> {
            String rgStreamContext = NameUtils.getStreamForReaderGroup(readerGroup);
            OperationContext streamContext = streamMetadataStore.createStreamContext(scope, rgStreamContext, requestId);
            return streamMetadataTasks.sealStream(scope, rgStreamContext, streamContext)
                    .thenCompose(z -> streamMetadataTasks.deleteStream(scope, rgStreamContext, streamContext));
        }).thenCompose(v1 -> streamMetadataStore.deleteReaderGroup(scope, readerGroup, context, executor));
    });
}
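When the reader group is a subscriber, its starting stream cuts are walked with Futures.loop, which evaluates the body one stream at a time so each deleteSubscriber call completes before the next begins, each with its own stream-scoped OperationContext. A small sketch of that sequential-loop idiom using the same Futures.loop shape (subscribedStreams and updateOne are illustrative placeholders):

Iterator<String> streamIter = subscribedStreams.iterator();    // hypothetical collection of scoped stream names
CompletableFuture<Void> done = Futures.loop(
        streamIter::hasNext,
        () -> updateOne(streamIter.next()),                    // hypothetical async update, one stream per iteration
        executor);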