Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
The class ZookeeperKVTMetadataStore, method recordLastKVTableSegment.
@Override
public CompletableFuture<Void> recordLastKVTableSegment(String scope, String kvtable, int lastActiveSegment,
                                                        OperationContext context, Executor executor) {
    final String deletePath = String.format(DELETED_KVTABLES_PATH, getScopedKVTName(scope, kvtable));
    byte[] maxSegmentNumberBytes = new byte[Integer.BYTES];
    BitConverter.writeInt(maxSegmentNumberBytes, 0, lastActiveSegment);
    return storeHelper.getData(deletePath, x -> BitConverter.readInt(x, 0))
            .exceptionally(e -> {
                if (e instanceof StoreException.DataNotFoundException) {
                    return null;
                } else {
                    throw new CompletionException(e);
                }
            })
            .thenCompose(data -> {
                log.debug("Recording last segment {} for KeyValueTable {}/{} on deletion.", lastActiveSegment, scope, kvtable);
                if (data == null) {
                    // First deletion of this kvtable: create the znode holding the segment number.
                    return Futures.toVoid(storeHelper.createZNodeIfNotExist(deletePath, maxSegmentNumberBytes));
                } else {
                    // The kvtable was deleted before: the recorded segment number may only grow.
                    final int oldLastActiveSegment = data.getObject();
                    Preconditions.checkArgument(lastActiveSegment >= oldLastActiveSegment,
                            "Old last active segment (%s) for %s/%s is higher than current one %s.",
                            oldLastActiveSegment, scope, kvtable, lastActiveSegment);
                    return Futures.toVoid(storeHelper.setData(deletePath, maxSegmentNumberBytes, data.getVersion()));
                }
            });
}
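The method stores the last active segment number as a 4-byte integer encoded with BitConverter and guards updates with the znode version, so a concurrent writer cannot silently overwrite a higher value. A minimal round-trip sketch of the encoding, assuming BitConverter lives at io.pravega.common.util.BitConverter with the writeInt/readInt signatures used above:

import io.pravega.common.util.BitConverter;

public class SegmentNumberEncodingDemo {
    public static void main(String[] args) {
        // Encode the segment number exactly as recordLastKVTableSegment does.
        byte[] bytes = new byte[Integer.BYTES];
        BitConverter.writeInt(bytes, 0, 42);
        // Decode it back, as the getData() extractor lambda does.
        int decoded = BitConverter.readInt(bytes, 0);
        System.out.println(decoded == 42); // true
    }
}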
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
The class ControllerServiceTest, method setup.
@Before
public void setup() throws Exception {
    final TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor);
    final HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    BucketStore bucketStore = StreamStoreFactory.createInMemoryBucketStore();
    connectionPool = new ConnectionPoolImpl(ClientConfig.builder().build(),
            new SocketConnectionFactoryImpl(ClientConfig.builder().build()));
    SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper,
            executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper,
            executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executor, executor,
            "host", GrpcAuthHelper.getDisabledAuthHelper());
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
            streamTransactionMetadataTasks, new SegmentHelper(connectionPool, hostStore, executor),
            executor, null, requestTracker);

    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final ScalingPolicy policy2 = ScalingPolicy.fixed(3);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    final StreamConfiguration configuration2 = StreamConfiguration.builder().scalingPolicy(policy2).build();

    // createScope
    streamStore.createScope(SCOPE, null, executor).get();

    // region createStream
    startTs = System.currentTimeMillis();
    OperationContext context = streamStore.createStreamContext(SCOPE, stream1, 0L);
    streamStore.createStream(SCOPE, stream1, configuration1, startTs, context, executor).get();
    streamStore.setState(SCOPE, stream1, State.ACTIVE, context, executor).get();

    OperationContext context2 = streamStore.createStreamContext(SCOPE, stream2, 0L);
    streamStore.createStream(SCOPE, stream2, configuration2, startTs, context2, executor).get();
    streamStore.setState(SCOPE, stream2, State.ACTIVE, context2, executor).get();
    // endregion

    // region scaleSegments
    SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.75, 1.0);
    List<Long> sealedSegments = Collections.singletonList(1L);
    scaleTs = System.currentTimeMillis();
    VersionedMetadata<EpochTransitionRecord> record = streamStore.submitScale(SCOPE, stream1, sealedSegments,
            Arrays.asList(segment1, segment2), startTs, null, null, executor).get();
    VersionedMetadata<State> state = streamStore.getVersionedState(SCOPE, stream1, null, executor).get();
    state = streamStore.updateVersionedState(SCOPE, stream1, State.SCALING, state, null, executor).get();
    record = streamStore.startScale(SCOPE, stream1, false, record, state, null, executor).get();
    streamStore.scaleCreateNewEpochs(SCOPE, stream1, record, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream1,
            sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
    streamStore.completeScale(SCOPE, stream1, record, null, executor).get();
    streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();

    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.0, 0.5);
    SimpleEntry<Double, Double> segment4 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment5 = new SimpleEntry<>(0.75, 1.0);
    sealedSegments = Arrays.asList(0L, 1L, 2L);
    record = streamStore.submitScale(SCOPE, stream2, sealedSegments,
            Arrays.asList(segment3, segment4, segment5), scaleTs, null, null, executor).get();
    state = streamStore.getVersionedState(SCOPE, stream2, null, executor).get();
    state = streamStore.updateVersionedState(SCOPE, stream2, State.SCALING, state, null, executor).get();
    record = streamStore.startScale(SCOPE, stream2, false, record, state, null, executor).get();
    streamStore.scaleCreateNewEpochs(SCOPE, stream2, record, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream2,
            sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), record, null, executor).get();
    streamStore.completeScale(SCOPE, stream2, record, null, executor).get();
    streamStore.setState(SCOPE, stream2, State.ACTIVE, null, executor).get();
    // endregion
}
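The key ranges in the test follow from the fixed scaling policies: stream1 starts with fixed(2), i.e. two segments covering [0, 0.5) and [0.5, 1), so sealing segment 1 and creating (0.5, 0.75) and (0.75, 1.0) splits the upper half in two. A small sketch of that arithmetic; the uniform split is an assumption that matches the ranges used above, not something this snippet verifies against the API:

import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.List;

public class FixedPolicyRanges {
    // Uniform key ranges implied by a fixed(n) policy: segment i owns [i/n, (i+1)/n).
    static List<SimpleEntry<Double, Double>> ranges(int n) {
        List<SimpleEntry<Double, Double>> result = new ArrayList<>();
        for (int i = 0; i < n; i++) {
            result.add(new SimpleEntry<>((double) i / n, (double) (i + 1) / n));
        }
        return result;
    }

    public static void main(String[] args) {
        // fixed(2) -> [0.0, 0.5), [0.5, 1.0): segment 1 is the upper half,
        // which the test replaces with (0.5, 0.75) and (0.75, 1.0).
        System.out.println(ranges(2));
    }
}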
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
The class CreateTableTask, method execute.
@Override
public CompletableFuture<Void> execute(final CreateTableEvent request) {
    String scope = request.getScopeName();
    String kvt = request.getKvtName();
    int partitionCount = request.getPartitionCount();
    int primaryKeyLength = request.getPrimaryKeyLength();
    int secondaryKeyLength = request.getSecondaryKeyLength();
    long creationTime = request.getTimestamp();
    long requestId = request.getRequestId();
    long rolloverSize = request.getRolloverSizeBytes();
    String kvTableId = request.getTableId().toString();
    KeyValueTableConfiguration config = KeyValueTableConfiguration.builder()
            .partitionCount(partitionCount)
            .primaryKeyLength(primaryKeyLength)
            .secondaryKeyLength(secondaryKeyLength)
            .rolloverSizeBytes(rolloverSize)
            .build();
    final OperationContext context = kvtMetadataStore.createContext(scope, kvt, requestId);
    return RetryHelper.withRetriesAsync(() -> getKeyValueTable(scope, kvt)
            .thenCompose(table -> table.getId(context))
            .thenCompose(id -> {
                if (!id.equals(kvTableId)) {
                    log.debug(requestId, "Skipped processing create event for KeyValueTable {}/{} with Id:{} as UUIDs did not match.",
                            scope, kvt, id);
                    return CompletableFuture.completedFuture(null);
                } else {
                    return kvtMetadataStore.isScopeSealed(scope, context, executor).thenCompose(isScopeSealed -> {
                        if (isScopeSealed) {
                            log.warn(requestId, "Scope {} is in sealed state.", scope);
                            throw new IllegalStateException("Scope in sealed state: " + scope);
                        }
                        return this.kvtMetadataStore.createKeyValueTable(scope, kvt, config, creationTime, context, executor)
                                .thenComposeAsync(response -> {
                                    // Only for a new kvtable, or an existing one still being created, do we create
                                    // segments and change the state of the kvtable to active.
                                    if (response.getStatus().equals(CreateKVTableResponse.CreateStatus.NEW)
                                            || response.getStatus().equals(CreateKVTableResponse.CreateStatus.EXISTS_CREATING)) {
                                        final int startingSegmentNumber = response.getStartingSegmentNumber();
                                        final int minNumSegments = response.getConfiguration().getPartitionCount();
                                        final int keyLength = response.getConfiguration().getPrimaryKeyLength()
                                                + response.getConfiguration().getSecondaryKeyLength();
                                        List<Long> newSegments = IntStream.range(startingSegmentNumber, startingSegmentNumber + minNumSegments)
                                                .boxed()
                                                .map(x -> NameUtils.computeSegmentId(x, 0))
                                                .collect(Collectors.toList());
                                        // Chain the futures so the event is only considered processed once the
                                        // segments exist and the state transition has been attempted.
                                        return kvtMetadataTasks.createNewSegments(scope, kvt, newSegments, keyLength,
                                                        requestId, config.getRolloverSizeBytes())
                                                .thenCompose(y -> kvtMetadataStore.getVersionedState(scope, kvt, context, executor))
                                                .thenCompose(state -> {
                                                    if (state.getObject().equals(KVTableState.CREATING)) {
                                                        return Futures.toVoid(kvtMetadataStore.updateVersionedState(scope, kvt,
                                                                KVTableState.ACTIVE, state, context, executor));
                                                    }
                                                    return CompletableFuture.completedFuture(null);
                                                });
                                    }
                                    return CompletableFuture.completedFuture(null);
                                }, executor);
                    });
                }
            }), e -> Exceptions.unwrap(e) instanceof RetryableException, Integer.MAX_VALUE, executor);
}
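RetryHelper.withRetriesAsync re-runs the whole pipeline whenever the unwrapped failure is a RetryableException, and the UUID check at the top keeps those re-runs idempotent. A simplified, illustrative sketch of such a predicate-driven async retry combinator; this is not Pravega's actual RetryHelper, which also applies backoff between attempts:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.function.Predicate;
import java.util.function.Supplier;

public final class RetrySketch {
    // Retries an async task while the failure matches the predicate, up to maxAttempts.
    // In practice the predicate would unwrap CompletionExceptions first, as the
    // Exceptions.unwrap(e) call above does.
    static <T> CompletableFuture<T> withRetries(Supplier<CompletableFuture<T>> task,
                                                Predicate<Throwable> retryable,
                                                int maxAttempts, Executor executor) {
        CompletableFuture<T> result = new CompletableFuture<>();
        attempt(task, retryable, maxAttempts, executor, result);
        return result;
    }

    private static <T> void attempt(Supplier<CompletableFuture<T>> task,
                                    Predicate<Throwable> retryable,
                                    int remaining, Executor executor,
                                    CompletableFuture<T> result) {
        task.get().whenComplete((value, error) -> {
            if (error == null) {
                result.complete(value);
            } else if (remaining > 1 && retryable.test(error)) {
                // Schedule the next attempt instead of recursing on the completing thread.
                executor.execute(() -> attempt(task, retryable, remaining - 1, executor, result));
            } else {
                result.completeExceptionally(error);
            }
        });
    }
}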
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
The class TruncateStreamTask, method processTruncate.
private CompletableFuture<Void> processTruncate(String scope, String stream,
                                                VersionedMetadata<StreamTruncationRecord> versionedTruncationRecord,
                                                VersionedMetadata<State> versionedState,
                                                OperationContext context, long requestId) {
    String delegationToken = this.streamMetadataTasks.retrieveDelegationToken();
    StreamTruncationRecord truncationRecord = versionedTruncationRecord.getObject();
    log.info(requestId, "Truncating stream {}/{} at stream cut: {}", scope, stream, truncationRecord.getStreamCut());
    return Futures.toVoid(streamMetadataStore.updateVersionedState(scope, stream, State.TRUNCATING, versionedState, context, executor)
            .thenCompose(update -> notifyTruncateSegments(scope, stream, truncationRecord.getStreamCut(), delegationToken, requestId)
                    .thenCompose(x -> notifyDeleteSegments(scope, stream, truncationRecord.getToDelete(), delegationToken, requestId))
                    .thenAccept(x -> DYNAMIC_LOGGER.reportGaugeValue(TRUNCATED_SIZE,
                            versionedTruncationRecord.getObject().getSizeTill(), streamTags(scope, stream)))
                    .thenCompose(deleted -> streamMetadataStore.completeTruncation(scope, stream, versionedTruncationRecord, context, executor))
                    .thenCompose(x -> streamMetadataStore.updateVersionedState(scope, stream, State.ACTIVE, update, context, executor))));
}
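The method is a guarded state transition: enter TRUNCATING, perform the segment-store side effects, complete the truncation record, and only then return to ACTIVE, threading the versioned state from the first update into the last so a conflicting concurrent writer fails the version check. A hypothetical sketch of that general shape, with all names illustrative rather than taken from Pravega:

import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import java.util.function.Supplier;

public final class GuardedTransition {
    // Enter an intermediate state, run the work, then exit, passing the versioned
    // result of entering into the exit step. Each stage runs only if the previous
    // one succeeded, so a failed truncation never flips the stream back to ACTIVE.
    static <S> CompletableFuture<Void> runGuarded(Supplier<CompletableFuture<S>> enterState,
                                                  Supplier<CompletableFuture<Void>> work,
                                                  Function<S, CompletableFuture<S>> exitState) {
        return enterState.get()
                .thenCompose(entered -> work.get()
                        .thenCompose(v -> exitState.apply(entered)))
                .thenApply(v -> null);
    }
}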
Use of io.pravega.controller.store.stream.OperationContext in project pravega by pravega.
The class ZKScope, method listStreams.
@Override
public CompletableFuture<Pair<List<String>, String>> listStreams(int limit, String continuationToken,
                                                                 Executor executor, OperationContext context) {
    // Stream references are stored under a hierarchy of nodes as described in the `addStreamsInScope` method.
    // A continuation token is essentially a serialized integer position broken into three parts:
    // msb 2 digits, middle 4 digits, and lsb 4 digits. For example, a stream at position 123456789
    // is stored as /01/2345/stream-6789.
    // Effectively, all streams under the scope are ordered by stream position. The position is strictly
    // increasing, so any new stream added to the scope gets a higher position; streams can be deleted, though.
    // The continuation token signifies the position of the last stream returned in the previous iteration.
    // Continuing from it, we simply retrieve the next `limit` number of streams under the scope.
    // Since streams are stored under the aforesaid hierarchy, we start with all children of the `middle` part
    // of the continuation token and include only streams whose position is greater than the token.
    // If that does not yield `limit` streams, we move to the next higher available `middle` znode and fetch
    // its children. Once `middle` is exhausted, we increment `msb` and repeat until we have found `limit`
    // streams or reached the end.
    List<String> toReturn = new LinkedList<>();
    AtomicInteger remaining = new AtomicInteger(limit);
    Token floor = Token.fromString(continuationToken);
    AtomicReference<Token> lastPos = new AtomicReference<>(floor);
    // Compute on all available top-level children (0-99) that are greater than floor.getMsb().
    return computeOnChildren(streamsInScopePath, topChild -> {
        if (topChild >= floor.getMsb()) {
            String topPath = ZKPaths.makePath(streamsInScopePath, topChild.toString());
            // Set middle floor = supplied floor, or 0 if the top floor has been incremented.
            int middleFloor = topChild.intValue() == floor.getMsb() ? floor.getMiddle() : 0;
            // Compute on all available middle-level children (0-9999) of the current top level
            // that are greater than the middle floor.
            CompletableFuture<Void> voidCompletableFuture = computeOnChildren(topPath, middleChild -> {
                if (middleChild >= middleFloor) {
                    String middlePath = ZKPaths.makePath(topPath, middleChild.toString());
                    return store.getChildren(middlePath).thenAccept(streams -> {
                        // Set bottom floor = -1 if we have incremented either the top or middle floor.
                        int bottomFloor = topChild.intValue() == floor.getMsb()
                                && middleChild.intValue() == floor.getMiddle() ? floor.getLsb() : -1;
                        Pair<List<String>, Integer> retVal = filterStreams(streams, bottomFloor, remaining.get());
                        if (!retVal.getKey().isEmpty()) {
                            toReturn.addAll(retVal.getKey());
                            remaining.set(limit - toReturn.size());
                            lastPos.set(new Token(topChild, middleChild, retVal.getValue()));
                        }
                    }).thenApply(v -> remaining.get() > 0);
                } else {
                    return CompletableFuture.completedFuture(true);
                }
            }, executor);
            return voidCompletableFuture.thenApply(v -> remaining.get() > 0);
        } else {
            return CompletableFuture.completedFuture(true);
        }
    }, executor).thenApply(v -> new ImmutablePair<>(toReturn, lastPos.get().toString()));
}
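A minimal sketch of how a continuation token could split a stream position into the 2/4/4-digit hierarchy the comments describe. The real ZKScope.Token may differ; the class and field names here are illustrative:

final class PositionToken {
    final int msb;    // 0-99: top-level znode
    final int middle; // 0-9999: middle znode
    final int lsb;    // 0-9999: suffix of the stream reference node

    PositionToken(int position) {
        this.lsb = position % 10_000;
        this.middle = (position / 10_000) % 10_000;
        this.msb = position / 100_000_000;
    }

    int toPosition() {
        return msb * 100_000_000 + middle * 10_000 + lsb;
    }

    @Override
    public String toString() {
        return String.format("/%02d/%04d/stream-%04d", msb, middle, lsb);
    }

    public static void main(String[] args) {
        // Prints /01/2345/stream-6789, matching the layout in the comments above.
        System.out.println(new PositionToken(123456789));
    }
}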