use of io.pravega.controller.store.stream.records.ActiveTxnRecord in project pravega by pravega.
the class ControllerDescribeStreamCommand method execute.
@Override
public void execute() {
    ensureArgCount(2);
    final String scope = getArg(0);
    final String stream = getArg(1);
    try {
        @Cleanup CuratorFramework zkClient = createZKClient();
        ScheduledExecutorService executor = getCommandArgs().getState().getExecutor();
        @Cleanup ConnectionPool pool = createConnectionPool();
        // The Pravega Controller service may store metadata either in Zookeeper or in Segment Store tables.
        // We need to instantiate the correct type of metadata store object based on the cluster at hand.
        StreamMetadataStore store;
        @Cleanup SegmentHelper segmentHelper = null;
        if (getCLIControllerConfig().getMetadataBackend().equals(CLIConfig.MetadataBackends.ZOOKEEPER.name())) {
            store = StreamStoreFactory.createZKStore(zkClient, executor);
        } else {
            segmentHelper = instantiateSegmentHelper(zkClient, pool);
            GrpcAuthHelper authHelper = GrpcAuthHelper.getDisabledAuthHelper();
            store = StreamStoreFactory.createPravegaTablesStore(segmentHelper, authHelper, zkClient, executor);
        }
        // Output the configuration of this Stream.
        CompletableFuture<StreamConfiguration> streamConfig = store.getConfiguration(scope, stream, null, executor);
        prettyJSONOutput("stream_config", streamConfig.join());
        // Output the state of this Stream.
        prettyJSONOutput("stream_state", store.getState(scope, stream, true, null, executor).join());
        // Output the total number of segments for this Stream.
        Set<Long> segments = store.getAllSegmentIds(scope, stream, null, executor).join();
        prettyJSONOutput("segment_count", segments.size());
        // Check whether the Stream is sealed.
        prettyJSONOutput("is_sealed", store.isSealed(scope, stream, null, executor).join());
        // Output the active epoch for this Stream.
        prettyJSONOutput("active_epoch", store.getActiveEpoch(scope, stream, null, true, executor).join());
        // Output the active Transactions for this Stream, if any.
        Map<UUID, ActiveTxnRecord> activeTxn = store.getActiveTxns(scope, stream, null, executor).join();
        if (!activeTxn.isEmpty()) {
            prettyJSONOutput("active_transactions", activeTxn);
        }
        // Output the truncation record.
        prettyJSONOutput("truncation_record", store.getTruncationRecord(scope, stream, null, executor).join().getObject());
        // Output the metadata that describes all the scaling information for this Stream.
        prettyJSONOutput("scaling_info", store.getScaleMetadata(scope, stream,
                segments.stream().min(Long::compareTo).get(),
                segments.stream().max(Long::compareTo).get(),
                null, executor).join());
        // Clean up resources.
        if (segmentHelper != null) {
            segmentHelper.close();
            store.close();
        }
    } catch (Exception e) {
        System.err.println("Exception accessing the metadata store: " + e.getMessage());
    }
}
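The active_transactions entry above is the raw Map<UUID, ActiveTxnRecord> returned by StreamMetadataStore#getActiveTxns. As a minimal, hypothetical sketch (not part of the command) of how that output could be post-processed, the records can be grouped by transaction status; it relies only on ActiveTxnRecord#getTxnStatus, which is also used elsewhere on this page, plus java.util.stream.Collectors.

// Hypothetical helper (not part of ControllerDescribeStreamCommand): counts the active
// transactions of a Stream by their TxnStatus, e.g. to spot a build-up of COMMITTING transactions.
private static Map<TxnStatus, Long> countTransactionsByStatus(Map<UUID, ActiveTxnRecord> activeTxn) {
    return activeTxn.values().stream()
            .collect(Collectors.groupingBy(ActiveTxnRecord::getTxnStatus, Collectors.counting()));
}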
use of io.pravega.controller.store.stream.records.ActiveTxnRecord in project pravega by pravega.
the class ControllerMetadataJsonSerializerTest method testActiveTxnRecord.
@Test
public void testActiveTxnRecord() {
    ActiveTxnRecord record = new ActiveTxnRecord(1L, 1L, 1L, TxnStatus.OPEN, "a", 1L, 1L, ImmutableMap.of(1L, 2L, 3L, 4L));
    testRecordSerialization(record, ActiveTxnRecord.class);
}
use of io.pravega.controller.store.stream.records.ActiveTxnRecord in project pravega by pravega.
the class PersistentStreamBase method createTransaction.
@Override
public CompletableFuture<VersionedTransactionData> createTransaction(final UUID txnId, final long lease, final long maxExecutionTime, OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    final long current = System.currentTimeMillis();
    final long leaseTimestamp = current + lease;
    final long maxExecTimestamp = current + maxExecutionTime;
    // Extract the epoch from the transaction id.
    final int epoch = RecordHelper.getTransactionEpoch(txnId);
    ActiveTxnRecord record = ActiveTxnRecord.builder()
            .txnStatus(TxnStatus.OPEN)
            .leaseExpiryTime(leaseTimestamp)
            .txCreationTimestamp(current)
            .maxExecutionExpiryTime(maxExecTimestamp)
            .writerId(Optional.empty())
            .commitTime(Optional.empty())
            .commitOrder(Optional.empty())
            .build();
    return verifyNotSealed(context).thenCompose(v -> createNewTransaction(epoch, txnId, record, context)
            .thenApply(version -> new VersionedTransactionData(epoch, txnId, version, TxnStatus.OPEN,
                    current, maxExecTimestamp, "", Long.MIN_VALUE, Long.MIN_VALUE, ImmutableMap.of())));
}
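Both expiry timestamps above are offsets from the current wall-clock time: the lease expires at current + lease, while current + maxExecutionTime is the hard deadline recorded in the ActiveTxnRecord. The following is a minimal, hypothetical sketch of how a caller might interpret those fields; it assumes the record exposes the usual Lombok-style getters (getLeaseExpiryTime, getMaxExecutionExpiryTime) for the builder fields shown above.

// Hypothetical helper, assuming ActiveTxnRecord exposes getLeaseExpiryTime() and
// getMaxExecutionExpiryTime() getters for the builder fields used in createTransaction.
static boolean isTransactionTimedOut(ActiveTxnRecord record, long nowMillis) {
    // The transaction has timed out once either its lease or its overall
    // max-execution deadline lies in the past.
    return nowMillis > record.getLeaseExpiryTime() || nowMillis > record.getMaxExecutionExpiryTime();
}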
use of io.pravega.controller.store.stream.records.ActiveTxnRecord in project pravega by pravega.
the class PersistentStreamBase method generateMarksForTransactions.
/**
 * This method takes the list of transactions in the committing transactions record and tries to report marks
 * for the writers of those transactions, if writer information is present in the record. The writer id and
 * commit time are optionally provided by the client: a client that is not interested in watermarking may not
 * report a writer id and time during the commit request, and older clients will not report them either.
 * The writer id, commit time and commit offsets are recorded in the ActiveTxnRecord of each transaction,
 * and a mark is recorded for every transaction where those fields are present.
 * Any INVALID_TIME or INVALID_POSITION failures while noting marks for writers are ignored, because they
 * typically arise from the idempotent commit case, where a transaction with a higher position and time may
 * already have been committed and the overall mark for the writer may already have progressed.
 *
 * @param context     operation context.
 * @param writerMarks map from writer id to the mark (time and position) to report for that writer.
 * @return A CompletableFuture which, when completed, will have marks reported for all transactions in the
 * committing transactions record for which a writer with time and position information is available.
 */
CompletableFuture<Void> generateMarksForTransactions(OperationContext context, Map<String, TxnWriterMark> writerMarks) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    Preconditions.checkArgument(writerMarks != null);
    // For each writer id, take the mark with the maximum (time, position) pair across all transactions of
    // that writer. Note: if multiple transactions from the same writer carry the same time, any one of them
    // is picked arbitrarily and its position is used for watermarks; the other positions and times are ignored.
    val noteTimeFutures = writerMarks.entrySet().stream()
            .map(x -> Futures.exceptionallyExpecting(
                    noteWriterMark(x.getKey(), x.getValue().getTimestamp(), x.getValue().getPosition(), context),
                    DATA_NOT_FOUND_PREDICATE, null))
            .collect(Collectors.toList());
    return Futures.allOf(noteTimeFutures);
}
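The selection rule described in the comment (take the maximum time-and-position pair per writer) is straightforward to express as a stream reduction. The following is a hypothetical, self-contained sketch of that rule; WriterCommit is a made-up value class standing in for the writer id, commit time and commit offsets carried by each ActiveTxnRecord, since the real code operates on TxnWriterMark instances whose exact shape is not shown here.

// Hypothetical sketch (not Controller code) of the "max (time, position) per writer" selection.
static final class WriterCommit {
    final String writerId;
    final long commitTime;
    final Map<Long, Long> position;

    WriterCommit(String writerId, long commitTime, Map<Long, Long> position) {
        this.writerId = writerId;
        this.commitTime = commitTime;
        this.position = position;
    }
}

static Map<String, WriterCommit> latestCommitPerWriter(List<WriterCommit> commits) {
    return commits.stream()
            .collect(Collectors.toMap(
                    c -> c.writerId,
                    c -> c,
                    // Keep the commit with the larger time; equal times are resolved arbitrarily,
                    // matching the note in generateMarksForTransactions.
                    (a, b) -> a.commitTime >= b.commitTime ? a : b));
}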
use of io.pravega.controller.store.stream.records.ActiveTxnRecord in project pravega by pravega.
the class PersistentStreamBase method sealActiveTxn.
/**
 * Seals a transaction in the OPEN/COMMITTING/ABORTING state. If the transaction is in the OPEN state, this
 * method does a CAS on the transaction's VersionedMetadata node, optionally validating the expected version
 * of that node if one is supplied.
 *
 * @param epoch     transaction epoch.
 * @param txId      transaction identifier.
 * @param commit    boolean indicating whether to commit or abort the transaction.
 * @param version   optional expected version of the transaction node to validate before updating it.
 * @param writerId  writer id.
 * @param timestamp commit timestamp supplied by the writer.
 * @param context   operation context.
 * @return a pair containing the transaction status and its epoch.
 */
private CompletableFuture<SimpleEntry<TxnStatus, Integer>> sealActiveTxn(final int epoch, final UUID txId, final boolean commit,
                                                                         final Optional<Version> version, final String writerId,
                                                                         final long timestamp, OperationContext context) {
    return getActiveTx(epoch, txId, context).thenCompose(data -> {
        ActiveTxnRecord txnRecord = data.getObject();
        Version dataVersion = version.orElseGet(data::getVersion);
        TxnStatus status = txnRecord.getTxnStatus();
        switch (status) {
            case OPEN:
                return sealActiveTx(epoch, txId, commit, txnRecord, dataVersion, writerId, timestamp, context)
                        .thenApply(y -> new SimpleEntry<>(commit ? TxnStatus.COMMITTING : TxnStatus.ABORTING, epoch));
            case COMMITTING:
            case COMMITTED:
                if (commit) {
                    return CompletableFuture.completedFuture(new SimpleEntry<>(status, epoch));
                } else {
                    throw StoreException.create(StoreException.Type.ILLEGAL_STATE,
                            "Stream: " + getName() + " Transaction: " + txId.toString() + " State: " + status.name());
                }
            case ABORTING:
            case ABORTED:
                if (commit) {
                    throw StoreException.create(StoreException.Type.ILLEGAL_STATE,
                            "Stream: " + getName() + " Transaction: " + txId.toString() + " State: " + status.name());
                } else {
                    return CompletableFuture.completedFuture(new SimpleEntry<>(status, epoch));
                }
            default:
                throw StoreException.create(StoreException.Type.DATA_NOT_FOUND,
                        "Stream: " + getName() + " Transaction: " + txId.toString());
        }
    });
}
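The CAS mentioned in the javadoc is a plain optimistic-concurrency update: read the versioned transaction record, then write it back only if the stored version still matches the one that was read. The sketch below is generic and hypothetical; VersionedStore, VersionedEntry and updateIfVersionMatches are illustrative names and not part of the Controller metadata store API (the real code goes through getActiveTx/sealActiveTx with a Version object, as shown above).

// Generic, hypothetical illustration of the compare-and-set pattern sealActiveTxn relies on.
interface VersionedStore<T> {
    CompletableFuture<VersionedEntry<T>> get(String key);
    // Completes exceptionally (e.g. with a write-conflict error) if the stored version differs.
    CompletableFuture<Long> updateIfVersionMatches(String key, T newValue, long expectedVersion);
}

final class VersionedEntry<T> {
    final T value;
    final long version;

    VersionedEntry(T value, long version) {
        this.value = value;
        this.version = version;
    }
}

static <T> CompletableFuture<Long> casUpdate(VersionedStore<T> store, String key, UnaryOperator<T> update) {
    // If another process updated the record after the read, updateIfVersionMatches fails and
    // the caller can re-read and retry.
    return store.get(key).thenCompose(entry ->
            store.updateIfVersionMatches(key, update.apply(entry.value), entry.version));
}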