Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class SegmentAggregator, method mergeWith.
/**
* Merges the Transaction StreamSegment with given metadata into this one at the current offset.
*
* @param transactionMetadata The metadata of the Transaction StreamSegment to merge.
* @param mergeOp             The MergeTransactionOperation being processed for this merge.
* @param timer               Timer for the operation.
* @return A CompletableFuture that, when completed, will contain the number of bytes that were merged into this
* StreamSegment. If failed, the Future will contain the exception that caused it.
*/
private CompletableFuture<FlushResult> mergeWith(UpdateableSegmentMetadata transactionMetadata, MergeTransactionOperation mergeOp, TimeoutTimer timer) {
    if (transactionMetadata.isDeleted()) {
        return Futures.failedFuture(new DataCorruptionException(String.format(
                "Attempted to merge with deleted Transaction segment '%s'.", transactionMetadata.getName())));
    }

    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "mergeWith",
            transactionMetadata.getId(), transactionMetadata.getName(), transactionMetadata.isSealedInStorage());
    FlushResult result = new FlushResult();
    if (!transactionMetadata.isSealedInStorage() || transactionMetadata.getLength() > transactionMetadata.getStorageLength()) {
        // Nothing to do. Given Transaction is not eligible for merger yet.
        LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
        return CompletableFuture.completedFuture(result);
    }

    AtomicLong mergedLength = new AtomicLong();
    return this.storage
            .getStreamSegmentInfo(transactionMetadata.getName(), timer.getRemaining())
            .thenAcceptAsync(transProperties -> {
                // Check that the Storage agrees with our metadata (if not, we have a problem ...).
                if (transProperties.getLength() != transactionMetadata.getStorageLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' cannot be merged into parent '%s' because its metadata disagrees with the Storage. Metadata.StorageLength=%d, Storage.StorageLength=%d",
                            transactionMetadata.getName(), this.metadata.getName(), transactionMetadata.getStorageLength(), transProperties.getLength())));
                }

                if (transProperties.getLength() != mergeOp.getLength()) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' cannot be merged into parent '%s' because the declared length in the operation disagrees with the Storage. Operation.Length=%d, Storage.StorageLength=%d",
                            transactionMetadata.getName(), this.metadata.getName(), mergeOp.getLength(), transProperties.getLength())));
                }

                mergedLength.set(transProperties.getLength());
            }, this.executor)
            .thenComposeAsync(v1 -> storage.concat(this.handle.get(), mergeOp.getStreamSegmentOffset(), transactionMetadata.getName(), timer.getRemaining()), this.executor)
            .thenComposeAsync(v2 -> storage.getStreamSegmentInfo(this.metadata.getName(), timer.getRemaining()), this.executor)
            .thenApplyAsync(segmentProperties -> {
                // We have processed a MergeTransactionOperation: pop the first operation off and decrement the counter.
                StorageOperation processedOperation = this.operations.removeFirst();
                assert processedOperation instanceof MergeTransactionOperation : "First outstanding operation was not a MergeTransactionOperation";
                assert ((MergeTransactionOperation) processedOperation).getTransactionSegmentId() == transactionMetadata.getId() : "First outstanding operation was a MergeTransactionOperation for the wrong Transaction id.";
                int newCount = this.mergeTransactionCount.decrementAndGet();
                assert newCount >= 0 : "Negative value for mergeTransactionCount";

                // Post-merger validation. Verify we are still in agreement with the Storage.
                long expectedNewLength = this.metadata.getStorageLength() + mergedLength.get();
                if (segmentProperties.getLength() != expectedNewLength) {
                    throw new CompletionException(new DataCorruptionException(String.format(
                            "Transaction Segment '%s' was merged into parent '%s' but the parent segment has an unexpected StorageLength after the merger. Previous=%d, MergeLength=%d, Expected=%d, Actual=%d",
                            transactionMetadata.getName(), this.metadata.getName(), this.metadata.getStorageLength(), mergedLength.get(), expectedNewLength, segmentProperties.getLength())));
                }
                updateMetadata(segmentProperties);
                updateMetadataForTransactionPostMerger(transactionMetadata);
                this.lastFlush.set(this.timer.getElapsed());
                result.withMergedBytes(mergedLength.get());
                LoggerHelpers.traceLeave(log, this.traceObjectId, "mergeWith", traceId, result);
                return result;
            }, this.executor)
            .exceptionally(ex -> {
                Throwable realEx = Exceptions.unwrap(ex);
                if (realEx instanceof BadOffsetException || realEx instanceof StreamSegmentNotExistsException) {
                    // We either attempted to write at an offset that already contained other data, or the Transaction
                    // Segment no longer exists. This can happen for a number of reasons, but we do not have enough
                    // information here to determine why. We need to enter reconciliation mode, and hope for the best.
                    setState(AggregatorState.ReconciliationNeeded);
                }

                // Rethrow all exceptions.
                throw new CompletionException(ex);
            });
}
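Worth noting in mergeWith above is how a single TimeoutTimer is threaded through the whole chain: each Storage call receives timer.getRemaining(), so the stages together can never exceed the caller's original budget. Below is a minimal, self-contained sketch of that pattern, assuming the pravega-common artifact is on the classpath; the stage() helper and its names are hypothetical stand-ins, while TimeoutTimer and getRemaining() are the real API.

import io.pravega.common.TimeoutTimer;

import java.time.Duration;
import java.util.concurrent.CompletableFuture;

public class TimeoutBudgetSketch {
    // Hypothetical async stage: stands in for a Storage call that honors a time budget.
    private static CompletableFuture<Void> stage(String name, Duration remaining) {
        System.out.println(name + " may use up to " + remaining.toMillis() + " ms");
        return CompletableFuture.completedFuture(null);
    }

    public static void main(String[] args) {
        // One timer for the whole operation; every stage draws from the same budget.
        TimeoutTimer timer = new TimeoutTimer(Duration.ofSeconds(30));
        stage("getStreamSegmentInfo", timer.getRemaining())
                .thenCompose(v -> stage("concat", timer.getRemaining()))
                .thenCompose(v -> stage("getStreamSegmentInfo", timer.getRemaining()))
                .join();
    }
}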
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class SegmentAggregator, method flush.
// endregion
// region Flushing and Merging
/**
* Flushes the contents of the Aggregator to the Storage.
*
* @param timeout Timeout for the operation.
* @return A CompletableFuture that, when completed, will contain a summary of the flush operation. If any errors
* occurred during the flush, the Future will be completed with the appropriate exception.
*/
CompletableFuture<FlushResult> flush(Duration timeout) {
    ensureInitializedAndNotClosed();
    if (this.metadata.isDeleted()) {
        // Segment has been deleted; don't do anything else.
        return CompletableFuture.completedFuture(new FlushResult());
    }

    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flush");
    TimeoutTimer timer = new TimeoutTimer(timeout);
    CompletableFuture<FlushResult> result;
    try {
        switch (this.state.get()) {
            case Writing:
                result = flushNormally(timer);
                break;
            case ReconciliationNeeded:
                result = beginReconciliation(timer).thenComposeAsync(v -> reconcile(timer), this.executor);
                break;
            case Reconciling:
                result = reconcile(timer);
                break;
            // $CASES-OMITTED$
            default:
                result = Futures.failedFuture(new IllegalStateException(String.format(
                        "Unexpected state for SegmentAggregator (%s) for segment '%s'.", this.state, this.metadata.getName())));
                break;
        }
    } catch (Exception ex) {
        // Convert synchronous errors into async errors - it's easier to handle on the receiving end.
        result = Futures.failedFuture(ex);
    }

    return result.thenApply(r -> {
        LoggerHelpers.traceLeave(log, this.traceObjectId, "flush", traceId, r);
        return r;
    });
}
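One detail worth calling out: the try/catch around the switch converts synchronous failures into failed futures, so callers deal with a single error channel. Here is a small sketch of that idiom, using the standard CompletableFuture.failedFuture (Java 9+) in place of pravega-common's Futures.failedFuture; the State enum and return values are made up for illustration.

import java.time.Duration;
import java.util.concurrent.CompletableFuture;

public class SyncToAsyncErrorsSketch {
    enum State { Writing, ReconciliationNeeded }

    static CompletableFuture<String> flush(State state, Duration timeout) {
        try {
            switch (state) {
                case Writing:
                    return CompletableFuture.completedFuture("flushed");
                default:
                    throw new IllegalStateException("Unexpected state: " + state);
            }
        } catch (Exception ex) {
            // Convert the synchronous error into an async one, as flush() does above.
            return CompletableFuture.failedFuture(ex);
        }
    }

    public static void main(String[] args) {
        flush(State.ReconciliationNeeded, Duration.ofSeconds(5))
                .exceptionally(ex -> "failed: " + ex.getMessage())
                .thenAccept(System.out::println);
    }
}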
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class ZooKeeperServiceRunner, method waitForSSLServerUp.
private static boolean waitForSSLServerUp(String address, long timeout, String keyStore, String keyStorePasswdPath, String trustStore, String trustStorePasswordPath) {
    TimeoutTimer timeoutTimer = new TimeoutTimer(Duration.ofMillis(timeout));
    String[] split = address.split(":");
    String host = split[0];
    int port = Integer.parseInt(split[1]);

    while (true) {
        try {
            SSLContext context = SSLContext.getInstance("TLS");
            TrustManagerFactory trustManager = getTrustManager(trustStore, trustStorePasswordPath);
            KeyManagerFactory keyFactory = getKeyManager(keyStore, keyStorePasswdPath);
            context.init(keyFactory.getKeyManagers(), trustManager.getTrustManagers(), null);

            try (Socket sock = context.getSocketFactory().createSocket(new Socket(host, port), host, port, true);
                 OutputStream outstream = sock.getOutputStream()) {
                outstream.write("stat".getBytes());
                outstream.flush();

                BufferedReader reader = new BufferedReader(new InputStreamReader(sock.getInputStream()));
                String line = reader.readLine();
                if (line != null && line.startsWith("Zookeeper version:")) {
                    log.info("Server UP");
                    return true;
                }
            }
        } catch (IOException | CertificateException | NoSuchAlgorithmException | KeyStoreException | KeyManagementException | UnrecoverableKeyException e) {
            // Ignore: failures are expected while the server is still coming up.
            log.warn("server {} not up.", address, e);
        }

        if (!timeoutTimer.hasRemaining()) {
            break;
        }
        Exceptions.handleInterrupted(() -> Thread.sleep(250));
    }
    return false;
}
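waitForSSLServerUp shows the other common TimeoutTimer idiom: a blocking poll loop bounded by hasRemaining(). A condensed, self-contained sketch of just that loop follows, again assuming pravega-common is on the classpath; the check supplier and the 250 ms interval are illustrative choices.

import io.pravega.common.TimeoutTimer;

import java.time.Duration;
import java.util.function.BooleanSupplier;

public class PollUntilDeadlineSketch {
    // Polls `check` every 250 ms until it succeeds or the overall budget runs out.
    static boolean waitUntil(BooleanSupplier check, Duration timeout) throws InterruptedException {
        TimeoutTimer timer = new TimeoutTimer(timeout);
        while (true) {
            if (check.getAsBoolean()) {
                return true;
            }
            if (!timer.hasRemaining()) {
                return false;
            }
            Thread.sleep(250);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        long readyAt = System.currentTimeMillis() + 600;
        boolean up = waitUntil(() -> System.currentTimeMillis() >= readyAt, Duration.ofSeconds(2));
        System.out.println("Condition met: " + up);
    }
}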
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class WriterTableProcessorTests, method checkIndex.
private void checkIndex(HashMap<BufferView, TableEntry> existingEntries, HashMap<BufferView, UUID> allKeys, TestContext context) throws Exception {
    // Get all the buckets associated with the given keys.
    val timer = new TimeoutTimer(TIMEOUT);
    val bucketsByHash = context.indexReader.locateBuckets(context.segmentMock, allKeys.values(), timer)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // Index the existing Keys by their current offsets.
    val keysByOffset = existingEntries.entrySet().stream()
            .collect(Collectors.toMap(e -> e.getValue().getKey().getVersion(), Map.Entry::getKey));

    // Load up all the offsets for all buckets.
    val buckets = bucketsByHash.values().stream().distinct()
            .collect(Collectors.toMap(b -> b, b -> context.indexReader.getBucketOffsets(context.segmentMock, b, timer).join()));

    // Loop through all the buckets' offsets and verify that those offsets point to existing keys.
    for (val e : buckets.entrySet()) {
        val bucketOffsets = e.getValue();
        for (val offset : bucketOffsets) {
            Assert.assertTrue("Found Bucket Offset that points to non-existing key.", keysByOffset.containsKey(offset));
        }
    }
    // Loop through all the keys and verify that each existing key is included in its associated
    // TableBucket, otherwise it is not included in any bucket.
    for (val e : allKeys.entrySet()) {
        val key = e.getKey();
        val tableEntry = existingEntries.get(key);
        val bucket = bucketsByHash.get(e.getValue());
        Assert.assertNotNull("Test error: no bucket found.", bucket);
        val bucketOffsets = buckets.get(bucket);
        if (tableEntry != null) {
            // This key should exist: just verify the TableEntry's offset (Key Version) exists in the Bucket's offset list.
            Assert.assertTrue("Non-deleted key was not included in a Table Bucket.", bucketOffsets.contains(tableEntry.getKey().getVersion()));
        } else {
            // Verify that all the keys that the Table Bucket points to do not match our key. Use our existing offset-key cache for that.
            for (val offset : bucketOffsets) {
                val keyAtOffset = keysByOffset.get(offset);
                Assert.assertNotEquals("Deleted key was still included in a Table Bucket.", key, keyAtOffset);
            }
        }
    }
}
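The first loop in checkIndex enforces a simple invariant: every offset a bucket advertises must resolve to a live key. Stripped of the table-store types, the check reduces to the following sketch; the maps and values are made up for illustration.

import java.util.List;
import java.util.Map;

public class BucketOffsetInvariantSketch {
    public static void main(String[] args) {
        // Hypothetical inverted index: key version (offset) -> key name.
        Map<Long, String> keysByOffset = Map.of(0L, "key1", 16L, "key2");
        // Offsets that one table bucket claims to contain.
        List<Long> bucketOffsets = List.of(0L, 16L);

        // The invariant verified above: every bucket offset points to an existing key.
        for (long offset : bucketOffsets) {
            if (!keysByOffset.containsKey(offset)) {
                throw new AssertionError("Found Bucket Offset that points to non-existing key: " + offset);
            }
        }
        System.out.println("All bucket offsets resolve to existing keys.");
    }
}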
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class DurableLog, method truncate.
@Override
public CompletableFuture<Void> truncate(long upToSequenceNumber, Duration timeout) {
    ensureRunning();
    Preconditions.checkArgument(this.metadata.isValidTruncationPoint(upToSequenceNumber), "Invalid Truncation Point. Must refer to a MetadataCheckpointOperation.");

    // The SequenceNumber we were given points directly to a MetadataCheckpointOperation. We must not remove it!
    // Instead, it must be the first operation that does survive, so we need to adjust our SeqNo to the one just
    // before it.
    long actualTruncationSequenceNumber = upToSequenceNumber - 1;

    // Find the closest Truncation Marker (that does not exceed it).
    LogAddress truncationFrameAddress = this.metadata.getClosestTruncationMarker(actualTruncationSequenceNumber);
    if (truncationFrameAddress == null) {
        // Nothing to truncate.
        return CompletableFuture.completedFuture(null);
    }

    TimeoutTimer timer = new TimeoutTimer(timeout);
    log.info("{}: Truncate (OperationSequenceNumber = {}, DataFrameAddress = {}).", this.traceObjectId, upToSequenceNumber, truncationFrameAddress);
    // Queue a StorageMetadataCheckpointOperation before truncating, so that the latest Storage
    // info will be readily available upon recovery without delay.
    return add(new StorageMetadataCheckpointOperation(), OperationPriority.SystemCritical, timer.getRemaining())
            .thenComposeAsync(v -> this.durableDataLog.truncate(truncationFrameAddress, timer.getRemaining()), this.executor)
            .thenRunAsync(() -> this.metadata.removeTruncationMarkers(actualTruncationSequenceNumber), this.executor);
}
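The off-by-one adjustment at the top of truncate is the crux: the caller's sequence number points at a MetadataCheckpointOperation that must become the first surviving operation, so the method truncates only up to the number just before it. A toy illustration of that arithmetic follows; the in-memory deque stands in for the durable log.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

public class TruncationPointSketch {
    public static void main(String[] args) {
        // Hypothetical operation log; sequence number 5 is the MetadataCheckpointOperation.
        Deque<Long> operationLog = new ArrayDeque<>(List.of(1L, 2L, 3L, 4L, 5L, 6L));
        long upToSequenceNumber = 5; // points directly at the checkpoint

        // The checkpoint must survive, so only operations strictly before it are removed.
        long actualTruncationSequenceNumber = upToSequenceNumber - 1;
        operationLog.removeIf(seq -> seq <= actualTruncationSequenceNumber);

        System.out.println("Surviving operations: " + operationLog); // prints [5, 6]
    }
}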