Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class TableBucketReaderTests, method testFindEntryNotExists.
/**
* Tests the ability to (not) locate Table Entries in a Table Bucket for deleted and inexistent keys.
*/
@Test
public void testFindEntryNotExists() throws Exception {
    val segment = new SegmentMock(executorService());

    // Generate our test data.
    val es = new EntrySerializer();
    val entries = generateEntries(es);

    // Deleted key (that was previously indexed).
    val deletedKey = entries.get(0).getKey();
    val data = es.serializeRemoval(Collections.singleton(deletedKey));
    segment.append(data, null, TIMEOUT).join();
    val reader = TableBucketReader.entry(segment,
            (s, offset, timeout) -> CompletableFuture.completedFuture(-1L), // No backpointers.
            executorService());
    val deletedResult = reader.find(deletedKey.getKey(), 0L, new TimeoutTimer(TIMEOUT))
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertNull("Expecting a TableEntry with null value for deleted key.", deletedResult.getValue());

    // Inexistent key (that did not exist previously).
    val inexistentKey = entries.get(1).getKey();
    val inexistentResult = reader.find(inexistentKey.getKey(), 0L, new TimeoutTimer(TIMEOUT))
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertNull("Not expecting any result for key that was never present.", inexistentResult);
}
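The common pattern across these snippets is to construct one TimeoutTimer for an operation and hand each subsequent asynchronous step only the time still left via getRemaining(), so the steps share a single deadline rather than each receiving the full timeout. A minimal sketch of that pattern, assuming two hypothetical async steps (stepOne and stepTwo are placeholders, not Pravega methods):

import io.pravega.common.TimeoutTimer;

import java.time.Duration;
import java.util.concurrent.CompletableFuture;

public class TimeoutTimerSketch {
    // One timer covers the entire operation; each step only gets the remaining budget.
    CompletableFuture<Void> doWork(Duration timeout) {
        TimeoutTimer timer = new TimeoutTimer(timeout);
        return stepOne(timer.getRemaining())
                .thenCompose(v -> stepTwo(timer.getRemaining()));
    }

    // Hypothetical async operations that accept a per-call timeout.
    private CompletableFuture<Void> stepOne(Duration remaining) {
        return CompletableFuture.completedFuture(null);
    }

    private CompletableFuture<Void> stepTwo(Duration remaining) {
        return CompletableFuture.completedFuture(null);
    }
}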
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class MetadataStore, method assignSegmentId.
/**
* Attempts to map a Segment to an Id by first trying to retrieve an existing id and, should that not exist,
* assigning a new one. If the operation fails, either synchronously or asynchronously, the segment assignment
* is failed with the causing exception.
*
* @param segmentName The name of the Segment to assign an id for.
* @param timeout     Timeout for the operation.
*/
private void assignSegmentId(String segmentName, Duration timeout) {
    try {
        TimeoutTimer timer = new TimeoutTimer(timeout);
        Futures.exceptionListener(
                getSegmentInfoInternal(segmentName, timer.getRemaining())
                        .thenComposeAsync(si -> submitAssignmentWithRetry(SegmentInfo.deserialize(si), timer.getRemaining()), this.executor),
                ex -> failAssignment(segmentName, ex));
    } catch (Throwable ex) {
        log.warn("{}: Unable to assign Id for segment '{}'.", this.traceObjectId, segmentName, ex);
        failAssignment(segmentName, ex);
    }
}
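The key point in assignSegmentId is that any failure in the composed chain, whether thrown synchronously or surfaced asynchronously, ends up in failAssignment. A rough sketch of the asynchronous half using only the JDK; the names below are hypothetical stand-ins, not Pravega's API:

import java.time.Duration;
import java.util.concurrent.CompletableFuture;

class AssignmentSketch {
    // If any stage of the chain completes exceptionally, fail the pending assignment.
    // This mirrors what Futures.exceptionListener(...) does in the snippet above.
    void assign(String segmentName, Duration timeout) {
        lookupSegmentInfo(segmentName, timeout)
                .thenCompose(info -> submitAssignment(info, timeout))
                .whenComplete((id, ex) -> {
                    if (ex != null) {
                        failAssignment(segmentName, ex);
                    }
                });
    }

    private CompletableFuture<byte[]> lookupSegmentInfo(String name, Duration timeout) {
        return CompletableFuture.completedFuture(new byte[0]);
    }

    private CompletableFuture<Long> submitAssignment(byte[] info, Duration timeout) {
        return CompletableFuture.completedFuture(0L);
    }

    private void failAssignment(String name, Throwable ex) {
        // In the real code this completes the segment's pending assignment exceptionally.
    }
}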
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class SegmentAttributeBTreeIndex, method initialize.
/**
* Initializes the SegmentAttributeIndex.
*
* @param timeout Timeout for the operation.
* @return A CompletableFuture that, when completed, will indicate the operation has succeeded.
*/
CompletableFuture<Void> initialize(Duration timeout) {
    TimeoutTimer timer = new TimeoutTimer(timeout);
    Preconditions.checkState(!this.index.isInitialized(), "SegmentAttributeIndex is already initialized.");
    String attributeSegmentName = NameUtils.getAttributeSegmentName(this.segmentMetadata.getName());

    // Attempt to open the Attribute Segment; if it does not exist yet, it will be created when we make the first write.
    return Futures.exceptionallyExpecting(
            this.storage.openWrite(attributeSegmentName).thenAccept(this.handle::set),
            ex -> ex instanceof StreamSegmentNotExistsException, null)
            .thenComposeAsync(v -> this.index.initialize(timer.getRemaining()), this.executor)
            .thenRun(() -> log.debug("{}: Initialized.", this.traceObjectId))
            .exceptionally(this::handleIndexOperationException);
}
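Futures.exceptionallyExpecting is what allows a missing Attribute Segment to be treated as a normal case in initialize: the expected StreamSegmentNotExistsException is swallowed and replaced with a default value, while any other failure still propagates. A rough JDK-only sketch of that idea (this is not Pravega's implementation, just the general shape):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Predicate;

class ExpectedExceptionSketch {
    // Complete normally with defaultValue if the failure matches the expected predicate;
    // otherwise re-throw so callers still observe unexpected errors.
    static <T> CompletableFuture<T> exceptionallyExpecting(CompletableFuture<T> future,
                                                           Predicate<Throwable> isExpected,
                                                           T defaultValue) {
        return future.exceptionally(ex -> {
            Throwable cause = (ex instanceof CompletionException && ex.getCause() != null) ? ex.getCause() : ex;
            if (isExpected.test(cause)) {
                return defaultValue;
            }
            throw new CompletionException(cause);
        });
    }
}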
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class SegmentAttributeBTreeIndex, method writePages.
private CompletableFuture<Long> writePages(List<BTreeIndex.WritePage> pages, Collection<Long> obsoleteOffsets, long truncateOffset, Duration timeout) {
    // The write offset is the offset of the first page to be written in the list.
    long writeOffset = pages.get(0).getOffset();

    // Collect the data to be written.
    val streams = new ArrayList<InputStream>();
    AtomicInteger length = new AtomicInteger();
    for (val p : pages) {
        // Validate that the given pages are indeed in the correct order.
        Preconditions.checkArgument(p.getOffset() == writeOffset + length.get(), "Unexpected page offset.");

        // Collect the pages (as InputStreams) and record their lengths.
        streams.add(p.getContents().getReader());
        length.addAndGet(p.getContents().getLength());
    }

    // Create the Attribute Segment in Storage (if needed), then write the new data to it and truncate if necessary.
    TimeoutTimer timer = new TimeoutTimer(timeout);
    return createAttributeSegmentIfNecessary(() -> writeToSegment(streams, writeOffset, length.get(), timer), timer.getRemaining())
            .thenApplyAsync(v -> {
                Exceptions.checkNotClosed(this.closed.get(), this);

                // Trigger an async truncation. There is no need to wait for it.
                truncateAsync(truncateOffset, timer.getRemaining());

                // Store data in cache and remove obsolete pages.
                storeInCache(pages, obsoleteOffsets);

                // Return the current length of the Segment Attribute Index.
                return writeOffset + length.get();
            }, this.executor);
}
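The validation loop above enforces that the pages form one contiguous run: each page must start exactly where the accumulated length of the pages before it ends. Isolated as a standalone check (a hypothetical helper, not part of the class; long[] pairs stand in for BTreeIndex.WritePage):

import java.util.List;

class PageContiguitySketch {
    // Each element of pages is {offset, length}. The first page must start at writeOffset,
    // and every subsequent page must start where the previous one ended.
    static void checkContiguous(long writeOffset, List<long[]> pages) {
        long expectedOffset = writeOffset;
        for (long[] p : pages) {
            if (p[0] != expectedOffset) {
                throw new IllegalArgumentException("Unexpected page offset: " + p[0] + " (expected " + expectedOffset + ")");
            }
            expectedOffset += p[1];
        }
    }
}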
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class ReadOnlySegmentContainer, method read.
@Override
public CompletableFuture<ReadResult> read(String streamSegmentName, long offset, int maxLength, Duration timeout) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    TimeoutTimer timer = new TimeoutTimer(timeout);
    return READ_RETRY.run(() -> getStreamSegmentInfo(streamSegmentName, timer.getRemaining())
            .thenApply(si -> StreamSegmentStorageReader.read(si, offset, maxLength, MAX_READ_AT_ONCE_BYTES, this.storage)));
}
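Here READ_RETRY (Pravega's retry helper) re-runs the segment-info lookup on failure, while the shared TimeoutTimer keeps retries from extending the caller's overall deadline: each attempt only receives whatever time is left. A minimal standalone sketch of that combination, using a plain recursive retry instead of Pravega's Retry class (fetchInfo and the attempt count are hypothetical):

import io.pravega.common.TimeoutTimer;

import java.time.Duration;
import java.util.concurrent.CompletableFuture;

class RetryWithDeadlineSketch {
    CompletableFuture<String> readWithRetry(Duration timeout) {
        // One timer for the whole call, including all retries.
        TimeoutTimer timer = new TimeoutTimer(timeout);
        return attempt(timer, 3);
    }

    private CompletableFuture<String> attempt(TimeoutTimer timer, int attemptsLeft) {
        return fetchInfo(timer.getRemaining())
                .handle((result, ex) -> {
                    if (ex == null) {
                        return CompletableFuture.completedFuture(result);
                    }
                    if (attemptsLeft > 1 && timer.getRemaining().toMillis() > 0) {
                        // Retry, but only within the time that is still left.
                        return attempt(timer, attemptsLeft - 1);
                    }
                    return CompletableFuture.<String>failedFuture(ex);
                })
                .thenCompose(f -> f);
    }

    private CompletableFuture<String> fetchInfo(Duration remaining) {
        return CompletableFuture.completedFuture("segment-info");
    }
}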