Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class ContainerTableExtensionImpl, method deleteSegment.
@Override
public CompletableFuture<Void> deleteSegment(@NonNull String segmentName, boolean mustBeEmpty, Duration timeout) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    logRequest("deleteSegment", segmentName, mustBeEmpty);
    val timer = new TimeoutTimer(timeout);
    return this.segmentContainer.forSegment(segmentName, timer.getRemaining())
            .thenComposeAsync(segment -> selectLayout(segment.getInfo())
                    .deleteSegment(segmentName, mustBeEmpty, timer.getRemaining()), this.executor);
}
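The pattern to note here is that a single Duration budget is wrapped in a TimeoutTimer and each stage of the asynchronous chain receives only timer.getRemaining(), so time spent locating the segment is deducted from what the delete itself may use. For readers without the Pravega sources at hand, below is a minimal, self-contained sketch of that idea; SimpleTimeoutTimer is a hypothetical stand-in mirroring the getRemaining() call used above, not the actual implementation of io.pravega.common.TimeoutTimer.

import java.time.Duration;
import java.util.concurrent.CompletableFuture;

// Hypothetical stand-in for io.pravega.common.TimeoutTimer: tracks how much of
// an initial budget is left, so chained stages share one deadline.
final class SimpleTimeoutTimer {
    private final long deadlineNanos;

    SimpleTimeoutTimer(Duration timeout) {
        this.deadlineNanos = System.nanoTime() + timeout.toNanos();
    }

    Duration getRemaining() {
        return Duration.ofNanos(Math.max(0, deadlineNanos - System.nanoTime()));
    }
}

class DeleteSegmentSketch {
    // Each stage receives timer.getRemaining(), so the time spent locating the
    // segment is deducted from the budget available to the delete itself.
    CompletableFuture<Void> deleteSegment(String name, Duration timeout) {
        SimpleTimeoutTimer timer = new SimpleTimeoutTimer(timeout);
        return locateSegment(name, timer.getRemaining())
                .thenCompose(segment -> deleteFromLayout(segment, timer.getRemaining()));
    }

    // Placeholder async operations; real implementations would enforce the passed timeout.
    private CompletableFuture<String> locateSegment(String name, Duration timeout) {
        return CompletableFuture.completedFuture(name);
    }

    private CompletableFuture<Void> deleteFromLayout(String segment, Duration timeout) {
        return CompletableFuture.completedFuture(null);
    }
}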
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class HashTableCompactor, method excludeObsolete.
/**
 * Processes the given {@link CompactionArgs} and eliminates all {@link Candidate}s that meet at least one of the
 * following criteria:
 * - The Key's Table Bucket is no longer part of the index (removal).
 * - The Key exists in the Index, but the Index points to a newer version of it.
 *
 * @param args    A {@link CompactionArgs} representing the set of {@link Candidate}s for compaction. This set
 *                will be modified based on the outcome of this method.
 * @param buckets The Buckets retrieved via the {@link IndexReader} for the {@link Candidate}s.
 * @param timer   Timer for the operation.
 * @return A CompletableFuture that, when completed, will indicate the operation has finished.
 */
private CompletableFuture<Void> excludeObsolete(IndexedCompactionArgs args, Map<UUID, TableBucket> buckets, TimeoutTimer timer) {
    // Exclude all those Table Entries whose buckets altogether do not exist.
    val deletedBuckets = args.candidatesByHash.keySet().stream()
            .filter(k -> {
                val bucket = buckets.get(k);
                return bucket == null || !bucket.exists();
            })
            .collect(Collectors.toList());

    // Do this in a separate loop since we are modifying args.candidatesByHash with removeBucket().
    for (val bucket : deletedBuckets) {
        args.removeBucket(bucket);
    }

    // For every Bucket that still exists, find all its Keys and match with our candidates and figure out if our
    // candidates are still eligible for compaction.
    val br = TableBucketReader.key(this.segment, this.indexReader::getBackpointerOffset, this.executor);
    val candidateBuckets = args.candidatesByHash.keySet().iterator();
    return Futures.loop(candidateBuckets::hasNext, () -> {
        val bucketId = candidateBuckets.next();
        long bucketOffset = buckets.get(bucketId).getSegmentOffset();
        return br.findAll(bucketOffset, args::handleExistingKey, timer);
    }, this.executor);
}
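Futures.loop drives one asynchronous findAll per surviving bucket, strictly in sequence, with every iteration drawing on the same TimeoutTimer. As a rough illustration of that control flow (not Pravega's actual Futures.loop implementation), here is a self-contained sequential async loop built from plain CompletableFuture composition; forEachSequential and the bucket names are illustrative only.

import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

class AsyncLoopSketch {
    // Runs body(item) for each item strictly in sequence: the next call starts
    // only after the previous future completes -- the shape Futures.loop provides.
    static <T> CompletableFuture<Void> forEachSequential(Iterator<T> items,
                                                         Function<T, CompletableFuture<Void>> body) {
        if (!items.hasNext()) {
            return CompletableFuture.completedFuture(null);
        }
        return body.apply(items.next()).thenCompose(v -> forEachSequential(items, body));
    }

    public static void main(String[] args) {
        List<String> buckets = List.of("b1", "b2", "b3");
        forEachSequential(buckets.iterator(),
                b -> CompletableFuture.runAsync(() -> System.out.println("scanning " + b)))
                .join();
    }
}

Note that this recursive sketch can grow the stack if the futures complete synchronously; a production utility such as Futures.loop is written to avoid that.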
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class WriterTableProcessorTests, method checkRelocatedIndex.
/**
 * Same outcome as {@link #checkIndex}, but does the verification by actually reading the Table Entries from the
 * segment. This method is slower than {@link #checkIndex}, so it should only be used when access to the actual,
 * serialized Table Entry is needed (such as in compaction testing).
 */
private void checkRelocatedIndex(HashMap<BufferView, TableEntry> existingEntries, HashMap<BufferView, UUID> allKeys, TestContext context) throws Exception {
    // Get all the buckets associated with the given keys.
    val timer = new TimeoutTimer(TIMEOUT);
    val bucketsByHash = context.indexReader.locateBuckets(context.segmentMock, allKeys.values(), timer)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    for (val e : allKeys.entrySet()) {
        val key = e.getKey();
        val expectedEntry = existingEntries.get(key);
        val bucket = bucketsByHash.get(e.getValue());
        Assert.assertNotNull("Test error: no bucket found.", bucket);
        if (expectedEntry != null) {
            // This key should exist.
            val actualEntry = TableBucketReader.entry(context.segmentMock, context.indexReader::getBackpointerOffset, executorService())
                    .find(key, bucket.getSegmentOffset(), timer)
                    .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.assertEquals("Unexpected entry.", expectedEntry, actualEntry);
        } else {
            // This key should not exist.
            if (bucket.exists()) {
                val actualEntry = TableBucketReader.entry(context.segmentMock, context.indexReader::getBackpointerOffset, executorService())
                        .find(key, bucket.getSegmentOffset(), timer)
                        .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
                Assert.assertNull(actualEntry);
            }
        }
    }
}
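One design detail worth noting: the test passes a single TimeoutTimer to every asynchronous lookup, but each blocking get() waits with the full, fixed TIMEOUT. If a single overall deadline for the whole verification were desired, the waits could be bounded by the remaining budget instead. A minimal sketch of that variant, reusing the hypothetical SimpleTimeoutTimer stand-in from the first example:

import java.time.Duration;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

class SharedDeadlineSketch {
    // Each wait consumes part of one shared budget, so the loop as a whole
    // cannot take longer than the original timeout.
    static void awaitAll(List<CompletableFuture<Long>> lookups, Duration timeout) throws Exception {
        SimpleTimeoutTimer timer = new SimpleTimeoutTimer(timeout);
        for (CompletableFuture<Long> lookup : lookups) {
            lookup.get(timer.getRemaining().toMillis(), TimeUnit.MILLISECONDS);
        }
    }
}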
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class IndexReaderWriterTests, method testGroupByBucket.
// endregion
// region IndexWriter specific tests
/**
* Tests the {@link IndexWriter#groupByBucket} method.
*/
@Test
public void testGroupByBucket() {
    int bucketCount = 5;
    int hashesPerBucket = 5;
    val hashToBuckets = new HashMap<UUID, TableBucket>();
    val bucketsToKeys = new HashMap<TableBucket, ArrayList<BucketUpdate.KeyUpdate>>();
    val rnd = new Random(0);
    for (int i = 0; i < bucketCount; i++) {
        val bucket = new TableBucket(UUID.randomUUID(), i);

        // Keep track of all KeyUpdates for this bucket.
        val keyUpdates = new ArrayList<BucketUpdate.KeyUpdate>();
        bucketsToKeys.put(bucket, keyUpdates);

        // Generate keys, and record them where needed.
        for (int j = 0; j < hashesPerBucket; j++) {
            byte[] key = new byte[KeyHasher.HASH_SIZE_BYTES * 4];
            long offset = i * hashesPerBucket + j;
            keyUpdates.add(new BucketUpdate.KeyUpdate(new ByteArraySegment(key), offset, offset, true));
            // The KeyUpdate wraps the array, so filling it here is still visible to the update added above.
            rnd.nextBytes(key);
            hashToBuckets.put(KeyHashers.DEFAULT_HASHER.hash(key), bucket);
        }
    }

    // Group updates by bucket. Since we override locateBuckets, we do not need segment access, so it is safe to pass null.
    val w = new CustomLocateBucketIndexer(KeyHashers.DEFAULT_HASHER, executorService(), hashToBuckets);
    val allKeyUpdates = new ArrayList<BucketUpdate.KeyUpdate>();
    bucketsToKeys.values().forEach(allKeyUpdates::addAll);
    val bucketUpdates = w.groupByBucket(null, allKeyUpdates, new TimeoutTimer(TIMEOUT)).join().stream()
            .map(BucketUpdate.Builder::build)
            .collect(Collectors.toList());

    Assert.assertEquals("Unexpected number of Bucket Updates.", bucketCount, bucketUpdates.size());
    for (BucketUpdate bu : bucketUpdates) {
        Assert.assertTrue("Not expecting Existing Keys to be populated.", bu.getExistingKeys().isEmpty());
        val expected = bucketsToKeys.get(bu.getBucket());
        Assert.assertNotNull("Found extra bucket.", expected);
        AssertExtensions.assertContainsSameElements("Unexpected updates grouped.", expected, bu.getKeyUpdates(),
                (u1, u2) -> u1.getKey().equals(u2.getKey()) && u1.getOffset() == u2.getOffset() ? 0 : 1);
    }
}
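Conceptually, groupByBucket resolves each key to its TableBucket (hashing the key, then locating the bucket) and collects all updates that land in the same bucket. A stripped-down, self-contained sketch of that grouping step follows; KeyUpdate and the bucket-resolution function here are hypothetical stand-ins, not Pravega types.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

class GroupByBucketSketch {
    record KeyUpdate(String key, long offset) { }

    // bucketForKey plays the role of hashing a key and locating its bucket.
    static Map<String, List<KeyUpdate>> groupByBucket(List<KeyUpdate> updates,
                                                      Function<String, String> bucketForKey) {
        Map<String, List<KeyUpdate>> result = new HashMap<>();
        for (KeyUpdate u : updates) {
            result.computeIfAbsent(bucketForKey.apply(u.key()), b -> new ArrayList<>()).add(u);
        }
        return result;
    }

    public static void main(String[] args) {
        List<KeyUpdate> updates = List.of(
                new KeyUpdate("a", 0), new KeyUpdate("b", 1), new KeyUpdate("c", 2));
        // Toy bucket function (parity of the key's hash); real code uses a KeyHasher.
        System.out.println(groupByBucket(updates, k -> "bucket-" + (k.hashCode() & 1)));
    }
}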
Use of io.pravega.common.TimeoutTimer in project pravega by pravega.
The class IndexReaderWriterTests, method checkIndex.
private void checkIndex(Collection<BufferView> allKeys, Map<Long, BufferView> existingKeysByOffset, IndexWriter w, KeyHasher hasher, SegmentMock segment) {
    val timer = new TimeoutTimer(TIMEOUT);

    // Group all keys by their full hash (each hash should translate to a bucket), and make sure they're ordered by
    // offset (in descending order - so we can verify backpointer ordering).
    val existingKeys = existingKeysByOffset.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getValue, Map.Entry::getKey));
    val keysByHash = allKeys.stream()
            .map(key -> new BucketUpdate.KeyInfo(key, existingKeys.getOrDefault(key, NO_OFFSET), existingKeys.getOrDefault(key, NO_OFFSET)))
            .sorted((k1, k2) -> Long.compare(k2.getOffset(), k1.getOffset())) // Reverse order.
            .collect(Collectors.groupingBy(keyInfo -> hasher.hash(keyInfo.getKey())));

    int existentBucketCount = 0;
    val buckets = w.locateBuckets(segment, keysByHash.keySet(), timer).join();
    for (val e : keysByHash.entrySet()) {
        val hash = e.getKey();
        val keys = e.getValue();
        val bucket = buckets.get(hash);
        Assert.assertNotNull("No bucket found for hash " + hash, bucket);
        boolean allDeleted = keys.stream().allMatch(k -> k.getOffset() == NO_OFFSET);
        Assert.assertNotEquals("Only expecting a nonexistent bucket when all its keys are deleted: " + hash, allDeleted, bucket.exists());
        val bucketOffsets = w.getBucketOffsets(segment, bucket, timer).join();

        // Verify that we didn't return too many or too few keys.
        if (allDeleted) {
            Assert.assertEquals("Not expecting any offsets to be returned for bucket: " + hash, 0, bucketOffsets.size());
        } else {
            AssertExtensions.assertGreaterThan("Expected at least one offset to be returned for bucket: " + hash, 0, bucketOffsets.size());
            existentBucketCount++;
        }
        AssertExtensions.assertLessThanOrEqual("Too many offsets returned for bucket: " + hash, keys.size(), bucketOffsets.size());

        // Verify returned keys are as expected.
        for (int i = 0; i < bucketOffsets.size(); i++) {
            long actualOffset = bucketOffsets.get(i);
            long expectedOffset = keys.get(i).getOffset();
            String id = String.format("{%s[%s]}", hash, i);
            // In this loop, we do not expect to have Deleted Keys. If our Expected Offset indicates this key should
            // have been deleted, then getBucketOffsets() should not have returned this.
            Assert.assertNotEquals("Expecting a deleted key but found an existing one: " + id, NO_OFFSET, expectedOffset);
            Assert.assertEquals("Unexpected key offset in bucket " + id, expectedOffset, actualOffset);
        }
        if (bucketOffsets.size() < keys.size()) {
            val prevKeyOffset = keys.get(bucketOffsets.size()).getOffset();
            Assert.assertEquals("Missing key from bucket " + hash, NO_OFFSET, prevKeyOffset);
        }
    }
    checkEntryCount(existingKeysByOffset.size(), segment);
    checkBucketCount(existentBucketCount, segment);
}
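The descending-offset expectation in this test reflects how bucket contents are enumerated: starting from the bucket's newest (head) offset and following backpointers to older entries. A minimal, self-contained sketch of such a walk, with the backpointer lookup reduced to a plain map (a hypothetical stand-in for the getBackpointerOffset lookup used elsewhere in this page):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

class BackpointerWalkSketch {
    // Walks from the bucket's head (newest) offset backwards via backpointers,
    // yielding offsets in descending order -- the ordering checkIndex verifies.
    static List<Long> getBucketOffsets(long headOffset, Map<Long, Long> backpointers) {
        List<Long> offsets = new ArrayList<>();
        long current = headOffset;
        while (current >= 0) {
            offsets.add(current);
            current = backpointers.getOrDefault(current, -1L); // -1 terminates the chain.
        }
        return offsets;
    }

    public static void main(String[] args) {
        // The entry at offset 300 points back to 150, which points back to 20.
        Map<Long, Long> backpointers = Map.of(300L, 150L, 150L, 20L);
        System.out.println(getBucketOffsets(300, backpointers)); // [300, 150, 20]
    }
}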