Use of org.junit.rules.Timeout in project pravega by pravega: class SegmentAggregatorTests, method testRecoveryEmptyMergeOperation.
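For context, org.junit.rules.Timeout is a JUnit 4 rule that applies a per-test time limit to every test method in the class. A minimal sketch of how such a rule is typically declared (the 30-second limit is illustrative, not necessarily the value Pravega uses):

import org.junit.Rule;
import org.junit.rules.Timeout;

public class SegmentAggregatorTests {

    // Fails any test method in this class that runs longer than 30 seconds.
    @Rule
    public Timeout globalTimeout = Timeout.seconds(30);
}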
/**
 * Tests a scenario where a MergeSegmentOperation needs to be recovered but the merge has already been
 * performed in Storage. (A sketch of the callback-based ack verification used here follows the test.)
 */
@Test
public void testRecoveryEmptyMergeOperation() throws Exception {
    @Cleanup
    TestContext context = new TestContext(DEFAULT_CONFIG);

    // Create a parent segment and one transaction segment.
    context.segmentAggregator.initialize(TIMEOUT).join();

    // Part 1: the source segment is missing from Storage, but the metadata does not reflect that.
    SegmentAggregator ta0 = context.transactionAggregators[0];
    context.storage.create(ta0.getMetadata().getName(), TIMEOUT).join();
    context.storage.openWrite(ta0.getMetadata().getName())
            .thenCompose(txnHandle -> context.storage.seal(txnHandle, TIMEOUT))
            .join();
    val txn0Metadata = context.containerMetadata.getStreamSegmentMetadata(ta0.getMetadata().getId());
    txn0Metadata.markSealed();
    txn0Metadata.markSealedInStorage();
    ta0.initialize(TIMEOUT).join();
    context.storage.delete(context.storage.openWrite(txn0Metadata.getName()).join(), TIMEOUT).join();

    // This is the operation that should be reconciled.
    context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(ta0.getMetadata().getId(), context));

    // Verify the operation was ack-ed.
    AtomicBoolean mergeAcked = new AtomicBoolean();
    context.dataSource.setCompleteMergeCallback((target, source) -> mergeAcked.set(true));
    context.segmentAggregator.flush(TIMEOUT).join();
    Assert.assertTrue("Merge was not ack-ed for deleted source segment.", mergeAcked.get());

    // Part 2: the source segment's metadata indicates it was deleted.
    SegmentAggregator ta1 = context.transactionAggregators[1];
    context.storage.create(ta1.getMetadata().getName(), TIMEOUT).join();
    context.storage.openWrite(ta1.getMetadata().getName())
            .thenCompose(txnHandle -> context.storage.seal(txnHandle, TIMEOUT))
            .join();
    val txn1Metadata = context.containerMetadata.getStreamSegmentMetadata(ta1.getMetadata().getId());
    txn1Metadata.markDeleted();

    // This is the operation that should be reconciled.
    context.segmentAggregator.add(generateMergeTransactionAndUpdateMetadata(ta1.getMetadata().getId(), context));

    mergeAcked.set(false);
    context.dataSource.setCompleteMergeCallback((target, source) -> mergeAcked.set(true));
    context.segmentAggregator.flush(TIMEOUT).join();

    // Finally, verify that this operation was ack-ed as well.
    Assert.assertTrue("Merge was not ack-ed for deleted source segment.", mergeAcked.get());
}
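The ack verification above hinges on a flag flipped from a callback. A stripped-down, self-contained sketch of the same pattern; FakeDataSource is a hypothetical stand-in, not a Pravega class:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;

public class MergeAckSketch {

    // Hypothetical stand-in for the test's data source: invokes a callback when a merge completes.
    static class FakeDataSource {
        private BiConsumer<Long, Long> completeMergeCallback = (target, source) -> { };

        void setCompleteMergeCallback(BiConsumer<Long, Long> callback) {
            this.completeMergeCallback = callback;
        }

        void completeMerge(long targetId, long sourceId) {
            completeMergeCallback.accept(targetId, sourceId);
        }
    }

    public static void main(String[] args) {
        FakeDataSource dataSource = new FakeDataSource();
        // Flip a flag from the callback, run the operation, then assert on the flag.
        AtomicBoolean mergeAcked = new AtomicBoolean();
        dataSource.setCompleteMergeCallback((target, source) -> mergeAcked.set(true));
        dataSource.completeMerge(1L, 2L);  // Stands in for segmentAggregator.flush(...).
        System.out.println("merge acked: " + mergeAcked.get());  // true
    }
}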
Use of org.junit.rules.Timeout in project pravega by pravega: class SegmentAggregatorTests, method testEmptySegment.
/**
 * Tests the ability to process and flush various Operations on empty (not yet created) Segments:
 * - Append
 * - Seal
 * - Truncate
 * - Merge (empty sources or targets)
 *
 * (A sketch of the async create-write-seal chaining used below follows the test.)
 */
@Test
public void testEmptySegment() throws Exception {
    final int appendLength = DEFAULT_CONFIG.getFlushThresholdBytes();
    @Cleanup
    TestContext context = new TestContext(DEFAULT_CONFIG);
    val append = context.transactionAggregators[0];
    val seal = context.transactionAggregators[1];
    val truncate = context.transactionAggregators[2];
    // Empty SOURCE to non-empty target.
    val mergeEmptySource = context.transactionAggregators[3];
    // Empty source to non-empty TARGET.
    val mergeEmptySourceTarget = context.transactionAggregators[4];
    // Empty/non-empty source to empty TARGET.
    val mergeEmptyTarget = context.transactionAggregators[5];
    // Empty SOURCE to empty target.
    val mergeEmptyTargetEmptySource = context.transactionAggregators[6];
    // Non-empty SOURCE to empty target.
    val mergeEmptyTargetNonEmptySource = context.transactionAggregators[7];
    val allAggregators = new SegmentAggregator[] { append, seal, truncate, mergeEmptySource, mergeEmptySourceTarget,
            mergeEmptyTarget, mergeEmptyTargetEmptySource, mergeEmptyTargetNonEmptySource };
    val finalAggregators = new SegmentAggregator[] { append, seal, truncate, mergeEmptySourceTarget, mergeEmptyTarget };

    // Create a zero-length segment which will be used as the target of a merge with a zero-length,
    // not-yet-created segment.
    context.storage.create(mergeEmptySourceTarget.getMetadata().getName(), TIMEOUT).join();

    // Create a non-zero-length segment which will be merged into an empty target.
    val nonEmptySourceMetadata = (UpdateableSegmentMetadata) mergeEmptyTargetNonEmptySource.getMetadata();
    nonEmptySourceMetadata.setLength(1L);
    nonEmptySourceMetadata.setStorageLength(1L);
    nonEmptySourceMetadata.markSealed();
    context.storage.create(nonEmptySourceMetadata.getName(), TIMEOUT)
            .thenCompose(v -> context.storage.openWrite(mergeEmptyTargetNonEmptySource.getMetadata().getName()))
            .thenCompose(handle -> context.storage.write(handle, 0L, new ByteArrayInputStream(new byte[1]), 1, TIMEOUT)
                    .thenCompose(v -> context.storage.seal(handle, TIMEOUT)))
            .join();

    // Initialize all the aggregators now, before adding operations for processing.
    for (val a : allAggregators) {
        a.initialize(TIMEOUT).join();
    }

    // Append on an empty segment.
    append.add(generateAppendAndUpdateMetadata(append.getMetadata().getId(), new byte[appendLength], context));
    append.flush(TIMEOUT).join();
    Assert.assertEquals("Unexpected segment length after first write.", appendLength,
            context.storage.getStreamSegmentInfo(append.getMetadata().getName(), TIMEOUT).join().getLength());

    // Seal on an empty segment.
    seal.add(generateSealAndUpdateMetadata(seal.getMetadata().getId(), context));
    seal.flush(TIMEOUT).join();
    Assert.assertTrue("Unexpected Metadata.isSealedInStorage after seal.", seal.getMetadata().isSealedInStorage());
    Assert.assertFalse("Not expecting segment to have been created in storage after seal.",
            context.storage.exists(seal.getMetadata().getName(), TIMEOUT).join());

    // Truncate on an empty segment (a no-op).
    truncate.add(generateTruncateAndUpdateMetadata(truncate.getMetadata().getId(), context));
    truncate.flush(TIMEOUT).join();
    Assert.assertFalse("Not expecting segment to have been created in storage after truncate.",
            context.storage.exists(truncate.getMetadata().getName(), TIMEOUT).join());

    // Merge a zero-length, not-yet-created segment into a zero-length, created segment.
    mergeEmptySourceTarget.add(generateMergeTransactionAndUpdateMetadata(mergeEmptySourceTarget.getMetadata().getId(),
            mergeEmptySource.getMetadata().getId(), context));
    mergeEmptySourceTarget.flush(TIMEOUT).join();
    Assert.assertFalse("Merge source was created for initially empty segment.",
            context.storage.exists(mergeEmptySource.getMetadata().getName(), TIMEOUT).join());
    Assert.assertEquals("Unexpected length of pre-existing target segment after merge with empty segment.", 0,
            context.storage.getStreamSegmentInfo(mergeEmptySourceTarget.getMetadata().getName(), TIMEOUT).join().getLength());
    Assert.assertTrue("Unexpected Metadata.isDeletedInStorage for empty source.",
            mergeEmptySource.getMetadata().isDeletedInStorage());

    // Merge an empty source into an empty target.
    mergeEmptyTarget.add(generateMergeTransactionAndUpdateMetadata(mergeEmptyTarget.getMetadata().getId(),
            mergeEmptyTargetEmptySource.getMetadata().getId(), context));
    mergeEmptyTarget.flush(TIMEOUT).join();
    Assert.assertFalse("Merge source was created for initially empty segment.",
            context.storage.exists(mergeEmptyTargetEmptySource.getMetadata().getName(), TIMEOUT).join());
    Assert.assertFalse("Merge target was created for initially empty segment.",
            context.storage.exists(mergeEmptyTarget.getMetadata().getName(), TIMEOUT).join());
    Assert.assertTrue("Unexpected Metadata.isDeletedInStorage for empty source.",
            mergeEmptyTargetEmptySource.getMetadata().isDeletedInStorage());

    // Merge a non-empty source segment into an empty target.
    mergeEmptyTarget.add(generateMergeTransactionAndUpdateMetadata(mergeEmptyTarget.getMetadata().getId(),
            mergeEmptyTargetNonEmptySource.getMetadata().getId(), context));
    mergeEmptyTarget.flush(TIMEOUT).join();
    Assert.assertFalse("Merge source still exists for initially non-empty segment.",
            context.storage.exists(mergeEmptyTargetNonEmptySource.getMetadata().getName(), TIMEOUT).join());
    Assert.assertEquals("Unexpected length of target segment after merge with non-empty segment.", 1,
            context.storage.getStreamSegmentInfo(mergeEmptyTarget.getMetadata().getName(), TIMEOUT).join().getLength());
    Assert.assertTrue("Unexpected Metadata.isDeletedInStorage for non-empty source.",
            mergeEmptyTargetNonEmptySource.getMetadata().isDeletedInStorage());

    // Finally, check that everything was marked as flushed out of the aggregators.
    for (val a : finalAggregators) {
        Assert.assertFalse("Unexpected mustFlush() after flush.", a.mustFlush());
        AssertExtensions.assertLessThan("Unexpected LUSN after flush.", 0, a.getLowestUncommittedSequenceNumber());
    }
}
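The setup for the non-empty source above chains four asynchronous storage calls into one pipeline. A self-contained sketch of that thenCompose chaining against a hypothetical AsyncStorage interface (the names mirror, but are not, Pravega's Storage API):

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.time.Duration;
import java.util.concurrent.CompletableFuture;

public class AsyncStorageChainSketch {

    interface Handle { }

    // Hypothetical async storage API with the create/openWrite/write/seal shape used above.
    interface AsyncStorage {
        CompletableFuture<Void> create(String name, Duration timeout);
        CompletableFuture<Handle> openWrite(String name);
        CompletableFuture<Void> write(Handle handle, long offset, InputStream data, int length, Duration timeout);
        CompletableFuture<Void> seal(Handle handle, Duration timeout);
    }

    static CompletableFuture<Void> createNonEmptySealedSegment(AsyncStorage storage, String name, Duration timeout) {
        // Each stage starts only after the previous one completes; the same handle is reused for write and seal.
        return storage.create(name, timeout)
                .thenCompose(v -> storage.openWrite(name))
                .thenCompose(handle -> storage.write(handle, 0L, new ByteArrayInputStream(new byte[1]), 1, timeout)
                        .thenCompose(v -> storage.seal(handle, timeout)));
    }
}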
Use of org.junit.rules.Timeout in project pravega by pravega: class SegmentAggregatorTests, method testReconcileDelete.
/**
 * Tests the ability to reconcile a {@link DeleteSegmentOperation} on Segments in various states:
 * - Empty (not yet created).
 * - Empty (created, but no data).
 * - Not empty, not sealed.
 * - Sealed (empty or not).
 *
 * Reconciling a {@link DeleteSegmentOperation} is different from any other operation: even if there are other
 * operations to reconcile, the mere presence of a Delete bypasses all of them and simply deletes the segment.
 * (A sketch of the tolerate-expected-exception pattern used below follows the test.)
 */
@Test
public void testReconcileDelete() throws Exception {
    final int appendLength = DEFAULT_CONFIG.getFlushThresholdBytes();
    @Cleanup
    TestContext context = new TestContext(DEFAULT_CONFIG);
    val notExistsWithAppend = context.transactionAggregators[0];
    val existsWithAppend = context.transactionAggregators[1];
    val existsWithSeal = context.transactionAggregators[2];
    val allAggregators = new SegmentAggregator[] { notExistsWithAppend, existsWithAppend, existsWithSeal };
    for (val a : allAggregators) {
        // Create the segment and add 1 byte to it. This will cause initialize() to not treat it as empty.
        context.storage.create(a.getMetadata().getName(), TIMEOUT)
                .thenCompose(v -> context.storage.openWrite(a.getMetadata().getName()))
                .thenCompose(handle -> {
                    ((UpdateableSegmentMetadata) a.getMetadata()).setLength(1L);
                    ((UpdateableSegmentMetadata) a.getMetadata()).setStorageLength(1L);
                    return context.storage.write(handle, 0, new ByteArrayInputStream(new byte[] { 1 }), 1, TIMEOUT);
                })
                .thenCompose(v -> a.initialize(TIMEOUT))
                .join();

        // Add enough data to trigger a flush.
        a.add(generateAppendAndUpdateMetadata(a.getMetadata().getId(), new byte[appendLength], context));
        if (a == existsWithSeal) {
            // Add a Seal for the segment that should be sealed.
            a.add(generateSealAndUpdateMetadata(existsWithSeal.getMetadata().getId(), context));
        }

        // Delete the Segment from Storage.
        Futures.exceptionallyExpecting(
                context.storage.openWrite(a.getMetadata().getName())
                        .thenCompose(handle -> context.storage.delete(handle, TIMEOUT)),
                ex -> ex instanceof StreamSegmentNotExistsException, null).join();
        Assert.assertTrue("Unexpected value from mustFlush() before first flush().", a.mustFlush());

        // The first attempt should fail.
        AssertExtensions.assertSuppliedFutureThrows("First invocation of flush() should fail.",
                () -> a.flush(TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
        Assert.assertTrue("Unexpected value from mustFlush() after failed flush().", a.mustFlush());

        // Add the DeleteSegmentOperation - this should cause reconciliation to succeed.
        a.add(generateDeleteAndUpdateMetadata(a.getMetadata().getId(), context));
        a.flush(TIMEOUT).join();
        Assert.assertFalse("Unexpected value from mustFlush() after Deletion.", a.mustFlush());
        AssertExtensions.assertLessThan("Unexpected LUSN after flush.", 0, a.getLowestUncommittedSequenceNumber());
        Assert.assertTrue("Unexpected value from isDeleted() after Deletion.", a.getMetadata().isDeleted());
        Assert.assertTrue("Unexpected value from isDeletedInStorage() after Deletion.", a.getMetadata().isDeletedInStorage());

        // Verify that no segment exists in Storage after the flush.
        boolean existsInStorage = context.storage.exists(a.getMetadata().getName(), TIMEOUT).join();
        Assert.assertFalse("Segment still exists in Storage after Deletion.", existsInStorage);
    }
}
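The Futures.exceptionallyExpecting call above tolerates one expected exception type and substitutes a default value. A plain-CompletableFuture reconstruction of that idea (my own sketch, not Pravega's Futures implementation):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Predicate;

public class ExpectedExceptionSketch {

    // If the future fails with an expected exception, substitute a replacement value;
    // any other failure is propagated unchanged.
    static <T> CompletableFuture<T> exceptionallyExpecting(
            CompletableFuture<T> future, Predicate<Throwable> isExpected, T replacement) {
        return future.exceptionally(ex -> {
            // CompletableFuture wraps failures in CompletionException; unwrap before matching.
            Throwable cause = (ex instanceof CompletionException && ex.getCause() != null) ? ex.getCause() : ex;
            if (isExpected.test(cause)) {
                return replacement;  // e.g., the segment was already deleted; that is fine here.
            }
            throw new CompletionException(cause);
        });
    }
}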
Use of org.junit.rules.Timeout in project pravega by pravega: class TableServiceTests, method check.
private void check(HashMap<BufferView, EntryData> keyInfo, TableStore tableStore) throws Exception {
    val bySegment = keyInfo.entrySet().stream()
            .collect(Collectors.groupingBy(e -> e.getValue().segmentName));

    // Issue gets for all keys (including deleted ones) and all iterator types; the results are checked below.
    val searchFutures = new ArrayList<CompletableFuture<List<TableEntry>>>();
    val iteratorFutures = new ArrayList<CompletableFuture<List<TableEntry>>>();
    // Delta Iteration does not support fixed-key-length TableSegments.
    val unsortedIteratorFutures = new ArrayList<CompletableFuture<List<TableEntry>>>();
    val offsetIteratorFutures = new ArrayList<CompletableFuture<List<IteratorItem<TableEntry>>>>();
    val expectedResult = new ArrayList<Map.Entry<BufferView, EntryData>>();
    for (val e : bySegment.entrySet()) {
        String segmentName = e.getKey();
        boolean fixedKeyLength = isFixedKeyLength(segmentName);
        val info = tableStore.getInfo(segmentName, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
        Assert.assertEquals(segmentName, info.getName());
        AssertExtensions.assertGreaterThan("Unexpected length for " + segmentName, 0, info.getLength());
        val expectedKeyLength = fixedKeyLength ? getFixedKeyLength(segmentName) : 0;
        Assert.assertEquals("Unexpected key length for " + segmentName, expectedKeyLength, info.getKeyLength());
        Assert.assertEquals(fixedKeyLength, info.getType().isFixedKeyLengthTableSegment());

        val keys = new ArrayList<BufferView>();
        for (val se : e.getValue()) {
            keys.add(se.getKey());
            expectedResult.add(se);
        }
        searchFutures.add(tableStore.get(segmentName, keys, TIMEOUT));
        CompletableFuture<List<TableEntry>> entryIteratorFuture = tableStore
                .entryIterator(segmentName, IteratorArgs.builder().fetchTimeout(TIMEOUT).build())
                .thenCompose(ei -> {
                    val result = new ArrayList<TableEntry>();
                    return ei.forEachRemaining(i -> result.addAll(i.getEntries()), executorService()).thenApply(v -> {
                        if (fixedKeyLength) {
                            checkSortedOrder(result);
                        }
                        return result;
                    });
                });
        iteratorFutures.add(entryIteratorFuture);
        if (!fixedKeyLength) {
            unsortedIteratorFutures.add(entryIteratorFuture);
            // For simplicity, always start from the beginning of the TableSegment.
            offsetIteratorFutures.add(tableStore.entryDeltaIterator(segmentName, 0L, TIMEOUT).thenCompose(ei -> {
                val result = new ArrayList<IteratorItem<TableEntry>>();
                return ei.forEachRemaining(result::add, executorService()).thenApply(v -> result);
            }));
        }
    }

    // Check search results.
    val actualResults = Futures.allOfWithResults(searchFutures)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).stream()
            .flatMap(List::stream)
            .collect(Collectors.toList());
    Assert.assertEquals("Unexpected number of search results.", expectedResult.size(), actualResults.size());
    for (int i = 0; i < expectedResult.size(); i++) {
        val expectedKey = expectedResult.get(i).getKey();
        val expectedEntry = expectedResult.get(i).getValue();
        val actual = actualResults.get(i);
        if (expectedEntry.isDeleted()) {
            // Deleted keys are returned as nulls.
            if (actual != null) {
                // Debugging aid: re-fetch the offending key so its current value is visible before the assert fails.
                val r2 = tableStore.get(expectedEntry.segmentName, Collections.singletonList(expectedKey), TIMEOUT).join();
            }
            Assert.assertNull("Not expecting a value for a deleted Key.", actual);
        } else {
            Assert.assertEquals("Unexpected value for non-deleted Key.", expectedEntry.getValue(), actual.getValue());
            Assert.assertEquals("Unexpected key for non-deleted Key.", expectedKey, actual.getKey().getKey());
            Assert.assertEquals("Unexpected TableKey.Version for non-deleted Key.", expectedEntry.getVersion(), actual.getKey().getVersion());
        }
    }

    // Check iterator results. We sort both sides by Version/Offset to ease the comparison.
    val actualIteratorResults = Futures.allOfWithResults(iteratorFutures)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).stream()
            .flatMap(List::stream)
            .sorted(Comparator.comparingLong(e -> e.getKey().getVersion()))
            .collect(Collectors.toList());
    val expectedIteratorResults = actualResults.stream()
            .filter(Objects::nonNull)
            .sorted(Comparator.comparingLong(e -> e.getKey().getVersion()))
            .collect(Collectors.toList());

    // These lists are used to compare non-delta based iteration with delta based iteration.
    val actualUnsortedIteratorResults = Futures.allOfWithResults(unsortedIteratorFutures)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).stream()
            .flatMap(List::stream)
            .sorted(Comparator.comparingLong(e -> e.getKey().getVersion()))
            .collect(Collectors.toList());
    val expectedUnsortedIteratorResults = actualUnsortedIteratorResults.stream()
            .filter(Objects::nonNull)
            .sorted(Comparator.comparingLong(e -> e.getKey().getVersion()))
            .collect(Collectors.toList());
    val actualOffsetIteratorList = Futures.allOfWithResults(offsetIteratorFutures)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).stream()
            .flatMap(List::stream)
            .collect(Collectors.toList());
    val actualOffsetIteratorResults = processDeltaIteratorItems(actualOffsetIteratorList).stream()
            .sorted(Comparator.comparingLong(e -> e.getKey().getVersion()))
            .collect(Collectors.toList());
    AssertExtensions.assertListEquals("Unexpected result from entryIterator().", expectedIteratorResults, actualIteratorResults, TableEntry::equals);
    for (val entry : expectedUnsortedIteratorResults) {
        // contains() returns a boolean, so assertTrue (not assertNotNull) is the correct check here.
        Assert.assertTrue("Missing expected TableEntry from deltaEntryIterator().",
                actualOffsetIteratorResults.contains(entry));
    }
}
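Both iterator checks above drain an asynchronous iterator into a list before asserting on it. A minimal sketch of that drain pattern over a hypothetical AsyncIterator (Pravega's actual interface may differ in details):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.function.Consumer;

public class AsyncIteratorDrainSketch {

    // Hypothetical async iterator: getNext() completes with null once exhausted.
    interface AsyncIterator<T> {
        CompletableFuture<T> getNext();
    }

    // Apply the consumer to every remaining item, then complete. thenComposeAsync keeps the
    // recursion on the executor, so the chain grows on the heap rather than the call stack.
    static <T> CompletableFuture<Void> forEachRemaining(AsyncIterator<T> iterator, Consumer<T> consumer, Executor executor) {
        return iterator.getNext().thenComposeAsync(item -> {
            if (item == null) {
                return CompletableFuture.completedFuture(null);
            }
            consumer.accept(item);
            return forEachRemaining(iterator, consumer, executor);
        }, executor);
    }

    // Drain into a list and hand it back once iteration finishes, as the futures above do.
    static <T> CompletableFuture<List<T>> toList(AsyncIterator<T> iterator, Executor executor) {
        List<T> result = new ArrayList<>();
        return forEachRemaining(iterator, result::add, executor).thenApply(v -> result);
    }
}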
Use of org.junit.rules.Timeout in project pravega by pravega: class StreamSegmentContainerTests, method testTableSegmentReadAfterCompactionAndRecovery.
/**
 * Tests a non-trivial scenario in which ContainerKeyIndex may tail-cache a stale version of a key if the
 * following conditions occur:
 * 1. StorageWriter processes values v0...vn for k1 and {@link WriterTableProcessor} indexes them.
 * 2. As a result of {@link WriterTableProcessor} activity, the last value vn for k1 is moved to the tail of the Segment.
 * 3. While TableCompactor works, a new PUT operation is appended to the Segment with new value vn+1 for k1.
 * 4. At this point, the StorageWriter stops its progress and the container restarts without processing either the
 * new value vn+1 or the compacted value vn for k1.
 * 5. A subsequent restart will trigger the tail-caching from the last indexed offset, which points to vn+1.
 * 6. The bug, which consists of the tail-caching process not taking table entry versions into account, would
 * overwrite vn+1 with vn just because vn has a higher offset (it was written later in the Segment).
 *
 * (A sketch of the version-aware caching rule that fixes this follows the test.)
 */
@Test
public void testTableSegmentReadAfterCompactionAndRecovery() throws Exception {
    @Cleanup
    TestContext context = new TestContext(DEFAULT_CONFIG, NO_TRUNCATIONS_DURABLE_LOG_CONFIG, DEFAULT_WRITER_CONFIG, null);
    val durableLog = new AtomicReference<OperationLog>();
    val durableLogFactory = new WatchableOperationLogFactory(context.operationLogFactory, durableLog::set);
    // Data size and count to be written in this test.
    int serializedEntryLength = 28;
    int writtenEntries = 7;
    @Cleanup
    StreamSegmentContainer container = new StreamSegmentContainer(CONTAINER_ID, DEFAULT_CONFIG, durableLogFactory,
            context.readIndexFactory, context.attributeIndexFactory, context.writerFactory, context.storageFactory,
            context.getDefaultExtensions(), executorService());
    container.startAsync().awaitRunning();
    Assert.assertNotNull(durableLog.get());
    val tableStore = container.getExtension(ContainerTableExtension.class);

    // 1. Create the Table Segment and get a DirectSegmentAccess to it to monitor its size.
    String tableSegmentName = getSegmentName(0) + "_Table";
    val type = SegmentType.builder(getSegmentType(tableSegmentName)).tableSegment().build();
    tableStore.createSegment(tableSegmentName, type, TIMEOUT).join();
    DirectSegmentAccess directTableSegment = container.forSegment(tableSegmentName, TIMEOUT).join();

    // 2. Add some entries to the table segments. Note that we write multiple values to each key, so the
    // TableCompactor can find entries to move to the tail.
    final BiFunction<String, Integer, TableEntry> createTableEntry = (key, value) -> TableEntry.unversioned(
            new ByteArraySegment(key.getBytes()),
            new ByteArraySegment(String.format("Value_%s", value).getBytes()));

    // 3. This callback will run when the StorageWriter writes data to Storage. At that point, the StorageWriter
    // has completed its first iteration, so it is time to add a new value for key1 while TableCompactor is working.
    val compactedEntry = List.of(TableEntry.versioned(
            new ByteArraySegment("key1".getBytes(StandardCharsets.UTF_8)),
            new ByteArraySegment("3".getBytes(StandardCharsets.UTF_8)),
            serializedEntryLength * 2L));
    // Simulate the Table Compactor moving [k1, 3] to the tail of the Segment as a result of compacting the first 4 entries.
    val compactedEntryUpdate = EntrySerializerTests.generateUpdateWithExplicitVersion(compactedEntry);
    CompletableFuture<Void> callbackExecuted = new CompletableFuture<>();
    context.storageFactory.getPostWriteCallback().set((segmentHandle, offset) -> {
        if (segmentHandle.getSegmentName().contains("Segment_0_Table$attributes.index") && !callbackExecuted.isDone()) {
            // New PUT with the newest value.
            Futures.toVoid(tableStore.put(tableSegmentName, Collections.singletonList(createTableEntry.apply("key1", 4)), TIMEOUT)).join();
            // Simulates a compacted entry append performed by the Table Compactor.
            directTableSegment.append(compactedEntryUpdate, null, TIMEOUT).join();
            callbackExecuted.complete(null);
        }
    });

    // Do the actual puts.
    Futures.toVoid(tableStore.put(tableSegmentName, Collections.singletonList(createTableEntry.apply("key1", 1)), TIMEOUT)).join();
    Futures.toVoid(tableStore.put(tableSegmentName, Collections.singletonList(createTableEntry.apply("key1", 2)), TIMEOUT)).join();
    Futures.toVoid(tableStore.put(tableSegmentName, Collections.singletonList(createTableEntry.apply("key1", 3)), TIMEOUT)).join();
    Futures.toVoid(tableStore.put(tableSegmentName, Collections.singletonList(createTableEntry.apply("key2", 1)), TIMEOUT)).join();
    Futures.toVoid(tableStore.put(tableSegmentName, Collections.singletonList(createTableEntry.apply("key2", 2)), TIMEOUT)).join();
    Futures.toVoid(tableStore.put(tableSegmentName, Collections.singletonList(createTableEntry.apply("key2", 3)), TIMEOUT)).join();

    // 4. Above, the test does 7 puts, each one 28 bytes in size (6 directly, 1 via the callback). Now we need
    // to wait for the TableCompactor to write the entry (key1, 3) to the tail of the Segment.
    callbackExecuted.join();
    AssertExtensions.assertEventuallyEquals(true,
            () -> directTableSegment.getInfo().getLength() > (long) serializedEntryLength * writtenEntries, 5000);

    // 5. The TableCompactor has moved the entry, so we immediately stop the container to prevent StorageWriter
    // from making more progress.
    container.close();

    // 6. Create a new container instance that will recover from the existing data.
    @Cleanup
    val container2 = new StreamSegmentContainer(CONTAINER_ID, DEFAULT_CONFIG, durableLogFactory,
            context.readIndexFactory, context.attributeIndexFactory, context.writerFactory, context.storageFactory,
            context.getDefaultExtensions(), executorService());
    container2.startAsync().awaitRunning();

    // 7. Verify that (key1, 4) is the actual value after the tail-caching process, which now takes entry
    // versions into account.
    val expected = createTableEntry.apply("key1", 4);
    val tableStore2 = container2.getExtension(ContainerTableExtension.class);
    val actual = tableStore2.get(tableSegmentName, Collections.singletonList(expected.getKey().getKey()), TIMEOUT)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).get(0);
    // Note the (expected, actual) argument order required by Assert.assertEquals.
    Assert.assertEquals(expected.getKey().getKey(), actual.getKey().getKey());
    Assert.assertEquals(expected.getValue(), actual.getValue());
}
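The fix this test pins down can be summarized in one rule: when rebuilding the tail cache, compare table entry versions, not append offsets. A compact, illustrative sketch of that rule (simplified types, not Pravega's ContainerKeyIndex):

import java.util.HashMap;
import java.util.Map;

public class TailCacheSketch {

    static final class CachedValue {
        final String value;
        final long version;

        CachedValue(String value, long version) {
            this.value = value;
            this.version = version;
        }
    }

    // Offer an entry encountered while scanning the Segment tail. Comparing versions (not offsets)
    // means a compaction-moved copy of an older value, which sits at a higher offset but keeps its
    // original lower version, cannot overwrite a newer PUT.
    static void offer(Map<String, CachedValue> cache, String key, String value, long version) {
        CachedValue existing = cache.get(key);
        if (existing == null || version >= existing.version) {
            cache.put(key, new CachedValue(value, version));
        }
    }

    public static void main(String[] args) {
        Map<String, CachedValue> cache = new HashMap<>();
        offer(cache, "key1", "Value_4", 6);  // The newer PUT (vn+1), seen first in the tail scan.
        offer(cache, "key1", "Value_3", 2);  // The compacted copy of vn: higher offset, older version.
        System.out.println(cache.get("key1").value);  // Prints Value_4, not Value_3.
    }
}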