use of com.github.ambry.utils.Pair in project ambry by linkedin.
the class IndexTest method findEntriesSinceOneByOneTest.
/**
* Uses {@link PersistentIndex#findEntriesSince(FindToken, long)} to get entries one by one.
* @throws StoreException
*/
private void findEntriesSinceOneByOneTest() throws StoreException {
  Offset journalStartOffset = state.index.journal.getFirstOffset();
  StoreFindToken startToken = new StoreFindToken();
  Offset stoppedAt = null;
  for (Map.Entry<Offset, TreeMap<MockId, TreeSet<IndexValue>>> indexEntry : state.referenceIndex.entrySet()) {
    Offset indexSegmentStartOffset = indexEntry.getKey();
    // index-based tokens are returned as long as
    // 1. the original token is index-based, and
    // 2. the size of entries being obtained is <= the size of records in the current index segment
    if (indexSegmentStartOffset.compareTo(journalStartOffset) >= 0) {
      stoppedAt = indexSegmentStartOffset;
      break;
    }
    IndexSegment segmentOfToken = state.index.getIndexSegments().get(indexSegmentStartOffset);
    for (Map.Entry<MockId, TreeSet<IndexValue>> indexSegmentEntry : indexEntry.getValue().entrySet()) {
      MockId id = indexSegmentEntry.getKey();
      StoreFindToken expectedEndToken =
          new StoreFindToken(id, indexSegmentStartOffset, state.sessionId, state.incarnationId,
              segmentOfToken.getResetKey(), segmentOfToken.getResetKeyType(),
              segmentOfToken.getResetKeyLifeVersion());
      expectedEndToken.setBytesRead(state.index.getAbsolutePositionInLogForOffset(indexSegmentStartOffset));
      doFindEntriesSinceTest(startToken, getSizeOfAllValues(indexSegmentEntry.getValue()),
          Collections.singleton(id), expectedEndToken);
      startToken = expectedEndToken;
    }
  }
  // from the journal's start offset onwards, journal-based tokens are returned
  Map.Entry<Offset, Pair<MockId, CuratedLogIndexState.LogEntry>> logEntry = state.logOrder.floorEntry(stoppedAt);
  while (logEntry != null) {
    Offset startOffset = logEntry.getKey();
    MockId id = logEntry.getValue().getFirst();
    // size returned is the size of the most recent record
    long size = state.getExpectedValue(id, EnumSet.allOf(PersistentIndex.IndexEntryType.class), null).getSize();
    IndexSegment segmentOfToken = state.index.getIndexSegments().floorEntry(startOffset).getValue();
    StoreFindToken expectedEndToken =
        new StoreFindToken(startOffset, state.sessionId, state.incarnationId, false, segmentOfToken.getResetKey(),
            segmentOfToken.getResetKeyType(), segmentOfToken.getResetKeyLifeVersion());
    Offset endOffset = state.log.getFileSpanForMessage(startOffset, size).getEndOffset();
    expectedEndToken.setBytesRead(state.index.getAbsolutePositionInLogForOffset(endOffset));
    doFindEntriesSinceTest(startToken, size, Collections.singleton(id), expectedEndToken);
    startToken = expectedEndToken;
    logEntry = state.logOrder.higherEntry(logEntry.getKey());
  }
}
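For context on what the loops above assert: a caller pages through the store by feeding each returned token back into the next call. A minimal sketch of that loop, under two assumptions: a PersistentIndex instance named index is in scope, and findEntriesSince returns at least one entry per call even when the size budget is as small as 1 byte.

// Minimal paging sketch (assumptions noted above).
FindToken token = new StoreFindToken();
FindInfo info = index.findEntriesSince(token, 1);
while (!info.getMessageEntries().isEmpty()) {
  for (MessageInfo messageInfo : info.getMessageEntries()) {
    // each call surfaces the next record; the size is that of the key's most recent value
    System.out.println(messageInfo.getStoreKey() + ": " + messageInfo.getSize() + " bytes");
  }
  token = info.getFindToken();  // resume where the previous call stopped
  info = index.findEntriesSince(token, 1);
}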
use of com.github.ambry.utils.Pair in project ambry by linkedin.
the class CuratedLogIndexState method forceAddPutEntry.
/**
* Forcibly adds an existing PUT {@link IndexEntry} to the index to create duplicate PUTs in the index.
* @param id The {@link MockId} of this duplicate put.
* @param value The {@link IndexValue} of this duplicate put.
* @param bytes The content of this duplicate put.
* @throws StoreException
*/
void forceAddPutEntry(MockId id, IndexValue value, byte[] bytes) throws StoreException {
  if (!value.isPut()) {
    throw new IllegalArgumentException("Value has to be a put: " + value);
  }
  // append the raw bytes of the duplicate PUT to the log
  Offset endOffsetOfPrevMsg = index.getCurrentEndOffset();
  ByteBuffer buffer = ByteBuffer.wrap(bytes);
  ReadableByteChannel channel = Channels.newChannel(new ByteBufferInputStream(buffer));
  log.appendFrom(channel, buffer.capacity());
  FileSpan fileSpan = log.getFileSpanForMessage(endOffsetOfPrevMsg, bytes.length);
  Offset indexSegmentStartOffset = generateReferenceIndexSegmentStartOffset(fileSpan.getStartOffset());
  if (!referenceIndex.containsKey(indexSegmentStartOffset)) {
    // rollover will occur
    advanceTime(DELAY_BETWEEN_LAST_MODIFIED_TIMES_MS);
    referenceIndex.put(indexSegmentStartOffset, new TreeMap<>());
  }
  IndexValue newValue = new IndexValue(value);
  newValue.setNewOffset(fileSpan.getStartOffset());
  IndexEntry entry = new IndexEntry(id, newValue);
  // update the reference (expected-state) bookkeeping before touching the real index
  logOrder.put(fileSpan.getStartOffset(), new Pair<>(id, new LogEntry(bytes, value)));
  allKeys.computeIfAbsent(id, k -> new TreeSet<>()).add(value);
  referenceIndex.get(indexSegmentStartOffset).computeIfAbsent(id, k -> new TreeSet<>()).add(value);
  long expiresAtMs = value.getExpiresAtMs();
  if (expiresAtMs != Utils.Infinite_Time && expiresAtMs < time.milliseconds()) {
    expiredKeys.add(id);
  } else {
    liveKeys.add(id);
  }
  index.addToIndex(Collections.singletonList(entry), fileSpan);
  lastModifiedTimesInSecs.put(indexSegmentStartOffset, value.getOperationTimeInMs() / Time.MsPerSec);
}
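A hedged usage sketch: duplicating the PUT of a key that is already live. liveKeys and getExpectedValue(id, true) are the CuratedLogIndexState members seen in this page's snippets; the byte content below is random filler of the matching size (TestUtils.getRandomBytes is assumed available), whereas a real test would replay the original record's bytes.

// Illustrative only: pick any live key and force its PUT in a second time.
MockId id = state.liveKeys.iterator().next();
IndexValue putValue = state.getExpectedValue(id, true);  // true => fetch the PUT value
byte[] bytes = TestUtils.getRandomBytes((int) putValue.getSize());  // filler, not the real record
state.forceAddPutEntry(id, putValue, bytes);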
use of com.github.ambry.utils.Pair in project ambry by linkedin.
the class IndexTest method findDeletedEntriesSinceOneByOneTest.
/**
* Uses {@link PersistentIndex#findDeletedEntriesSince(FindToken, long, long)} to get entries one by one.
* @throws StoreException
*/
private void findDeletedEntriesSinceOneByOneTest() throws StoreException {
  Offset journalStartOffset = state.index.journal.getFirstOffset();
  StoreFindToken startToken = new StoreFindToken();
  Offset stoppedAt = null;
  for (Map.Entry<Offset, TreeMap<MockId, TreeSet<IndexValue>>> indexEntry : state.referenceIndex.entrySet()) {
    Offset indexSegmentStartOffset = indexEntry.getKey();
    // index-based tokens are returned as long as
    // 1. the original token is index-based, and
    // 2. the size of entries being obtained is <= the size of records in the current index segment
    if (indexSegmentStartOffset.compareTo(journalStartOffset) >= 0) {
      stoppedAt = indexSegmentStartOffset;
      break;
    }
    IndexSegment segmentOfToken = state.index.getIndexSegments().get(indexSegmentStartOffset);
    for (Map.Entry<MockId, TreeSet<IndexValue>> indexSegmentEntry : indexEntry.getValue().entrySet()) {
      MockId id = indexSegmentEntry.getKey();
      // a key counts as deleted only if its latest value is a DELETE and it has no UNDELETE
      boolean isDeleted = indexSegmentEntry.getValue().last().isDelete()
          && state.getExpectedValue(id, EnumSet.of(PersistentIndex.IndexEntryType.UNDELETE), null) == null;
      StoreFindToken expectedEndToken =
          new StoreFindToken(id, indexSegmentStartOffset, state.sessionId, state.incarnationId,
              segmentOfToken.getResetKey(), segmentOfToken.getResetKeyType(),
              segmentOfToken.getResetKeyLifeVersion());
      long size = getSizeOfAllValues(indexSegmentEntry.getValue());
      doFindDeletedEntriesSinceTest(startToken, size, isDeleted ? Collections.singleton(id) : Collections.emptySet(),
          expectedEndToken);
      startToken = expectedEndToken;
    }
  }
  // from the journal's start offset onwards, journal-based tokens are returned
  Map.Entry<Offset, Pair<MockId, CuratedLogIndexState.LogEntry>> logEntry = state.logOrder.floorEntry(stoppedAt);
  while (logEntry != null) {
    Offset startOffset = logEntry.getKey();
    MockId id = logEntry.getValue().getFirst();
    IndexValue value = state.getExpectedValue(id, false);
    boolean isDeleted = value.isDelete()
        && state.getExpectedValue(id, EnumSet.of(PersistentIndex.IndexEntryType.UNDELETE), null) == null;
    // size returned is the size of the delete if the key has been deleted
    long size = value.getSize();
    IndexSegment segmentOfToken = state.index.getIndexSegments().floorEntry(startOffset).getValue();
    StoreFindToken expectedEndToken =
        new StoreFindToken(startOffset, state.sessionId, state.incarnationId, false, segmentOfToken.getResetKey(),
            segmentOfToken.getResetKeyType(), segmentOfToken.getResetKeyLifeVersion());
    doFindDeletedEntriesSinceTest(startToken, size, isDeleted ? Collections.singleton(id) : Collections.emptySet(),
        expectedEndToken);
    startToken = expectedEndToken;
    logEntry = state.logOrder.higherEntry(logEntry.getKey());
  }
}
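The deletion predicate above appears twice; the journal-phase form, written as a hypothetical helper (not part of IndexTest, purely for clarity), reads:

// Hypothetical helper: "deleted" means the latest value is a DELETE and the key
// was never undeleted afterwards.
private boolean isConsideredDeleted(MockId id) {
  return state.getExpectedValue(id, false).isDelete()
      && state.getExpectedValue(id, EnumSet.of(PersistentIndex.IndexEntryType.UNDELETE), null) == null;
}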
use of com.github.ambry.utils.Pair in project ambry by linkedin.
the class StorageManagerTest method createFilesAndDirsAtPath.
// unrecognizedDirsOnDiskTest() helpers
/**
* Creates {@code fileCount} files and {@code dirCount} directories at {@code dir}.
* @param dir the directory to create the files and dirs at
* @param fileCount the number of files to be created
* @param dirCount the number of directories to be created
* @return the files and directories created, as a {@link Pair} of lists.
* @throws IOException
*/
private Pair<List<File>, List<File>> createFilesAndDirsAtPath(File dir, int fileCount, int dirCount)
    throws IOException {
  List<File> createdFiles = new ArrayList<>();
  for (int i = 0; i < fileCount; i++) {
    File createdFile = new File(dir, "created-file-" + i);
    if (!createdFile.exists()) {
      assertTrue("Could not create " + createdFile, createdFile.createNewFile());
    }
    createdFile.deleteOnExit();
    createdFiles.add(createdFile);
  }
  List<File> createdDirs = new ArrayList<>();
  for (int i = 0; i < dirCount; i++) {
    File createdDir = new File(dir, "created-dir-" + i);
    assertTrue("Could not create " + createdDir, createdDir.mkdir());
    createdDir.deleteOnExit();
    createdDirs.add(createdDir);
  }
  return new Pair<>(createdFiles, createdDirs);
}
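A hypothetical call site, showing how the returned Pair unpacks via getFirst()/getSecond() (tempDir stands in for whatever scratch directory the test uses):

// Illustrative only.
Pair<List<File>, List<File>> created = createFilesAndDirsAtPath(tempDir, 3, 2);
List<File> files = created.getFirst();
List<File> dirs = created.getSecond();
assertEquals("Unexpected file count", 3, files.size());
assertEquals("Unexpected dir count", 2, dirs.size());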
use of com.github.ambry.utils.Pair in project ambry by linkedin.
the class StoreFindTokenTest method equalityTest.
/**
* Tests the correctness of {@link StoreFindToken#equals(Object)}.
*/
@Test
public void equalityTest() {
  UUID sessionId = UUID.randomUUID();
  UUID incarnationId = UUID.randomUUID();
  LogSegmentName logSegmentName = LogSegmentName.generateFirstSegmentName(isLogSegmented);
  Offset offset = new Offset(logSegmentName, 0);
  Offset otherOffset = new Offset(logSegmentName, 1);
  MockId key = new MockId(TestUtils.getRandomString(10));
  MockId otherKey = new MockId(TestUtils.getRandomString(10));
  MockId resetKey = new MockId(TestUtils.getRandomString(10));
  PersistentIndex.IndexEntryType resetKeyType =
      PersistentIndex.IndexEntryType.values()[random.nextInt(PersistentIndex.IndexEntryType.values().length)];
  short resetKeyVersion = (short) random.nextInt(5);
  StoreFindToken initToken = new StoreFindToken();
  StoreFindToken otherInitToken = new StoreFindToken();
  StoreFindToken indexToken =
      new StoreFindToken(key, offset, sessionId, incarnationId, resetKey, resetKeyType, resetKeyVersion);
  StoreFindToken otherIndexToken =
      new StoreFindToken(key, offset, sessionId, incarnationId, resetKey, resetKeyType, resetKeyVersion);
  StoreFindToken journalToken =
      new StoreFindToken(offset, sessionId, incarnationId, false, resetKey, resetKeyType, resetKeyVersion);
  StoreFindToken otherJournalToken =
      new StoreFindToken(offset, sessionId, incarnationId, false, resetKey, resetKeyType, resetKeyVersion);
  StoreFindToken inclusiveJournalToken =
      new StoreFindToken(offset, sessionId, incarnationId, true, resetKey, resetKeyType, resetKeyVersion);
  StoreFindToken otherInclusiveJournalToken =
      new StoreFindToken(offset, sessionId, incarnationId, true, resetKey, resetKeyType, resetKeyVersion);
  // equality
  compareTokens(initToken, initToken);
  compareTokens(initToken, otherInitToken);
  compareTokens(indexToken, indexToken);
  compareTokens(indexToken, otherIndexToken);
  compareTokens(journalToken, journalToken);
  compareTokens(journalToken, otherJournalToken);
  compareTokens(inclusiveJournalToken, otherInclusiveJournalToken);
  UUID newSessionId = getRandomUUID(sessionId);
  UUID newIncarnationId = getRandomUUID(incarnationId);
  // equality even if session IDs are different
  compareTokens(indexToken,
      new StoreFindToken(key, offset, newSessionId, incarnationId, resetKey, resetKeyType, resetKeyVersion));
  compareTokens(journalToken,
      new StoreFindToken(offset, newSessionId, incarnationId, false, resetKey, resetKeyType, resetKeyVersion));
  // equality even if incarnation IDs are different
  compareTokens(indexToken,
      new StoreFindToken(key, offset, sessionId, newIncarnationId, resetKey, resetKeyType, resetKeyVersion));
  compareTokens(journalToken,
      new StoreFindToken(offset, sessionId, newIncarnationId, false, resetKey, resetKeyType, resetKeyVersion));
  // inequality if some fields differ
  List<Pair<StoreFindToken, StoreFindToken>> unequalPairs = new ArrayList<>();
  unequalPairs.add(new Pair<>(initToken, indexToken));
  unequalPairs.add(new Pair<>(initToken, journalToken));
  unequalPairs.add(new Pair<>(initToken, inclusiveJournalToken));
  unequalPairs.add(new Pair<>(indexToken, journalToken));
  unequalPairs.add(new Pair<>(indexToken, inclusiveJournalToken));
  unequalPairs.add(new Pair<>(indexToken,
      new StoreFindToken(key, otherOffset, sessionId, incarnationId, null, null, UNINITIALIZED_RESET_KEY_VERSION)));
  unequalPairs.add(new Pair<>(indexToken,
      new StoreFindToken(otherKey, offset, sessionId, incarnationId, null, null, UNINITIALIZED_RESET_KEY_VERSION)));
  unequalPairs.add(new Pair<>(journalToken,
      new StoreFindToken(otherOffset, sessionId, incarnationId, false, null, null, UNINITIALIZED_RESET_KEY_VERSION)));
  unequalPairs.add(new Pair<>(inclusiveJournalToken, journalToken));
  unequalPairs.add(new Pair<>(indexToken,
      new StoreFindToken(key, offset, sessionId, incarnationId, resetKey, resetKeyType,
          UNINITIALIZED_RESET_KEY_VERSION)));
  for (Pair<StoreFindToken, StoreFindToken> unequalPair : unequalPairs) {
    StoreFindToken first = unequalPair.getFirst();
    StoreFindToken second = unequalPair.getSecond();
    assertFalse("StoreFindTokens [" + first + "] and [" + second + "] should not be equal", first.equals(second));
  }
}
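compareTokens(...) is defined elsewhere in StoreFindTokenTest; a plausible sketch of its shape (an assumption, not the verified source) is symmetric equality plus matching hash codes, which is what the equality cases above rely on:

// Sketch only; the real helper may assert more (e.g. serialized sizes).
private void compareTokens(StoreFindToken reference, StoreFindToken toCheck) {
  assertEquals("Tokens do not match", reference, toCheck);
  assertEquals("Hash codes do not match", reference.hashCode(), toCheck.hashCode());
}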