Use of com.github.ambry.store.StoreFindToken in project ambry by linkedin: class IndexTest, method findEntriesSinceOnCrashRestartTest.
/**
* Tests behaviour of {@link PersistentIndex#findEntriesSince(FindToken, long)} on crash restart for the following
* scenario:
* After restart, say no new writes have gone into the store.
* For a findEntriesSince() call with an offset beyond logEndOffsetOnStartup and a different session id, the index
* will reset the token to logEndOffsetOnStartup and return it.
* On the subsequent findEntriesSince() call, the index should start returning entries from that offset, i.e. it
* should treat the reset token as inclusive.
* @throws StoreException
*/
@Test
public void findEntriesSinceOnCrashRestartTest() throws StoreException {
UUID oldSessionId = state.sessionId;
state.addPutEntries(3, CuratedLogIndexState.PUT_RECORD_SIZE, Utils.Infinite_Time);
state.reloadIndex(true, true);
// create a token that will be past the log end offset on start up after restart.
StoreFindToken startToken = new StoreFindToken(new Offset(state.log.getEndOffset().getName(), (state.log.getEndOffset().getOffset() + (2 * CuratedLogIndexState.PUT_RECORD_SIZE))), oldSessionId, state.incarnationId, false, null, null, UNINITIALIZED_RESET_KEY_VERSION);
// end token should point to log end offset on startup
long bytesRead = state.index.getAbsolutePositionInLogForOffset(state.log.getEndOffset());
IndexSegment segmentOfToken = state.index.getIndexSegments().floorEntry(state.log.getEndOffset()).getValue();
StoreFindToken expectedEndToken = new StoreFindToken(state.log.getEndOffset(), state.sessionId, state.incarnationId, true, segmentOfToken.getResetKey(), segmentOfToken.getResetKeyType(), segmentOfToken.getResetKeyLifeVersion());
expectedEndToken.setBytesRead(bytesRead);
// Fetch the FindToken returned from findEntriesSince
FindInfo findInfo = state.index.findEntriesSince(startToken, Long.MAX_VALUE);
assertEquals("EndToken mismatch ", expectedEndToken, findInfo.getFindToken());
assertEquals("No entries should have been returned ", 0, findInfo.getMessageEntries().size());
// add 2 entries to index and log
Set<MockId> expectedEntries = new HashSet<>();
state.addPutEntries(1, CuratedLogIndexState.PUT_RECORD_SIZE, Utils.Infinite_Time);
expectedEntries.add(state.logOrder.lastEntry().getValue().getFirst());
state.addPutEntries(1, CuratedLogIndexState.PUT_RECORD_SIZE, Utils.Infinite_Time);
expectedEntries.add(state.logOrder.lastEntry().getValue().getFirst());
bytesRead = state.index.getAbsolutePositionInLogForOffset(state.index.getCurrentEndOffset());
segmentOfToken = state.index.getIndexSegments().floorEntry(state.index.journal.getLastOffset()).getValue();
expectedEndToken = new StoreFindToken(state.index.journal.getLastOffset(), state.sessionId, state.incarnationId, false, segmentOfToken.getResetKey(), segmentOfToken.getResetKeyType(), segmentOfToken.getResetKeyLifeVersion());
expectedEndToken.setBytesRead(bytesRead);
doFindEntriesSinceTest((StoreFindToken) findInfo.getFindToken(), Long.MAX_VALUE, expectedEntries, expectedEndToken);
}
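The inclusive-reset behaviour verified above matters because callers feed each returned token straight back into the next findEntriesSince() call. The following is a minimal sketch, not taken from Ambry, of such a fetch loop; the class and method names are hypothetical, the loop structure and stop condition are assumptions, and it is placed in the store package (like IndexTest itself) since PersistentIndex and the StoreFindToken constructors may not be visible outside it. Only calls that already appear in this section (findEntriesSince, getFindToken, getMessageEntries) are relied on.

// Hypothetical sketch of a caller driving findEntriesSince() in a loop; not Ambry code.
package com.github.ambry.store;

class FindEntriesSinceLoopSketch {
  // Repeatedly fetch entries, feeding each returned token back in as the cursor.
  // If the index resets a stale token (e.g. one pointing past logEndOffsetOnStartup
  // from an old session), the caller simply continues from the reset token, which is
  // why the index must treat that token as inclusive on the next call.
  static void drainEntries(PersistentIndex index, long maxTotalSizeOfEntries) throws StoreException {
    StoreFindToken token = new StoreFindToken(); // uninitialized token: start from the beginning
    while (true) {
      FindInfo findInfo = index.findEntriesSince(token, maxTotalSizeOfEntries);
      for (MessageInfo messageInfo : findInfo.getMessageEntries()) {
        // process the entry (e.g. replicate the blob or record the key)
        System.out.println("found key: " + messageInfo.getStoreKey());
      }
      StoreFindToken nextToken = (StoreFindToken) findInfo.getFindToken();
      if (findInfo.getMessageEntries().isEmpty() && nextToken.equals(token)) {
        break; // no new entries and no token movement: caught up with the log
      }
      token = nextToken;
    }
  }
}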
Use of com.github.ambry.store.StoreFindToken in project ambry by linkedin: class IndexTest, method findEntriesSinceTtlUpdateAndPutInJournalTest.
/**
* Test the refinements to the size limit logic for cases where a journal query returns both a put entry and a TTL
* update entry. In these cases, the put entry size will be counted against the size limit since the index assumes
* that the replicator probably wants to fetch the original blob as well as the TTL update.
* @throws StoreException
*/
private void findEntriesSinceTtlUpdateAndPutInJournalTest() throws StoreException {
state.closeAndClearIndex();
state.properties.setProperty("store.index.max.number.of.inmem.elements", "10");
state.reloadIndex(false, false);
long expiresAtMs = state.time.milliseconds() + TimeUnit.HOURS.toMillis(1);
List<IndexEntry> putEntries = state.addPutEntries(4, PUT_RECORD_SIZE, expiresAtMs);
state.makePermanent((MockId) putEntries.get(0).getKey(), false);
state.makePermanent((MockId) putEntries.get(1).getKey(), false);
StoreFindToken startToken = new StoreFindToken();
Offset tokenOffset = putEntries.get(0).getValue().getOffset();
StoreFindToken expectedEndToken = getExpectedJournalEndToken(tokenOffset, putEntries.get(0).getKey());
Set<MockId> expectedKeys = Collections.singleton((MockId) putEntries.get(0).getKey());
// only one ID should be returned since the put size should be counted against the size limit
doFindEntriesSinceTest(startToken, TTL_UPDATE_RECORD_SIZE * 2, expectedKeys, expectedEndToken);
startToken = expectedEndToken;
tokenOffset = putEntries.get(1).getValue().getOffset();
expectedEndToken = getExpectedJournalEndToken(tokenOffset, putEntries.get(1).getKey());
expectedKeys = Collections.singleton((MockId) putEntries.get(1).getKey());
// only one ID should be returned since the put size should be counted against the size limit
doFindEntriesSinceTest(startToken, TTL_UPDATE_RECORD_SIZE * 2, expectedKeys, expectedEndToken);
startToken = expectedEndToken;
// the last key is the TTL update for the second blob
expectedEndToken = getExpectedJournalEndToken(state.logOrder.lastKey(), putEntries.get(1).getKey());
expectedKeys = putEntries.stream().map(entry -> (MockId) entry.getKey()).collect(Collectors.toSet());
// expect the last two put entries and the two TTL update journal entries to be processed
doFindEntriesSinceTest(startToken, 2 * PUT_RECORD_SIZE + 2 * TTL_UPDATE_RECORD_SIZE, expectedKeys, expectedEndToken);
}
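The size-limit arithmetic behind the first two calls above: with a budget of TTL_UPDATE_RECORD_SIZE * 2, charging the original put's size on top of the TTL update record exhausts the budget after a single key. Below is a self-contained illustration of that accounting; the class and method names and the concrete sizes are hypothetical and this is not Ambry's implementation.

// Illustrative only: why a budget of two TTL update records admits just one key per call
// when the original put is also charged. All sizes are made up.
final class TtlUpdateSizeBudgetSketch {
  // Charge both the TTL update record and the original put against the budget,
  // mirroring the assumption that the replicator will fetch the blob as well.
  static long charge(long remainingBudget, long ttlUpdateRecordSize, long originalPutSize) {
    return remainingBudget - (ttlUpdateRecordSize + originalPutSize);
  }

  public static void main(String[] args) {
    long ttlUpdateRecordSize = 200L;  // hypothetical
    long putRecordSize = 1000L;       // hypothetical, larger than a TTL update record
    long budget = 2 * ttlUpdateRecordSize;
    long remaining = charge(budget, ttlUpdateRecordSize, putRecordSize);
    // remaining <= 0 after one put + TTL update pair, so only one key is returned,
    // matching the expectation in the test above.
    System.out.println("remaining budget after one key: " + remaining);
  }
}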
Use of com.github.ambry.store.StoreFindToken in project ambry by linkedin: class IndexTest, method findEntriesSinceInEmptyIndexTest.
// findEntriesSinceTest(), findDeletedEntriesSinceTest() and findEntriesSinceOnRestartTest() helpers
/**
* Tests the case where {@link PersistentIndex#findEntriesSince(FindToken, long)} or
* {@link PersistentIndex#findDeletedEntriesSince(FindToken, long, long)} is run on an empty index.
* @param deletedEntries if {@code true}, the test is on
* {@link PersistentIndex#findDeletedEntriesSince(FindToken, long, long)}.
* @throws StoreException
*/
private void findEntriesSinceInEmptyIndexTest(boolean deletedEntries) throws StoreException {
state.closeAndClearIndex();
state.reloadIndex(true, false);
StoreFindToken token = new StoreFindToken();
token.setBytesRead(0);
if (deletedEntries) {
doFindDeletedEntriesSinceTest(token, Long.MAX_VALUE, Collections.emptySet(), token);
} else {
doFindEntriesSinceTest(token, Long.MAX_VALUE, Collections.emptySet(), token);
}
}
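The doFindEntriesSinceTest / doFindDeletedEntriesSinceTest helpers are not shown in this section. A minimal sketch of what the empty-index expectation amounts to, using only calls that already appear above, follows; the class and method names are hypothetical, and the package placement is an assumption made for visibility of the store-internal classes.

// Hypothetical restatement of the empty-index expectation; not the actual helper.
package com.github.ambry.store;

import static org.junit.Assert.assertEquals;

final class EmptyIndexExpectationSketch {
  // On an empty index, an uninitialized token should come back unchanged with no entries.
  static void checkEmptyIndex(PersistentIndex index) throws StoreException {
    StoreFindToken token = new StoreFindToken();
    token.setBytesRead(0);
    FindInfo findInfo = index.findEntriesSince(token, Long.MAX_VALUE);
    assertEquals("no entries expected from an empty index", 0, findInfo.getMessageEntries().size());
    assertEquals("token should be returned unchanged", token, findInfo.getFindToken());
  }
}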
Use of com.github.ambry.store.StoreFindToken in project ambry by linkedin: class IndexTest, method findEntriesSinceOnRestartTest.
/**
* Tests behaviour of {@link PersistentIndex#findEntriesSince(FindToken, long)} on crash-restart of the index and some
* recovery. Specifically tests cases where tokens have been handed out before the "crash".
* @throws IOException
* @throws StoreException
*/
@Test
public void findEntriesSinceOnRestartTest() throws IOException, StoreException {
Offset lastRecordOffset = state.index.journal.getLastOffset();
state.appendToLog(2 * CuratedLogIndexState.PUT_RECORD_SIZE);
// this record will be recovered.
FileSpan firstRecordFileSpan = state.log.getFileSpanForMessage(state.index.getCurrentEndOffset(), CuratedLogIndexState.PUT_RECORD_SIZE);
// this record will not be recovered.
FileSpan secondRecordFileSpan = state.log.getFileSpanForMessage(firstRecordFileSpan.getEndOffset(), CuratedLogIndexState.PUT_RECORD_SIZE);
// if there is no bad shutdown but the store token is past the index end offset, it is an error state
StoreFindToken startToken = new StoreFindToken(secondRecordFileSpan.getStartOffset(), new UUID(1, 1), state.incarnationId, false, null, null, UNINITIALIZED_RESET_KEY_VERSION);
doFindEntriesSinceFailureTest(startToken, StoreErrorCodes.Unknown_Error);
UUID oldSessionId = state.sessionId;
final MockId newId = state.getUniqueId();
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
long operationTimeMs = state.time.milliseconds();
// add to allKeys() so that doFindEntriesSinceTest() works correctly.
IndexValue putValue = new IndexValue(CuratedLogIndexState.PUT_RECORD_SIZE, firstRecordFileSpan.getStartOffset(), Utils.Infinite_Time, operationTimeMs, accountId, containerId);
state.allKeys.computeIfAbsent(newId, k -> new TreeSet<>()).add(putValue);
state.recovery = (read, startOffset, endOffset, factory) -> Collections.singletonList(new MessageInfo(newId, CuratedLogIndexState.PUT_RECORD_SIZE, accountId, containerId, operationTimeMs));
state.reloadIndex(true, true);
// If there is no incarnationId in the incoming token, for backwards compatibility purposes we consider it as valid
// and proceed with session id validation and so on.
UUID[] incarnationIds = new UUID[] { state.incarnationId, null };
for (UUID incarnationIdToTest : incarnationIds) {
long bytesRead = state.index.getAbsolutePositionInLogForOffset(firstRecordFileSpan.getEndOffset());
// create a token that will be past the index end offset on startup after recovery.
if (incarnationIdToTest == null) {
startToken = getTokenWithNullIncarnationId(new StoreFindToken(secondRecordFileSpan.getEndOffset(), oldSessionId, state.incarnationId, false, null, null, UNINITIALIZED_RESET_KEY_VERSION));
assertNull("IncarnationId is expected to be null ", startToken.getIncarnationId());
} else {
startToken = new StoreFindToken(secondRecordFileSpan.getEndOffset(), oldSessionId, incarnationIdToTest, false, null, null, UNINITIALIZED_RESET_KEY_VERSION);
}
// token should get reset internally, no keys should be returned and the returned token should be correct (offset
// in it will be the current log end offset = firstRecordFileSpan.getEndOffset()). The returned token should have correct reset key info.
IndexSegment segmentOfToken = state.index.getIndexSegments().floorEntry(firstRecordFileSpan.getEndOffset()).getValue();
StoreFindToken expectedEndToken = new StoreFindToken(firstRecordFileSpan.getEndOffset(), state.sessionId, state.incarnationId, true, segmentOfToken.getResetKey(), segmentOfToken.getResetKeyType(), segmentOfToken.getResetKeyLifeVersion());
expectedEndToken.setBytesRead(bytesRead);
doFindEntriesSinceTest(startToken, Long.MAX_VALUE, Collections.emptySet(), expectedEndToken);
// create a token that is not past the index end offset on startup after recovery. Should work as expected
if (incarnationIdToTest == null) {
startToken = getTokenWithNullIncarnationId(new StoreFindToken(lastRecordOffset, oldSessionId, state.incarnationId, false, null, null, UNINITIALIZED_RESET_KEY_VERSION));
assertNull("IncarnationId is expected to be null ", startToken.getIncarnationId());
} else {
startToken = new StoreFindToken(lastRecordOffset, oldSessionId, incarnationIdToTest, false, null, null, UNINITIALIZED_RESET_KEY_VERSION);
}
segmentOfToken = state.index.getIndexSegments().floorEntry(firstRecordFileSpan.getStartOffset()).getValue();
expectedEndToken = new StoreFindToken(firstRecordFileSpan.getStartOffset(), state.sessionId, state.incarnationId, false, segmentOfToken.getResetKey(), segmentOfToken.getResetKeyType(), segmentOfToken.getResetKeyLifeVersion());
expectedEndToken.setBytesRead(bytesRead);
doFindEntriesSinceTest(startToken, Long.MAX_VALUE, Collections.singleton(newId), expectedEndToken);
}
}
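The branches exercised above (an error when a cleanly shut down store receives a token past its index end offset, acceptance of tokens without an incarnationId for backwards compatibility, and a reset to the index end offset on startup after a crash) can be summarised conceptually. The sketch below is a restatement of that validation for reading purposes, not PersistentIndex's actual code; every name in it is hypothetical.

// Conceptual sketch of the token handling this test exercises on restart; NOT Ambry's implementation.
final class TokenValidationSketch {
  enum Outcome { ERROR, RESET_TO_INDEX_END_ON_STARTUP, USE_AS_IS }

  static Outcome validate(boolean tokenPastIndexEndOffsetOnStartup, boolean cleanShutdown) {
    // Note: a token carrying no incarnationId is accepted for backwards compatibility
    // and goes through the same checks below.
    if (tokenPastIndexEndOffsetOnStartup) {
      // A token ahead of the recovered index is only legitimate after a crash, where log
      // writes beyond the recovered offset were lost; after a clean shutdown it is an error.
      return cleanShutdown ? Outcome.ERROR : Outcome.RESET_TO_INDEX_END_ON_STARTUP;
    }
    // The token still points inside the recovered index and can be served from directly.
    return Outcome.USE_AS_IS;
  }
}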
Use of com.github.ambry.store.StoreFindToken in project ambry by linkedin: class IndexTest, method findDeletedEntriesSinceToJournalBasedTest.
/**
* Tests all cases of {@link PersistentIndex#findDeletedEntriesSince(FindToken, long, long)} that result in a
* journal-based {@link StoreFindToken} being returned.
* 1. Uninitialized -> Journal
* 2. Index -> Journal
* 3. Journal -> Journal
* 4. No movement.
* @throws StoreException
*/
private void findDeletedEntriesSinceToJournalBasedTest() throws StoreException {
IndexSegment segmentOfToken = state.index.getIndexSegments().lastEntry().getValue();
StoreFindToken absoluteEndToken = new StoreFindToken(state.logOrder.lastKey(), state.sessionId, state.incarnationId, false, segmentOfToken.getResetKey(), segmentOfToken.getResetKeyType(), segmentOfToken.getResetKeyLifeVersion());
// ------------------
// 1. Uninitialized -> Journal
doFindDeletedEntriesSinceTest(new StoreFindToken(), Long.MAX_VALUE, state.deletedKeys, absoluteEndToken);
// ------------------
// 2. Index -> Journal
Offset secondIndexSegmentStartOffset = state.referenceIndex.higherKey(state.referenceIndex.firstKey());
// second index segment contains the first delete entry
StoreKey firstDeletedKey = getDeletedKeyFromIndexSegment(secondIndexSegmentStartOffset);
StoreFindToken startToken = new StoreFindToken(firstDeletedKey, secondIndexSegmentStartOffset, state.sessionId, state.incarnationId, null, null, UNINITIALIZED_RESET_KEY_VERSION);
Set<MockId> expectedKeys = new HashSet<>(state.deletedKeys);
expectedKeys.remove(firstDeletedKey);
doFindDeletedEntriesSinceTest(startToken, Long.MAX_VALUE, expectedKeys, absoluteEndToken);
// ------------------
// 3. Journal -> Journal
// a. Token no longer in journal
startToken = new StoreFindToken(state.getExpectedValue((MockId) firstDeletedKey, false).getOffset(), state.sessionId, state.incarnationId, false, null, null, UNINITIALIZED_RESET_KEY_VERSION);
doFindDeletedEntriesSinceTest(startToken, Long.MAX_VALUE, state.deletedKeys, absoluteEndToken);
// b. Token still in journal
startToken = new StoreFindToken(state.index.journal.getFirstOffset(), state.sessionId, state.incarnationId, false, null, null, UNINITIALIZED_RESET_KEY_VERSION);
expectedKeys.clear();
for (Map.Entry<Offset, Pair<MockId, CuratedLogIndexState.LogEntry>> entry : state.logOrder.tailMap(startToken.getOffset(), false).entrySet()) {
if (entry.getValue().getSecond().indexValue.isDelete() && state.getExpectedValue(entry.getValue().getFirst(), EnumSet.of(PersistentIndex.IndexEntryType.UNDELETE), null) == null) {
expectedKeys.add(entry.getValue().getFirst());
}
}
doFindDeletedEntriesSinceTest(startToken, Long.MAX_VALUE, expectedKeys, absoluteEndToken);
// ------------------
// 4. Journal no change
doFindDeletedEntriesSinceTest(absoluteEndToken, Long.MAX_VALUE, Collections.emptySet(), absoluteEndToken);
}
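The two flavours of StoreFindToken exercised above differ only in which constructor is used: an index-based token names a store key plus the start offset of its index segment, while a journal-based token names a log offset directly, with a boolean that appears to mark the offset as inclusive (true for the reset tokens seen earlier). Both constructor calls already appear in this section; the summary below merely places them side by side. The helper class, its constant, and the package placement are assumptions for illustration only.

// Hypothetical helper; only the two StoreFindToken constructors already used above are relied on.
package com.github.ambry.store;

import java.util.UUID;

final class TokenFlavoursSketch {
  static final short UNINITIALIZED_RESET_KEY_VERSION = -1; // stand-in for the constant referenced in the test

  // Index-based token: positions the scan at a key within an index segment (case 2 above).
  static StoreFindToken indexBased(StoreKey key, Offset indexSegmentStartOffset, UUID sessionId, UUID incarnationId) {
    return new StoreFindToken(key, indexSegmentStartOffset, sessionId, incarnationId, null, null,
        UNINITIALIZED_RESET_KEY_VERSION);
  }

  // Journal-based token: positions the scan at a log offset in the journal (cases 1, 3 and 4 above).
  static StoreFindToken journalBased(Offset journalOffset, UUID sessionId, UUID incarnationId) {
    return new StoreFindToken(journalOffset, sessionId, incarnationId, false, null, null,
        UNINITIALIZED_RESET_KEY_VERSION);
  }
}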