Use of com.github.ambry.store.StoreFindToken in project ambry by LinkedIn.
The class ServerTestUtil, method checkReplicaTokens.
/**
 * Repeatedly checks the replication token file until a certain offset value is found for all remote nodes of a
 * certain partition. Fails if {@code numTries} is exceeded or a token offset larger than the target is found.
 * @param clusterMap the cluster map that contains the data node to inspect.
 * @param dataNodeId the data node to inspect.
 * @param targetOffset the token offset to look for in the {@code targetPartition}.
 * @param targetPartition the name of the partition in which to look for the {@code targetOffset}.
 * @throws Exception
 */
private static void checkReplicaTokens(MockClusterMap clusterMap, DataNodeId dataNodeId, long targetOffset,
    String targetPartition) throws Exception {
  List<String> mountPaths = ((MockDataNodeId) dataNodeId).getMountPaths();
  // we should have an entry for each partition - remote replica pair
  Set<String> completeSetToCheck = new HashSet<>();
  List<ReplicaId> replicaIds = clusterMap.getReplicaIds(dataNodeId);
  int numRemoteNodes = 0;
  for (ReplicaId replicaId : replicaIds) {
    List<? extends ReplicaId> peerReplicas = replicaId.getPeerReplicaIds();
    if (replicaId.getPartitionId().isEqual(targetPartition)) {
      numRemoteNodes = peerReplicas.size();
    }
    for (ReplicaId peerReplica : peerReplicas) {
      completeSetToCheck.add(replicaId.getPartitionId().toString() + peerReplica.getDataNodeId().getHostname()
          + peerReplica.getDataNodeId().getPort());
    }
  }
  StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", clusterMap);
  FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
  int numTries = 4;
  boolean foundTarget = false;
  while (!foundTarget && numTries > 0) {
    Thread.sleep(5000);
    numTries--;
    Set<String> setToCheck = new HashSet<>(completeSetToCheck);
    int numFound = 0;
    for (String mountPath : mountPaths) {
      File replicaTokenFile = new File(mountPath, "replicaTokens");
      if (replicaTokenFile.exists()) {
        CrcInputStream crcStream = new CrcInputStream(new FileInputStream(replicaTokenFile));
        DataInputStream dataInputStream = new DataInputStream(crcStream);
        try {
          short version = dataInputStream.readShort();
          assertEquals(1, version);
          // the trailing 8 bytes are the CRC, so stop once only those remain
          while (dataInputStream.available() > 8) {
            // read partition id
            PartitionId partitionId = clusterMap.getPartitionIdFromStream(dataInputStream);
            // read remote node host name
            String hostname = Utils.readIntString(dataInputStream);
            // read remote replica path
            Utils.readIntString(dataInputStream);
            // read remote port
            int port = dataInputStream.readInt();
            assertTrue(setToCheck.contains(partitionId.toString() + hostname + port));
            setToCheck.remove(partitionId.toString() + hostname + port);
            // read total bytes read from local store
            dataInputStream.readLong();
            // read replica type
            ReplicaType replicaType = ReplicaType.values()[dataInputStream.readShort()];
            // read replica token
            StoreFindToken token = (StoreFindToken) factory.getFindToken(dataInputStream);
            System.out.println("partitionId " + partitionId + " hostname " + hostname + " port " + port + " token " + token);
            Offset endTokenOffset = token.getOffset();
            long parsedToken = endTokenOffset == null ? -1 : endTokenOffset.getOffset();
            System.out.println("The parsed token is " + parsedToken);
            if (partitionId.isEqual(targetPartition)) {
              assertFalse("Parsed offset: " + parsedToken + " must not be larger than target value: " + targetOffset,
                  parsedToken > targetOffset);
              if (parsedToken == targetOffset) {
                numFound++;
              }
            } else {
              assertEquals("Tokens should remain at -1 offsets on unmodified partitions", -1, parsedToken);
            }
          }
          long crc = crcStream.getValue();
          assertEquals(crc, dataInputStream.readLong());
        } catch (IOException e) {
          fail();
        } finally {
          dataInputStream.close();
        }
      }
    }
    if (numFound == numRemoteNodes) {
      foundTarget = true;
    }
  }
  if (!foundTarget) {
    fail("Could not find target token offset: " + targetOffset);
  }
}
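For reference, the reads above imply the following layout for the replicaTokens file (reconstructed from the parsing code, not taken from separate documentation; the field names are descriptive):

  --
  version (short, expected to be 1)
  for each partition - remote replica pair:
    partition id
    remote host name (int-length-prefixed string)
    remote replica path (int-length-prefixed string)
    remote port (int)
    total bytes read from the local store (long)
    replica type (short, ordinal of ReplicaType)
    replica token (StoreFindToken)
  crc (long)
  --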
Use of com.github.ambry.store.StoreFindToken in project ambry by LinkedIn.
The class BlobStoreCompactor, method isDeleteTombstoneRemovable.
/**
 * Checks if a given delete tombstone is removable. There are two cases where a delete tombstone can be safely
 * removed:
 * 1. the delete record has a finite expiration time and has already expired;
 * 2. all peer replica tokens have passed the position of this delete (that is, they have already replicated it).
 * @param deleteIndexEntry the {@link IndexEntry} associated with the delete tombstone.
 * @param currentIndexSegment the {@link IndexSegment} the delete tombstone comes from.
 * @return {@code true} if this delete tombstone can be safely removed, {@code false} otherwise.
 */
private boolean isDeleteTombstoneRemovable(IndexEntry deleteIndexEntry, IndexSegment currentIndexSegment)
    throws StoreException {
  if (srcIndex.isExpired(deleteIndexEntry.getValue())) {
    return true;
  }
  if (remoteTokenTracker == null) {
    return false;
  }
  for (Map.Entry<String, FindToken> entry : remoteTokenTracker.getPeerReplicaAndToken().entrySet()) {
    FindToken token = srcIndex.resetTokenIfRequired((StoreFindToken) entry.getValue());
    if (!token.equals(entry.getValue())) {
      // the incarnation id has changed or there was an unclean shutdown
      return false;
    }
    token = srcIndex.revalidateFindToken(entry.getValue());
    if (!token.equals(entry.getValue())) {
      // the log segment the token refers to has already been compacted
      return false;
    }
    switch (token.getType()) {
      case Uninitialized:
        return false;
      case JournalBased:
        // compaction only operates on segments that are out of the journal, so a valid journal-based token
        // must already be past the delete tombstone.
        break;
      case IndexBased:
        // if code reaches here, the index-based token is valid (it didn't get reset). Check two rules:
        // 1. the token's index segment must not be behind the delete tombstone's index segment;
        // 2. if both are in the same segment, compare the store keys (a sealed index segment is sorted by key).
        StoreFindToken indexBasedToken = (StoreFindToken) token;
        if (indexBasedToken.getOffset().compareTo(currentIndexSegment.getStartOffset()) < 0) {
          // the index-based token is behind the current index segment (it hasn't reached this tombstone yet)
          return false;
        }
        if (indexBasedToken.getOffset().compareTo(currentIndexSegment.getStartOffset()) == 0) {
          // the index-based token refers to the current index segment, so the keys need to be compared
          if (indexBasedToken.getStoreKey().compareTo(deleteIndexEntry.getKey()) <= 0) {
            return false;
          }
        }
        // if the token's offset > the current index segment's start offset, the token is past the tombstone
        break;
      default:
        throw new IllegalArgumentException("Unsupported token type in compaction: " + token.getType());
    }
  }
  srcMetrics.permanentDeleteTombstonePurgeCount.inc();
  return true;
}
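A condensed sketch of the index-based check above, pulled out as a standalone predicate. The helper name and shape are illustrative, not part of the source; it only restates the comparison rule the switch statement applies:

  // returns true only when the peer's index-based token is strictly past the tombstone,
  // i.e. the peer has already replicated the delete
  private static boolean tokenIsPastTombstone(StoreFindToken indexBasedToken, IndexSegment tombstoneSegment,
      StoreKey deleteKey) {
    int cmp = indexBasedToken.getOffset().compareTo(tombstoneSegment.getStartOffset());
    if (cmp < 0) {
      return false; // the token has not reached the tombstone's index segment yet
    }
    if (cmp == 0) {
      // same index segment: sealed segments are key-sorted, so the token's key must be past the delete's key
      return indexBasedToken.getStoreKey().compareTo(deleteKey) > 0;
    }
    return true; // the token is past the entire index segment that holds the tombstone
  }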
Use of com.github.ambry.store.StoreFindToken in project ambry by LinkedIn.
The class ServerHardDeleteTest, method ensureCleanupTokenCatchesUp.
/**
 * Waits for and ensures that the hard delete cleanup token catches up to the expected token value.
 * @param path the path to the cleanup token.
 * @param mockClusterMap the {@link MockClusterMap} being used for the cluster.
 * @param expectedTokenValue the expected value that the cleanup token should contain. The method keeps reopening
 *                           the file and reading the value until this value is reached or a predefined timeout
 *                           expires.
 * @throws Exception if there were any I/O errors or the sleep gets interrupted.
 */
void ensureCleanupTokenCatchesUp(String path, MockClusterMap mockClusterMap, long expectedTokenValue) throws Exception {
  final int TIMEOUT = 10000;
  File cleanupTokenFile = new File(path, "cleanuptoken");
  StoreFindToken endToken;
  long parsedTokenValue = -1;
  long endTime = SystemTime.getInstance().milliseconds() + TIMEOUT;
  do {
    if (cleanupTokenFile.exists()) {
      /* The cleanup token format is as follows:
         --
         token_version
         startTokenForRecovery
         endTokenForRecovery
         numBlobsInRange
         pause flag
         --
         blob1_blobReadOptions {version, offset, sz, ttl, key}
         blob2_blobReadOptions
         ....
         blobN_blobReadOptions
         --
         length_of_blob1_messageStoreRecoveryInfo
         blob1_messageStoreRecoveryInfo {headerVersion, userMetadataVersion, userMetadataSize, blobRecordVersion,
          blobType, blobStreamSize}
         length_of_blob2_messageStoreRecoveryInfo
         blob2_messageStoreRecoveryInfo
         ....
         length_of_blobN_messageStoreRecoveryInfo
         blobN_messageStoreRecoveryInfo
         crc
         ---
       */
      CrcInputStream crcStream = new CrcInputStream(new FileInputStream(cleanupTokenFile));
      DataInputStream stream = new DataInputStream(crcStream);
      try {
        short version = stream.readShort();
        Assert.assertEquals(version, HardDeleter.Cleanup_Token_Version_V1);
        StoreKeyFactory storeKeyFactory = Utils.getObj("com.github.ambry.commons.BlobIdFactory", mockClusterMap);
        FindTokenFactory factory = Utils.getObj("com.github.ambry.store.StoreFindTokenFactory", storeKeyFactory);
        // the start token for recovery is read and discarded; only the end token's offset is of interest here
        factory.getFindToken(stream);
        endToken = (StoreFindToken) factory.getFindToken(stream);
        Offset endTokenOffset = endToken.getOffset();
        parsedTokenValue = endTokenOffset == null ? -1 : endTokenOffset.getOffset();
        boolean pauseFlag = stream.readByte() == (byte) 1;
        int num = stream.readInt();
        List<StoreKey> storeKeyList = new ArrayList<>(num);
        for (int i = 0; i < num; i++) {
          // read BlobReadOptions
          short blobReadOptionsVersion = stream.readShort();
          switch (blobReadOptionsVersion) {
            case 1:
              Offset.fromBytes(stream);
              stream.readLong();
              stream.readLong();
              StoreKey key = storeKeyFactory.getStoreKey(stream);
              storeKeyList.add(key);
              break;
            default:
              Assert.fail("Unrecognized BlobReadOptions version: " + blobReadOptionsVersion);
          }
        }
        for (int i = 0; i < num; i++) {
          int length = stream.readInt();
          short headerVersion = stream.readShort();
          short userMetadataVersion = stream.readShort();
          int userMetadataSize = stream.readInt();
          short blobRecordVersion = stream.readShort();
          if (blobRecordVersion == MessageFormatRecord.Blob_Version_V2) {
            short blobType = stream.readShort();
          }
          long blobStreamSize = stream.readLong();
          StoreKey key = storeKeyFactory.getStoreKey(stream);
          Assert.assertTrue(storeKeyList.get(i).equals(key));
        }
        long crc = crcStream.getValue();
        Assert.assertEquals(crc, stream.readLong());
        Thread.sleep(1000);
      } finally {
        stream.close();
      }
    }
  } while (SystemTime.getInstance().milliseconds() < endTime && parsedTokenValue < expectedTokenValue);
  Assert.assertEquals(expectedTokenValue, parsedTokenValue);
}
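A hypothetical call site for this helper. The arguments are illustrative only: in the real test the path would come from a replica's mount path plus the partition directory, and the expected value from the sizes of the records written before the hard deletes.

  // illustrative only: poll the cleanup token of partition "0" on the chosen replica
  ensureCleanupTokenCatchesUp(replicaId.getMountPath() + File.separator + "0", mockClusterMap, expectedTokenValue);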
Use of com.github.ambry.store.StoreFindToken in project ambry by LinkedIn.
The class IndexTest, method findEntriesSinceTtlUpdateCornerCaseTest.
/**
 * Tests the case where the PUT and TTL update entries are in different index segments and the journal has the
 * TTL update entry (immaterial whether it has the PUT or not).
 * @throws StoreException
 */
private void findEntriesSinceTtlUpdateCornerCaseTest() throws StoreException {
  state.closeAndClearIndex();
  state.properties.setProperty("store.index.max.number.of.inmem.elements", "1");
  state.reloadIndex(false, false);
  long expiresAtMs = state.time.milliseconds() + TimeUnit.HOURS.toMillis(1);
  MockId id = (MockId) state.addPutEntries(1, PUT_RECORD_SIZE, expiresAtMs).get(0).getKey();
  state.makePermanent(id, false);
  IndexSegment segmentOfToken = state.index.getIndexSegments().floorEntry(state.logOrder.lastKey()).getValue();
  StoreFindToken expectedEndToken =
      new StoreFindToken(state.logOrder.lastKey(), state.sessionId, state.incarnationId, false,
          segmentOfToken.getResetKey(), segmentOfToken.getResetKeyType(), segmentOfToken.getResetKeyLifeVersion());
  expectedEndToken.setBytesRead(state.index.getLogUsedCapacity());
  doFindEntriesSinceTest(new StoreFindToken(), Long.MAX_VALUE, Collections.singleton(id), expectedEndToken);
  findEntriesSinceOneByOneTest();
}
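Note the trick in the setup: capping store.index.max.number.of.inmem.elements at 1 forces every entry into its own index segment, which is what splits the PUT and the TTL update across segments as the test name requires.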
Use of com.github.ambry.store.StoreFindToken in project ambry by LinkedIn.
The class IndexTest, method tokenRevalidationTest.
/**
* Tests the cases where tokens have to be revalidated on a call to
* {@link PersistentIndex#findEntriesSince(FindToken, long)}.
* @throws StoreException
*/
@Test
public void tokenRevalidationTest() throws StoreException {
  // this test is valid only when the log has > 1 log segment, i.e. when it can undergo compaction.
  if (isLogSegmented) {
    IndexSegment segmentOfToken = state.index.getIndexSegments().floorEntry(state.logOrder.lastKey()).getValue();
    StoreFindToken absoluteEndToken =
        new StoreFindToken(state.logOrder.lastKey(), state.sessionId, state.incarnationId, false,
            segmentOfToken.getResetKey(), segmentOfToken.getResetKeyType(), segmentOfToken.getResetKeyLifeVersion());
    absoluteEndToken.setBytesRead(state.index.getLogUsedCapacity());
    Offset firstIndexSegmentStartOffset = state.referenceIndex.firstKey();
    assertTrue("The first index segment must have an offset > 0", firstIndexSegmentStartOffset.getOffset() > 0);
    // generate an offset that does not exist.
    LogSegmentName newName = firstIndexSegmentStartOffset.getName().getNextGenerationName();
    long newOffset = firstIndexSegmentStartOffset.getOffset() + 1;
    // generate an offset that is below the index start offset.
    long offsetBeforeStart = firstIndexSegmentStartOffset.getOffset() - 1;
    Offset[] invalidOffsets =
        {new Offset(newName, newOffset), new Offset(firstIndexSegmentStartOffset.getName(), offsetBeforeStart)};
    MockId firstIdInFirstIndexSegment = state.referenceIndex.firstEntry().getValue().firstKey();
    for (Offset invalidOffset : invalidOffsets) {
      // generate an index-based token from invalidOffset
      StoreFindToken startToken =
          new StoreFindToken(firstIdInFirstIndexSegment, invalidOffset, state.sessionId, state.incarnationId, null,
              null, UNINITIALIZED_RESET_KEY_VERSION);
      doFindEntriesSinceTest(startToken, Long.MAX_VALUE, state.allKeys.keySet(), absoluteEndToken);
      // generate a journal-based token from invalidOffset (not in the journal and invalid)
      startToken = new StoreFindToken(invalidOffset, state.sessionId, state.incarnationId, false, null, null,
          UNINITIALIZED_RESET_KEY_VERSION);
      doFindEntriesSinceTest(startToken, Long.MAX_VALUE, state.allKeys.keySet(), absoluteEndToken);
    }
  }
}
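Taken together, the calls in these two tests show the two constructor shapes of StoreFindToken. The sketch below reuses the test's variables; the distinction drawn in the comments is inferred from how the tests use the tokens, not from separate documentation:

  // journal-based: the offset-first constructor anchors the token at a log offset
  StoreFindToken journalToken = new StoreFindToken(state.logOrder.lastKey(), state.sessionId, state.incarnationId,
      false, null, null, UNINITIALIZED_RESET_KEY_VERSION);
  // index-based: the key-plus-offset constructor anchors the token at a key inside the index segment
  // that starts at the given offset
  StoreFindToken indexToken = new StoreFindToken(firstIdInFirstIndexSegment, firstIndexSegmentStartOffset,
      state.sessionId, state.incarnationId, null, null, UNINITIALIZED_RESET_KEY_VERSION);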