Use of com.github.ambry.clustermap.MockReplicaId in project ambry by LinkedIn.
The class BlobStoreCompactorTest, method compactDeleteTombstoneTwiceTest.
/**
 * Tests compacting delete tombstones with both an invalid token and journal-based tokens.
 * @throws Exception
 */
@Test
public void compactDeleteTombstoneTwiceTest() throws Exception {
  assumeTrue(purgeDeleteTombstone);
  refreshState(false, true, false);
  List<LogSegmentName> segmentsUnderCompaction = getLogSegments(0, 2);
  CompactionDetails details = new CompactionDetails(state.time.milliseconds(), segmentsUnderCompaction, null);
  List<MockReplicaId> localAndPeerReplicas = generateLocalAndPeerReplicas();
  RemoteTokenTracker tokenTracker = new RemoteTokenTracker(localAndPeerReplicas.get(0));
  MockId tombstone1 = state.permanentDeleteTombstones.get(0);
  MockId tombstone2 = state.permanentDeleteTombstones.get(1);
  IndexValue deleteIndexValue2 = state.index.findKey(tombstone2);
  IndexSegment indexSegment2 = state.index.getIndexSegments().floorEntry(deleteIndexValue2.getOffset()).getValue();
  MockId keyInToken = (MockId) indexSegment2.iterator().next().getKey();
  // pick an incarnation id that is guaranteed to differ from the store's own
  UUID invalidIncarnationId;
  do {
    invalidIncarnationId = UUID.randomUUID();
  } while (invalidIncarnationId.equals(state.incarnationId));
  // invalid token carrying the other incarnation id
  StoreFindToken invalidToken =
      new StoreFindToken(keyInToken, indexSegment2.getStartOffset(), state.sessionId, invalidIncarnationId,
          indexSegment2.getResetKey(), indexSegment2.getResetKeyType(), indexSegment2.getResetKeyLifeVersion());
  StoreFindToken peerToken2 =
      new StoreFindToken(keyInToken, indexSegment2.getStartOffset(), state.sessionId, state.incarnationId,
          indexSegment2.getResetKey(), indexSegment2.getResetKeyType(), indexSegment2.getResetKeyLifeVersion());
  MockReplicaId peerReplica1 = localAndPeerReplicas.get(1);
  MockReplicaId peerReplica2 = localAndPeerReplicas.get(2);
  tokenTracker.updateTokenFromPeerReplica(invalidToken, peerReplica1.getDataNodeId().getHostname(),
      peerReplica1.getReplicaPath());
  tokenTracker.updateTokenFromPeerReplica(peerToken2, peerReplica2.getDataNodeId().getHostname(),
      peerReplica2.getReplicaPath());
  // initiate compaction
  compactor = getCompactor(state.log, DISK_IO_SCHEDULER, tokenTracker, false);
  compactor.initialize(state.index);
  try {
    compactor.compact(details, bundleReadBuffer);
  } finally {
    compactor.close(0);
  }
  // both tombstones should still exist because one peer token is invalid
  assertNotNull("Delete tombstone should be present", state.index.findKey(tombstone1));
  assertNotNull("Delete tombstone should be present", state.index.findKey(tombstone2));
  // update the remote token tracker with journal-based tokens and compact again
  IndexSegment lastIndexSegment = state.index.getIndexSegments().lastEntry().getValue();
  peerToken2 = new StoreFindToken(lastIndexSegment.getStartOffset(), state.sessionId, state.incarnationId, true,
      lastIndexSegment.getResetKey(), lastIndexSegment.getResetKeyType(), lastIndexSegment.getResetKeyLifeVersion());
  tokenTracker.updateTokenFromPeerReplica(peerToken2, peerReplica1.getDataNodeId().getHostname(),
      peerReplica1.getReplicaPath());
  tokenTracker.updateTokenFromPeerReplica(peerToken2, peerReplica2.getDataNodeId().getHostname(),
      peerReplica2.getReplicaPath());
  // initiate compaction again
  segmentsUnderCompaction = getLogSegments(0, 2);
  details = new CompactionDetails(state.time.milliseconds(), segmentsUnderCompaction, null);
  compactor = getCompactor(state.log, DISK_IO_SCHEDULER, tokenTracker, false);
  compactor.initialize(state.index);
  try {
    compactor.compact(details, bundleReadBuffer);
  } finally {
    compactor.close(0);
  }
  // both tombstones should be compacted now that every peer token is journal-based
  assertNull("Delete tombstone should be compacted", state.index.findKey(tombstone1));
  assertNull("Delete tombstone should be compacted", state.index.findKey(tombstone2));
}
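The scenario above hinges on a validity rule: the compactor may purge a permanent delete tombstone only when every peer token vouches for it, meaning the token was minted under this store's incarnation id and has advanced past the tombstone (a journal-based token implies the peer has already replicated everything in the segments under compaction). A minimal sketch of that rule follows; isTombstonePurgeable is a hypothetical helper, not Ambry's actual implementation, and it assumes StoreFindToken exposes its incarnation id, type, and offset.

// Hypothetical helper sketching the rule the test verifies; not Ambry's code.
private boolean isTombstonePurgeable(Offset tombstoneOffset, UUID localIncarnationId,
    Collection<StoreFindToken> peerTokens) {
  for (StoreFindToken token : peerTokens) {
    // a token minted under a different incarnation id cannot be trusted,
    // so the tombstone must be retained (the first compaction above)
    if (!localIncarnationId.equals(token.getIncarnationId())) {
      return false;
    }
    // a journal-based token is already past any segment under compaction;
    // an index-based token must actually be positioned past the tombstone
    if (token.getType() != FindTokenType.JournalBased && token.getOffset().compareTo(tombstoneOffset) <= 0) {
      return false;
    }
  }
  return true;
}

Under this rule the first compaction keeps both tombstones, since one peer's token fails the incarnation check, and the second compaction purges both, since every peer token is journal-based.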
Use of com.github.ambry.clustermap.MockReplicaId in project ambry by LinkedIn.
The class BlobStoreCompactorTest, method deleteTombstoneCleanupTest.
/**
 * Tests that a permanent delete tombstone can be compacted once all peer tokens are past its position.
 * @throws Exception
 */
@Test
public void deleteTombstoneCleanupTest() throws Exception {
  assumeTrue(purgeDeleteTombstone);
  refreshState(false, true, false);
  List<LogSegmentName> segmentsUnderCompaction = getLogSegments(0, 2);
  CompactionDetails details = new CompactionDetails(state.time.milliseconds(), segmentsUnderCompaction, null);
  List<MockReplicaId> localAndPeerReplicas = generateLocalAndPeerReplicas();
  RemoteTokenTracker tokenTracker = new RemoteTokenTracker(localAndPeerReplicas.get(0));
  // Generate tokens for the peer replicas such that both are past the position of the 1st delete tombstone
  // but neither has reached the position of the 2nd tombstone.
  MockId tombstone1 = state.permanentDeleteTombstones.get(0);
  MockId tombstone2 = state.permanentDeleteTombstones.get(1);
  IndexValue deleteIndexValue1 = state.index.findKey(tombstone1);
  IndexValue deleteIndexValue2 = state.index.findKey(tombstone2);
  IndexSegment indexSegment1 = state.index.getIndexSegments().floorEntry(deleteIndexValue1.getOffset()).getValue();
  IndexSegment indexSegment2 = state.index.getIndexSegments().floorEntry(deleteIndexValue2.getOffset()).getValue();
  // find one key that is past tombstone1 and another key that falls between tombstone1 and tombstone2
  IndexSegment segmentBehindSegment1 =
      state.index.getIndexSegments().higherEntry(indexSegment1.getStartOffset()).getValue();
  MockId keyInToken1 = (MockId) segmentBehindSegment1.iterator().next().getKey();
  MockId keyInToken2 = (MockId) indexSegment2.iterator().next().getKey();
  StoreFindToken peerToken1 =
      new StoreFindToken(keyInToken1, segmentBehindSegment1.getStartOffset(), state.sessionId, state.incarnationId,
          segmentBehindSegment1.getResetKey(), segmentBehindSegment1.getResetKeyType(),
          segmentBehindSegment1.getResetKeyLifeVersion());
  StoreFindToken peerToken2 =
      new StoreFindToken(keyInToken2, indexSegment2.getStartOffset(), state.sessionId, state.incarnationId,
          indexSegment2.getResetKey(), indexSegment2.getResetKeyType(), indexSegment2.getResetKeyLifeVersion());
  // update the token associated with each peer replica
  MockReplicaId peerReplica1 = localAndPeerReplicas.get(1);
  MockReplicaId peerReplica2 = localAndPeerReplicas.get(2);
  tokenTracker.updateTokenFromPeerReplica(peerToken1, peerReplica1.getDataNodeId().getHostname(),
      peerReplica1.getReplicaPath());
  tokenTracker.updateTokenFromPeerReplica(peerToken2, peerReplica2.getDataNodeId().getHostname(),
      peerReplica2.getReplicaPath());
  // initiate compaction
  compactor = getCompactor(state.log, DISK_IO_SCHEDULER, tokenTracker, false);
  compactor.initialize(state.index);
  try {
    compactor.compact(details, bundleReadBuffer);
  } finally {
    compactor.close(0);
  }
  // the first delete tombstone should be compacted
  assertNull("Delete tombstone should be compacted", state.index.findKey(tombstone1));
  // the second delete tombstone should survive
  assertNotNull("Delete tombstone should be present as at least one token hasn't reached its position",
      state.index.findKey(tombstone2));
}
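Seen positionally, the check reduces to a safe point: the smallest offset across all valid, index-based peer tokens. tombstone1 lies below that safe point and is purged, while tombstone2 does not, because peerToken2 still points at the start of tombstone2's own index segment. A sketch of that comparison, assuming StoreFindToken exposes its offset and that Offset is comparable; the safe-point framing is illustrative rather than Ambry's literal implementation.

// Illustrative fragment, not Ambry's code: the smallest peer-token offset
// acts as a safe point; only entries strictly below it may be purged.
Offset safePoint = Stream.of(peerToken1, peerToken2)
    .map(StoreFindToken::getOffset)
    .min(Offset::compareTo)
    .get();
boolean tombstone1Purgeable = deleteIndexValue1.getOffset().compareTo(safePoint) < 0; // true: compacted
boolean tombstone2Purgeable = deleteIndexValue2.getOffset().compareTo(safePoint) < 0; // false: retained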