use of com.github.ambry.store.IndexSegment in project ambry by linkedin.
the class MockIndexSegment method doComprehensiveTest.
/**
* Comprehensive tests for {@link IndexSegment}.
* 1. Creates a segment and checks the getters to make sure they return the right values
* 2. Adds some put entries with random sizes, checks getters again and exercises {@link IndexSegment#find(StoreKey)}
* and {@link IndexSegment#getEntriesSince(StoreKey, FindEntriesCondition, List, AtomicLong, boolean)}.
* 3. Adds some TTL update and delete entries (for some existing put entries and for puts not in this segment)
* and does the same checks as #2.
* 4. Writes index to a file and loads it sealed and not sealed and does all the checks in #2 once again along with
* checking that journal entries are populated correctly.
* @throws IOException
* @throws StoreException
*/
private void doComprehensiveTest(short version, boolean includeSmallKeys, boolean includeLargeKeys) throws IOException, StoreException {
LogSegmentName[] logSegmentNames = { LogSegmentName.generateFirstSegmentName(false), StoreTestUtils.getRandomLogSegmentName(null) };
int valueSize;
switch(version) {
case PersistentIndex.VERSION_0:
valueSize = IndexValue.INDEX_VALUE_SIZE_IN_BYTES_V0;
break;
case PersistentIndex.VERSION_1:
case PersistentIndex.VERSION_2:
valueSize = IndexValue.INDEX_VALUE_SIZE_IN_BYTES_V1_V2;
break;
case PersistentIndex.VERSION_3:
case PersistentIndex.VERSION_4:
valueSize = IndexValue.INDEX_VALUE_SIZE_IN_BYTES_V3_V4;
break;
default:
fail("Unknown PersistentIndex formatVersion");
valueSize = -1;
}
for (LogSegmentName logSegmentName : logSegmentNames) {
long writeStartOffset = Utils.getRandomLong(TestUtils.RANDOM, 1000);
Offset startOffset = new Offset(logSegmentName, writeStartOffset);
NavigableMap<MockId, NavigableSet<IndexValue>> referenceIndex = new TreeMap<>();
// advance time so that, from VERSION_1 onwards, different index segments get different last modified times
time.sleep(10 * Time.MsPerSec);
PersistentIndex.cleanupIndexSegmentFilesForLogSegment(tempDir.getAbsolutePath(), logSegmentName);
IndexSegment indexSegment = generateIndexSegment(startOffset, STORE_KEY_FACTORY);
StoreKey resetKey = null;
PersistentIndex.IndexEntryType resetKeyType = null;
short resetKeyLifeVersion = StoreFindToken.UNINITIALIZED_RESET_KEY_VERSION;
verifyIndexSegmentDetails(indexSegment, startOffset, 0, 0, false, startOffset.getOffset(), time.milliseconds(), null, null, resetKeyLifeVersion);
int numItems = 10;
int numSmallKeys = 0;
int numLargeKeys = 0;
List<Long> offsets = new ArrayList<>();
offsets.add(writeStartOffset);
for (int i = 0; i < numItems - 1; i++) {
// size has to be > 0 (no record is 0 sized)
long size = Utils.getRandomLong(TestUtils.RANDOM, 1000) + 1;
offsets.add(writeStartOffset + size);
writeStartOffset += size;
if (i % 3 == 1 && includeSmallKeys) {
numSmallKeys++;
}
if (i % 3 == 2 && includeLargeKeys) {
numLargeKeys++;
}
}
long lastEntrySize = Utils.getRandomLong(TestUtils.RANDOM, 1000) + 1;
long endOffset = offsets.get(offsets.size() - 1) + lastEntrySize;
List<IndexEntry> newEntries = addPutEntries(offsets, lastEntrySize, indexSegment, referenceIndex, includeSmallKeys, includeLargeKeys);
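// from VERSION_1 onwards a reset key is tracked; in this test it points at the first entry added to the segment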
if (version != PersistentIndex.VERSION_0) {
resetKey = newEntries.get(0).getKey();
resetKeyType = PersistentIndex.IndexEntryType.PUT;
resetKeyLifeVersion = newEntries.get(0).getValue().getLifeVersion();
}
int expectedSizeWritten = SMALLER_KEY_SIZE * numSmallKeys + LARGER_KEY_SIZE * numLargeKeys + KEY_SIZE * (numItems - numSmallKeys - numLargeKeys) + numItems * valueSize;
verifyAllForIndexSegmentFromFile(referenceIndex, indexSegment, startOffset, numItems, expectedSizeWritten, false, endOffset, time.milliseconds(), resetKey, resetKeyType, resetKeyLifeVersion);
int extraIdsToTtlUpdate = 5;
Set<MockId> idsToTtlUpdate = getIdsToTtlUpdate(referenceIndex, extraIdsToTtlUpdate);
addTtlUpdateEntries(idsToTtlUpdate, indexSegment, referenceIndex);
endOffset += idsToTtlUpdate.size() * TTL_UPDATE_FILE_SPAN_SIZE;
numItems += idsToTtlUpdate.size();
for (MockId id : idsToTtlUpdate) {
expectedSizeWritten += valueSize + id.sizeInBytes();
}
verifyAllForIndexSegmentFromFile(referenceIndex, indexSegment, startOffset, numItems, expectedSizeWritten, false, endOffset, time.milliseconds(), resetKey, resetKeyType, resetKeyLifeVersion);
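// persist the segment and verify that the on-disk copy can be read back and matches the reference index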
indexSegment.writeIndexSegmentToFile(indexSegment.getEndOffset());
verifyReadFromFile(referenceIndex, indexSegment.getFile(), startOffset, numItems, expectedSizeWritten, endOffset, time.milliseconds(), resetKey, resetKeyType, resetKeyLifeVersion);
int extraIdsToDelete = 5;
Set<MockId> idsToDelete = getIdsToDelete(referenceIndex, extraIdsToDelete);
addDeleteEntries(idsToDelete, indexSegment, referenceIndex);
endOffset += idsToDelete.size() * DELETE_FILE_SPAN_SIZE;
numItems += idsToDelete.size();
for (MockId id : idsToDelete) {
expectedSizeWritten += valueSize + id.sizeInBytes();
}
verifyAllForIndexSegmentFromFile(referenceIndex, indexSegment, startOffset, numItems, expectedSizeWritten, false, endOffset, time.milliseconds(), resetKey, resetKeyType, resetKeyLifeVersion);
indexSegment.writeIndexSegmentToFile(indexSegment.getEndOffset());
verifyReadFromFile(referenceIndex, indexSegment.getFile(), startOffset, numItems, expectedSizeWritten, endOffset, time.milliseconds(), resetKey, resetKeyType, resetKeyLifeVersion);
// exercise all remaining combinations of PUT, TTL update and DELETE (for IDs in this segment and for IDs not in it)
offsets = new ArrayList<>();
for (int i = 0; i < 3; i++) {
offsets.add(endOffset);
endOffset += Utils.getRandomLong(TestUtils.RANDOM, 1000) + 1;
numItems++;
expectedSizeWritten += valueSize + KEY_SIZE;
}
List<IndexEntry> puts = addPutEntries(offsets, endOffset - offsets.get(offsets.size() - 1), indexSegment, referenceIndex, false, false);
// the second entry has only a TTL update
addTtlUpdateEntries(Collections.singleton((MockId) puts.get(1).getKey()), indexSegment, referenceIndex);
// the third entry has a TTL update and a delete
addTtlUpdateEntries(Collections.singleton((MockId) puts.get(2).getKey()), indexSegment, referenceIndex);
addDeleteEntries(Collections.singleton((MockId) puts.get(2).getKey()), indexSegment, referenceIndex);
// an ID that is only TTL updated
addTtlUpdateEntries(Collections.singleton(generateIds(referenceIndex, 1).get(0)), indexSegment, referenceIndex);
// an ID that is TTL updated and deleted
MockId idToTtlUpdateAndDelete = generateIds(referenceIndex, 1).get(0);
addTtlUpdateEntries(Collections.singleton(idToTtlUpdateAndDelete), indexSegment, referenceIndex);
addDeleteEntries(Collections.singleton(idToTtlUpdateAndDelete), indexSegment, referenceIndex);
// an ID that is only deleted
addDeleteEntries(Collections.singleton(generateIds(referenceIndex, 1).get(0)), indexSegment, referenceIndex);
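// 4 TTL updates and 3 deletes were added above, so the end offset, item count and size written advance by 7 entries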
endOffset += 4 * TTL_UPDATE_FILE_SPAN_SIZE + 3 * DELETE_FILE_SPAN_SIZE;
numItems += 7;
expectedSizeWritten += 7 * (KEY_SIZE + valueSize);
verifyAllForIndexSegmentFromFile(referenceIndex, indexSegment, startOffset, numItems, expectedSizeWritten, false, endOffset, time.milliseconds(), resetKey, resetKeyType, resetKeyLifeVersion);
indexSegment.writeIndexSegmentToFile(indexSegment.getEndOffset());
verifyReadFromFile(referenceIndex, indexSegment.getFile(), startOffset, numItems, expectedSizeWritten, endOffset, time.milliseconds(), resetKey, resetKeyType, resetKeyLifeVersion);
// verify that flipping StoreConfig#storeIndexMemState does not break reading the segment from file
IndexMemState saved = config.storeIndexMemState;
for (IndexMemState state : IndexMemState.values()) {
if (!state.equals(saved)) {
setIndexMemState(state);
verifyReadFromFile(referenceIndex, indexSegment.getFile(), startOffset, numItems, expectedSizeWritten, endOffset, time.milliseconds(), resetKey, resetKeyType, resetKeyLifeVersion);
}
}
setIndexMemState(saved);
}
}
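The expectedSizeWritten bookkeeping above follows one rule regardless of entry type: each persisted index entry contributes its key length plus the fixed, format-version-dependent value size. A minimal sketch of that arithmetic (the helper name is hypothetical, not part of the test or of IndexSegment's API):
// Hypothetical helper illustrating the size accounting used in doComprehensiveTest:
// every entry written to the segment costs key bytes + a fixed value size.
private static long expectedBytesWritten(List<StoreKey> keys, int valueSize) {
  long total = 0;
  for (StoreKey key : keys) {
    total += key.sizeInBytes() + valueSize;
  }
  return total;
}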
use of com.github.ambry.store.IndexSegment in project ambry by linkedin.
the class MockIndexSegment method readFromFileFailureTest.
/**
* Tests cases where exceptions occur while reading the index segment from file.
* @throws IOException
* @throws StoreException
*/
@Test
public void readFromFileFailureTest() throws StoreException, IOException {
LogSegmentName logSegmentName = LogSegmentName.fromPositionAndGeneration(0, 0);
StoreKeyFactory mockStoreKeyFactory = Mockito.spy(STORE_KEY_FACTORY);
Journal journal = new Journal(tempDir.getAbsolutePath(), 3, 3);
IndexSegment indexSegment = generateIndexSegment(new Offset(logSegmentName, 0), mockStoreKeyFactory);
MockId id1 = new MockId("0" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
IndexValue value1 = IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 0), Utils.Infinite_Time, time.milliseconds(), Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), (short) 0, formatVersion);
indexSegment.addEntry(new IndexEntry(id1, value1), new Offset(logSegmentName, 1000));
indexSegment.writeIndexSegmentToFile(new Offset(logSegmentName, 1000));
// test that an IOException during key deserialization is captured and rethrown as a StoreException
doThrow(new IOException(StoreException.IO_ERROR_STR)).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
try {
new IndexSegment(indexSegment.getFile(), false, mockStoreKeyFactory, config, metrics, journal, time);
fail("should fail");
} catch (StoreException e) {
assertEquals("Mismatch in error code", StoreErrorCodes.Index_Creation_Failure, e.getErrorCode());
}
}
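What the assertion above implies is a wrap-and-rethrow of the factory's IOException into a StoreException carrying Index_Creation_Failure. A minimal sketch of that pattern, assuming a three-argument StoreException constructor; this is illustrative and not IndexSegment's actual loading code:
// Illustrative only: an IOException thrown while deserializing a key during load is
// surfaced as a StoreException with the error code the test above asserts.
private StoreKey readKeyOrFail(StoreKeyFactory factory, DataInputStream stream) throws StoreException {
  try {
    return factory.getStoreKey(stream);
  } catch (IOException e) {
    // assumed constructor: StoreException(String, Throwable, StoreErrorCodes)
    throw new StoreException("Failed reading key while loading index segment", e,
        StoreErrorCodes.Index_Creation_Failure);
  }
}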
use of com.github.ambry.store.IndexSegment in project ambry by linkedin.
the class MockIndexSegment method iteratorTest.
/**
* Test Iterator and ListIterator of index segment.
* @throws Exception
*/
@Test
public void iteratorTest() throws Exception {
LogSegmentName logSegmentName = LogSegmentName.fromPositionAndGeneration(0, 0);
MockId id1 = new MockId("0" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
MockId id2 = new MockId("1" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
MockId id3 = new MockId("2" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
MockId id4 = new MockId("3" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
short lifeVersion = (short) 0;
IndexValue value1 = IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 0), Utils.Infinite_Time, time.milliseconds(), accountId, containerId, lifeVersion, formatVersion);
IndexValue value2 = IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 1000), Utils.Infinite_Time, time.milliseconds(), accountId, containerId, lifeVersion, formatVersion);
IndexValue value3 = IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 2000), Utils.Infinite_Time, time.milliseconds(), accountId, containerId, lifeVersion, formatVersion);
time.sleep(TimeUnit.SECONDS.toMillis(1));
// generate a TTL Update
IndexValue ttlUpValue2 = IndexValueTest.getIndexValue(value2.getSize(), value2.getOffset(), Utils.Infinite_Time, time.milliseconds(), value2.getAccountId(), value2.getContainerId(), lifeVersion, formatVersion);
ttlUpValue2.setNewOffset(new Offset(logSegmentName, 3000));
ttlUpValue2.setNewSize(50);
ttlUpValue2.setFlag(IndexValue.Flags.Ttl_Update_Index);
time.sleep(TimeUnit.SECONDS.toMillis(1));
// generate a DELETE
IndexValue delValue2 = IndexValueTest.getIndexValue(value2.getSize(), value2.getOffset(), ttlUpValue2.getFlags(), value2.getExpiresAtMs(), value2.getOffset().getOffset(), time.milliseconds(), value2.getAccountId(), value2.getContainerId(), lifeVersion, formatVersion);
delValue2.setNewOffset(new Offset(logSegmentName, 3050));
delValue2.setNewSize(100);
delValue2.setFlag(IndexValue.Flags.Delete_Index);
// generate an UNDELETE
IndexValue undeleteValue1 = IndexValueTest.getIndexValue(value1.getSize(), value1.getOffset(), value1.getFlags(), value1.getExpiresAtMs(), value1.getOffset().getOffset(), time.milliseconds(), value1.getAccountId(), value1.getContainerId(), (short) (lifeVersion + 1), formatVersion);
undeleteValue1.setNewOffset(new Offset(logSegmentName, 3150));
undeleteValue1.setNewSize(100);
undeleteValue1.setFlag(IndexValue.Flags.Undelete_Index);
// generate a DELETE for value3
IndexValue delValue3 = IndexValueTest.getIndexValue(value3.getSize(), value3.getOffset(), value3.getFlags(), value3.getExpiresAtMs(), value3.getOffset().getOffset(), time.milliseconds(), value3.getAccountId(), value3.getContainerId(), lifeVersion, formatVersion);
delValue3.setNewOffset(new Offset(logSegmentName, 3250));
delValue3.setNewSize(100);
delValue3.setFlag(IndexValue.Flags.Delete_Index);
// generate an UNDELETE for delValue3; this will be added to the index segment while iterating through the index entries.
IndexValue undeleteValue3 = IndexValueTest.getIndexValue(value3.getSize(), value3.getOffset(), value3.getFlags(), value3.getExpiresAtMs(), value1.getOffset().getOffset(), time.milliseconds(), value3.getAccountId(), value3.getContainerId(), (short) (lifeVersion + 1), formatVersion);
undeleteValue3.setNewOffset(new Offset(logSegmentName, 3350));
undeleteValue3.setNewSize(100);
undeleteValue3.setFlag(IndexValue.Flags.Undelete_Index);
IndexSegment indexSegment = generateIndexSegment(new Offset(logSegmentName, 0), STORE_KEY_FACTORY);
// inserting in the opposite order by design to ensure that writes are based on offset ordering and not key ordering
indexSegment.addEntry(new IndexEntry(id3, value1), new Offset(logSegmentName, 1000));
indexSegment.addEntry(new IndexEntry(id2, value2), new Offset(logSegmentName, 2000));
indexSegment.addEntry(new IndexEntry(id1, value3), new Offset(logSegmentName, 3000));
indexSegment.addEntry(new IndexEntry(id2, ttlUpValue2), new Offset(logSegmentName, 3050));
indexSegment.addEntry(new IndexEntry(id2, delValue2), new Offset(logSegmentName, 3150));
indexSegment.addEntry(new IndexEntry(id3, undeleteValue1), new Offset(logSegmentName, 3250));
indexSegment.addEntry(new IndexEntry(id1, delValue3), new Offset(logSegmentName, 3350));
// This is an unsealed index segment, so entries can still be added.
List<IndexEntry> expectedEntries = new ArrayList<>();
expectedEntries.add(new IndexEntry(id1, value3));
expectedEntries.add(new IndexEntry(id1, delValue3));
expectedEntries.add(new IndexEntry(id2, value2));
expectedEntries.add(new IndexEntry(id2, ttlUpValue2));
expectedEntries.add(new IndexEntry(id2, delValue2));
expectedEntries.add(new IndexEntry(id3, value1));
expectedEntries.add(new IndexEntry(id3, undeleteValue1));
// Conduct two rounds of tests; the first round changes the obtained index entries and adds a new entry.
for (int i = 0; i < 2; i++) {
if (i == 1) {
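// the UNDELETE for id1 added during round 0 now appears after id1's PUT and DELETE (values are ordered by offset within a key)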
expectedEntries.add(2, new IndexEntry(id1, undeleteValue3));
}
Iterator<IndexEntry> segmentIter = indexSegment.iterator();
Iterator<IndexEntry> listIter = expectedEntries.iterator();
boolean newEntryAdded = false;
while (listIter.hasNext()) {
assertTrue(segmentIter.hasNext());
IndexEntry expected = listIter.next();
IndexEntry obtained = segmentIter.next();
assertIndexEntryEquals(expected, obtained);
// First round, change the obtained index entries and add a new entry.
if (i == 0) {
// Modify the obtained index entry and make sure it doesn't affect the index segment.
obtained.getValue().setNewSize(0);
if (!newEntryAdded) {
// Add a new entry to the index segment (it is not sealed yet) and make sure it doesn't affect the iterator in progress.
newEntryAdded = true;
indexSegment.addEntry(new IndexEntry(id1, undeleteValue3), new Offset(logSegmentName, 3450));
}
}
}
assertFalse(segmentIter.hasNext());
}
IndexValue value4 = IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 3450), Utils.Infinite_Time, time.milliseconds(), accountId, containerId, lifeVersion, formatVersion);
// Now test list iterator
for (int i = 0; i < 2; i++) {
if (i == 1) {
expectedEntries.add(new IndexEntry(id4, value4));
}
assertEquals(expectedEntries.size(), indexSegment.size());
ListIterator<IndexEntry> segmentListIter = indexSegment.listIterator(indexSegment.size());
ListIterator<IndexEntry> listListIter = expectedEntries.listIterator(expectedEntries.size());
boolean newEntryAdded = false;
while (listListIter.hasPrevious()) {
assertTrue(segmentListIter.hasPrevious());
IndexEntry expected = listListIter.previous();
IndexEntry obtained = segmentListIter.previous();
assertIndexEntryEquals(expected, obtained);
if (i == 0 && !newEntryAdded) {
newEntryAdded = true;
indexSegment.addEntry(new IndexEntry(id4, value4), new Offset(logSegmentName, 4450));
}
}
assertFalse(segmentListIter.hasPrevious());
}
indexSegment.writeIndexSegmentToFile(new Offset(logSegmentName, 4450));
indexSegment.seal();
// Now that it's sealed, no more entries can be added.
assertEquals(expectedEntries.size(), indexSegment.size());
Iterator<IndexEntry> segmentIter = indexSegment.iterator();
Iterator<IndexEntry> listIter = expectedEntries.iterator();
while (listIter.hasNext()) {
assertTrue(segmentIter.hasNext());
IndexEntry expected = listIter.next();
IndexEntry obtained = segmentIter.next();
assertIndexEntryEquals(expected, obtained);
}
ListIterator<IndexEntry> segmentListIter = indexSegment.listIterator(indexSegment.size());
ListIterator<IndexEntry> listListIter = expectedEntries.listIterator(expectedEntries.size());
while (listListIter.hasPrevious()) {
assertTrue(segmentListIter.hasPrevious());
IndexEntry expected = listListIter.previous();
IndexEntry obtained = segmentListIter.previous();
assertIndexEntryEquals(expected, obtained);
}
if (formatVersion >= PersistentIndex.VERSION_1) {
verifyResetKeyInfo(id3, value1, indexSegment.getResetKey(), indexSegment.getResetKeyType(), indexSegment.getResetKeyLifeVersion());
}
}
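The behaviour the loops above rely on, namely that addEntry calls and mutations of returned values during iteration do not disturb the iterator of an unsealed segment, is what a snapshot-style iterator provides. A minimal sketch under that assumption (not IndexSegment's actual implementation; it also assumes IndexValue has a copy constructor, otherwise duplicating its backing bytes works the same way):
// Hypothetical snapshot iterator: copy the current entries so that later addEntry()
// calls and setNewSize()/setFlag() on the handed-out values cannot affect the iteration
// or the segment's own state.
private Iterator<IndexEntry> snapshotIterator(NavigableMap<StoreKey, NavigableSet<IndexValue>> index) {
  List<IndexEntry> snapshot = new ArrayList<>();
  for (Map.Entry<StoreKey, NavigableSet<IndexValue>> entry : index.entrySet()) {
    for (IndexValue value : entry.getValue()) {
      snapshot.add(new IndexEntry(entry.getKey(), new IndexValue(value)));
    }
  }
  return snapshot.iterator();
}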
use of com.github.ambry.store.IndexSegment in project ambry by linkedin.
the class MockIndexSegment method populateBloomFilterWithUuidTest.
/**
* Tests populating the bloom filter with the whole blob id and with the UUID only, respectively.
* @throws Exception
*/
@Test
public void populateBloomFilterWithUuidTest() throws Exception {
assumeTrue(formatVersion > PersistentIndex.VERSION_1);
// with the default config, the bloom filter is populated with the whole blob id's byte array
LogSegmentName logSegmentName1 = LogSegmentName.fromPositionAndGeneration(0, 0);
int indexValueSize = PersistentIndex.CURRENT_VERSION >= PersistentIndex.VERSION_3 ? IndexValue.INDEX_VALUE_SIZE_IN_BYTES_V3_V4 : IndexValue.INDEX_VALUE_SIZE_IN_BYTES_V1_V2;
IndexSegment indexSegment1 = new IndexSegment(tempDir.getAbsolutePath(), new Offset(logSegmentName1, 0), STORE_KEY_FACTORY, KEY_SIZE + indexValueSize, indexValueSize, config, metrics, time);
Random random = new Random();
short accountId1 = getRandomShort(random);
short containerId1 = getRandomShort(random);
short accountId2, containerId2;
do {
accountId2 = getRandomShort(random);
} while (accountId2 == accountId1);
do {
containerId2 = getRandomShort(random);
} while (containerId2 == containerId1);
String idStr = TestUtils.getRandomString(CUSTOM_ID_SIZE);
// generate two ids with same id string but different account/container.
MockId id1 = new MockId(idStr, accountId1, containerId1);
MockId id2 = new MockId(idStr, accountId2, containerId2);
IndexValue value1 = IndexValueTest.getIndexValue(1000, new Offset(logSegmentName1, 0), Utils.Infinite_Time, time.milliseconds(), accountId1, containerId1, (short) 0, formatVersion);
indexSegment1.addEntry(new IndexEntry(id1, value1), new Offset(logSegmentName1, 1000));
// deliberately add one more entry for later bloom rebuilding test
indexSegment1.addEntry(new IndexEntry(id2, value1), new Offset(logSegmentName1, 2000));
indexSegment1.writeIndexSegmentToFile(new Offset(logSegmentName1, 2000));
indexSegment1.seal();
// test that id1 can be found in the index segment but id2 cannot, because the bloom filter (built from whole blob ids) considers it absent
Set<IndexValue> findResult = indexSegment1.find(id1);
assertNotNull("Should have found the added key", findResult);
assertEquals(accountId1, findResult.iterator().next().getAccountId());
assertNull("Should have failed to find non existent key", indexSegment1.find(id2));
// create second index segment whose bloom filter is populated with UUID only.
// We add id1 into indexSegment2 and then attempt to find id2, which should succeed.
Properties properties = new Properties();
properties.setProperty("store.uuid.based.bloom.filter.enabled", Boolean.toString(true));
StoreConfig storeConfig = new StoreConfig(new VerifiableProperties(properties));
LogSegmentName logSegmentName2 = LogSegmentName.fromPositionAndGeneration(1, 0);
IndexSegment indexSegment2 = new IndexSegment(tempDir.getAbsolutePath(), new Offset(logSegmentName2, 0), STORE_KEY_FACTORY, KEY_SIZE + indexValueSize, indexValueSize, storeConfig, metrics, time);
indexSegment2.addEntry(new IndexEntry(id1, value1), new Offset(logSegmentName2, 1000));
indexSegment2.writeIndexSegmentToFile(new Offset(logSegmentName2, 1000));
indexSegment2.seal();
findResult = indexSegment2.find(id1);
assertNotNull("Should have found the id1", findResult);
// test that we are able to get id2 from indexSegment2
findResult = indexSegment2.find(id2);
assertNotNull("Should have found the id2", findResult);
// verify that the found entry actually has account/container associated with id1
IndexValue indexValue = findResult.iterator().next();
assertTrue("Account or container is not expected", indexValue.getAccountId() == accountId1 && indexValue.getContainerId() == containerId1);
File bloomFile1 = new File(tempDir, generateIndexSegmentFilenamePrefix(new Offset(logSegmentName1, 0)) + BLOOM_FILE_NAME_SUFFIX);
assertTrue("The bloom file should exist", bloomFile1.exists());
long lengthBeforeRebuild = bloomFile1.length();
// rebuild bloom filter in indexSegment1
properties.setProperty("store.index.rebuild.bloom.filter.enabled", Boolean.toString(true));
storeConfig = new StoreConfig(new VerifiableProperties(properties));
IndexSegment indexSegmentFromFile = new IndexSegment(indexSegment1.getFile(), true, STORE_KEY_FACTORY, storeConfig, metrics, null, time);
// test that id2 is found in reconstructed index segment (with bloom filter rebuilt based on UUID only)
findResult = indexSegmentFromFile.find(id2);
assertNotNull("Should have found the id2", findResult);
indexValue = findResult.iterator().next();
assertTrue("Account or container is not expected", indexValue.getAccountId() == accountId1 && indexValue.getContainerId() == containerId1);
// verify that bloom file length didn't change
assertEquals("Bloom file length has changed", lengthBeforeRebuild, bloomFile1.length());
// now, change value of storeIndexMaxNumberOfInmemElements to 1
properties.setProperty("store.index.max.number.of.inmem.elements", Integer.toString(1));
storeConfig = new StoreConfig(new VerifiableProperties(properties));
// rebuild bloomFile1 again; this time the file size should change
new IndexSegment(indexSegment1.getFile(), true, STORE_KEY_FACTORY, storeConfig, metrics, null, time);
assertTrue("Bloom file length has changed", lengthBeforeRebuild > bloomFile1.length());
// additional test: exception occurs when deleting previous bloom file
assertTrue("Could not make unwritable", tempDir.setWritable(false));
try {
new IndexSegment(indexSegment1.getFile(), true, STORE_KEY_FACTORY, storeConfig, metrics, null, time);
fail("Deletion on unwritable file should fail");
} catch (StoreException e) {
assertEquals("Error code is not expected.", StoreErrorCodes.Index_Creation_Failure, e.getErrorCode());
} finally {
assertTrue("Could not make writable", tempDir.setWritable(true));
}
}
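The reason id2 becomes findable once store.uuid.based.bloom.filter.enabled is set is that only the id portion shared by id1 and id2 is fed to the bloom filter, whereas whole-id population also hashes the differing account/container bytes. A toy sketch of that decision (it assumes StoreKey#getID() and #toBytes() and is not Ambry's actual bloom-filter population code):
// Toy illustration of which bytes feed the bloom filter. With UUID-based population,
// id1 and id2 (same idStr, different account/container) hash identically, so a lookup
// for id2 passes the filter and the subsequent search returns id1's entry.
private static byte[] bloomFilterInput(MockId key, boolean uuidBasedPopulation) {
  return uuidBasedPopulation
      ? key.getID().getBytes(StandardCharsets.UTF_8) // id string only, shared by id1 and id2
      : key.toBytes();                               // full serialized key, differs per account/container
}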
use of com.github.ambry.store.IndexSegment in project ambry by linkedin.
the class MockIndexSegment method dataIntegrityCheckTest.
/**
* Tests the data corruption case when loading an index file into memory.
* @throws Exception
*/
@Test
public void dataIntegrityCheckTest() throws Exception {
long writeStartOffset = 0L;
LogSegmentName logSegmentName = StoreTestUtils.getRandomLogSegmentName(null);
Offset startOffset = new Offset(logSegmentName, writeStartOffset);
IndexSegment indexSegment = generateIndexSegment(startOffset, STORE_KEY_FACTORY);
List<Long> offsets = new ArrayList<>();
offsets.add(writeStartOffset);
for (int i = 0; i < 4; i++) {
// size has to be > 0 (no record is 0 sized)
long size = Utils.getRandomLong(TestUtils.RANDOM, 1000) + 1;
offsets.add(writeStartOffset + size);
writeStartOffset += size;
}
long lastEntrySize = Utils.getRandomLong(TestUtils.RANDOM, 1000) + 1;
long endOffset = offsets.get(offsets.size() - 1) + lastEntrySize;
NavigableMap<MockId, NavigableSet<IndexValue>> referenceIndex = new TreeMap<>();
addPutEntries(offsets, lastEntrySize, indexSegment, referenceIndex, false, false);
// write to file
indexSegment.writeIndexSegmentToFile(indexSegment.getEndOffset());
// verify read from file
Journal journal = new Journal(tempDir.getAbsolutePath(), Integer.MAX_VALUE, Integer.MAX_VALUE);
IndexSegment fromDisk = createIndexSegmentFromFile(indexSegment.getFile(), true, journal);
assertEquals("End offset doesn't match", endOffset, fromDisk.getEndOffset().getOffset());
// read the index file into a byte buffer and corrupt one of its bytes
File indexFile = indexSegment.getFile();
ByteBuffer buf = ByteBuffer.allocate((int) indexFile.length());
try (RandomAccessFile raf = new RandomAccessFile(indexFile, "r")) {
raf.getChannel().read(buf);
}
// we use a heap buffer here, so buf.array() will not be null
byte[] bytes = buf.array();
// corrupt one of the bytes (avoid the last 8 bytes since they hold the CRC value)
int corruptIndex = new Random().nextInt(bytes.length - 8);
bytes[corruptIndex] ^= 0xff;
// write corrupted bytes to file
File temp = new File(indexFile.getAbsolutePath() + ".tmp");
try (FileOutputStream stream = new FileOutputStream(temp)) {
stream.write(bytes);
stream.getChannel().force(true);
temp.renameTo(indexFile);
}
try {
createIndexSegmentFromFile(indexFile, true, journal);
fail("Should fail as index file is corrupted");
} catch (StoreException e) {
assertEquals("Mismatch in error code", StoreErrorCodes.Index_Creation_Failure, e.getErrorCode());
}
}
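The corruption is caught because the index file ends with an 8-byte CRC of everything before it, which is why the test avoids flipping any of the last 8 bytes. A minimal sketch of such a check (illustrative; not IndexSegment's actual loader):
// Recompute a CRC32 over all bytes except the trailing 8 and compare it with the long
// stored at the end of the file; any flipped byte earlier in the file breaks the match.
private static boolean indexFileCrcMatches(File indexFile) throws IOException {
  byte[] bytes = Files.readAllBytes(indexFile.toPath());
  CRC32 crc = new CRC32();
  crc.update(bytes, 0, bytes.length - 8);
  long storedCrc = ByteBuffer.wrap(bytes, bytes.length - 8, 8).getLong();
  return crc.getValue() == storedCrc;
}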