Use of com.github.ambry.store.IndexSegment in project ambry by LinkedIn.
The class MockIndexSegment, method getIndexEntriesCornerCasesTest.
/**
 * Tests some corner cases with
 * {@link IndexSegment#getIndexEntriesSince(StoreKey, FindEntriesCondition, List, AtomicLong, boolean, boolean)}
 * - tests that all values of a key are returned even if the find entries condition's max size is exhausted when the
 * first value is loaded
 * @throws IOException
 * @throws StoreException
 */
@Test
public void getIndexEntriesCornerCasesTest() throws IOException, StoreException {
  LogSegmentName logSegmentName = LogSegmentName.fromPositionAndGeneration(0, 0);
  MockId id1 = new MockId("0" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
  MockId id2 = new MockId("1" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
  MockId id3 = new MockId("2" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
  short accountId = Utils.getRandomShort(TestUtils.RANDOM);
  short containerId = Utils.getRandomShort(TestUtils.RANDOM);
  short lifeVersion = (short) 0;
  IndexValue value1 =
      IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 0), Utils.Infinite_Time, time.milliseconds(),
          accountId, containerId, lifeVersion, formatVersion);
  IndexValue value2 =
      IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 1000), Utils.Infinite_Time, time.milliseconds(),
          accountId, containerId, lifeVersion, formatVersion);
  IndexValue value3 =
      IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 2000), Utils.Infinite_Time, time.milliseconds(),
          accountId, containerId, lifeVersion, formatVersion);
  time.sleep(TimeUnit.SECONDS.toMillis(1));
  // generate a TTL Update
  IndexValue ttlUpValue2 =
      IndexValueTest.getIndexValue(value2.getSize(), value2.getOffset(), Utils.Infinite_Time, time.milliseconds(),
          value2.getAccountId(), value2.getContainerId(), lifeVersion, formatVersion);
  ttlUpValue2.setNewOffset(new Offset(logSegmentName, 3000));
  ttlUpValue2.setNewSize(50);
  ttlUpValue2.setFlag(IndexValue.Flags.Ttl_Update_Index);
  time.sleep(TimeUnit.SECONDS.toMillis(1));
  // generate a DELETE
  IndexValue delValue2 =
      IndexValueTest.getIndexValue(value2.getSize(), value2.getOffset(), ttlUpValue2.getFlags(),
          value2.getExpiresAtMs(), value2.getOffset().getOffset(), time.milliseconds(), value2.getAccountId(),
          value2.getContainerId(), lifeVersion, formatVersion);
  delValue2.setNewOffset(new Offset(logSegmentName, 3050));
  delValue2.setNewSize(100);
  delValue2.setFlag(IndexValue.Flags.Delete_Index);
  IndexSegment indexSegment = generateIndexSegment(new Offset(logSegmentName, 0), STORE_KEY_FACTORY);
  // inserting in the opposite order by design to ensure that writes are based on offset ordering and not key ordering
  indexSegment.addEntry(new IndexEntry(id3, value1), new Offset(logSegmentName, 1000));
  indexSegment.addEntry(new IndexEntry(id2, value2), new Offset(logSegmentName, 2000));
  indexSegment.addEntry(new IndexEntry(id1, value3), new Offset(logSegmentName, 3000));
  indexSegment.addEntry(new IndexEntry(id2, ttlUpValue2), new Offset(logSegmentName, 3050));
  indexSegment.addEntry(new IndexEntry(id2, delValue2), new Offset(logSegmentName, 3150));
  indexSegment.writeIndexSegmentToFile(new Offset(logSegmentName, 3150));
  indexSegment.seal();
  List<IndexEntry> entries = new ArrayList<>();
  for (boolean sealed : new boolean[] { false, true }) {
    Journal journal = new Journal(tempDir.getAbsolutePath(), 3, 3);
    IndexSegment fromDisk =
        new IndexSegment(indexSegment.getFile(), sealed, STORE_KEY_FACTORY, config, metrics, journal, time);
    // getIndexEntriesSince with maxSize = 0 should not return anything
    FindEntriesCondition condition = new FindEntriesCondition(0);
    assertFalse("getIndexEntriesSince() should not return anything",
        fromDisk.getIndexEntriesSince(null, condition, entries, new AtomicLong(0), false, false));
    assertEquals("There should be no entries returned", 0, entries.size());
    // getIndexEntriesSince with maxSize <= 1000 should return only the first key (id1)
    condition = new FindEntriesCondition(1000);
    assertTrue("getIndexEntriesSince() should return one entry",
        fromDisk.getIndexEntriesSince(null, condition, entries, new AtomicLong(0), false, false));
    assertEquals("There should be one entry returned", 1, entries.size());
    assertEquals("Key in entry is incorrect", id1, entries.get(0).getKey());
    assertEquals("Value in entry is incorrect", value3.getBytes(), entries.get(0).getValue().getBytes());
    entries.clear();
    // getIndexEntriesSince with maxSize > 1000 and <= 2150 should return four entries
    for (int maxSize : new int[] { 1001, 2050, 2150 }) {
      condition = new FindEntriesCondition(maxSize);
      assertTrue("getIndexEntriesSince() should return entries",
          fromDisk.getIndexEntriesSince(null, condition, entries, new AtomicLong(0), false, false));
      assertEquals("There should be four entries returned", 4, entries.size());
      assertEquals("Key in entry is incorrect", id1, entries.get(0).getKey());
      assertEquals("Value in entry is incorrect", value3.getBytes(), entries.get(0).getValue().getBytes());
      assertEquals("Key in entry is incorrect", id2, entries.get(1).getKey());
      assertEquals("Value in entry is incorrect", value2.getBytes(), entries.get(1).getValue().getBytes());
      assertEquals("Key in entry is incorrect", id2, entries.get(2).getKey());
      assertEquals("Value in entry is incorrect", ttlUpValue2.getBytes(), entries.get(2).getValue().getBytes());
      assertEquals("Key in entry is incorrect", id2, entries.get(3).getKey());
      assertEquals("Value in entry is incorrect", delValue2.getBytes(), entries.get(3).getValue().getBytes());
      entries.clear();
    }
    // getIndexEntriesSince with maxSize > 2150 should return five entries
    condition = new FindEntriesCondition(2151);
    assertTrue("getIndexEntriesSince() should return entries",
        fromDisk.getIndexEntriesSince(null, condition, entries, new AtomicLong(0), false, false));
    assertEquals("There should be five entries returned", 5, entries.size());
    assertEquals("Key in entry is incorrect", id1, entries.get(0).getKey());
    assertEquals("Value in entry is incorrect", value3.getBytes(), entries.get(0).getValue().getBytes());
    assertEquals("Key in entry is incorrect", id2, entries.get(1).getKey());
    assertEquals("Value in entry is incorrect", value2.getBytes(), entries.get(1).getValue().getBytes());
    assertEquals("Key in entry is incorrect", id2, entries.get(2).getKey());
    assertEquals("Value in entry is incorrect", ttlUpValue2.getBytes(), entries.get(2).getValue().getBytes());
    assertEquals("Key in entry is incorrect", id2, entries.get(3).getKey());
    assertEquals("Value in entry is incorrect", delValue2.getBytes(), entries.get(3).getValue().getBytes());
    assertEquals("Key in entry is incorrect", id3, entries.get(4).getKey());
    assertEquals("Value in entry is incorrect", value1.getBytes(), entries.get(4).getValue().getBytes());
    entries.clear();
    // getIndexEntriesSince with maxSize > 0 and <= 1150 starting from id2 should return three entries (all of id2's values)
    for (int maxSize : new int[] { 1, 1050, 1150 }) {
      condition = new FindEntriesCondition(maxSize);
      assertTrue("getIndexEntriesSince() should return entries",
          fromDisk.getIndexEntriesSince(id1, condition, entries, new AtomicLong(0), false, false));
      assertEquals("There should be three entries returned", 3, entries.size());
      assertEquals("Key in entry is incorrect", id2, entries.get(0).getKey());
      assertEquals("Value in entry is incorrect", value2.getBytes(), entries.get(0).getValue().getBytes());
      assertEquals("Key in entry is incorrect", id2, entries.get(1).getKey());
      assertEquals("Value in entry is incorrect", ttlUpValue2.getBytes(), entries.get(1).getValue().getBytes());
      assertEquals("Key in entry is incorrect", id2, entries.get(2).getKey());
      assertEquals("Value in entry is incorrect", delValue2.getBytes(), entries.get(2).getValue().getBytes());
      entries.clear();
    }
    if (formatVersion >= PersistentIndex.VERSION_1) {
      verifyResetKeyInfo(id3, value1, fromDisk.getResetKey(), fromDisk.getResetKeyType(),
          fromDisk.getResetKeyLifeVersion());
    }
  }
}
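The size thresholds asserted above follow directly from the values created in this test. The sketch below (illustration only, not part of the ambry test suite) recomputes the arithmetic:

// Illustrative arithmetic: cumulative fetch sizes per key, in key order, which is
// how getIndexEntriesSince() accumulates entries once it picks a start key.
public class EntrySizeArithmetic {
  public static void main(String[] args) {
    long id1Size = 1000;            // value3, the PUT stored under id1
    long id2Size = 1000 + 50 + 100; // value2 + ttlUpValue2 + delValue2, all under id2
    long id3Size = 1000;            // value1, the PUT stored under id3
    // maxSize <= 1000        -> 1 entry (id1 only)
    // 1000 < maxSize <= 2150 -> 4 entries: id1 plus all three of id2's values, since
    //                           every value of a key is returned once its first value
    //                           has been loaded, even if maxSize runs out mid-key
    // maxSize > 2150         -> 5 entries (id3's PUT fits as well)
    System.out.println(id1Size + id2Size);           // 2150
    System.out.println(id1Size + id2Size + id3Size); // 3150
  }
}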
Use of com.github.ambry.store.IndexSegment in project ambry by LinkedIn.
The class MockIndexSegment, method partialWriteTest.
/**
* Tests the case when {@link IndexSegment#writeIndexSegmentToFile(Offset)} is provided with different offsets <=
* {@link IndexSegment#getEndOffset()} and makes sure that only the relevant parts of the segment are written to disk.
* @throws IOException
* @throws StoreException
*/
@Test
public void partialWriteTest() throws IOException, StoreException {
  LogSegmentName prevLogSegmentName = LogSegmentName.fromPositionAndGeneration(0, 0);
  LogSegmentName logSegmentName = prevLogSegmentName.getNextPositionName();
  Offset startOffset = new Offset(logSegmentName, 0);
  MockId id1 = new MockId("0" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
  MockId id2 = new MockId("1" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
  MockId id3 = new MockId("2" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
  short accountId = Utils.getRandomShort(TestUtils.RANDOM);
  short containerId = Utils.getRandomShort(TestUtils.RANDOM);
  short lifeVersion = 0;
  IndexValue value1 =
      IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 0), Utils.Infinite_Time, time.milliseconds(),
          accountId, containerId, lifeVersion, formatVersion);
  IndexValue value2 =
      IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 1000), time.milliseconds() + 1,
          time.milliseconds(), accountId, containerId, lifeVersion, formatVersion);
  IndexValue value3 =
      IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 2000), Utils.Infinite_Time, time.milliseconds(),
          accountId, containerId, lifeVersion, formatVersion);
  time.sleep(TimeUnit.SECONDS.toMillis(1));
  // generate a TTL Update
  IndexValue ttlUpValue2 =
      IndexValueTest.getIndexValue(value2.getSize(), value2.getOffset(), Utils.Infinite_Time, time.milliseconds(),
          value2.getAccountId(), value2.getContainerId(), lifeVersion, formatVersion);
  ttlUpValue2.setNewOffset(new Offset(logSegmentName, 3000));
  ttlUpValue2.setNewSize(50);
  ttlUpValue2.setFlag(IndexValue.Flags.Ttl_Update_Index);
  time.sleep(TimeUnit.SECONDS.toMillis(1));
  // generate a DELETE
  IndexValue delValue2 =
      IndexValueTest.getIndexValue(value2.getSize(), value2.getOffset(), ttlUpValue2.getFlags(),
          value2.getExpiresAtMs(), value2.getOffset().getOffset(), time.milliseconds(), value2.getAccountId(),
          value2.getContainerId(), lifeVersion, formatVersion);
  delValue2.setNewOffset(new Offset(logSegmentName, 3050));
  delValue2.setNewSize(100);
  delValue2.setFlag(IndexValue.Flags.Delete_Index);
  IndexSegment indexSegment = generateIndexSegment(startOffset, STORE_KEY_FACTORY);
  // inserting in the opposite order by design to ensure that writes are based on offset ordering and not key ordering
  indexSegment.addEntry(new IndexEntry(id3, value1), new Offset(logSegmentName, 1000));
  indexSegment.addEntry(new IndexEntry(id2, value2), new Offset(logSegmentName, 2000));
  indexSegment.addEntry(new IndexEntry(id1, value3), new Offset(logSegmentName, 3000));
  indexSegment.addEntry(new IndexEntry(id2, ttlUpValue2), new Offset(logSegmentName, 3050));
  indexSegment.addEntry(new IndexEntry(id2, delValue2), new Offset(logSegmentName, 3150));
  // provide end offsets such that nothing is written
  checkNonCreationOfIndexSegmentFile(indexSegment, new Offset(prevLogSegmentName, 0));
  checkNonCreationOfIndexSegmentFile(indexSegment,
      new Offset(prevLogSegmentName, indexSegment.getStartOffset().getOffset()));
  checkNonCreationOfIndexSegmentFile(indexSegment, new Offset(logSegmentName, 0));
  List<MockId> shouldBeFound = new ArrayList<>();
  List<MockId> shouldNotBeFound = new ArrayList<>(Arrays.asList(id3, id2, id1));
  for (int safeEndPoint = 1000; safeEndPoint <= 3000; safeEndPoint += 1000) {
    shouldBeFound.add(shouldNotBeFound.remove(0));
    // repeat twice
    for (int i = 0; i < 2; i++) {
      indexSegment.writeIndexSegmentToFile(new Offset(logSegmentName, safeEndPoint));
      Journal journal = new Journal(tempDir.getAbsolutePath(), 3, 3);
      IndexSegment fromDisk =
          new IndexSegment(indexSegment.getFile(), false, STORE_KEY_FACTORY, config, metrics, journal, time);
      assertEquals("End offset not as expected", new Offset(logSegmentName, safeEndPoint), fromDisk.getEndOffset());
      assertEquals("Number of items incorrect", shouldBeFound.size(), fromDisk.getNumberOfItems());
      for (MockId id : shouldBeFound) {
        verifyValues(fromDisk, id, 1, EnumSet.noneOf(IndexValue.Flags.class));
      }
      for (MockId id : shouldNotBeFound) {
        assertNull("Values for key should not have been found", fromDisk.find(id));
      }
    }
  }
  // now persist the ttl update only
  indexSegment.writeIndexSegmentToFile(new Offset(logSegmentName, 3050));
  Journal journal = new Journal(tempDir.getAbsolutePath(), 3, 3);
  IndexSegment fromDisk =
      new IndexSegment(indexSegment.getFile(), false, STORE_KEY_FACTORY, config, metrics, journal, time);
  assertEquals("Number of items incorrect", 4, fromDisk.getNumberOfItems());
  for (MockId id : new MockId[] { id1, id2, id3 }) {
    int valueCount = id.equals(id2) ? 2 : 1;
    EnumSet<IndexValue.Flags> flags =
        id.equals(id2) ? EnumSet.of(IndexValue.Flags.Ttl_Update_Index) : EnumSet.noneOf(IndexValue.Flags.class);
    verifyValues(fromDisk, id, valueCount, flags);
  }
  // now persist the delete too
  indexSegment.writeIndexSegmentToFile(new Offset(logSegmentName, 3150));
  journal = new Journal(tempDir.getAbsolutePath(), 3, 3);
  fromDisk = new IndexSegment(indexSegment.getFile(), false, STORE_KEY_FACTORY, config, metrics, journal, time);
  assertEquals("Number of items incorrect", 5, fromDisk.getNumberOfItems());
  for (MockId id : new MockId[] { id1, id2, id3 }) {
    int valueCount = id.equals(id2) ? 3 : 1;
    EnumSet<IndexValue.Flags> flags = id.equals(id2) ? EnumSet.of(IndexValue.Flags.Delete_Index,
        IndexValue.Flags.Ttl_Update_Index) : EnumSet.noneOf(IndexValue.Flags.class);
    verifyValues(fromDisk, id, valueCount, flags);
  }
  // verify reset key
  if (formatVersion >= PersistentIndex.VERSION_1) {
    verifyResetKeyInfo(id3, value1, fromDisk.getResetKey(), fromDisk.getResetKeyType(),
        fromDisk.getResetKeyLifeVersion());
  }
}
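Conceptually, writeIndexSegmentToFile(safeEndPoint) persists exactly those entries whose recorded end offset (the second argument passed to addEntry() above) does not exceed the safe end point. A minimal sketch of that filter, with hypothetical names and a NavigableMap standing in for the segment's internal bookkeeping, might look like:

// Hypothetical sketch of the partial-write filter exercised above; the real logic
// lives inside IndexSegment.writeIndexSegmentToFile().
static List<IndexEntry> entriesToPersist(NavigableMap<Offset, IndexEntry> entriesByEndOffset, Offset safeEndPoint) {
  // keep every entry whose end offset is at or below the safe end point; the rest
  // remain in memory only and are invisible to an IndexSegment loaded from disk
  return new ArrayList<>(entriesByEndOffset.headMap(safeEndPoint, true).values());
}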
Use of com.github.ambry.store.IndexSegment in project ambry by LinkedIn.
The class MockIndexSegment, method verifyReadFromFile.
/**
* Creates an {@link IndexSegment} from the given {@code file} and checks for both sanity and find operations.
* @param referenceIndex the index entries to be used as reference.
* @param file the {@link File} to be used to load the index.
* @param startOffset the expected start {@link Offset} of the {@link IndexSegment}
* @param numItems the expected number of items in the {@code indexSegment}
* @param expectedSizeWritten the expected number of bytes written to the {@code indexSegment}
* @param endOffset the expected end offset of the {@code indexSegment}
* @param lastModifiedTimeInMs the last modified time of the index segment in ms
* @param resetKey the resetKey of the index segment
* @param resetKeyType the reset key type
* @param resetKeyLifeVersion the life version of reset key
* @throws StoreException
*/
private void verifyReadFromFile(NavigableMap<MockId, NavigableSet<IndexValue>> referenceIndex, File file,
    Offset startOffset, int numItems, int expectedSizeWritten, long endOffset, long lastModifiedTimeInMs,
    StoreKey resetKey, PersistentIndex.IndexEntryType resetKeyType, short resetKeyLifeVersion) throws StoreException {
  // read from file (not sealed) and verify that everything is ok
  Journal journal = new Journal(tempDir.getAbsolutePath(), Integer.MAX_VALUE, Integer.MAX_VALUE);
  IndexSegment fromDisk = createIndexSegmentFromFile(file, false, journal);
  verifyAllForIndexSegmentFromFile(referenceIndex, fromDisk, startOffset, numItems, expectedSizeWritten, false,
      endOffset, lastModifiedTimeInMs, resetKey, resetKeyType, resetKeyLifeVersion);
  // journal should contain all the entries
  verifyJournal(referenceIndex, journal);
  File bloomFile = new File(file.getParent(),
      IndexSegment.generateIndexSegmentFilenamePrefix(startOffset) + BLOOM_FILE_NAME_SUFFIX);
  fromDisk.seal();
  assertTrue("Bloom file does not exist", bloomFile.exists());
  verifyAllForIndexSegmentFromFile(referenceIndex, fromDisk, startOffset, numItems, expectedSizeWritten, true,
      endOffset, lastModifiedTimeInMs, resetKey, resetKeyType, resetKeyLifeVersion);
  // read from file (sealed) and verify that everything is ok
  journal = new Journal(tempDir.getAbsolutePath(), Integer.MAX_VALUE, Integer.MAX_VALUE);
  fromDisk = createIndexSegmentFromFile(file, true, journal);
  verifyAllForIndexSegmentFromFile(referenceIndex, fromDisk, startOffset, numItems, expectedSizeWritten, true,
      endOffset, lastModifiedTimeInMs, resetKey, resetKeyType, resetKeyLifeVersion);
  // journal should not contain any entries
  assertNull("Journal should not have any entries", journal.getFirstOffset());
  // test bloom file recreation: delete the bloom file
  assertTrue("File could not be deleted", bloomFile.delete());
  // read from file (sealed) again and verify that everything is ok
  journal = new Journal(tempDir.getAbsolutePath(), Integer.MAX_VALUE, Integer.MAX_VALUE);
  fromDisk = createIndexSegmentFromFile(file, true, journal);
  assertTrue("Bloom file does not exist", bloomFile.exists());
  verifyAllForIndexSegmentFromFile(referenceIndex, fromDisk, startOffset, numItems, expectedSizeWritten, true,
      endOffset, lastModifiedTimeInMs, resetKey, resetKeyType, resetKeyLifeVersion);
  // journal should not contain any entries
  assertNull("Journal should not have any entries", journal.getFirstOffset());
}
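For context, the find-based part of this verification boils down to comparing fromDisk.find() results against the reference index. A simplified, hypothetical version of what verifyAllForIndexSegmentFromFile() checks could be:

// Simplified sketch (assumed structure): every reference key should be findable
// and should carry the expected number of values.
for (Map.Entry<MockId, NavigableSet<IndexValue>> ref : referenceIndex.entrySet()) {
  NavigableSet<IndexValue> found = fromDisk.find(ref.getKey());
  assertNotNull("Values should be found for " + ref.getKey(), found);
  assertEquals("Unexpected number of values for " + ref.getKey(), ref.getValue().size(), found.size());
}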
Use of com.github.ambry.store.IndexSegment in project ambry by LinkedIn.
The class MockIndexSegment, method loadBloomFileFailureTest.
/**
* Tests failure to load the bloom filter file and verifies that the bloom filter is rebuilt.
* @throws Exception
*/
@Test
public void loadBloomFileFailureTest() throws Exception {
  assumeTrue(formatVersion > PersistentIndex.VERSION_1);
  LogSegmentName logSegmentName1 = LogSegmentName.fromPositionAndGeneration(0, 0);
  int indexValueSize = PersistentIndex.CURRENT_VERSION >= PersistentIndex.VERSION_3
      ? IndexValue.INDEX_VALUE_SIZE_IN_BYTES_V3_V4 : IndexValue.INDEX_VALUE_SIZE_IN_BYTES_V1_V2;
  IndexSegment indexSegment1 =
      new IndexSegment(tempDir.getAbsolutePath(), new Offset(logSegmentName1, 0), STORE_KEY_FACTORY,
          KEY_SIZE + indexValueSize, indexValueSize, config, metrics, time);
  Random random = new Random();
  short accountId1 = getRandomShort(random);
  short containerId1 = getRandomShort(random);
  MockId id1 = new MockId(TestUtils.getRandomString(CUSTOM_ID_SIZE), accountId1, containerId1);
  IndexValue value1 =
      IndexValueTest.getIndexValue(1000, new Offset(logSegmentName1, 0), Utils.Infinite_Time, time.milliseconds(),
          accountId1, containerId1, (short) 0, formatVersion);
  indexSegment1.addEntry(new IndexEntry(id1, value1), new Offset(logSegmentName1, 1000));
  indexSegment1.writeIndexSegmentToFile(new Offset(logSegmentName1, 1000));
  indexSegment1.seal();
  File bloomFile = new File(tempDir,
      generateIndexSegmentFilenamePrefix(new Offset(logSegmentName1, 0)) + BLOOM_FILE_NAME_SUFFIX);
  assertTrue("The bloom file should exist", bloomFile.exists());
  CrcInputStream crcBloom = new CrcInputStream(new FileInputStream(bloomFile));
  IFilter bloomFilter;
  try (DataInputStream stream = new DataInputStream(crcBloom)) {
    bloomFilter = FilterFactory.deserialize(stream, config.storeBloomFilterMaximumPageCount);
    long crcValue = crcBloom.getValue();
    assertEquals("Crc mismatch", crcValue, stream.readLong());
  }
  // induce crc mismatch during serialization
  CrcOutputStream crcStream = new CrcOutputStream(new FileOutputStream(bloomFile));
  DataOutputStream stream = new DataOutputStream(crcStream);
  FilterFactory.serialize(bloomFilter, stream);
  long crcValue = crcStream.getValue() + 1;
  stream.writeLong(crcValue);
  stream.close();
  // load from file, which should trigger bloom filter rebuild as crc value doesn't match
  new IndexSegment(indexSegment1.getFile(), true, STORE_KEY_FACTORY, config, metrics, null, time);
  assertEquals("Bloom filter rebuild count mismatch", 1, metrics.bloomRebuildOnLoadFailureCount.getCount());
}
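The bloom file uses a simple framing that the corruption above targets: the serialized filter bytes followed by a trailing CRC long. A standalone sketch of that framing, reusing the CrcInputStream/CrcOutputStream classes from the snippet (file name hypothetical), would be:

// Sketch of the crc framing: payload bytes, then a trailing crc long. A stored crc
// that disagrees with the recomputed one signals corruption, which IndexSegment
// answers by rebuilding the bloom filter.
File demo = new File(tempDir, "crc-framing-demo");
CrcOutputStream crcOut = new CrcOutputStream(new FileOutputStream(demo));
try (DataOutputStream out = new DataOutputStream(crcOut)) {
  out.write(new byte[] { 1, 2, 3 });  // payload
  out.writeLong(crcOut.getValue());   // trailing checksum over the payload only
}
CrcInputStream crcIn = new CrcInputStream(new FileInputStream(demo));
try (DataInputStream in = new DataInputStream(crcIn)) {
  byte[] payload = new byte[3];
  in.readFully(payload);
  long computed = crcIn.getValue();   // crc over the bytes read so far
  long stored = in.readLong();
  assertEquals("Crc mismatch means the file is corrupt", computed, stored);
}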
Use of com.github.ambry.store.IndexSegment in project ambry by LinkedIn.
The class MockIndexSegment, method memoryMapFailureTest.
/**
* Tests cases where exceptions occur while memory mapping the index segment.
* @throws IOException
* @throws StoreException
*/
@Test
public void memoryMapFailureTest() throws IOException, StoreException {
  assumeTrue(formatVersion == PersistentIndex.VERSION_1
      && config.storeIndexMemState == IndexMemState.MMAP_WITHOUT_FORCE_LOAD);
  LogSegmentName logSegmentName = LogSegmentName.fromPositionAndGeneration(0, 0);
  StoreKeyFactory mockStoreKeyFactory = Mockito.spy(STORE_KEY_FACTORY);
  IndexSegment indexSegment = generateIndexSegment(new Offset(logSegmentName, 0), mockStoreKeyFactory);
  // verify that StoreErrorCodes.File_Not_Found can be captured when performing memory map
  try {
    indexSegment.seal();
    fail("should fail");
  } catch (StoreException e) {
    assertEquals("Mismatch in error code", StoreErrorCodes.File_Not_Found, e.getErrorCode());
  }
  // verify that StoreErrorCodes.IOError can be captured when performing memory map
  indexSegment = generateIndexSegment(new Offset(logSegmentName, 0), mockStoreKeyFactory);
  MockId id1 = new MockId("0" + TestUtils.getRandomString(CUSTOM_ID_SIZE - 1));
  IndexValue value1 =
      IndexValueTest.getIndexValue(1000, new Offset(logSegmentName, 0), Utils.Infinite_Time, time.milliseconds(),
          Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), (short) 0, formatVersion);
  indexSegment.addEntry(new IndexEntry(id1, value1), new Offset(logSegmentName, 1000));
  indexSegment.writeIndexSegmentToFile(new Offset(logSegmentName, 1000));
  doThrow(new IOException(StoreException.IO_ERROR_STR)).when(mockStoreKeyFactory)
      .getStoreKey(any(DataInputStream.class));
  try {
    indexSegment.seal();
    fail("should fail");
  } catch (StoreException e) {
    assertEquals("Mismatch in error code", StoreErrorCodes.IOError, e.getErrorCode());
  }
  // test that when IOException's error message is null, the error code should be Unknown_Error
  doThrow(new IOException()).when(mockStoreKeyFactory).getStoreKey(any(DataInputStream.class));
  try {
    indexSegment.seal();
    fail("should fail");
  } catch (StoreException e) {
    assertEquals("Mismatch in error code", StoreErrorCodes.Unknown_Error, e.getErrorCode());
  }
}
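The last two cases hinge on how an IOException raised during seal() is translated into a StoreErrorCodes value. A hypothetical sketch of that mapping (the real translation lives inside ambry's store exception handling, not in this test) is:

// Hypothetical sketch of the IOException -> StoreErrorCodes translation the test
// relies on; names mirror those used above.
static StoreErrorCodes toStoreErrorCode(IOException e) {
  if (e.getMessage() != null && e.getMessage().equals(StoreException.IO_ERROR_STR)) {
    return StoreErrorCodes.IOError;     // a recognizable I/O error message
  }
  return StoreErrorCodes.Unknown_Error; // e.g. an IOException with a null message
}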