Example 11 with ByteBufferInputStream

Use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.

The class IndexSegment, method map.

/**
 * Memory maps the index segment. Optionally, it also persists the bloom filter to disk.
 * @param persistBloom True, if the bloom filter needs to be persisted. False otherwise.
 * @throws IOException
 * @throws StoreException
 */
void map(boolean persistBloom) throws IOException, StoreException {
    RandomAccessFile raf = new RandomAccessFile(indexFile, "r");
    rwLock.writeLock().lock();
    try {
        mmap = raf.getChannel().map(FileChannel.MapMode.READ_ONLY, 0, indexFile.length());
        mmap.position(0);
        version = mmap.getShort();
        StoreKey storeKey;
        int keySize;
        short resetKeyType;
        switch(version) {
            case PersistentIndex.VERSION_0:
                indexSizeExcludingEntries = VERSION_FIELD_LENGTH + KEY_OR_ENTRY_SIZE_FIELD_LENGTH + VALUE_SIZE_FIELD_LENGTH + LOG_END_OFFSET_FIELD_LENGTH + CRC_FIELD_LENGTH;
                keySize = mmap.getInt();
                valueSize = mmap.getInt();
                persistedEntrySize = keySize + valueSize;
                endOffset.set(new Offset(startOffset.getName(), mmap.getLong()));
                lastModifiedTimeSec.set(indexFile.lastModified() / 1000);
                firstKeyRelativeOffset = indexSizeExcludingEntries - CRC_FIELD_LENGTH;
                break;
            case PersistentIndex.VERSION_1:
                keySize = mmap.getInt();
                valueSize = mmap.getInt();
                persistedEntrySize = keySize + valueSize;
                endOffset.set(new Offset(startOffset.getName(), mmap.getLong()));
                lastModifiedTimeSec.set(mmap.getLong());
                storeKey = factory.getStoreKey(new DataInputStream(new ByteBufferInputStream(mmap)));
                resetKeyType = mmap.getShort();
                resetKey = new Pair<>(storeKey, PersistentIndex.IndexEntryType.values()[resetKeyType]);
                indexSizeExcludingEntries = VERSION_FIELD_LENGTH + KEY_OR_ENTRY_SIZE_FIELD_LENGTH + VALUE_SIZE_FIELD_LENGTH + LOG_END_OFFSET_FIELD_LENGTH + CRC_FIELD_LENGTH + LAST_MODIFIED_TIME_FIELD_LENGTH + resetKey.getFirst().sizeInBytes() + RESET_KEY_TYPE_FIELD_LENGTH;
                firstKeyRelativeOffset = indexSizeExcludingEntries - CRC_FIELD_LENGTH;
                break;
            case PersistentIndex.VERSION_2:
                persistedEntrySize = mmap.getInt();
                valueSize = mmap.getInt();
                endOffset.set(new Offset(startOffset.getName(), mmap.getLong()));
                lastModifiedTimeSec.set(mmap.getLong());
                storeKey = factory.getStoreKey(new DataInputStream(new ByteBufferInputStream(mmap)));
                resetKeyType = mmap.getShort();
                resetKey = new Pair<>(storeKey, PersistentIndex.IndexEntryType.values()[resetKeyType]);
                indexSizeExcludingEntries = VERSION_FIELD_LENGTH + KEY_OR_ENTRY_SIZE_FIELD_LENGTH + VALUE_SIZE_FIELD_LENGTH + LOG_END_OFFSET_FIELD_LENGTH + CRC_FIELD_LENGTH + LAST_MODIFIED_TIME_FIELD_LENGTH + resetKey.getFirst().sizeInBytes() + RESET_KEY_TYPE_FIELD_LENGTH;
                firstKeyRelativeOffset = indexSizeExcludingEntries - CRC_FIELD_LENGTH;
                break;
            default:
                throw new StoreException("IndexSegment : " + indexFile.getAbsolutePath() + " unknown version in index file", StoreErrorCodes.Index_Version_Error);
        }
        mapped.set(true);
        index = null;
    } finally {
        raf.close();
        rwLock.writeLock().unlock();
    }
    // we only persist the bloom filter once during its entire lifetime
    if (persistBloom) {
        CrcOutputStream crcStream = new CrcOutputStream(new FileOutputStream(bloomFile));
        DataOutputStream stream = new DataOutputStream(crcStream);
        FilterFactory.serialize(bloomFilter, stream);
        long crcValue = crcStream.getValue();
        stream.writeLong(crcValue);
        // close the stream so the serialized filter and its trailing CRC are flushed to disk
        stream.close();
    }
}
Also used: DataOutputStream (java.io.DataOutputStream), ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream), DataInputStream (java.io.DataInputStream), RandomAccessFile (java.io.RandomAccessFile), CrcOutputStream (com.github.ambry.utils.CrcOutputStream), FileOutputStream (java.io.FileOutputStream)
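
What makes this work is that ByteBufferInputStream advances the wrapped buffer's position as it reads, so the code can mix absolute reads off the mapped buffer (mmap.getShort(), mmap.getInt()) with stream-based deserialization of variable-length fields like the store key. Below is a minimal, self-contained sketch of the same pattern, assuming a file that starts with a short version followed by a length-prefixed name; SimpleByteBufferInputStream is a simplified stand-in for ambry's class, not the real implementation.

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;

public class MmapReadSketch {

    // Simplified stand-in for com.github.ambry.utils.ByteBufferInputStream:
    // each read consumes from (and advances) the wrapped buffer.
    static class SimpleByteBufferInputStream extends InputStream {
        private final ByteBuffer buffer;

        SimpleByteBufferInputStream(ByteBuffer buffer) {
            this.buffer = buffer;
        }

        @Override
        public int read() {
            return buffer.hasRemaining() ? buffer.get() & 0xFF : -1;
        }
    }

    public static void main(String[] args) throws IOException {
        try (RandomAccessFile raf = new RandomAccessFile(args[0], "r")) {
            MappedByteBuffer mmap = raf.getChannel().map(FileChannel.MapMode.READ_ONLY, 0, raf.length());
            // fixed-width fields can be read directly off the buffer...
            short version = mmap.getShort();
            // ...while variable-length fields go through a DataInputStream that
            // consumes the same buffer, just like factory.getStoreKey(...) above
            DataInputStream stream = new DataInputStream(new SimpleByteBufferInputStream(mmap));
            byte[] name = new byte[stream.readInt()];
            stream.readFully(name);
            System.out.println("version=" + version + ", name=" + new String(name));
        }
    }
}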

Example 12 with ByteBufferInputStream

Use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.

The class StoreFindTokenTest, method getSerializedStream.

/**
 * Gets a serialized format of {@code token} in the version {@code version}.
 * @param token the {@link StoreFindToken} to serialize.
 * @param version the version to serialize it in.
 * @return a serialized format of {@code token} in the version {@code version}.
 */
static DataInputStream getSerializedStream(StoreFindToken token, short version) {
    byte[] bytes;
    FindTokenType type = token.getType();
    byte[] sessionIdBytes = token.getSessionIdInBytes();
    byte[] storeKeyInBytes = token.getStoreKeyInBytes();
    switch(version) {
        case StoreFindToken.VERSION_0:
            // version size + sessionId length size + session id size + log offset size + index segment start offset size
            // + store key size
            int size = 2 + 4 + sessionIdBytes.length + 8 + 8 + storeKeyInBytes.length;
            bytes = new byte[size];
            ByteBuffer bufWrap = ByteBuffer.wrap(bytes);
            // add version
            bufWrap.putShort(StoreFindToken.VERSION_0);
            // add sessionId
            bufWrap.putInt(sessionIdBytes.length);
            bufWrap.put(sessionIdBytes);
            long logOffset = -1;
            long indexStartOffset = -1;
            if (type.equals(FindTokenType.JournalBased)) {
                logOffset = token.getOffset().getOffset();
            } else if (type.equals(FindTokenType.IndexBased)) {
                indexStartOffset = token.getOffset().getOffset();
            }
            // add offset
            bufWrap.putLong(logOffset);
            // add index start offset
            bufWrap.putLong(indexStartOffset);
            // add storeKey
            if (storeKeyInBytes.length > 0) {
                bufWrap.put(storeKeyInBytes);
            }
            break;
        case StoreFindToken.VERSION_1:
            byte[] offsetBytes = token.getOffsetInBytes();
            // version size + sessionId length size + session id size + type + log offset / index segment start offset size
            // + store key size
            size = 2 + 4 + sessionIdBytes.length + 2 + offsetBytes.length + storeKeyInBytes.length;
            bytes = new byte[size];
            bufWrap = ByteBuffer.wrap(bytes);
            // add version
            bufWrap.putShort(StoreFindToken.VERSION_1);
            // add sessionId
            bufWrap.putInt(sessionIdBytes.length);
            bufWrap.put(sessionIdBytes);
            // add type
            bufWrap.putShort((short) type.ordinal());
            bufWrap.put(offsetBytes);
            if (storeKeyInBytes.length > 0) {
                bufWrap.put(storeKeyInBytes);
            }
            break;
        case VERSION_2:
            offsetBytes = token.getOffsetInBytes();
            byte[] incarnationIdBytes = token.getIncarnationIdInBytes();
            size = VERSION_SIZE + TYPE_SIZE;
            if (type != FindTokenType.Uninitialized) {
                size += INCARNATION_ID_LENGTH_SIZE + incarnationIdBytes.length + SESSION_ID_LENGTH_SIZE + sessionIdBytes.length + offsetBytes.length;
                if (type == FindTokenType.JournalBased) {
                    size += INCLUSIVE_BYTE_SIZE;
                } else if (type == FindTokenType.IndexBased) {
                    size += storeKeyInBytes.length;
                }
            }
            bytes = new byte[size];
            bufWrap = ByteBuffer.wrap(bytes);
            // add version
            bufWrap.putShort(VERSION_2);
            // add type
            bufWrap.putShort((short) type.ordinal());
            if (type != FindTokenType.Uninitialized) {
                // add incarnationId
                bufWrap.putInt(incarnationIdBytes.length);
                bufWrap.put(incarnationIdBytes);
                // add sessionId
                bufWrap.putInt(sessionIdBytes.length);
                bufWrap.put(sessionIdBytes);
                // add offset
                bufWrap.put(offsetBytes);
                if (type == FindTokenType.JournalBased) {
                    bufWrap.put(token.getInclusive() ? (byte) 1 : (byte) 0);
                } else if (type == FindTokenType.IndexBased) {
                    bufWrap.put(storeKeyInBytes);
                }
            }
            break;
        case StoreFindToken.CURRENT_VERSION:
            bytes = token.toBytes();
            break;
        default:
            throw new IllegalArgumentException("Version " + version + " of StoreFindToken does not exist");
    }
    return new DataInputStream(new ByteBufferInputStream(ByteBuffer.wrap(bytes)));
}
Also used: ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream), FindTokenType (com.github.ambry.replication.FindTokenType), DataInputStream (java.io.DataInputStream), ByteBuffer (java.nio.ByteBuffer)
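
Each branch computes an exact byte count up front, then writes fixed-width fields and length-prefixed variable fields into a wrapped ByteBuffer; deserialization is the mirror image, reading the same fields back through a DataInputStream. A minimal round-trip sketch of that layout follows, with invented field values; it substitutes ByteArrayInputStream for ambry's ByteBufferInputStream, but the bytes read back the same way.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class TokenSerDeSketch {

    public static void main(String[] args) throws IOException {
        byte[] sessionId = "session-1".getBytes(StandardCharsets.UTF_8);

        // write: [short version][short type][int length][sessionId bytes]
        ByteBuffer buf = ByteBuffer.allocate(2 + 2 + 4 + sessionId.length);
        buf.putShort((short) 2);
        buf.putShort((short) 1);
        buf.putInt(sessionId.length);
        buf.put(sessionId);

        // read it back in the same order
        DataInputStream stream = new DataInputStream(new ByteArrayInputStream(buf.array()));
        short version = stream.readShort();
        short type = stream.readShort();
        byte[] readId = new byte[stream.readInt()];
        stream.readFully(readId);
        System.out.println(version + " " + type + " " + new String(readId, StandardCharsets.UTF_8));
    }
}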

Example 13 with ByteBufferInputStream

Use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.

The class StoreMessageReadSetTest, method getSerializedStream.

/**
 * Gets a serialized format of {@code readOptions} in the version {@code version}.
 * @param readOptions the {@link BlobReadOptions} to serialize.
 * @param version the version to serialize it in.
 * @return a serialized format of {@code readOptions} in the version {@code version}.
 */
private DataInputStream getSerializedStream(BlobReadOptions readOptions, short version) {
    byte[] bytes;
    switch(version) {
        case BlobReadOptions.VERSION_0:
            // version length + offset length + size length + expires at length + key size
            bytes = new byte[2 + 8 + 8 + 8 + readOptions.getMessageInfo().getStoreKey().sizeInBytes()];
            ByteBuffer bufWrap = ByteBuffer.wrap(bytes);
            bufWrap.putShort(BlobReadOptions.VERSION_0);
            bufWrap.putLong(readOptions.getOffset());
            bufWrap.putLong(readOptions.getMessageInfo().getSize());
            bufWrap.putLong(readOptions.getMessageInfo().getExpirationTimeInMs());
            bufWrap.put(readOptions.getMessageInfo().getStoreKey().toBytes());
            break;
        case BlobReadOptions.VERSION_1:
            bytes = readOptions.toBytes();
            break;
        default:
            throw new IllegalArgumentException("Version " + version + " of BlobReadOptions does not exist");
    }
    return new DataInputStream(new ByteBufferInputStream(ByteBuffer.wrap(bytes)));
}
Also used: ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream), DataInputStream (java.io.DataInputStream), ByteBuffer (java.nio.ByteBuffer)
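
The stream returned here feeds a deserializer that reads the leading version short and branches on it, which is why every case starts by writing the version. A minimal sketch of that consumer side, assuming an invented VERSION_0-style record of two longs; this shows the generic read-version-then-branch shape, not ambry's actual BlobReadOptions deserializer.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public class VersionDispatchSketch {

    public static void main(String[] args) throws IOException {
        // a VERSION_0-style record: [short version][long offset][long size]
        ByteBuffer buf = ByteBuffer.allocate(2 + 8 + 8);
        buf.putShort((short) 0).putLong(42L).putLong(128L);

        DataInputStream stream = new DataInputStream(new ByteArrayInputStream(buf.array()));
        short version = stream.readShort();
        switch (version) {
            case 0:
                System.out.println("v0: offset=" + stream.readLong() + ", size=" + stream.readLong());
                break;
            default:
                throw new IllegalArgumentException("Version " + version + " does not exist");
        }
    }
}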

Example 14 with ByteBufferInputStream

Use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.

The class StoreMessageReadSetTest, method blobReadOptionsTest.

/**
 * Tests {@link BlobReadOptions} for getter correctness, serialization/deserialization and bad input.
 * @throws IOException
 * @throws StoreException
 */
@Test
public void blobReadOptionsTest() throws IOException, StoreException {
    int logCapacity = 2000;
    int[] segCapacities = { 2000, 1000 };
    for (int segCapacity : segCapacities) {
        Log log = new Log(tempDir.getAbsolutePath(), logCapacity, StoreTestUtils.DEFAULT_DISK_SPACE_ALLOCATOR, createStoreConfig(segCapacity, setFilePermissionEnabled), metrics, null);
        try {
            LogSegment firstSegment = log.getFirstSegment();
            int availableSegCapacity = (int) (segCapacity - firstSegment.getStartOffset());
            int count = logCapacity / segCapacity;
            for (int i = 0; i < count; i++) {
                ByteBuffer buffer = ByteBuffer.allocate(availableSegCapacity);
                log.appendFrom(Channels.newChannel(new ByteBufferInputStream(buffer)), availableSegCapacity);
            }
            long offset = Utils.getRandomLong(TestUtils.RANDOM, availableSegCapacity) + firstSegment.getStartOffset();
            long size = Utils.getRandomLong(TestUtils.RANDOM, firstSegment.getEndOffset() - offset);
            long expiresAtMs = Utils.getRandomLong(TestUtils.RANDOM, 1000);
            long operationTimeMs = System.currentTimeMillis() + TestUtils.RANDOM.nextInt(10000);
            long crc = TestUtils.RANDOM.nextLong();
            MockId id = new MockId("id1");
            short accountId = Utils.getRandomShort(TestUtils.RANDOM);
            short containerId = Utils.getRandomShort(TestUtils.RANDOM);
            boolean deleted = TestUtils.RANDOM.nextBoolean();
            boolean ttlUpdated = TestUtils.RANDOM.nextBoolean();
            // basic test
            MessageInfo info = new MessageInfo(id, size, deleted, ttlUpdated, expiresAtMs, crc, accountId, containerId, operationTimeMs);
            BlobReadOptions options = new BlobReadOptions(log, new Offset(firstSegment.getName(), offset), info);
            assertEquals("Ref count of log segment should have increased", 1, firstSegment.refCount());
            verifyGetters(options, firstSegment, offset, true, info);
            options.close();
            assertEquals("Ref count of log segment should have decreased", 0, firstSegment.refCount());
            // toBytes() and back test
            doSerDeTest(options, firstSegment, log);
            if (count > 1) {
                // toBytes() and back test for the second segment
                LogSegment secondSegment = log.getNextSegment(firstSegment);
                options = new BlobReadOptions(log, new Offset(secondSegment.getName(), offset), info);
                assertEquals("Ref count of log segment should have increased", 1, secondSegment.refCount());
                options.close();
                assertEquals("Ref count of log segment should have decreased", 0, secondSegment.refCount());
                doSerDeTest(options, secondSegment, log);
            }
            try {
                new BlobReadOptions(log, new Offset(firstSegment.getName(), firstSegment.getEndOffset()), new MessageInfo(null, 1, 1, Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), operationTimeMs));
                fail("Construction should have failed because offset + size > endOffset");
            } catch (IllegalArgumentException e) {
            // expected. Nothing to do.
            }
        } finally {
            log.close(false);
            assertTrue(tempDir + " could not be cleaned", StoreTestUtils.cleanDirectory(tempDir, false));
        }
    }
}
Also used: ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
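
The test fills each segment by wrapping an all-zero ByteBuffer in a ByteBufferInputStream and passing it through Channels.newChannel, turning in-memory bytes into the ReadableByteChannel that appendFrom expects. A minimal sketch of the same trick against a plain FileChannel; the temp file stands in for the log segment, and ByteArrayInputStream for ambry's buffer-backed stream.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class ChannelFillSketch {

    public static void main(String[] args) throws IOException {
        // all-zero filler bytes, like the test's allocate(availableSegCapacity)
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        ReadableByteChannel src = Channels.newChannel(new ByteArrayInputStream(buffer.array()));

        // the temp file plays the role of the log segment being appended to
        Path file = Files.createTempFile("log-segment", ".dat");
        try (FileChannel dst = FileChannel.open(file, StandardOpenOption.WRITE)) {
            long written = dst.transferFrom(src, 0, buffer.capacity());
            System.out.println("wrote " + written + " bytes to " + file);
        }
    }
}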

Example 15 with ByteBufferInputStream

Use of com.github.ambry.utils.ByteBufferInputStream in project ambry by linkedin.

The class LogSegmentTest, method appendTest.

/**
 * Tests {@link LogSegment#appendFrom(ByteBuffer)} and {@link LogSegment#appendFrom(ReadableByteChannel, long)} for
 * various cases.
 * @throws IOException
 * @throws StoreException
 */
@Test
public void appendTest() throws IOException, StoreException {
    // buffer append
    doAppendTest(new Appender() {

        @Override
        public void append(LogSegment segment, ByteBuffer buffer) throws StoreException {
            int writeSize = buffer.remaining();
            int written = segment.appendFrom(buffer);
            assertEquals("Size written did not match size of buffer provided", writeSize, written);
        }
    });
    // channel append
    doAppendTest(new Appender() {

        @Override
        public void append(LogSegment segment, ByteBuffer buffer) throws StoreException {
            int writeSize = buffer.remaining();
            segment.appendFrom(Channels.newChannel(new ByteBufferInputStream(buffer)), writeSize);
            assertFalse("The buffer was not completely written", buffer.hasRemaining());
        }
    });
    // direct IO append
    if (Utils.isLinux()) {
        doAppendTest(new Appender() {

            @Override
            public void append(LogSegment segment, ByteBuffer buffer) throws StoreException {
                int writeSize = buffer.remaining();
                segment.appendFromDirectly(buffer.array(), 0, writeSize);
            }
        });
    }
}
Also used: ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
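
The channel-append case asserts the buffer is fully drained, which matters because a single ReadableByteChannel.read call may return fewer bytes than requested; an appendFrom(channel, size) implementation therefore has to loop until size bytes have been consumed. A minimal sketch of that loop; readFully is an illustrative name here, not a LogSegment method.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;

public class ChannelAppendSketch {

    // keep reading until the requested number of bytes has been consumed;
    // a single channel.read(...) may return fewer bytes than asked for
    static long readFully(ReadableByteChannel channel, long size) throws IOException {
        ByteBuffer chunk = ByteBuffer.allocate(256);
        long total = 0;
        while (total < size) {
            chunk.clear();
            chunk.limit((int) Math.min(chunk.capacity(), size - total));
            int read = channel.read(chunk);
            if (read < 0) {
                throw new IOException("Channel ended after " + total + " of " + size + " bytes");
            }
            total += read; // a real append would flush the chunk to the segment here
        }
        return total;
    }

    public static void main(String[] args) throws IOException {
        byte[] data = new byte[1000];
        ReadableByteChannel channel = Channels.newChannel(new ByteArrayInputStream(data));
        System.out.println("consumed " + readFully(channel, data.length) + " bytes");
    }
}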

Aggregations

ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream): 79 uses
ByteBuffer (java.nio.ByteBuffer): 48 uses
DataInputStream (java.io.DataInputStream): 34 uses
Test (org.junit.Test): 25 uses
ArrayList (java.util.ArrayList): 19 uses
InputStream (java.io.InputStream): 16 uses
MessageInfo (com.github.ambry.store.MessageInfo): 15 uses
StoreKey (com.github.ambry.store.StoreKey): 14 uses
IOException (java.io.IOException): 12 uses
MetricRegistry (com.codahale.metrics.MetricRegistry): 10 uses
Random (java.util.Random): 10 uses
GetResponse (com.github.ambry.protocol.GetResponse): 8 uses
MockId (com.github.ambry.store.MockId): 8 uses
ByteBufferOutputStream (com.github.ambry.utils.ByteBufferOutputStream): 8 uses
Crc32 (com.github.ambry.utils.Crc32): 6 uses
BlobId (com.github.ambry.commons.BlobId): 5 uses
BlobProperties (com.github.ambry.messageformat.BlobProperties): 5 uses
CrcInputStream (com.github.ambry.utils.CrcInputStream): 5 uses
MessageFormatException (com.github.ambry.messageformat.MessageFormatException): 4 uses
RequestInfo (com.github.ambry.network.RequestInfo): 4 uses