
Example 6 with StoreKey

Use of com.github.ambry.store.StoreKey in project ambry by LinkedIn.

The class CloudBlobStoreTest, method testExceptionalDest.

/**
 * Test verifying that exceptions thrown by the cloud destination surface as StoreExceptions with StoreErrorCodes.IOError.
 */
@Test
public void testExceptionalDest() throws Exception {
    CloudDestination exDest = mock(CloudDestination.class);
    when(exDest.uploadBlob(any(BlobId.class), anyLong(), any(), any(InputStream.class))).thenThrow(new CloudStorageException("ouch"));
    when(exDest.deleteBlob(any(BlobId.class), anyLong(), anyShort(), any(CloudUpdateValidator.class))).thenThrow(new CloudStorageException("ouch"));
    when(exDest.getBlobMetadata(anyList())).thenThrow(new CloudStorageException("ouch"));
    Properties props = new Properties();
    setBasicProperties(props);
    props.setProperty(CloudConfig.VCR_REQUIRE_ENCRYPTION, "false");
    props.setProperty(CloudConfig.CLOUD_BLOB_CRYPTO_AGENT_FACTORY_CLASS, TestCloudBlobCryptoAgentFactory.class.getName());
    vcrMetrics = new VcrMetrics(new MetricRegistry());
    CloudBlobStore exStore = new CloudBlobStore(new VerifiableProperties(props), partitionId, exDest, clusterMap, vcrMetrics);
    exStore.start();
    List<StoreKey> keys = Collections.singletonList(getUniqueId(refAccountId, refContainerId, false, partitionId));
    MockMessageWriteSet messageWriteSet = new MockMessageWriteSet();
    CloudTestUtil.addBlobToMessageSet(messageWriteSet, 10, Utils.Infinite_Time, refAccountId, refContainerId, true, false, partitionId, operationTime, isVcr);
    try {
        exStore.put(messageWriteSet);
        fail("Store put should have failed.");
    } catch (StoreException e) {
        assertEquals(StoreErrorCodes.IOError, e.getErrorCode());
    }
    try {
        exStore.delete(messageWriteSet.getMessageSetInfo());
        fail("Store delete should have failed.");
    } catch (StoreException e) {
        assertEquals(StoreErrorCodes.IOError, e.getErrorCode());
    }
    try {
        exStore.findMissingKeys(keys);
        fail("Store findMissingKeys should have failed.");
    } catch (StoreException e) {
        assertEquals(StoreErrorCodes.IOError, e.getErrorCode());
    }
}
Also used: VerifiableProperties (com.github.ambry.config.VerifiableProperties), ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream), InputStream (java.io.InputStream), MetricRegistry (com.codahale.metrics.MetricRegistry), Properties (java.util.Properties), StoreKey (com.github.ambry.store.StoreKey), StoreException (com.github.ambry.store.StoreException), MockMessageWriteSet (com.github.ambry.store.MockMessageWriteSet), BlobId (com.github.ambry.commons.BlobId), ReplicationTest (com.github.ambry.replication.ReplicationTest), Test (org.junit.Test)
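
The test pins down CloudBlobStore's error translation: a CloudStorageException from the destination surfaces to callers as a StoreException carrying StoreErrorCodes.IOError. Below is a minimal sketch of that wrapping pattern; the helper name is hypothetical and it assumes StoreException exposes a (message, cause, errorCode) constructor.

// Hypothetical helper illustrating the translation the test asserts; not the actual
// CloudBlobStore code. Assumes a StoreException(String, Throwable, StoreErrorCodes) constructor.
private static StoreException asStoreIoError(String operation, CloudStorageException e) {
    // The test expects IOError no matter which operation (put, delete, findMissingKeys) failed.
    return new StoreException("CloudBlobStore " + operation + " failed", e, StoreErrorCodes.IOError);
}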

Example 7 with StoreKey

Use of com.github.ambry.store.StoreKey in project ambry by LinkedIn.

The class CloudBlobStoreTest, method testExceptionRetry.

/** Test that operations hitting retryable cloud errors are retried and succeed on the second attempt. */
@Test
public void testExceptionRetry() throws Exception {
    CloudDestination exDest = mock(CloudDestination.class);
    long retryDelay = 5;
    MockMessageWriteSet messageWriteSet = new MockMessageWriteSet();
    BlobId blobId = CloudTestUtil.addBlobToMessageSet(messageWriteSet, SMALL_BLOB_SIZE, Utils.Infinite_Time, refAccountId, refContainerId, false, false, partitionId, operationTime, isVcr);
    List<StoreKey> keys = Collections.singletonList(blobId);
    CloudBlobMetadata metadata = new CloudBlobMetadata(blobId, operationTime, Utils.Infinite_Time, 1024, null);
    CloudStorageException retryableException = new CloudStorageException("Server unavailable", null, 500, true, retryDelay);
    when(exDest.uploadBlob(any(BlobId.class), anyLong(), any(), any(InputStream.class))).thenAnswer((invocation -> {
        consumeStream(invocation);
        throw retryableException;
    })).thenAnswer(invocation -> {
        consumeStream(invocation);
        return true;
    });
    when(exDest.deleteBlob(any(BlobId.class), anyLong(), anyShort(), any(CloudUpdateValidator.class))).thenThrow(retryableException).thenReturn(true);
    when(exDest.updateBlobExpiration(any(BlobId.class), anyLong(), any(CloudUpdateValidator.class))).thenThrow(retryableException).thenReturn((short) 1);
    when(exDest.getBlobMetadata(anyList())).thenThrow(retryableException).thenReturn(Collections.singletonMap(metadata.getId(), metadata));
    doThrow(retryableException).doNothing().when(exDest).downloadBlob(any(BlobId.class), any());
    Properties props = new Properties();
    setBasicProperties(props);
    props.setProperty(CloudConfig.VCR_REQUIRE_ENCRYPTION, "false");
    props.setProperty(CloudConfig.CLOUD_BLOB_CRYPTO_AGENT_FACTORY_CLASS, TestCloudBlobCryptoAgentFactory.class.getName());
    vcrMetrics = new VcrMetrics(new MetricRegistry());
    CloudBlobStore exStore = spy(new CloudBlobStore(new VerifiableProperties(props), partitionId, exDest, clusterMap, vcrMetrics));
    exStore.start();
    // Run all operations; each should be retried once and succeed on the second attempt.
    int expectedCacheLookups = 0, expectedCacheRemoves = 0, expectedRetries = 0;
    // PUT
    exStore.put(messageWriteSet);
    expectedRetries++;
    expectedCacheLookups++;
    // UPDATE_TTL
    exStore.updateTtl(messageWriteSet.getMessageSetInfo());
    expectedRetries++;
    expectedCacheRemoves++;
    expectedCacheLookups += 2;
    // DELETE
    exStore.delete(messageWriteSet.getMessageSetInfo());
    expectedRetries++;
    expectedCacheRemoves++;
    if (isVcr) {
        // Cache lookups on delete happen only in the VCR case; the frontend does none.
        expectedCacheLookups += 2;
    }
    // GET
    exStore.get(keys, EnumSet.noneOf(StoreGetOptions.class));
    expectedRetries++;
    exStore.downloadBlob(metadata, blobId, new ByteArrayOutputStream());
    expectedRetries++;
    // Expect retries for all ops, cache lookups for the first three
    assertEquals("Unexpected retry count", expectedRetries, vcrMetrics.retryCount.getCount());
    assertEquals("Unexpected wait time", expectedRetries * retryDelay, vcrMetrics.retryWaitTimeMsec.getCount());
    verifyCacheHits(expectedCacheLookups, 0);
    verify(exStore, times(expectedCacheRemoves)).removeFromCache(eq(blobId.getID()));
    // Rerun the first three operations; each should be skipped due to a cache hit.
    messageWriteSet.resetBuffers();
    int expectedCacheHits = 0;
    try {
        exStore.put(messageWriteSet);
    } catch (StoreException ex) {
        assertEquals(StoreErrorCodes.Already_Exist, ex.getErrorCode());
    }
    expectedCacheHits++;
    exStore.addToCache(blobId.getID(), (short) 0, CloudBlobStore.BlobState.TTL_UPDATED);
    exStore.updateTtl(messageWriteSet.getMessageSetInfo());
    expectedCacheHits++;
    exStore.addToCache(blobId.getID(), (short) 0, CloudBlobStore.BlobState.DELETED);
    try {
        exStore.delete(messageWriteSet.getMessageSetInfo());
    } catch (StoreException ex) {
        assertEquals(StoreErrorCodes.ID_Deleted, ex.getErrorCode());
    }
    if (isVcr) {
        expectedCacheHits++;
    }
    verifyCacheHits(expectedCacheLookups + expectedCacheHits, expectedCacheHits);
}
Also used: VerifiableProperties (com.github.ambry.config.VerifiableProperties), MetricRegistry (com.codahale.metrics.MetricRegistry), StoreGetOptions (com.github.ambry.store.StoreGetOptions), ByteArrayOutputStream (java.io.ByteArrayOutputStream), Properties (java.util.Properties), StoreKey (com.github.ambry.store.StoreKey), StoreException (com.github.ambry.store.StoreException), MockMessageWriteSet (com.github.ambry.store.MockMessageWriteSet), BlobId (com.github.ambry.commons.BlobId), ReplicationTest (com.github.ambry.replication.ReplicationTest), Test (org.junit.Test)
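
The retryable path hinges on two pieces of state in the thrown CloudStorageException: the retryable flag and the server-suggested delay (retryDelay above). Below is a generic sketch of the retry loop the metrics assertions imply, assuming the accessor names isRetryable() and getRetryDelay() and using hypothetical Counter parameters in place of the real VcrMetrics fields.

import java.util.concurrent.Callable;
import com.codahale.metrics.Counter;

// Illustrative only: retries a retryable cloud error up to maxAttempts, recording
// the retry count and the accumulated wait time, as the test's metrics checks expect.
static <T> T callWithRetries(Callable<T> action, Counter retryCount, Counter retryWaitTimeMsec, int maxAttempts) throws Exception {
    for (int attempt = 1; ; attempt++) {
        try {
            return action.call();
        } catch (CloudStorageException e) {
            // Give up on non-retryable errors or once the attempt budget is exhausted.
            if (!e.isRetryable() || attempt >= maxAttempts) {
                throw e;
            }
            retryCount.inc();
            retryWaitTimeMsec.inc(e.getRetryDelay());
            Thread.sleep(e.getRetryDelay());
        }
    }
}

With a first attempt that throws and a second that succeeds, such a loop yields exactly one retry and retryDelay milliseconds of recorded wait per operation, matching expectedRetries and expectedRetries * retryDelay in the assertions above.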

Example 8 with StoreKey

Use of com.github.ambry.store.StoreKey in project ambry by LinkedIn.

The class CloudBlobStoreTest, method testFindMissingKeys.

/**
 * Test the CloudBlobStore findMissingKeys method.
 */
@Test
public void testFindMissingKeys() throws Exception {
    setupCloudStore(false, true, defaultCacheLimit, true);
    int count = 10;
    List<StoreKey> keys = new ArrayList<>();
    Map<String, CloudBlobMetadata> metadataMap = new HashMap<>();
    for (int j = 0; j < count; j++) {
        // Blob with metadata
        BlobId existentBlobId = getUniqueId(refAccountId, refContainerId, false, partitionId);
        keys.add(existentBlobId);
        metadataMap.put(existentBlobId.getID(), new CloudBlobMetadata(existentBlobId, operationTime, Utils.Infinite_Time, 1024, CloudBlobMetadata.EncryptionOrigin.ROUTER));
        // Blob without metadata
        BlobId nonexistentBlobId = getUniqueId(refAccountId, refContainerId, false, partitionId);
        keys.add(nonexistentBlobId);
    }
    when(dest.getBlobMetadata(anyList())).thenReturn(metadataMap);
    Set<StoreKey> missingKeys = store.findMissingKeys(keys);
    verify(dest).getBlobMetadata(anyList());
    int expectedLookups = keys.size();
    int expectedHits = 0;
    verifyCacheHits(expectedLookups, expectedHits);
    assertEquals("Wrong number of missing keys", count, missingKeys.size());
    if (isVcr) {
        // Add keys to cache and rerun (should be cached)
        for (StoreKey storeKey : keys) {
            store.addToCache(storeKey.getID(), (short) 0, CloudBlobStore.BlobState.CREATED);
        }
        missingKeys = store.findMissingKeys(keys);
        assertTrue("Expected no missing keys", missingKeys.isEmpty());
        expectedLookups += keys.size();
        expectedHits += keys.size();
        verifyCacheHits(expectedLookups, expectedHits);
        // getBlobMetadata should not have been called a second time.
        verify(dest).getBlobMetadata(anyList());
    }
}
Also used: HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), StoreKey (com.github.ambry.store.StoreKey), BlobId (com.github.ambry.commons.BlobId), ReplicationTest (com.github.ambry.replication.ReplicationTest), Test (org.junit.Test)
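
Behind the mock, findMissingKeys reduces to a set difference: any key whose ID is absent from the metadata map returned by getBlobMetadata is missing. A minimal sketch of that core check (illustrative, not the store's actual implementation; it assumes the ambry StoreKey and CloudBlobMetadata types are in scope):

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

// For each candidate key, report it missing unless the destination returned metadata for its ID.
static Set<StoreKey> findMissing(List<StoreKey> keys, Map<String, CloudBlobMetadata> found) {
    Set<StoreKey> missing = new HashSet<>();
    for (StoreKey key : keys) {
        if (!found.containsKey(key.getID())) {
            missing.add(key);
        }
    }
    return missing;
}

In the test, ten of the twenty generated IDs appear in metadataMap, so exactly count keys come back missing.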

Example 9 with StoreKey

Use of com.github.ambry.store.StoreKey in project ambry by LinkedIn.

The class HardDeleteRecoveryMetadata, method getHardDeleteInfo.

/**
 * For the message at readSetIndex, does the following:
 *   1. Reads the whole blob and does a CRC check. If the CRC check fails, returns null, since the record
 *   is not retrievable anyway.
 *   2. Adds to a hard delete replacement write set.
 *   3. Returns the hard delete info.
 */
private HardDeleteInfo getHardDeleteInfo(int readSetIndex) {
    HardDeleteInfo hardDeleteInfo = null;
    try {
        /* Read the version field in the header */
        ByteBuffer headerVersionBuf = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
        readSet.writeTo(readSetIndex, Channels.newChannel(new ByteBufferOutputStream(headerVersionBuf)), 0, Version_Field_Size_In_Bytes);
        headerVersionBuf.flip();
        short headerVersion = headerVersionBuf.getShort();
        if (!isValidHeaderVersion(headerVersion)) {
            throw new MessageFormatException("Unknown header version during hard delete " + headerVersion + " storeKey " + readSet.getKeyAt(readSetIndex), MessageFormatErrorCodes.Unknown_Format_Version);
        }
        ByteBuffer header = ByteBuffer.allocate(getHeaderSizeForVersion(headerVersion));
        /* Read the rest of the header */
        header.putShort(headerVersion);
        readSet.writeTo(readSetIndex, Channels.newChannel(new ByteBufferOutputStream(header)), Version_Field_Size_In_Bytes, header.capacity() - Version_Field_Size_In_Bytes);
        header.flip();
        MessageHeader_Format headerFormat = getMessageHeader(headerVersion, header);
        headerFormat.verifyHeader();
        StoreKey storeKey = storeKeyFactory.getStoreKey(new DataInputStream(new MessageReadSetIndexInputStream(readSet, readSetIndex, header.capacity())));
        if (storeKey.compareTo(readSet.getKeyAt(readSetIndex)) != 0) {
            throw new MessageFormatException("Id mismatch between metadata and store - metadataId " + readSet.getKeyAt(readSetIndex) + " storeId " + storeKey, MessageFormatErrorCodes.Store_Key_Id_MisMatch);
        }
        if (!headerFormat.isPutRecord()) {
            throw new MessageFormatException("Cleanup operation for a non-PUT record is unsupported", MessageFormatErrorCodes.IO_Error);
        } else {
            HardDeleteRecoveryMetadata hardDeleteRecoveryMetadata = recoveryInfoMap.get(storeKey);
            int userMetadataRelativeOffset = headerFormat.getUserMetadataRecordRelativeOffset();
            short userMetadataVersion;
            int userMetadataSize;
            short blobRecordVersion;
            BlobType blobType;
            long blobStreamSize;
            DeserializedUserMetadata userMetadataInfo;
            DeserializedBlob blobRecordInfo;
            if (hardDeleteRecoveryMetadata == null) {
                userMetadataInfo = getUserMetadataInfo(readSet, readSetIndex, headerFormat.getUserMetadataRecordRelativeOffset(), headerFormat.getUserMetadataRecordSize());
                userMetadataSize = userMetadataInfo.getUserMetadata().capacity();
                userMetadataVersion = userMetadataInfo.getVersion();
                blobRecordInfo = getBlobRecordInfo(readSet, readSetIndex, headerFormat.getBlobRecordRelativeOffset(), headerFormat.getBlobRecordSize());
                blobStreamSize = blobRecordInfo.getBlobData().getSize();
                blobRecordVersion = blobRecordInfo.getVersion();
                blobType = blobRecordInfo.getBlobData().getBlobType();
                hardDeleteRecoveryMetadata = new HardDeleteRecoveryMetadata(headerVersion, userMetadataVersion, userMetadataSize, blobRecordVersion, blobType, blobStreamSize, storeKey);
            } else {
                logger.trace("Skipping crc check for user metadata and blob stream fields for key {}", storeKey);
                userMetadataVersion = hardDeleteRecoveryMetadata.getUserMetadataVersion();
                blobRecordVersion = hardDeleteRecoveryMetadata.getBlobRecordVersion();
                blobType = hardDeleteRecoveryMetadata.getBlobType();
                userMetadataSize = hardDeleteRecoveryMetadata.getUserMetadataSize();
                blobStreamSize = hardDeleteRecoveryMetadata.getBlobStreamSize();
            }
            HardDeleteMessageFormatInputStream hardDeleteStream = new HardDeleteMessageFormatInputStream(userMetadataRelativeOffset, userMetadataVersion, userMetadataSize, blobRecordVersion, blobType, blobStreamSize);
            hardDeleteInfo = new HardDeleteInfo(Channels.newChannel(hardDeleteStream), hardDeleteStream.getSize(), hardDeleteStream.getHardDeleteStreamRelativeOffset(), hardDeleteRecoveryMetadata.toBytes());
        }
    } catch (Exception e) {
        logger.error("Exception when reading blob: ", e);
    }
    return hardDeleteInfo;
}
Also used: DataInputStream (java.io.DataInputStream), ByteBuffer (java.nio.ByteBuffer), StoreKey (com.github.ambry.store.StoreKey), IOException (java.io.IOException), NoSuchElementException (java.util.NoSuchElementException), ByteBufferOutputStream (com.github.ambry.utils.ByteBufferOutputStream), HardDeleteInfo (com.github.ambry.store.HardDeleteInfo)
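
The method's header handling follows a two-phase pattern: read only the version short, size the full header buffer from it, then re-seed the buffer with the version bytes already consumed so absolute offsets within the header stay valid. A self-contained sketch of that pattern, with a made-up version-to-size mapping standing in for getHeaderSizeForVersion:

import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

// Illustrative two-phase header read; the size mapping below is a stand-in.
static ByteBuffer readHeader(DataInputStream in) throws IOException {
    short version = in.readShort();
    int headerSize = (version == 1) ? 16 : 24;   // stand-in for getHeaderSizeForVersion(version)
    ByteBuffer header = ByteBuffer.allocate(headerSize);
    header.putShort(version);                    // re-seed the two bytes already consumed
    byte[] rest = new byte[headerSize - Short.BYTES];
    in.readFully(rest);
    header.put(rest);
    header.flip();                               // make the buffer readable from position 0
    return header;
}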

Example 10 with StoreKey

Use of com.github.ambry.store.StoreKey in project ambry by LinkedIn.

The class ValidatingKeyConvertingTransformer, method testInValidDeletedAndExpiredBlobs.

private void testInValidDeletedAndExpiredBlobs(short blobVersion, BlobType blobType, short headerVersionToUse) throws Exception {
    MessageFormatRecord.headerVersionToUse = headerVersionToUse;
    // The MessageSievingInputStream input contains put records for six blobs: id1 (valid),
    // id2 (corrupt, but with a valid header version), id3 (deleted), id4 (expired),
    // id5 (valid) and id6 (corrupt, with an invalid header version).
    // create message stream for blob 1
    StoreKey key1 = new MockId("id1");
    short accountId1 = Utils.getRandomShort(RANDOM);
    short containerId1 = Utils.getRandomShort(RANDOM);
    BlobProperties prop1 = new BlobProperties(10, "servid1", accountId1, containerId1, false);
    byte[] encryptionKey1 = new byte[100];
    RANDOM.nextBytes(encryptionKey1);
    byte[] usermetadata1 = new byte[1000];
    RANDOM.nextBytes(usermetadata1);
    int blobContentSize = 2000;
    byte[] data1 = new byte[blobContentSize];
    RANDOM.nextBytes(data1);
    long blobSize = -1;
    if (blobType == BlobType.DataBlob) {
        blobSize = (int) Blob_Format_V2.getBlobRecordSize(blobContentSize);
    } else {
        ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
        data1 = byteBufferBlob.array();
        blobContentSize = data1.length;
        blobSize = (int) Blob_Format_V2.getBlobRecordSize(blobContentSize);
    }
    ByteBufferInputStream stream1 = new ByteBufferInputStream(ByteBuffer.wrap(data1));
    MessageFormatInputStream messageFormatStream1 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key1, ByteBuffer.wrap(encryptionKey1), prop1, ByteBuffer.wrap(usermetadata1), stream1, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key1, prop1, ByteBuffer.wrap(usermetadata1), stream1, blobContentSize, blobType);
    MessageInfo msgInfo1 = new MessageInfo(key1, messageFormatStream1.getSize(), accountId1, containerId1, prop1.getCreationTimeInMs());
    // create message stream for blob 2
    StoreKey key2 = new MockId("id2");
    short accountId2 = Utils.getRandomShort(RANDOM);
    short containerId2 = Utils.getRandomShort(RANDOM);
    BlobProperties prop2 = new BlobProperties(10, "servid2", accountId2, containerId2, false);
    byte[] encryptionKey2 = new byte[100];
    RANDOM.nextBytes(encryptionKey2);
    byte[] usermetadata2 = new byte[1000];
    RANDOM.nextBytes(usermetadata2);
    blobContentSize = 2000;
    byte[] data2 = new byte[blobContentSize];
    RANDOM.nextBytes(data2);
    if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
        ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
        data2 = byteBufferBlob.array();
        blobContentSize = data2.length;
    }
    ByteBufferInputStream stream2 = new ByteBufferInputStream(ByteBuffer.wrap(data2));
    MessageFormatInputStream messageFormatStream2 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key2, ByteBuffer.wrap(encryptionKey2), prop2, ByteBuffer.wrap(usermetadata2), stream2, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key2, prop2, ByteBuffer.wrap(usermetadata2), stream2, blobContentSize, blobType);
    MessageInfo msgInfo2 = new MessageInfo(key2, messageFormatStream2.getSize(), accountId2, containerId2, prop2.getCreationTimeInMs());
    // corrupt the message stream but make sure this header version is still valid
    byte[] corruptMessageStream2 = new byte[(int) messageFormatStream2.getSize()];
    RANDOM.nextBytes(corruptMessageStream2);
    corruptMessageStream2[0] = (byte) 0;
    corruptMessageStream2[1] = (byte) headerVersionToUse;
    InputStream corruptStream2 = new ByteBufferInputStream(ByteBuffer.wrap(corruptMessageStream2));
    // create message stream for blob 3 that is deleted.
    StoreKey key3 = new MockId("id3");
    short accountId3 = Utils.getRandomShort(RANDOM);
    short containerId3 = Utils.getRandomShort(RANDOM);
    BlobProperties prop3 = new BlobProperties(10, "servid3", accountId3, containerId3, false);
    byte[] usermetadata3 = new byte[1000];
    RANDOM.nextBytes(usermetadata3);
    blobContentSize = 2000;
    byte[] data3 = new byte[blobContentSize];
    RANDOM.nextBytes(data3);
    if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
        ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
        data3 = byteBufferBlob.array();
        blobContentSize = data3.length;
    }
    ByteBufferInputStream stream3 = new ByteBufferInputStream(ByteBuffer.wrap(data3));
    MessageFormatInputStream messageFormatStream3 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key3, null, prop3, ByteBuffer.wrap(usermetadata3), stream3, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key3, prop3, ByteBuffer.wrap(usermetadata3), stream3, blobContentSize, blobType);
    // MessageInfo marks this blob as deleted.
    MessageInfo msgInfo3 = new MessageInfo(key3, messageFormatStream3.getSize(), true, false, Utils.Infinite_Time, accountId3, containerId3, prop3.getCreationTimeInMs());
    // create message stream for blob 4 that is expired.
    StoreKey key4 = new MockId("id4");
    short accountId4 = Utils.getRandomShort(RANDOM);
    short containerId4 = Utils.getRandomShort(RANDOM);
    BlobProperties prop4 = new BlobProperties(10, "servid4", accountId4, containerId4, false);
    byte[] usermetadata4 = new byte[1000];
    RANDOM.nextBytes(usermetadata4);
    blobContentSize = 2000;
    byte[] data4 = new byte[blobContentSize];
    RANDOM.nextBytes(data4);
    if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
        ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
        data4 = byteBufferBlob.array();
        blobContentSize = data4.length;
    }
    ByteBufferInputStream stream4 = new ByteBufferInputStream(ByteBuffer.wrap(data4));
    MessageFormatInputStream messageFormatStream4 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key4, null, prop4, ByteBuffer.wrap(usermetadata4), stream4, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key4, prop4, ByteBuffer.wrap(usermetadata4), stream4, blobContentSize, blobType);
    // MessageInfo marks this as already expired (the third field is an absolute expiresAt time).
    MessageInfo msgInfo4 = new MessageInfo(key4, messageFormatStream4.getSize(), 1, accountId4, containerId4, prop4.getCreationTimeInMs());
    // create message stream for blob 5
    StoreKey key5 = new MockId("id5");
    short accountId5 = Utils.getRandomShort(RANDOM);
    short containerId5 = Utils.getRandomShort(RANDOM);
    BlobProperties prop5 = new BlobProperties(10, "servid5", accountId5, containerId5, false);
    byte[] encryptionKey5 = new byte[100];
    RANDOM.nextBytes(encryptionKey5);
    byte[] usermetadata5 = new byte[1000];
    RANDOM.nextBytes(usermetadata5);
    blobContentSize = 2000;
    byte[] data5 = new byte[blobContentSize];
    RANDOM.nextBytes(data5);
    if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
        ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
        data5 = byteBufferBlob.array();
        blobContentSize = data5.length;
    }
    ByteBufferInputStream stream5 = new ByteBufferInputStream(ByteBuffer.wrap(data5));
    MessageFormatInputStream messageFormatStream5 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key5, ByteBuffer.wrap(encryptionKey5), prop5, ByteBuffer.wrap(usermetadata5), stream5, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key5, prop5, ByteBuffer.wrap(usermetadata5), stream5, blobContentSize, blobType);
    MessageInfo msgInfo5 = new MessageInfo(key5, messageFormatStream5.getSize(), accountId5, containerId5, prop5.getCreationTimeInMs());
    // create message stream for blob 6
    StoreKey key6 = new MockId("id6");
    short accountId6 = Utils.getRandomShort(RANDOM);
    short containerId6 = Utils.getRandomShort(RANDOM);
    BlobProperties prop6 = new BlobProperties(10, "servid6", accountId6, containerId6, false);
    byte[] encryptionKey6 = new byte[100];
    RANDOM.nextBytes(encryptionKey6);
    byte[] usermetadata6 = new byte[1000];
    RANDOM.nextBytes(usermetadata6);
    blobContentSize = 2000;
    byte[] data6 = new byte[blobContentSize];
    RANDOM.nextBytes(data6);
    if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
        ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
        data6 = byteBufferBlob.array();
        blobContentSize = data6.length;
    }
    ByteBufferInputStream stream6 = new ByteBufferInputStream(ByteBuffer.wrap(data6));
    MessageFormatInputStream messageFormatStream6 = (blobVersion == Blob_Version_V2) ? new PutMessageFormatInputStream(key6, ByteBuffer.wrap(encryptionKey6), prop6, ByteBuffer.wrap(usermetadata6), stream6, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key6, prop6, ByteBuffer.wrap(usermetadata6), stream6, blobContentSize, blobType);
    MessageInfo msgInfo6 = new MessageInfo(key6, messageFormatStream6.getSize(), accountId6, containerId6, prop6.getCreationTimeInMs());
    // corrupt the message stream but make sure this header version is not valid
    byte[] corruptMessageStream6 = new byte[(int) messageFormatStream6.getSize()];
    RANDOM.nextBytes(corruptMessageStream6);
    corruptMessageStream6[1] = (byte) 100;
    InputStream corruptStream6 = new ByteBufferInputStream(ByteBuffer.wrap(corruptMessageStream6));
    // create input stream for all blob messages together
    byte[] totalMessageStreamContent = new byte[(int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream3.getSize() + (int) messageFormatStream4.getSize() + (int) messageFormatStream5.getSize() + (int) messageFormatStream6.getSize()];
    messageFormatStream1.read(totalMessageStreamContent, 0, (int) messageFormatStream1.getSize());
    corruptStream2.read(totalMessageStreamContent, (int) messageFormatStream1.getSize(), (int) messageFormatStream2.getSize());
    messageFormatStream3.read(totalMessageStreamContent, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize(), (int) messageFormatStream3.getSize());
    messageFormatStream4.read(totalMessageStreamContent, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream3.getSize(), (int) messageFormatStream4.getSize());
    messageFormatStream5.read(totalMessageStreamContent, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream3.getSize() + (int) messageFormatStream4.getSize(), (int) messageFormatStream5.getSize());
    corruptStream6.read(totalMessageStreamContent, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream3.getSize() + (int) messageFormatStream4.getSize() + (int) messageFormatStream5.getSize(), (int) messageFormatStream6.getSize());
    InputStream inputStream = new ByteBufferInputStream(ByteBuffer.wrap(totalMessageStreamContent));
    List<MessageInfo> msgInfoList = new ArrayList<>();
    msgInfoList.add(msgInfo1);
    msgInfoList.add(msgInfo2);
    msgInfoList.add(msgInfo3);
    msgInfoList.add(msgInfo4);
    msgInfoList.add(msgInfo5);
    msgInfoList.add(msgInfo6);
    MessageSievingInputStream sievedStream = new MessageSievingInputStream(inputStream, msgInfoList, transformers, new MetricRegistry());
    Map<StoreKey, StoreKey> convertedMap = randomKeyConverter.convert(Arrays.asList(key1, key2, key3, key4, key5, key6));
    int headerSize = getHeaderSizeForVersion(headerVersionToUse);
    int blobPropertiesRecordSize = BlobProperties_Format_V1.getBlobPropertiesRecordSize(prop1);
    int userMetadataSize = UserMetadata_Format_V1.getUserMetadataSize(ByteBuffer.wrap(usermetadata1));
    int totalHeadSize = 2 * headerSize;
    int totalBlobPropertiesSize = 2 * blobPropertiesRecordSize;
    int totalUserMetadataSize = 2 * userMetadataSize;
    int totalBlobSize = 2 * (int) blobSize;
    int totalKeySize = options.contains(TransformerOptions.KeyConvert) ? convertedMap.get(key1).sizeInBytes() + convertedMap.get(key5).sizeInBytes() : key1.sizeInBytes() + key5.sizeInBytes();
    int totalEncryptionRecordSize = blobVersion > Blob_Version_V1 ? BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(ByteBuffer.wrap(encryptionKey1)) + BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(ByteBuffer.wrap(encryptionKey5)) : 0;
    if (!options.isEmpty()) {
        Assert.assertTrue(sievedStream.hasInvalidMessages());
        Assert.assertEquals((int) sievedStream.getValidMessageInfoList().stream().mapToLong(MessageInfo::getSize).sum(), sievedStream.getSize());
        Assert.assertEquals(totalHeadSize + totalBlobPropertiesSize + totalUserMetadataSize + totalBlobSize + totalKeySize + totalEncryptionRecordSize, sievedStream.getSize());
        verifySievedTransformedMessage(sievedStream, options.contains(TransformerOptions.KeyConvert) ? convertedMap.get(key1) : key1, "servid1", accountId1, containerId1, blobVersion > Blob_Version_V1 ? encryptionKey1 : null, usermetadata1, data1, blobVersion, blobType);
        verifySievedTransformedMessage(sievedStream, options.contains(TransformerOptions.KeyConvert) ? convertedMap.get(key5) : key5, "servid5", accountId5, containerId5, blobVersion > Blob_Version_V1 ? encryptionKey5 : null, usermetadata5, data5, blobVersion, blobType);
    } else {
        // even if there are no transformers, deleted and expired messages should be dropped by the MessageSievingInputStream.
        byte[] expectedBytes = new byte[(int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream5.getSize() + (int) messageFormatStream6.getSize()];
        System.arraycopy(totalMessageStreamContent, 0, expectedBytes, 0, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize());
        System.arraycopy(totalMessageStreamContent, totalMessageStreamContent.length - (int) messageFormatStream5.getSize() - (int) messageFormatStream6.getSize(), expectedBytes, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize(), (int) messageFormatStream5.getSize() + (int) messageFormatStream6.getSize());
        Assert.assertEquals(expectedBytes.length, sievedStream.getSize());
        byte[] sievedBytes = Utils.readBytesFromStream(sievedStream, sievedStream.getSize());
        Assert.assertArrayEquals(expectedBytes, sievedBytes);
    }
    Assert.assertEquals(-1, sievedStream.read());
}
Also used: DataInputStream (java.io.DataInputStream), ByteBufInputStream (io.netty.buffer.ByteBufInputStream), ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream), InputStream (java.io.InputStream), MetricRegistry (com.codahale.metrics.MetricRegistry), ArrayList (java.util.ArrayList), MockId (com.github.ambry.store.MockId), StoreKey (com.github.ambry.store.StoreKey), ByteBuffer (java.nio.ByteBuffer), MessageInfo (com.github.ambry.store.MessageInfo)
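
The else branch encodes the sieving rule the stream applies even without transformers: deleted and expired messages are dropped, while corrupt ones pass through untransformed. A simplified predicate capturing that rule, assuming MessageInfo's isDeleted() and getExpirationTimeInMs() accessors (illustrative, not MessageSievingInputStream's actual code):

// Keep a message unless its MessageInfo marks it deleted or already expired.
static boolean shouldKeep(MessageInfo info, long nowMs) {
    if (info.isDeleted()) {
        return false;                            // drops msgInfo3 above
    }
    long expiresAtMs = info.getExpirationTimeInMs();
    return expiresAtMs == Utils.Infinite_Time || expiresAtMs > nowMs;  // drops msgInfo4 (expiresAt = 1 ms)
}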

Aggregations

StoreKey (com.github.ambry.store.StoreKey): 89
ArrayList (java.util.ArrayList): 56
MessageInfo (com.github.ambry.store.MessageInfo): 43
ByteBuffer (java.nio.ByteBuffer): 43
Test (org.junit.Test): 37
DataInputStream (java.io.DataInputStream): 30
BlobId (com.github.ambry.commons.BlobId): 27
HashMap (java.util.HashMap): 26
IOException (java.io.IOException): 23
List (java.util.List): 22
PartitionId (com.github.ambry.clustermap.PartitionId): 21
ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream): 21
Map (java.util.Map): 19
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 18
MockId (com.github.ambry.store.MockId): 18
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 17
InputStream (java.io.InputStream): 16
HashSet (java.util.HashSet): 16
ClusterMap (com.github.ambry.clustermap.ClusterMap): 15
MetricRegistry (com.codahale.metrics.MetricRegistry): 14