Example 1 with StoreKey

Use of com.github.ambry.store.StoreKey in project ambry by linkedin.

In class ReplicaThread, method getMessagesForMissingKeys:

/**
 * Gets the messages for the keys that are missing from the local store by issuing a {@link GetRequest} to the remote
 * node, if there are any missing keys. If there are no missing keys to be fetched, then no request is issued and a
 * null response is returned.
 * @param connectedChannel The connection channel to the remote node
 * @param exchangeMetadataResponseList The list of metadata responses from the remote node
 * @param replicasToReplicatePerNode The list of remote replicas for the remote node
 * @param remoteNode The remote node from which replication needs to happen
 * @return The response that contains the missing messages; or null if no request was issued because there were no
 * keys missing.
 * @throws ReplicationException
 * @throws IOException
 */
private GetResponse getMessagesForMissingKeys(ConnectedChannel connectedChannel, List<ExchangeMetadataResponse> exchangeMetadataResponseList, List<RemoteReplicaInfo> replicasToReplicatePerNode, DataNodeId remoteNode) throws ReplicationException, IOException {
    List<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
    for (int i = 0; i < exchangeMetadataResponseList.size(); i++) {
        ExchangeMetadataResponse exchangeMetadataResponse = exchangeMetadataResponseList.get(i);
        RemoteReplicaInfo remoteReplicaInfo = replicasToReplicatePerNode.get(i);
        if (exchangeMetadataResponse.serverErrorCode == ServerErrorCode.No_Error) {
            Set<StoreKey> missingStoreKeys = exchangeMetadataResponse.missingStoreKeys;
            if (missingStoreKeys.size() > 0) {
                ArrayList<BlobId> keysToFetch = new ArrayList<BlobId>();
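                // Store keys handed back by the server are BlobIds in practice, hence the downcast below.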
                for (StoreKey storeKey : missingStoreKeys) {
                    keysToFetch.add((BlobId) storeKey);
                }
                PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(remoteReplicaInfo.getReplicaId().getPartitionId(), keysToFetch);
                partitionRequestInfoList.add(partitionRequestInfo);
            }
        }
    }
    GetResponse getResponse = null;
    if (!partitionRequestInfoList.isEmpty()) {
        GetRequest getRequest = new GetRequest(correlationIdGenerator.incrementAndGet(), "replication-fetch-" + dataNodeId.getHostname(), MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
        long startTime = SystemTime.getInstance().milliseconds();
        try {
            connectedChannel.send(getRequest);
            ChannelOutput channelOutput = connectedChannel.receive();
            getResponse = GetResponse.readFrom(new DataInputStream(channelOutput.getInputStream()), clusterMap);
            long getRequestTime = SystemTime.getInstance().milliseconds() - startTime;
            replicationMetrics.updateGetRequestTime(getRequestTime, replicatingFromRemoteColo, replicatingOverSsl, datacenterName);
            if (getResponse.getError() != ServerErrorCode.No_Error) {
                logger.error("Remote node: " + remoteNode + " Thread name: " + threadName + " Remote replicas: " + replicasToReplicatePerNode + " GetResponse from replication: " + getResponse.getError());
                throw new ReplicationException(" Get Request returned error when trying to get missing keys " + getResponse.getError());
            }
        } catch (IOException e) {
            responseHandler.onEvent(replicasToReplicatePerNode.get(0).getReplicaId(), e);
            throw e;
        }
    }
    return getResponse;
}
Also used: ChannelOutput(com.github.ambry.network.ChannelOutput), ArrayList(java.util.ArrayList), IOException(java.io.IOException), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), DataInputStream(java.io.DataInputStream), StoreKey(com.github.ambry.store.StoreKey), GetResponse(com.github.ambry.protocol.GetResponse), GetRequest(com.github.ambry.protocol.GetRequest), BlobId(com.github.ambry.commons.BlobId)
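For context, a minimal sketch of how the response from getMessagesForMissingKeys is typically consumed on the write path. This is an assumption-laden illustration, not the project's verbatim code: the MessageFormatWriteSet constructor arguments and the Store handle are assumed from the surrounding replication code.

private void writeMissingMessages(GetResponse getResponse, Store localStore) throws StoreException {
    // Each PartitionResponseInfo corresponds to one PartitionRequestInfo that was sent;
    // the payload bytes for all messages follow sequentially in the response's input stream.
    for (PartitionResponseInfo partitionResponseInfo : getResponse.getPartitionResponseInfoList()) {
        if (partitionResponseInfo.getErrorCode() != ServerErrorCode.No_Error) {
            // per-partition errors are reported independently of the top-level response error
            continue;
        }
        List<MessageInfo> messageInfoList = partitionResponseInfo.getMessageInfoList();
        // The write set drains exactly the payload bytes for these messages from the shared stream.
        MessageFormatWriteSet writeSet =
            new MessageFormatWriteSet(getResponse.getInputStream(), messageInfoList, false);
        localStore.put(writeSet);
    }
}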

Example 2 with StoreKey

Use of com.github.ambry.store.StoreKey in project ambry by linkedin.

In class ReplicationTest, method addPutMessagesToReplicasOfPartition:

/**
 * For the given partitionId, constructs put messages and adds them to the given lists.
 * @param partitionId the {@link PartitionId} to use for generating the {@link StoreKey} of the message.
 * @param hosts the list of {@link Host} all of which will be populated with the messages.
 * @param count the number of messages to construct and add.
 * @return the list of blob ids that were generated.
 * @throws MessageFormatException
 * @throws IOException
 */
private List<StoreKey> addPutMessagesToReplicasOfPartition(PartitionId partitionId, List<Host> hosts, int count) throws MessageFormatException, IOException {
    List<StoreKey> ids = new ArrayList<>();
    for (int i = 0; i < count; i++) {
        short accountId = Utils.getRandomShort(TestUtils.RANDOM);
        short containerId = Utils.getRandomShort(TestUtils.RANDOM);
        short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
        boolean toEncrypt = i % 2 == 0;
        BlobId id = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMapUtils.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt);
        ids.add(id);
        Pair<ByteBuffer, MessageInfo> putMsgInfo = getPutMessage(id, accountId, containerId, toEncrypt);
        for (Host host : hosts) {
            host.addMessage(partitionId, putMsgInfo.getSecond(), putMsgInfo.getFirst().duplicate());
        }
    }
    return ids;
}
Also used: ArrayList(java.util.ArrayList), StoreKey(com.github.ambry.store.StoreKey), BlobId(com.github.ambry.commons.BlobId), ByteBuffer(java.nio.ByteBuffer), MessageInfo(com.github.ambry.store.MessageInfo)
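A hypothetical usage sketch of the helper above (hostA, hostB and the assertion are illustrative names, not quoted from the test):

// Seed both hosts' replicas of the partition with 10 identical put messages,
// then check that one generated id came back per message.
List<StoreKey> ids = addPutMessagesToReplicasOfPartition(partitionId, Arrays.asList(hostA, hostB), 10);
Assert.assertEquals(10, ids.size());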

Example 3 with StoreKey

Use of com.github.ambry.store.StoreKey in project ambry by linkedin.

In class MockReadableStreamChannel, method verifyBlob:

/**
 * Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
 * data as the original object that was put.
 * @param blobId the blobId of the blob that is to be verified.
 * @param properties the {@link BlobProperties} of the blob that is to be verified
 * @param originalPutContent original content of the blob
 * @param originalUserMetadata original user-metadata of the blob
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
 */
private void verifyBlob(String blobId, BlobProperties properties, byte[] originalPutContent, byte[] originalUserMetadata, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
    ByteBuffer serializedRequest = serializedRequests.get(blobId);
    PutRequest.ReceivedPutRequest request = deserializePutRequest(serializedRequest);
    NotificationBlobType notificationBlobType;
    if (request.getBlobType() == BlobType.MetadataBlob) {
        notificationBlobType = NotificationBlobType.Composite;
        byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
        CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data), new BlobIdFactory(mockClusterMap));
        Assert.assertEquals("Wrong max chunk size in metadata", chunkSize, compositeBlobInfo.getChunkSize());
        Assert.assertEquals("Wrong total size in metadata", originalPutContent.length, compositeBlobInfo.getTotalSize());
        List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
        Assert.assertEquals("Number of chunks is not as expected", RouterUtils.getNumChunksForBlobAndChunkSize(originalPutContent.length, chunkSize), dataBlobIds.size());
        // verify user-metadata
        if (properties.isEncrypted()) {
            ByteBuffer userMetadata = request.getUsermetadata();
            BlobId origBlobId = new BlobId(blobId, mockClusterMap);
            // run() is called directly rather than spinning up a thread via start() because
            // exceptions or assertion failures on a non-main thread would not fail the test.
            new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
                Assert.assertNull("Exception should not be thrown", exception);
                Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
                Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, result.getDecryptedUserMetadata().array());
            }).run();
        } else {
            Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, request.getUsermetadata().array());
        }
        verifyCompositeBlob(properties, originalPutContent, originalUserMetadata, dataBlobIds, request, serializedRequests);
    } else {
        notificationBlobType = NotificationBlobType.Simple;
        byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
        if (!properties.isEncrypted()) {
            Assert.assertArrayEquals("Input blob and written blob should be the same", originalPutContent, content);
            Assert.assertArrayEquals("UserMetadata mismatch for simple blob", originalUserMetadata, request.getUsermetadata().array());
            notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
        } else {
            ByteBuffer userMetadata = request.getUsermetadata();
            BlobId origBlobId = new BlobId(blobId, mockClusterMap);
            // run() is called directly rather than spinning up a thread via start() because
            // exceptions or assertion failures on a non-main thread would not fail the test.
            new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), ByteBuffer.wrap(content), userMetadata, cryptoService, kms, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), new Callback<DecryptJob.DecryptJobResult>() {

                @Override
                public void onCompletion(DecryptJob.DecryptJobResult result, Exception exception) {
                    Assert.assertNull("Exception should not be thrown", exception);
                    Assert.assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
                    Assert.assertArrayEquals("Content mismatch", originalPutContent, result.getDecryptedBlobContent().array());
                    Assert.assertArrayEquals("UserMetadata mismatch", originalUserMetadata, result.getDecryptedUserMetadata().array());
                }
            }).run();
        }
    }
    notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
Also used: DataInputStream(java.io.DataInputStream), Arrays(java.util.Arrays), ServerErrorCode(com.github.ambry.commons.ServerErrorCode), BlobProperties(com.github.ambry.messageformat.BlobProperties), DataNodeId(com.github.ambry.clustermap.DataNodeId), RunWith(org.junit.runner.RunWith), ByteBufferReadableStreamChannel(com.github.ambry.commons.ByteBufferReadableStreamChannel), HashMap(java.util.HashMap), Random(java.util.Random), AtomicReference(java.util.concurrent.atomic.AtomicReference), ByteBuffer(java.nio.ByteBuffer), ArrayList(java.util.ArrayList), HashSet(java.util.HashSet), Future(java.util.concurrent.Future), GeneralSecurityException(java.security.GeneralSecurityException), AtomicInteger(java.util.concurrent.atomic.AtomicInteger), TestUtils(com.github.ambry.utils.TestUtils), Map(java.util.Map), After(org.junit.After), SystemTime(com.github.ambry.utils.SystemTime), PutRequest(com.github.ambry.protocol.PutRequest), Parameterized(org.junit.runners.Parameterized), Properties(java.util.Properties), VerifiableProperties(com.github.ambry.config.VerifiableProperties), Set(java.util.Set), MetadataContentSerDe(com.github.ambry.messageformat.MetadataContentSerDe), Utils(com.github.ambry.utils.Utils), IOException(java.io.IOException), Test(org.junit.Test), BlobIdFactory(com.github.ambry.commons.BlobIdFactory), CryptoServiceConfig(com.github.ambry.config.CryptoServiceConfig), TimeUnit(java.util.concurrent.TimeUnit), RouterConfig(com.github.ambry.config.RouterConfig), CountDownLatch(java.util.concurrent.CountDownLatch), StoreKey(com.github.ambry.store.StoreKey), List(java.util.List), MockTime(com.github.ambry.utils.MockTime), KMSConfig(com.github.ambry.config.KMSConfig), NotificationBlobType(com.github.ambry.notification.NotificationBlobType), ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream), LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem), BlobType(com.github.ambry.messageformat.BlobType), Assert(org.junit.Assert), MockClusterMap(com.github.ambry.clustermap.MockClusterMap), BlobId(com.github.ambry.commons.BlobId), CompositeBlobInfo(com.github.ambry.messageformat.CompositeBlobInfo), InputStream(java.io.InputStream)
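The comment about calling run() directly is worth unpacking: JUnit only fails a test for exceptions thrown on the thread it controls. A minimal sketch of the alternative the comment rejects, capturing a worker thread's failure by hand (decryptJob is a hypothetical handle to a DecryptJob instance):

AtomicReference<Throwable> failure = new AtomicReference<>();
CountDownLatch done = new CountDownLatch(1);
new Thread(() -> {
    try {
        decryptJob.run(); // the callback's assertions execute on this worker thread
    } catch (Throwable t) {
        failure.set(t); // capture what would otherwise be silently lost
    } finally {
        done.countDown();
    }
}).start();
done.await(); // test thread blocks until the worker finishes
if (failure.get() != null) {
    throw new AssertionError("decrypt verification failed on worker thread", failure.get());
}

Calling run() on the test thread sidesteps all of this bookkeeping, which is why the test does it.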

Example 4 with StoreKey

Use of com.github.ambry.store.StoreKey in project ambry by linkedin.

In class HardDeleteRecoveryMetadata, method getMessageInfo:

@Override
public MessageInfo getMessageInfo(Read read, long offset, StoreKeyFactory storeKeyFactory) throws IOException {
    try {
        // read message header
        ByteBuffer headerVersion = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
        read.readInto(headerVersion, offset);
        offset += headerVersion.capacity();
        headerVersion.flip();
        short version = headerVersion.getShort();
        MessageHeader_Format headerFormat;
        ReadInputStream stream;
        long endOffset;
        if (!isValidHeaderVersion(version)) {
            throw new MessageFormatException("Version not known while reading message - " + version, MessageFormatErrorCodes.Unknown_Format_Version);
        }
        ByteBuffer header = ByteBuffer.allocate(getHeaderSizeForVersion(version));
        header.putShort(version);
        read.readInto(header, offset);
        offset += header.capacity() - headerVersion.capacity();
        header.flip();
        headerFormat = getMessageHeader(version, header);
        headerFormat.verifyHeader();
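        // Bound the stream at the end of this message: the payload-relative offset plus the
        // message size, measured from the current position just past the header.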
        endOffset = offset + headerFormat.getPayloadRelativeOffset() + headerFormat.getMessageSize();
        stream = new ReadInputStream(read, offset, endOffset);
        StoreKey key = storeKeyFactory.getStoreKey(new DataInputStream(stream));
        if (headerFormat.hasEncryptionKeyRecord()) {
            deserializeBlobEncryptionKey(stream);
        }
        // read the appropriate type of message based on the relative offset that is set
        if (headerFormat.isPutRecord()) {
            BlobProperties properties = deserializeBlobProperties(stream);
            return new MessageInfo(key, header.capacity() + key.sizeInBytes() + headerFormat.getMessageSize(), Utils.addSecondsToEpochTime(properties.getCreationTimeInMs(), properties.getTimeToLiveInSeconds()), properties.getAccountId(), properties.getContainerId(), properties.getCreationTimeInMs());
        } else {
            DeleteRecord deleteRecord = deserializeDeleteRecord(stream);
            return new MessageInfo(key, header.capacity() + key.sizeInBytes() + headerFormat.getMessageSize(), true, deleteRecord.getAccountId(), deleteRecord.getContainerId(), deleteRecord.getDeletionTimeInMs());
        }
    } catch (MessageFormatException e) {
        // a message could not be parsed; surface it as an IOException.
        throw new IOException("Message format exception while parsing messages ", e);
    } catch (IndexOutOfBoundsException e) {
        // a complete message could not be read; surface it as an IOException.
        throw new IOException("Trying to read more than the available bytes");
    }
}
Also used: IOException(java.io.IOException), DataInputStream(java.io.DataInputStream), ByteBuffer(java.nio.ByteBuffer), StoreKey(com.github.ambry.store.StoreKey), MessageInfo(com.github.ambry.store.MessageInfo)
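A hypothetical driver for the method above (hardDelete, startOffset and regionEndOffset are assumed names). Since the returned MessageInfo's size covers the full serialized message, a log region can be scanned sequentially:

long offset = startOffset;
while (offset < regionEndOffset) {
    MessageInfo info = hardDelete.getMessageInfo(read, offset, storeKeyFactory);
    System.out.println("key=" + info.getStoreKey() + " deleted=" + info.isDeleted() + " size=" + info.getSize());
    offset += info.getSize(); // advance past header + key + payload to the next message
}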

Example 5 with StoreKey

Use of com.github.ambry.store.StoreKey in project ambry by linkedin.

In class MessageSievingInputStreamTest, method testDeletedBlobs:

private void testDeletedBlobs(short blobVersion, BlobType blobType) throws IOException, MessageFormatException {
    // MessageSievingInputStream contains put records for 2 valid blobs and 1 deleted blob
    // id1(put record for valid blob), id2(delete record) and id3(put record for valid blob)
    ArrayList<Short> versions = new ArrayList<>();
    versions.add(Message_Header_Version_V1);
    if (blobVersion != Blob_Version_V1) {
        versions.add(Message_Header_Version_V2);
    }
    try {
        for (short version : versions) {
            headerVersionToUse = version;
            // create message stream for blob 1
            StoreKey key1 = new MockId("id1");
            short accountId = Utils.getRandomShort(TestUtils.RANDOM);
            short containerId = Utils.getRandomShort(TestUtils.RANDOM);
            BlobProperties prop1 = new BlobProperties(10, "servid1", accountId, containerId, false);
            byte[] encryptionKey1 = new byte[100];
            TestUtils.RANDOM.nextBytes(encryptionKey1);
            byte[] usermetadata1 = new byte[1000];
            TestUtils.RANDOM.nextBytes(usermetadata1);
            int blobContentSize = 2000;
            byte[] data1 = new byte[blobContentSize];
            TestUtils.RANDOM.nextBytes(data1);
            if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
                ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
                data1 = byteBufferBlob.array();
                blobContentSize = data1.length;
            }
            ByteBufferInputStream stream1 = new ByteBufferInputStream(ByteBuffer.wrap(data1));
            MessageFormatInputStream messageFormatStream1 = (blobVersion == Blob_Version_V2)
                ? new PutMessageFormatInputStream(key1, ByteBuffer.wrap(encryptionKey1), prop1, ByteBuffer.wrap(usermetadata1), stream1, blobContentSize, blobType)
                : new PutMessageFormatBlobV1InputStream(key1, prop1, ByteBuffer.wrap(usermetadata1), stream1, blobContentSize, blobType);
            MessageInfo msgInfo1 = new MessageInfo(key1, messageFormatStream1.getSize(), accountId, containerId, prop1.getCreationTimeInMs());
            // create message stream for blob 2 and mark it as deleted
            StoreKey key2 = new MockId("id2");
            accountId = Utils.getRandomShort(TestUtils.RANDOM);
            containerId = Utils.getRandomShort(TestUtils.RANDOM);
            long deletionTimeMs = SystemTime.getInstance().milliseconds() + TestUtils.RANDOM.nextInt();
            MessageFormatInputStream messageFormatStream2 = new DeleteMessageFormatInputStream(key2, accountId, containerId, deletionTimeMs);
            MessageInfo msgInfo2 = new MessageInfo(key2, messageFormatStream2.getSize(), accountId, containerId, deletionTimeMs);
            // create message stream for blob 3
            StoreKey key3 = new MockId("id3");
            accountId = Utils.getRandomShort(TestUtils.RANDOM);
            containerId = Utils.getRandomShort(TestUtils.RANDOM);
            BlobProperties prop3 = new BlobProperties(10, "servid3", accountId, containerId, false);
            byte[] encryptionKey3 = new byte[100];
            TestUtils.RANDOM.nextBytes(encryptionKey3);
            byte[] usermetadata3 = new byte[1000];
            TestUtils.RANDOM.nextBytes(usermetadata3);
            blobContentSize = 2000;
            byte[] data3 = new byte[blobContentSize];
            TestUtils.RANDOM.nextBytes(data3);
            if (blobVersion == Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
                ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
                data3 = byteBufferBlob.array();
                blobContentSize = data3.length;
            }
            ByteBufferInputStream stream3 = new ByteBufferInputStream(ByteBuffer.wrap(data3));
            MessageFormatInputStream messageFormatStream3 = (blobVersion == Blob_Version_V2)
                ? new PutMessageFormatInputStream(key3, ByteBuffer.wrap(encryptionKey3), prop3, ByteBuffer.wrap(usermetadata3), stream3, blobContentSize, blobType)
                : new PutMessageFormatBlobV1InputStream(key3, prop3, ByteBuffer.wrap(usermetadata3), stream3, blobContentSize, blobType);
            MessageInfo msgInfo3 = new MessageInfo(key3, messageFormatStream3.getSize(), accountId, containerId, prop3.getCreationTimeInMs());
            // create input stream for all blob messages together
            byte[] totalMessageContent = new byte[(int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize() + (int) messageFormatStream3.getSize()];
            messageFormatStream1.read(totalMessageContent, 0, (int) messageFormatStream1.getSize());
            messageFormatStream2.read(totalMessageContent, (int) messageFormatStream1.getSize(), (int) messageFormatStream2.getSize());
            messageFormatStream3.read(totalMessageContent, (int) messageFormatStream1.getSize() + (int) messageFormatStream2.getSize(), (int) messageFormatStream3.getSize());
            InputStream inputStream = new ByteBufferInputStream(ByteBuffer.wrap(totalMessageContent));
            List<MessageInfo> msgInfoList = new ArrayList<MessageInfo>();
            msgInfoList.add(msgInfo1);
            msgInfoList.add(msgInfo2);
            msgInfoList.add(msgInfo3);
            MessageSievingInputStream validMessageDetectionInputStream = new MessageSievingInputStream(inputStream, msgInfoList, new MockIdFactory(), new MetricRegistry());
            Assert.fail("IllegalStateException should have been thrown due to delete record ");
        }
    } catch (IllegalStateException e) {
        Assert.assertTrue("IllegalStateException thrown as expected ", true);
    }
    headerVersionToUse = Message_Header_Version_V1;
}
Also used: DataInputStream(java.io.DataInputStream), CrcInputStream(com.github.ambry.utils.CrcInputStream), ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream), InputStream(java.io.InputStream), MetricRegistry(com.codahale.metrics.MetricRegistry), ArrayList(java.util.ArrayList), StoreKey(com.github.ambry.store.StoreKey), ByteBuffer(java.nio.ByteBuffer), MessageInfo(com.github.ambry.store.MessageInfo)
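Hypothetical entry points for the helper above (the actual @Test methods in MessageSievingInputStreamTest may be named and parameterized differently):

@Test
public void deletedBlobsTest() throws Exception {
    // exercise the helper across blob versions and types; each call expects
    // MessageSievingInputStream to reject the stream containing a delete record
    testDeletedBlobs(Blob_Version_V1, BlobType.DataBlob);
    testDeletedBlobs(Blob_Version_V2, BlobType.DataBlob);
    testDeletedBlobs(Blob_Version_V2, BlobType.MetadataBlob);
}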

Aggregations

StoreKey (com.github.ambry.store.StoreKey): 89 usages
ArrayList (java.util.ArrayList): 56 usages
MessageInfo (com.github.ambry.store.MessageInfo): 43 usages
ByteBuffer (java.nio.ByteBuffer): 43 usages
Test (org.junit.Test): 37 usages
DataInputStream (java.io.DataInputStream): 30 usages
BlobId (com.github.ambry.commons.BlobId): 27 usages
HashMap (java.util.HashMap): 26 usages
IOException (java.io.IOException): 23 usages
List (java.util.List): 22 usages
PartitionId (com.github.ambry.clustermap.PartitionId): 21 usages
ByteBufferInputStream (com.github.ambry.utils.ByteBufferInputStream): 21 usages
Map (java.util.Map): 19 usages
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 18 usages
MockId (com.github.ambry.store.MockId): 18 usages
MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 17 usages
InputStream (java.io.InputStream): 16 usages
HashSet (java.util.HashSet): 16 usages
ClusterMap (com.github.ambry.clustermap.ClusterMap): 15 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 14 usages