
Example 21 with PartitionRequestInfo

use of com.github.ambry.protocol.PartitionRequestInfo in project ambry by linkedin.

the class ServerHardDeleteTest method getAndVerify.

/**
 * Fetches the blob (for all MessageFormatFlags) and verifies the content.
 * @param channel the {@link BlockingChannel} to use to send and receive data
 * @param blobsCount the total number of blobs that need to be verified
 * @throws Exception
 */
void getAndVerify(BlockingChannel channel, int blobsCount) throws Exception {
    ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<>();
    ArrayList<BlobId> ids = new ArrayList<>();
    for (int i = 0; i < blobsCount; i++) {
        ids.add(blobIdList.get(i));
    }
    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(blobIdList.get(0).getPartition(), ids);
    partitionRequestInfoList.add(partitionRequestInfo);
    ArrayList<MessageFormatFlags> flags = new ArrayList<>();
    flags.add(MessageFormatFlags.BlobProperties);
    flags.add(MessageFormatFlags.BlobUserMetadata);
    flags.add(MessageFormatFlags.Blob);
    for (MessageFormatFlags flag : flags) {
        GetRequest getRequest = new GetRequest(1, "clientid2", flag, partitionRequestInfoList, GetOption.Include_All);
        channel.send(getRequest);
        InputStream stream = channel.receive().getInputStream();
        GetResponse resp = GetResponse.readFrom(new DataInputStream(stream), mockClusterMap);
        if (flag == MessageFormatFlags.BlobProperties) {
            for (int i = 0; i < blobsCount; i++) {
                BlobProperties propertyOutput = MessageFormatRecord.deserializeBlobProperties(resp.getInputStream());
                Assert.assertEquals(properties.get(i).getBlobSize(), propertyOutput.getBlobSize());
                Assert.assertEquals("serviceid1", propertyOutput.getServiceId());
                Assert.assertEquals("AccountId mismatch", properties.get(i).getAccountId(), propertyOutput.getAccountId());
                Assert.assertEquals("ContainerId mismatch", properties.get(i).getContainerId(), propertyOutput.getContainerId());
            }
        } else if (flag == MessageFormatFlags.BlobUserMetadata) {
            for (int i = 0; i < blobsCount; i++) {
                ByteBuffer userMetadataOutput = MessageFormatRecord.deserializeUserMetadata(resp.getInputStream());
                Assert.assertArrayEquals(userMetadataOutput.array(), usermetadata.get(i));
            }
        } else if (flag == MessageFormatFlags.Blob) {
            for (int i = 0; i < blobsCount; i++) {
                BlobData blobData = MessageFormatRecord.deserializeBlob(resp.getInputStream());
                Assert.assertEquals(properties.get(i).getBlobSize(), blobData.getSize());
                byte[] dataOutput = new byte[(int) blobData.getSize()];
                blobData.getStream().read(dataOutput);
                Assert.assertArrayEquals(dataOutput, data.get(i));
            }
        } else {
            throw new IllegalArgumentException("Unrecognized message format flag " + flag);
        }
    }
}
Also used: MessageFormatFlags(com.github.ambry.messageformat.MessageFormatFlags), DataInputStream(java.io.DataInputStream), CrcInputStream(com.github.ambry.utils.CrcInputStream), FileInputStream(java.io.FileInputStream), InputStream(java.io.InputStream), ArrayList(java.util.ArrayList), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), GetResponse(com.github.ambry.protocol.GetResponse), ByteBuffer(java.nio.ByteBuffer), GetRequest(com.github.ambry.protocol.GetRequest), BlobProperties(com.github.ambry.messageformat.BlobProperties), BlobData(com.github.ambry.messageformat.BlobData), BlobId(com.github.ambry.commons.BlobId)
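
The test above deserializes each record straight off the stream and relies on the server returning no errors. As a minimal defensive sketch (not part of the original test, using the same GetResponse APIs seen in the later examples), the error codes could be asserted before any deserialization:

    // Sketch only: assumes the single-partition request built in getAndVerify above.
    GetResponse resp = GetResponse.readFrom(new DataInputStream(stream), mockClusterMap);
    Assert.assertEquals("No server-level error expected", ServerErrorCode.No_Error, resp.getError());
    PartitionResponseInfo partitionResponseInfo = resp.getPartitionResponseInfoList().get(0);
    Assert.assertEquals("No partition-level error expected", ServerErrorCode.No_Error, partitionResponseInfo.getErrorCode());
    // Only then deserialize BlobProperties, user metadata, or blob data from resp.getInputStream().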

Example 22 with PartitionRequestInfo

use of com.github.ambry.protocol.PartitionRequestInfo in project ambry by linkedin.

the class InMemoryCloudDestinationErrorSimulationTest method testGetBlobErrorSimulation.

/**
 * test error simulation for GetBlobRequest
 * @throws Exception
 */
@Test
public void testGetBlobErrorSimulation() throws Exception {
    BlobId blobId = doPut(partitionId);
    ArrayList<BlobId> blobIdList = new ArrayList<BlobId>();
    blobIdList.add(blobId);
    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partitionId, blobIdList);
    ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
    partitionRequestInfoList.add(partitionRequestInfo);
    GetRequest getRequest = new GetRequest(1234, "clientId", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
    RequestInfo requestInfo = new RequestInfo(hostname, port, getRequest, replica, null);
    ResponseInfo responseInfo = sendAndWaitForResponses(requestInfo);
    GetResponse response = responseInfo.getError() == null ? (GetResponse) RouterUtils.mapToReceivedResponse((Response) responseInfo.getResponse()) : null;
    PartitionResponseInfo partitionResponseInfo = response.getPartitionResponseInfoList().get(0);
    Assert.assertEquals("GetRequest should succeed.", response.getError(), ServerErrorCode.No_Error);
    Assert.assertEquals("GetRequest partitionResponseInfo should succeed.", partitionResponseInfo.getErrorCode(), ServerErrorCode.No_Error);
    responseInfo.release();
    // inject error for cloud colo.
    cloudDestination.setServerErrorForAllRequests(StoreErrorCodes.ID_Not_Found);
    getRequest = new GetRequest(1234, "clientId", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
    requestInfo = new RequestInfo(hostname, port, getRequest, replica, null);
    responseInfo = sendAndWaitForResponses(requestInfo);
    response = responseInfo.getError() == null ? (GetResponse) RouterUtils.mapToReceivedResponse((Response) responseInfo.getResponse()) : null;
    partitionResponseInfo = response.getPartitionResponseInfoList().get(0);
    Assert.assertEquals("GetRequest responseInfo should have no error.", response.getError(), ServerErrorCode.No_Error);
    Assert.assertEquals("GetRequest partitionResponseInfo should be Blob_Not_Found", partitionResponseInfo.getErrorCode(), ServerErrorCode.Blob_Not_Found);
    responseInfo.release();
}
Also used: ResponseInfo(com.github.ambry.network.ResponseInfo), PartitionResponseInfo(com.github.ambry.protocol.PartitionResponseInfo), GetResponse(com.github.ambry.protocol.GetResponse), TtlUpdateResponse(com.github.ambry.protocol.TtlUpdateResponse), UndeleteResponse(com.github.ambry.protocol.UndeleteResponse), DeleteResponse(com.github.ambry.protocol.DeleteResponse), PutResponse(com.github.ambry.protocol.PutResponse), Response(com.github.ambry.protocol.Response), GetRequest(com.github.ambry.protocol.GetRequest), ArrayList(java.util.ArrayList), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), RequestInfo(com.github.ambry.network.RequestInfo), BlobId(com.github.ambry.commons.BlobId), Test(org.junit.Test)
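
The same injection pattern extends to other store error codes. A hypothetical follow-up, assuming the simulator maps StoreErrorCodes.ID_Deleted to ServerErrorCode.Blob_Deleted (that mapping is not shown in this snippet), could look like:

    // Hypothetical sketch: inject a deleted-blob error and expect it at the partition level.
    cloudDestination.setServerErrorForAllRequests(StoreErrorCodes.ID_Deleted);
    getRequest = new GetRequest(1234, "clientId", MessageFormatFlags.Blob, partitionRequestInfoList, GetOption.None);
    requestInfo = new RequestInfo(hostname, port, getRequest, replica, null);
    responseInfo = sendAndWaitForResponses(requestInfo);
    response = responseInfo.getError() == null ? (GetResponse) RouterUtils.mapToReceivedResponse((Response) responseInfo.getResponse()) : null;
    // Assumption: ID_Deleted surfaces as Blob_Deleted in the partition response, not as a server-level error.
    Assert.assertEquals(ServerErrorCode.No_Error, response.getError());
    Assert.assertEquals(ServerErrorCode.Blob_Deleted, response.getPartitionResponseInfoList().get(0).getErrorCode());
    responseInfo.release();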

Example 23 with PartitionRequestInfo

use of com.github.ambry.protocol.PartitionRequestInfo in project ambry by linkedin.

the class GetBlobOperationTest method getBlobBufferFromServer.

/**
 * @param server the server hosting the blob.
 * @return the ByteBuffer for the blob contents on the specified server.
 * @throws IOException
 */
private ByteBuffer getBlobBufferFromServer(MockServer server) throws IOException {
    PartitionRequestInfo requestInfo = new PartitionRequestInfo(blobId.getPartition(), Collections.singletonList(blobId));
    GetRequest getRequest = new GetRequest(1, "assertBlobReadSuccess", MessageFormatFlags.All, Collections.singletonList(requestInfo), GetOption.None);
    GetResponse getResponse = server.makeGetResponse(getRequest, ServerErrorCode.No_Error);
    getRequest.release();
    // simulating server sending response over the wire
    ByteBufferChannel channel = new ByteBufferChannel(ByteBuffer.allocate((int) getResponse.sizeInBytes()));
    getResponse.writeTo(channel);
    getResponse.release();
    // simulating the Router receiving the data from the wire
    ByteBuffer data = channel.getBuffer();
    data.flip();
    DataInputStream stream = new DataInputStream(new ByteBufferInputStream(data));
    // read off the size because GetResponse.readFrom() expects that this be read off
    stream.readLong();
    // construct a GetResponse as the Router would have
    getResponse = GetResponse.readFrom(stream, mockClusterMap);
    byte[] blobData = Utils.readBytesFromStream(getResponse.getInputStream(), (int) getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getSize());
    // wrap and return the blob content that was read off the stream
    return ByteBuffer.wrap(blobData);
}
Also used: ByteBufferChannel(com.github.ambry.utils.ByteBufferChannel), GetRequest(com.github.ambry.protocol.GetRequest), ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), DataInputStream(java.io.DataInputStream), NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream), GetResponse(com.github.ambry.protocol.GetResponse), ByteBuffer(java.nio.ByteBuffer)
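
A typical (hypothetical) use of this helper is to compare the bytes stored on the server against the content that was originally put. putContent here is an assumed byte[] field holding the uploaded payload, and server is a MockServer as in the method signature:

    // Hypothetical usage: the blob stored on the server should match the uploaded content.
    ByteBuffer serverBlob = getBlobBufferFromServer(server);
    Assert.assertArrayEquals("Server-side blob content mismatch", putContent, serverBlob.array());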

Example 24 with PartitionRequestInfo

use of com.github.ambry.protocol.PartitionRequestInfo in project ambry by linkedin.

the class StoredBlob method makeGetResponse.

/**
 * Make a {@link GetResponse} for the given {@link GetRequest} for which the given {@link ServerErrorCode} was
 * encountered. The request could be for BlobInfo, Blob, or All (the options that the router would request
 * for).
 * @param getRequest the {@link GetRequest} for which the response is being constructed.
 * @param getError the {@link ServerErrorCode} that was encountered.
 * @return the constructed {@link GetResponse}
 * @throws IOException if there was an error constructing the response.
 */
GetResponse makeGetResponse(GetRequest getRequest, ServerErrorCode getError) throws IOException {
    GetResponse getResponse;
    if (getError == ServerErrorCode.No_Error) {
        List<PartitionRequestInfo> infos = getRequest.getPartitionInfoList();
        if (infos.size() != 1 || infos.get(0).getBlobIds().size() != 1) {
            getError = ServerErrorCode.Unknown_Error;
        }
    }
    ServerErrorCode serverError;
    ServerErrorCode partitionError;
    boolean isDataBlob = false;
    try {
        String id = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0).getID();
        isDataBlob = blobs.get(id).type == BlobType.DataBlob;
    } catch (Exception ignored) {
    }
    if (!getErrorOnDataBlobOnly || isDataBlob) {
        // set it in the partitionResponseInfo
        if (getError == ServerErrorCode.No_Error || getError == ServerErrorCode.Blob_Expired || getError == ServerErrorCode.Blob_Deleted || getError == ServerErrorCode.Blob_Not_Found || getError == ServerErrorCode.Blob_Authorization_Failure || getError == ServerErrorCode.Disk_Unavailable) {
            partitionError = getError;
            serverError = ServerErrorCode.No_Error;
        } else {
            serverError = getError;
            // does not matter - this will not be checked if serverError is not No_Error.
            partitionError = ServerErrorCode.No_Error;
        }
    } else {
        serverError = ServerErrorCode.No_Error;
        partitionError = ServerErrorCode.No_Error;
    }
    if (serverError == ServerErrorCode.No_Error) {
        int byteBufferSize;
        ByteBuffer byteBuffer;
        StoreKey key = getRequest.getPartitionInfoList().get(0).getBlobIds().get(0);
        short accountId = Account.UNKNOWN_ACCOUNT_ID;
        short containerId = Container.UNKNOWN_CONTAINER_ID;
        long operationTimeMs = Utils.Infinite_Time;
        StoredBlob blob = blobs.get(key.getID());
        ServerErrorCode processedError = errorForGet(key.getID(), blob, getRequest);
        MessageMetadata msgMetadata = null;
        if (processedError == ServerErrorCode.No_Error) {
            ByteBuffer buf = blobs.get(key.getID()).serializedSentPutRequest.duplicate();
            // read off the size
            buf.getLong();
            // read off the type.
            buf.getShort();
            PutRequest originalBlobPutReq = PutRequest.readFrom(new DataInputStream(new ByteBufferInputStream(buf)), clusterMap);
            switch(getRequest.getMessageFormatFlag()) {
                case BlobInfo:
                    BlobProperties blobProperties = originalBlobPutReq.getBlobProperties();
                    accountId = blobProperties.getAccountId();
                    containerId = blobProperties.getContainerId();
                    operationTimeMs = blobProperties.getCreationTimeInMs();
                    ByteBuffer userMetadata = originalBlobPutReq.getUsermetadata();
                    byteBufferSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties) + MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
                    byteBuffer = ByteBuffer.allocate(byteBufferSize);
                    if (originalBlobPutReq.getBlobEncryptionKey() != null) {
                        msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
                    }
                    MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
                    MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
                    break;
                case Blob:
                    switch(blobFormatVersion) {
                        case MessageFormatRecord.Blob_Version_V2:
                            if (originalBlobPutReq.getBlobEncryptionKey() != null) {
                                msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
                            }
                            byteBufferSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
                            byteBuffer = ByteBuffer.allocate(byteBufferSize);
                            MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
                            break;
                        case MessageFormatRecord.Blob_Version_V1:
                            byteBufferSize = (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
                            byteBuffer = ByteBuffer.allocate(byteBufferSize);
                            MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize());
                            break;
                        default:
                            throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
                    }
                    byteBuffer.put(Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
                    Crc32 crc = new Crc32();
                    crc.update(byteBuffer.array(), 0, byteBuffer.position());
                    byteBuffer.putLong(crc.getValue());
                    break;
                case All:
                    blobProperties = originalBlobPutReq.getBlobProperties();
                    accountId = blobProperties.getAccountId();
                    containerId = blobProperties.getContainerId();
                    userMetadata = originalBlobPutReq.getUsermetadata();
                    operationTimeMs = originalBlobPutReq.getBlobProperties().getCreationTimeInMs();
                    int blobHeaderSize = MessageFormatRecord.MessageHeader_Format_V2.getHeaderSize();
                    int blobEncryptionRecordSize = originalBlobPutReq.getBlobEncryptionKey() != null ? MessageFormatRecord.BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(originalBlobPutReq.getBlobEncryptionKey().duplicate()) : 0;
                    int blobPropertiesSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(blobProperties);
                    int userMetadataSize = MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(userMetadata);
                    int blobInfoSize = blobPropertiesSize + userMetadataSize;
                    int blobRecordSize;
                    switch(blobFormatVersion) {
                        case MessageFormatRecord.Blob_Version_V2:
                            blobRecordSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
                            break;
                        case MessageFormatRecord.Blob_Version_V1:
                            blobRecordSize = (int) MessageFormatRecord.Blob_Format_V1.getBlobRecordSize((int) originalBlobPutReq.getBlobSize());
                            break;
                        default:
                            throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
                    }
                    byteBufferSize = blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize + blobRecordSize;
                    byteBuffer = ByteBuffer.allocate(byteBufferSize);
                    try {
                        MessageFormatRecord.MessageHeader_Format_V2.serializeHeader(byteBuffer, blobEncryptionRecordSize + blobInfoSize + blobRecordSize, originalBlobPutReq.getBlobEncryptionKey() == null ? Message_Header_Invalid_Relative_Offset : blobHeaderSize + key.sizeInBytes(), blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize, Message_Header_Invalid_Relative_Offset, blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobPropertiesSize, blobHeaderSize + key.sizeInBytes() + blobEncryptionRecordSize + blobInfoSize);
                    } catch (MessageFormatException e) {
                        e.printStackTrace();
                    }
                    byteBuffer.put(key.toBytes());
                    if (originalBlobPutReq.getBlobEncryptionKey() != null) {
                        MessageFormatRecord.BlobEncryptionKey_Format_V1.serializeBlobEncryptionKeyRecord(byteBuffer, originalBlobPutReq.getBlobEncryptionKey().duplicate());
                        msgMetadata = new MessageMetadata(originalBlobPutReq.getBlobEncryptionKey().duplicate());
                    }
                    MessageFormatRecord.BlobProperties_Format_V1.serializeBlobPropertiesRecord(byteBuffer, blobProperties);
                    MessageFormatRecord.UserMetadata_Format_V1.serializeUserMetadataRecord(byteBuffer, userMetadata);
                    int blobRecordStart = byteBuffer.position();
                    switch(blobFormatVersion) {
                        case MessageFormatRecord.Blob_Version_V2:
                            MessageFormatRecord.Blob_Format_V2.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize(), originalBlobPutReq.getBlobType());
                            break;
                        case MessageFormatRecord.Blob_Version_V1:
                            MessageFormatRecord.Blob_Format_V1.serializePartialBlobRecord(byteBuffer, (int) originalBlobPutReq.getBlobSize());
                            break;
                        default:
                            throw new IllegalStateException("Blob format version " + blobFormatVersion + " not supported.");
                    }
                    byteBuffer.put(Utils.readBytesFromStream(originalBlobPutReq.getBlobStream(), (int) originalBlobPutReq.getBlobSize()));
                    crc = new Crc32();
                    crc.update(byteBuffer.array(), blobRecordStart, blobRecordSize - MessageFormatRecord.Crc_Size);
                    byteBuffer.putLong(crc.getValue());
                    break;
                default:
                    throw new IOException("GetRequest flag is not supported: " + getRequest.getMessageFormatFlag());
            }
        } else if (processedError == ServerErrorCode.Blob_Deleted) {
            if (partitionError == ServerErrorCode.No_Error) {
                partitionError = ServerErrorCode.Blob_Deleted;
            }
            byteBuffer = ByteBuffer.allocate(0);
            byteBufferSize = 0;
        } else if (processedError == ServerErrorCode.Blob_Expired) {
            if (partitionError == ServerErrorCode.No_Error) {
                partitionError = ServerErrorCode.Blob_Expired;
            }
            byteBuffer = ByteBuffer.allocate(0);
            byteBufferSize = 0;
        } else if (processedError == ServerErrorCode.Blob_Authorization_Failure) {
            if (partitionError == ServerErrorCode.No_Error) {
                partitionError = ServerErrorCode.Blob_Authorization_Failure;
            }
            byteBuffer = ByteBuffer.allocate(0);
            byteBufferSize = 0;
        } else {
            if (partitionError == ServerErrorCode.No_Error) {
                partitionError = ServerErrorCode.Blob_Not_Found;
            }
            byteBuffer = ByteBuffer.allocate(0);
            byteBufferSize = 0;
        }
        byteBuffer.flip();
        ByteBufferSend responseSend = new ByteBufferSend(byteBuffer);
        List<MessageInfo> messageInfoList = new ArrayList<>();
        List<MessageMetadata> messageMetadataList = new ArrayList<>();
        List<PartitionResponseInfo> partitionResponseInfoList = new ArrayList<PartitionResponseInfo>();
        if (partitionError == ServerErrorCode.No_Error) {
            messageInfoList.add(new MessageInfo(key, byteBufferSize, false, blob.isTtlUpdated(), blob.isUndeleted(), blob.expiresAt, null, accountId, containerId, operationTimeMs, blob.lifeVersion));
            messageMetadataList.add(msgMetadata);
        }
        PartitionResponseInfo partitionResponseInfo = partitionError == ServerErrorCode.No_Error ? new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), messageInfoList, messageMetadataList) : new PartitionResponseInfo(getRequest.getPartitionInfoList().get(0).getPartition(), partitionError);
        partitionResponseInfoList.add(partitionResponseInfo);
        getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), partitionResponseInfoList, responseSend, serverError);
    } else {
        getResponse = new GetResponse(getRequest.getCorrelationId(), getRequest.getClientId(), new ArrayList<PartitionResponseInfo>(), new ByteBufferSend(ByteBuffer.allocate(0)), serverError);
    }
    return getResponse;
}
Also used: ArrayList(java.util.ArrayList), MessageMetadata(com.github.ambry.messageformat.MessageMetadata), PartitionResponseInfo(com.github.ambry.protocol.PartitionResponseInfo), MessageFormatException(com.github.ambry.messageformat.MessageFormatException), ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream), PutRequest(com.github.ambry.protocol.PutRequest), IOException(java.io.IOException), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), DataInputStream(java.io.DataInputStream), GetResponse(com.github.ambry.protocol.GetResponse), ByteBuffer(java.nio.ByteBuffer), StoreKey(com.github.ambry.store.StoreKey), ServerErrorCode(com.github.ambry.server.ServerErrorCode), MessageInfo(com.github.ambry.store.MessageInfo), Crc32(com.github.ambry.utils.Crc32), ByteBufferSend(com.github.ambry.network.ByteBufferSend), BlobProperties(com.github.ambry.messageformat.BlobProperties)
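
Based on the error handling above, a minimal sketch of how a caller could exercise the two error paths (mockServer and getRequest are assumed to be set up as in the getBlobBufferFromServer example, with getErrorOnDataBlobOnly left unset):

    // Sketch: Blob_Not_Found is reported at the partition level, while the server-level code stays No_Error.
    GetResponse notFoundResponse = mockServer.makeGetResponse(getRequest, ServerErrorCode.Blob_Not_Found);
    Assert.assertEquals(ServerErrorCode.No_Error, notFoundResponse.getError());
    Assert.assertEquals(ServerErrorCode.Blob_Not_Found,
        notFoundResponse.getPartitionResponseInfoList().get(0).getErrorCode());
    // Sketch: any other error (e.g. Unknown_Error) is reported at the server level with an empty partition list.
    GetResponse unknownResponse = mockServer.makeGetResponse(getRequest, ServerErrorCode.Unknown_Error);
    Assert.assertEquals(ServerErrorCode.Unknown_Error, unknownResponse.getError());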

Example 25 with PartitionRequestInfo

use of com.github.ambry.protocol.PartitionRequestInfo in project ambry by linkedin.

the class CloudAndStoreReplicationTest method testGetOnServerNode.

/**
 * Does a GET on the recovery server node to verify that all the blob ids that were uploaded to the VCR node have
 * been recovered on the recovery node.
 * @param blobIdToSizeMap {@link Map} of blob id to the size uploaded to the VCR node.
 * @param node the recovery server node
 * @throws IOException if sending the request or reading the response fails
 */
private void testGetOnServerNode(Map<BlobId, Integer> blobIdToSizeMap, DataNodeId node) throws IOException {
    ConnectedChannel channel = ServerTestUtil.getBlockingChannelBasedOnPortType(node.getPortToConnectTo(), node.getHostname(), null, null);
    channel.connect();
    AtomicInteger correlationIdGenerator = new AtomicInteger(0);
    List<BlobId> allBlobIds = Stream.concat(cloudBlobIds.stream(), serverBlobIds.stream()).collect(Collectors.toList());
    List<PartitionRequestInfo> partitionRequestInfoList = Collections.singletonList(new PartitionRequestInfo(partitionId, allBlobIds));
    GetRequest getRequest = new GetRequest(correlationIdGenerator.incrementAndGet(), GetRequest.Replication_Client_Id_Prefix + node.getHostname(), MessageFormatFlags.All, partitionRequestInfoList, new ReplicationConfig(new VerifiableProperties(recoveryProperties)).replicationIncludeAll ? GetOption.Include_All : GetOption.None);
    channel.send(getRequest);
    GetResponse getResponse = GetResponse.readFrom(channel.receive().getInputStream(), recoveryCluster.getClusterMap());
    for (PartitionResponseInfo partitionResponseInfo : getResponse.getPartitionResponseInfoList()) {
        assertEquals("Error in getting the recovered blobs", ServerErrorCode.No_Error, partitionResponseInfo.getErrorCode());
        // The old per-blob overhead was 272 bytes; the two new 4-byte BlobProperties fields add 8 bytes, giving the expected 280.
        for (MessageInfo messageInfo : partitionResponseInfo.getMessageInfoList()) {
            assertEquals(blobIdToSizeMap.get(messageInfo.getStoreKey()) + 280, messageInfo.getSize());
        }
    }
}
Also used: ReplicationConfig(com.github.ambry.config.ReplicationConfig), AtomicInteger(java.util.concurrent.atomic.AtomicInteger), VerifiableProperties(com.github.ambry.config.VerifiableProperties), GetRequest(com.github.ambry.protocol.GetRequest), ConnectedChannel(com.github.ambry.network.ConnectedChannel), PartitionResponseInfo(com.github.ambry.protocol.PartitionResponseInfo), PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo), BlobId(com.github.ambry.commons.BlobId), GetResponse(com.github.ambry.protocol.GetResponse), MessageInfo(com.github.ambry.store.MessageInfo)

Aggregations

PartitionRequestInfo (com.github.ambry.protocol.PartitionRequestInfo): 29 usages
GetRequest (com.github.ambry.protocol.GetRequest): 28 usages
GetResponse (com.github.ambry.protocol.GetResponse): 28 usages
BlobId (com.github.ambry.commons.BlobId): 22 usages
ArrayList (java.util.ArrayList): 22 usages
DataInputStream (java.io.DataInputStream): 19 usages
BlobProperties (com.github.ambry.messageformat.BlobProperties): 16 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 11 usages
ConnectedChannel (com.github.ambry.network.ConnectedChannel): 11 usages
NettyByteBufDataInputStream (com.github.ambry.utils.NettyByteBufDataInputStream): 11 usages
MessageFormatException (com.github.ambry.messageformat.MessageFormatException): 10 usages
PutRequest (com.github.ambry.protocol.PutRequest): 10 usages
IOException (java.io.IOException): 10 usages
ByteBuffer (java.nio.ByteBuffer): 10 usages
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 9 usages
PartitionId (com.github.ambry.clustermap.PartitionId): 9 usages
BlobData (com.github.ambry.messageformat.BlobData): 9 usages
DeleteResponse (com.github.ambry.protocol.DeleteResponse): 9 usages
PutResponse (com.github.ambry.protocol.PutResponse): 9 usages
Properties (java.util.Properties): 9 usages
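
Across all of these examples the request/response flow is the same. A condensed sketch of the recurring pattern (correlationId, clientId, blobId, channel, and clusterMap are assumed to be set up by the surrounding test):

    // Recurring pattern: wrap the blob ids of one partition, send a GetRequest, then read the GetResponse.
    List<PartitionRequestInfo> partitionRequestInfoList =
        Collections.singletonList(new PartitionRequestInfo(blobId.getPartition(), Collections.singletonList(blobId)));
    GetRequest getRequest =
        new GetRequest(correlationId, clientId, MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.None);
    channel.send(getRequest);
    GetResponse getResponse = GetResponse.readFrom(new DataInputStream(channel.receive().getInputStream()), clusterMap);
    if (getResponse.getError() == ServerErrorCode.No_Error
        && getResponse.getPartitionResponseInfoList().get(0).getErrorCode() == ServerErrorCode.No_Error) {
        BlobProperties blobProperties = MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
    }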