
Example 96 with MockClusterMap

Use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.

From the class OperationTrackerTest, method operationClassTest.

/**
 * Test that the operation tracker can correctly populate parameters (i.e. successTarget) based on the input {@link RouterOperation}.
 */
@Test
public void operationClassTest() {
    Properties props = new Properties();
    props.setProperty("router.hostname", "localhost");
    props.setProperty("router.datacenter.name", "dc-0");
    props.setProperty("router.get.success.target", "1");
    props.setProperty("router.put.success.target", "2");
    props.setProperty("router.delete.success.target", "2");
    props.setProperty("router.ttl.update.success.target", "2");
    RouterConfig routerConfig = new RouterConfig(new VerifiableProperties(props));
    initialize();
    NonBlockingRouterMetrics routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig);
    Map<RouterOperation, Integer> operationAndSuccessTarget = new HashMap<>();
    operationAndSuccessTarget.put(RouterOperation.GetBlobOperation, 1);
    operationAndSuccessTarget.put(RouterOperation.GetBlobInfoOperation, 1);
    operationAndSuccessTarget.put(RouterOperation.PutOperation, 2);
    operationAndSuccessTarget.put(RouterOperation.DeleteOperation, 2);
    operationAndSuccessTarget.put(RouterOperation.TtlUpdateOperation, 2);
    for (Map.Entry<RouterOperation, Integer> entry : operationAndSuccessTarget.entrySet()) {
        SimpleOperationTracker operationTracker = null;
        switch(operationTrackerType) {
            case SIMPLE_OP_TRACKER:
                operationTracker = new SimpleOperationTracker(routerConfig, entry.getKey(), mockPartition, originatingDcName, true, routerMetrics);
                break;
            case ADAPTIVE_OP_TRACKER:
                try {
                    operationTracker = new AdaptiveOperationTracker(routerConfig, routerMetrics, entry.getKey(), mockPartition, originatingDcName, time);
                } catch (IllegalArgumentException e) {
                    assertTrue("Get operation shouldn't throw any exception in adaptive tracker", entry.getKey() != RouterOperation.GetBlobOperation && entry.getKey() != RouterOperation.GetBlobInfoOperation);
                }
                break;
        }
        // ensure the success target matches the number specified for each type of operation
        if (operationTracker != null) {
            assertEquals("The suggest target doesn't match", (long) entry.getValue(), operationTracker.getSuccessTarget(ReplicaType.DISK_BACKED));
        }
    }
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) HashMap(java.util.HashMap) Properties(java.util.Properties) RouterConfig(com.github.ambry.config.RouterConfig) Map(java.util.Map) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) Test(org.junit.Test)
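
A quick way to see the property-to-target mapping in isolation is to build just the RouterConfig. This is a minimal sketch, assuming RouterConfig exposes the conventional routerGetSuccessTarget and routerPutSuccessTarget fields generated from the router.*.success.target config keys (field names may vary across ambry versions):

import java.util.Properties;
import com.github.ambry.config.RouterConfig;
import com.github.ambry.config.VerifiableProperties;

public class SuccessTargetSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // the same required properties as in the test above
        props.setProperty("router.hostname", "localhost");
        props.setProperty("router.datacenter.name", "dc-0");
        props.setProperty("router.get.success.target", "1");
        props.setProperty("router.put.success.target", "2");
        RouterConfig config = new RouterConfig(new VerifiableProperties(props));
        // field names below are assumptions based on ambry's config naming convention
        System.out.println("get success target: " + config.routerGetSuccessTarget);
        System.out.println("put success target: " + config.routerPutSuccessTarget);
    }
}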

Example 97 with MockClusterMap

Use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.

From the class ChunkFillTest, method testChunkNumAndSizeCalculations.

/**
 * Test the calculation of number of chunks and the size of each chunk, using a very large blob size. No content
 * comparison is done. This test does not consume more memory than chunkSize.
 */
@Test
public void testChunkNumAndSizeCalculations() throws Exception {
    chunkSize = 4 * 1024 * 1024;
    // a large blob size, greater than Integer.MAX_VALUE and not at a chunk size boundary.
    final long blobSize = ((long) Integer.MAX_VALUE / chunkSize + 1) * chunkSize + random.nextInt(chunkSize - 1) + 1;
    VerifiableProperties vProps = getNonBlockingRouterProperties();
    MockClusterMap mockClusterMap = new MockClusterMap();
    RouterConfig routerConfig = new RouterConfig(vProps);
    NonBlockingRouterMetrics routerMetrics = new NonBlockingRouterMetrics(mockClusterMap, routerConfig);
    short accountId = Utils.getRandomShort(random);
    short containerId = Utils.getRandomShort(random);
    BlobProperties putBlobProperties = new BlobProperties(blobSize, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time, accountId, containerId, false, null, null, null);
    byte[] putUserMetadata = new byte[10];
    random.nextBytes(putUserMetadata);
    final MockReadableStreamChannel putChannel = new MockReadableStreamChannel(blobSize, false);
    FutureResult<String> futureResult = new FutureResult<String>();
    MockTime time = new MockTime();
    MockNetworkClientFactory networkClientFactory = new MockNetworkClientFactory(vProps, null, 0, 0, 0, null, time);
    PutOperation op = PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap, new LoggingNotificationSystem(), new InMemAccountService(true, false), putUserMetadata, putChannel, PutBlobOptions.DEFAULT, futureResult, null, new RouterCallback(networkClientFactory.getNetworkClient(), new ArrayList<>()), null, null, null, null, new MockTime(), putBlobProperties, MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
    op.startOperation();
    numChunks = RouterUtils.getNumChunksForBlobAndChunkSize(blobSize, chunkSize);
    // blobSize is not a multiple of chunkSize, so one extra partial chunk is expected
    int expectedNumChunks = (int) (blobSize / chunkSize + 1);
    Assert.assertEquals("numChunks should be as expected", expectedNumChunks, numChunks);
    int lastChunkSize = (int) (blobSize % chunkSize);
    final AtomicReference<Exception> channelException = new AtomicReference<Exception>(null);
    int chunkIndex = 0;
    // The write to the MockReadableStreamChannel blocks until the data is read as part of the chunk filling,
    // so create a thread that fills the MockReadableStreamChannel.
    Utils.newThread(new Runnable() {

        @Override
        public void run() {
            try {
                byte[] writeBuf = new byte[chunkSize];
                long written = 0;
                while (written < blobSize) {
                    int toWrite = (int) Math.min(chunkSize, blobSize - written);
                    putChannel.write(ByteBuffer.wrap(writeBuf, 0, toWrite));
                    written += toWrite;
                }
            } catch (Exception e) {
                channelException.set(e);
            }
        }
    }, false).start();
    // Do the chunk filling.
    boolean fillingComplete = false;
    do {
        op.fillChunks();
        // verify each chunk's state; writes complete as soon as they are read, since the channel is ByteBuffer based.
        for (PutOperation.PutChunk putChunk : op.putChunks) {
            Assert.assertNull("Mock channel write should not have caused an exception", channelException.get());
            if (putChunk.isFree()) {
                continue;
            }
            if (chunkIndex == numChunks - 1) {
                // the last chunk may not be Ready yet, since that depends on the completion callback being invoked.
                Assert.assertTrue("Chunk should be Building or Ready.", putChunk.getState() == PutOperation.ChunkState.Ready || putChunk.getState() == PutOperation.ChunkState.Building);
                if (putChunk.getState() == PutOperation.ChunkState.Ready) {
                    Assert.assertEquals("Chunk size should be the last chunk size", lastChunkSize, putChunk.buf.readableBytes());
                    Assert.assertTrue("Chunk Filling should be complete at this time", op.isChunkFillingDone());
                    fillingComplete = true;
                }
            } else {
                // if not last chunk, then the chunk should be full and Ready.
                Assert.assertEquals("Chunk should be ready.", PutOperation.ChunkState.Ready, putChunk.getState());
                Assert.assertEquals("Chunk size should be maxChunkSize", chunkSize, putChunk.buf.readableBytes());
                chunkIndex++;
                putChunk.clear();
            }
        }
    } while (!fillingComplete);
}
Also used : ArrayList(java.util.ArrayList) InMemAccountService(com.github.ambry.account.InMemAccountService) Random(java.util.Random) MockTime(com.github.ambry.utils.MockTime) VerifiableProperties(com.github.ambry.config.VerifiableProperties) AtomicReference(java.util.concurrent.atomic.AtomicReference) RouterConfig(com.github.ambry.config.RouterConfig) IOException(java.io.IOException) LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem) BlobProperties(com.github.ambry.messageformat.BlobProperties) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) Test(org.junit.Test)
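
The chunk count this test asserts is plain ceiling division over the blob size. The following self-contained sketch reproduces the same arithmetic without ambry dependencies; it assumes RouterUtils.getNumChunksForBlobAndChunkSize behaves as ceiling division for positive blob sizes:

public class ChunkMathSketch {
    // number of chunkSize-byte chunks needed to cover blobSize bytes (ceiling division)
    static int numChunks(long blobSize, int chunkSize) {
        return (int) ((blobSize + chunkSize - 1) / chunkSize);
    }

    public static void main(String[] args) {
        int chunkSize = 4 * 1024 * 1024;
        // a blob just past Integer.MAX_VALUE that is not chunk-aligned, as in the test
        long blobSize = ((long) Integer.MAX_VALUE / chunkSize + 1) * chunkSize + 123;
        int chunks = numChunks(blobSize, chunkSize);   // equals blobSize / chunkSize + 1 here
        long lastChunkSize = blobSize % chunkSize;     // 123, the partial tail
        System.out.println(chunks + " chunks, last chunk holds " + lastChunkSize + " bytes");
    }
}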

Example 98 with MockClusterMap

Use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.

From the class OperationTrackerTest, method downReplicasOrderingTest.

/**
 * Test to ensure that replicas that are down are also returned by the operation tracker, but they are
 * ordered after the healthy replicas.
 */
@Test
public void downReplicasOrderingTest() {
    List<Port> portList = Collections.singletonList(new Port(PORT, PortType.PLAINTEXT));
    List<String> mountPaths = Collections.singletonList("mockMountPath");
    datanodes = new ArrayList<>();
    datanodes.add(new MockDataNodeId(portList, mountPaths, "dc-0"));
    datanodes.add(new MockDataNodeId(portList, mountPaths, "dc-1"));
    mockPartition = new MockPartitionId();
    mockClusterMap = new MockClusterMap(false, datanodes, 1, Collections.singletonList(mockPartition), datanodes.get(0).getDatacenterName());
    int replicaCount = 6;
    populateReplicaList(replicaCount, ReplicaState.STANDBY);
    // Test scenarios with various number of replicas down
    for (int i = 0; i < replicaCount; i++) {
        testReplicaDown(replicaCount, i);
    }
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) Port(com.github.ambry.network.Port) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) Test(org.junit.Test)
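
The ordering property being tested can be expressed as a stable sort that keeps healthy replicas in front and down replicas at the tail, without dropping any. A minimal sketch with a hypothetical Replica record standing in for ambry's ReplicaId:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class DownReplicaOrderingSketch {
    // hypothetical stand-in for a replica; only the liveness flag matters here
    record Replica(String name, boolean down) { }

    public static void main(String[] args) {
        List<Replica> replicas = new ArrayList<>(List.of(
                new Replica("r0", true), new Replica("r1", false),
                new Replica("r2", true), new Replica("r3", false)));
        // stable sort: false (healthy) sorts before true (down), relative order preserved
        replicas.sort(Comparator.comparing(Replica::down));
        replicas.forEach(r -> System.out.println(r.name() + (r.down() ? " (down)" : "")));
        // prints r1, r3, r0, r2 -- down replicas are still returned, just last
    }
}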

Example 99 with MockClusterMap

Use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.

From the class MockReadableStreamChannel, method verifyBlob.

/**
 * Verifies that the blob associated with the blob id returned by a successful put operation has exactly the same
 * data as the original object that was put.
 * @param requestAndResult the {@link RequestAndResult} to use for verification.
 * @param serializedRequests the mapping from blob ids to their corresponding serialized {@link PutRequest}.
 */
private void verifyBlob(RequestAndResult requestAndResult, HashMap<String, ByteBuffer> serializedRequests) throws Exception {
    String blobId = requestAndResult.result.result();
    ByteBuffer serializedRequest = serializedRequests.get(blobId);
    PutRequest request = deserializePutRequest(serializedRequest);
    NotificationBlobType notificationBlobType;
    BlobId origBlobId = new BlobId(blobId, mockClusterMap);
    boolean stitchOperation = requestAndResult.chunksToStitch != null;
    if (stitchOperation) {
        assertEquals("Stitch operations should always produce metadata blobs", BlobType.MetadataBlob, request.getBlobType());
    }
    if (request.getBlobType() == BlobType.MetadataBlob) {
        notificationBlobType = NotificationBlobType.Composite;
        assertEquals("Expected metadata", BlobDataType.METADATA, origBlobId.getBlobDataType());
        byte[] data = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
        CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(ByteBuffer.wrap(data), new BlobIdFactory(mockClusterMap));
        List<StoreKey> dataBlobIds = compositeBlobInfo.getKeys();
        long expectedMaxChunkSize;
        long expectedTotalSize;
        int expectedNumChunks;
        if (stitchOperation) {
            expectedMaxChunkSize = requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).max().orElse(0);
            expectedTotalSize = requestAndResult.chunksToStitch.stream().mapToLong(ChunkInfo::getChunkSizeInBytes).sum();
            expectedNumChunks = requestAndResult.chunksToStitch.size();
        } else {
            expectedMaxChunkSize = chunkSize;
            expectedTotalSize = requestAndResult.putContent.length;
            expectedNumChunks = RouterUtils.getNumChunksForBlobAndChunkSize(requestAndResult.putContent.length, chunkSize);
        }
        if (metadataContentVersion <= MessageFormatRecord.Metadata_Content_Version_V2) {
            assertEquals("Wrong max chunk size in metadata", expectedMaxChunkSize, compositeBlobInfo.getChunkSize());
        }
        assertEquals("Wrong total size in metadata", expectedTotalSize, compositeBlobInfo.getTotalSize());
        assertEquals("Number of chunks is not as expected", expectedNumChunks, dataBlobIds.size());
        // Verify all dataBlobIds are DataChunk
        for (StoreKey key : dataBlobIds) {
            BlobId origDataBlobId = (BlobId) key;
            assertEquals("Expected datachunk", BlobDataType.DATACHUNK, origDataBlobId.getBlobDataType());
        }
        // verify user-metadata
        if (requestAndResult.putBlobProperties.isEncrypted()) {
            ByteBuffer userMetadata = request.getUsermetadata();
            // The reason to call run() directly, instead of spinning up a thread and calling start(), is that any
            // exceptions or assertion failures in a non-main thread will not fail the test.
            new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), null, userMetadata, cryptoService, kms, null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
                assertNull("Exception should not be thrown", exception);
                assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
                assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, result.getDecryptedUserMetadata().array());
            }).run();
        } else {
            assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, request.getUsermetadata().array());
        }
        if (!stitchOperation) {
            verifyCompositeBlob(requestAndResult.putBlobProperties, requestAndResult.putContent, requestAndResult.putUserMetadata, dataBlobIds, request, serializedRequests);
        }
    } else {
        notificationBlobType = requestAndResult.options.isChunkUpload() ? NotificationBlobType.DataChunk : NotificationBlobType.Simple;
        // TODO: Currently, we don't have the logic to distinguish Simple vs DataChunk for the first chunk
        // Once the logic is fixed we should assert Simple.
        BlobDataType dataType = origBlobId.getBlobDataType();
        assertTrue("Invalid blob data type", dataType == BlobDataType.DATACHUNK || dataType == BlobDataType.SIMPLE);
        byte[] content = Utils.readBytesFromStream(request.getBlobStream(), (int) request.getBlobSize());
        if (!requestAndResult.putBlobProperties.isEncrypted()) {
            assertArrayEquals("Input blob and written blob should be the same", requestAndResult.putContent, content);
            assertArrayEquals("UserMetadata mismatch for simple blob", requestAndResult.putUserMetadata, request.getUsermetadata().array());
        } else {
            ByteBuffer userMetadata = request.getUsermetadata();
            // The reason to call run() directly, instead of spinning up a thread and calling start(), is that any
            // exceptions or assertion failures in a non-main thread will not fail the test.
            new DecryptJob(origBlobId, request.getBlobEncryptionKey().duplicate(), Unpooled.wrappedBuffer(content), userMetadata, cryptoService, kms, null, new CryptoJobMetricsTracker(metrics.decryptJobMetrics), (result, exception) -> {
                assertNull("Exception should not be thrown", exception);
                assertEquals("BlobId mismatch", origBlobId, result.getBlobId());
                ByteBuf decryptedBlobContent = result.getDecryptedBlobContent();
                byte[] blobContent = new byte[decryptedBlobContent.readableBytes()];
                decryptedBlobContent.readBytes(blobContent);
                assertArrayEquals("Content mismatch", requestAndResult.putContent, blobContent);
                assertArrayEquals("UserMetadata mismatch", requestAndResult.putUserMetadata, result.getDecryptedUserMetadata().array());
                decryptedBlobContent.release();
            }).run();
        }
    }
    notificationSystem.verifyNotification(blobId, notificationBlobType, request.getBlobProperties());
}
Also used : Arrays(java.util.Arrays) BlobProperties(com.github.ambry.messageformat.BlobProperties) DataNodeId(com.github.ambry.clustermap.DataNodeId) ByteBufferReadableStreamChannel(com.github.ambry.commons.ByteBufferReadableStreamChannel) Random(java.util.Random) ByteBuffer(java.nio.ByteBuffer) ThrowingConsumer(com.github.ambry.utils.ThrowingConsumer) Unpooled(io.netty.buffer.Unpooled) Future(java.util.concurrent.Future) GeneralSecurityException(java.security.GeneralSecurityException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TestUtils(com.github.ambry.utils.TestUtils) Map(java.util.Map) After(org.junit.After) NettyByteBufLeakHelper(com.github.ambry.utils.NettyByteBufLeakHelper) QuotaMethod(com.github.ambry.quota.QuotaMethod) Parameterized(org.junit.runners.Parameterized) Container(com.github.ambry.account.Container) Set(java.util.Set) Utils(com.github.ambry.utils.Utils) CryptoServiceConfig(com.github.ambry.config.CryptoServiceConfig) RouterConfig(com.github.ambry.config.RouterConfig) CountDownLatch(java.util.concurrent.CountDownLatch) StoreKey(com.github.ambry.store.StoreKey) List(java.util.List) MockTime(com.github.ambry.utils.MockTime) NotificationBlobType(com.github.ambry.notification.NotificationBlobType) Account(com.github.ambry.account.Account) Callback(com.github.ambry.commons.Callback) LoggingNotificationSystem(com.github.ambry.commons.LoggingNotificationSystem) BlobType(com.github.ambry.messageformat.BlobType) InMemAccountService(com.github.ambry.account.InMemAccountService) BlobId(com.github.ambry.commons.BlobId) ResponseHandler(com.github.ambry.commons.ResponseHandler) CompositeBlobInfo(com.github.ambry.messageformat.CompositeBlobInfo) DataInputStream(java.io.DataInputStream) ServerErrorCode(com.github.ambry.server.ServerErrorCode) RunWith(org.junit.runner.RunWith) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ByteBuf(io.netty.buffer.ByteBuf) QuotaResource(com.github.ambry.quota.QuotaResource) SystemTime(com.github.ambry.utils.SystemTime) PutRequest(com.github.ambry.protocol.PutRequest) Before(org.junit.Before) QuotaResourceType(com.github.ambry.quota.QuotaResourceType) BlobDataType(com.github.ambry.commons.BlobId.BlobDataType) Properties(java.util.Properties) LongStream(java.util.stream.LongStream) Pair(com.github.ambry.utils.Pair) Iterator(java.util.Iterator) VerifiableProperties(com.github.ambry.config.VerifiableProperties) QuotaChargeCallback(com.github.ambry.quota.QuotaChargeCallback) MetadataContentSerDe(com.github.ambry.messageformat.MetadataContentSerDe) IOException(java.io.IOException) Test(org.junit.Test) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) TimeUnit(java.util.concurrent.TimeUnit) QuotaException(com.github.ambry.quota.QuotaException) KMSConfig(com.github.ambry.config.KMSConfig) ByteBufferInputStream(com.github.ambry.utils.ByteBufferInputStream) MessageFormatRecord(com.github.ambry.messageformat.MessageFormatRecord) Assert(org.junit.Assert) Collections(java.util.Collections) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) InputStream(java.io.InputStream)
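
For stitch operations, the expected metadata values are simple aggregates over the chunks being stitched. The following standalone sketch mirrors the stream arithmetic above, using a hypothetical ChunkInfo record rather than ambry's ChunkInfo class:

import java.util.List;

public class StitchAggregateSketch {
    // hypothetical stand-in carrying only the size used by the assertions above
    record ChunkInfo(long chunkSizeInBytes) { }

    public static void main(String[] args) {
        List<ChunkInfo> chunksToStitch = List.of(
                new ChunkInfo(4L * 1024 * 1024), new ChunkInfo(4L * 1024 * 1024), new ChunkInfo(1000));
        long expectedMaxChunkSize =
                chunksToStitch.stream().mapToLong(ChunkInfo::chunkSizeInBytes).max().orElse(0);
        long expectedTotalSize =
                chunksToStitch.stream().mapToLong(ChunkInfo::chunkSizeInBytes).sum();
        int expectedNumChunks = chunksToStitch.size();
        System.out.printf("max=%d total=%d chunks=%d%n",
                expectedMaxChunkSize, expectedTotalSize, expectedNumChunks);
    }
}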

Example 100 with MockClusterMap

Use of com.github.ambry.clustermap.MockClusterMap in project ambry by linkedin.

From the class Http2NetworkClientTest, method putGetTest.

@Test
public void putGetTest() throws Exception {
    MockClusterMap clusterMap = http2Cluster.getClusterMap();
    DataNodeId dataNodeId = http2Cluster.getGeneralDataNode();
    BlobIdFactory blobIdFactory = new BlobIdFactory(clusterMap);
    SSLFactory sslFactory = new NettySslHttp2Factory(clientSSLConfig);
    Http2NetworkClient networkClient = new Http2NetworkClient(new Http2ClientMetrics(new MetricRegistry()), new Http2ClientConfig(new VerifiableProperties(new Properties())), sslFactory, eventLoopGroup);
    // Put a blob
    int blobSize = 1024 * 1024;
    byte[] usermetadata = new byte[1000];
    byte[] data = new byte[blobSize];
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    BlobProperties properties = new BlobProperties(blobSize, "serviceid1", accountId, containerId, false);
    TestUtils.RANDOM.nextBytes(usermetadata);
    TestUtils.RANDOM.nextBytes(data);
    List<? extends PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
    short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
    BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
    // put blob 1
    PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(usermetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, null);
    RequestInfo request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2), putRequest, clusterMap.getReplicaIds(dataNodeId).get(0), null);
    List<ResponseInfo> responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
    long startTime = SystemTime.getInstance().milliseconds();
    while (responseInfos.size() == 0) {
        responseInfos = networkClient.sendAndPoll(Collections.emptyList(), new HashSet<>(), 300);
        if (SystemTime.getInstance().milliseconds() - startTime >= 6000) {
            fail("Network Client no reponse and timeout.");
        }
        Thread.sleep(30);
    }
    assertEquals("Should be only one response", 1, responseInfos.size());
    DataInputStream dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
    PutResponse putResponse = PutResponse.readFrom(dis);
    assertEquals("No error expected.", ServerErrorCode.No_Error, putResponse.getError());
    // Get the blob
    // get blob properties
    ArrayList<BlobId> ids = new ArrayList<BlobId>();
    MockPartitionId partition = (MockPartitionId) clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS).get(0);
    ids.add(blobId1);
    ArrayList<PartitionRequestInfo> partitionRequestInfoList = new ArrayList<PartitionRequestInfo>();
    PartitionRequestInfo partitionRequestInfo = new PartitionRequestInfo(partition, ids);
    partitionRequestInfoList.add(partitionRequestInfo);
    GetRequest getRequest = new GetRequest(1, "http2-clientid", MessageFormatFlags.All, partitionRequestInfoList, GetOption.None);
    request = new RequestInfo(dataNodeId.getHostname(), new Port(dataNodeId.getHttp2Port(), PortType.HTTP2), getRequest, clusterMap.getReplicaIds(dataNodeId).get(0), null);
    responseInfos = networkClient.sendAndPoll(Collections.singletonList(request), new HashSet<>(), 300);
    startTime = SystemTime.getInstance().milliseconds();
    while (responseInfos.size() == 0) {
        responseInfos = networkClient.sendAndPoll(Collections.emptyList(), new HashSet<>(), 300);
        if (SystemTime.getInstance().milliseconds() - startTime >= 3000) {
            fail("Network Client no response and timeout.");
        }
        Thread.sleep(30);
    }
    assertEquals("Should be only one response", 1, responseInfos.size());
    dis = new NettyByteBufDataInputStream(responseInfos.get(0).content());
    GetResponse resp = GetResponse.readFrom(dis, clusterMap);
    BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(resp.getInputStream(), blobIdFactory);
    // verify BlobProperties
    BlobProperties propertyOutput = blobAll.getBlobInfo().getBlobProperties();
    assertEquals(blobSize, propertyOutput.getBlobSize());
    assertEquals("serviceid1", propertyOutput.getServiceId());
    assertEquals("AccountId mismatch", accountId, propertyOutput.getAccountId());
    assertEquals("ContainerId mismatch", containerId, propertyOutput.getContainerId());
    // verify UserMetadata
    byte[] userMetadataOutput = blobAll.getBlobInfo().getUserMetadata();
    assertArrayEquals(usermetadata, userMetadataOutput);
    // verify content
    byte[] actualBlobData = getBlobDataAndRelease(blobAll.getBlobData());
    assertArrayEquals("Content mismatch.", data, actualBlobData);
}
Also used : SSLFactory(com.github.ambry.commons.SSLFactory) Http2ClientMetrics(com.github.ambry.network.http2.Http2ClientMetrics) Port(com.github.ambry.network.Port) ArrayList(java.util.ArrayList) NettySslHttp2Factory(com.github.ambry.commons.NettySslHttp2Factory) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) VerifiableProperties(com.github.ambry.config.VerifiableProperties) PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo) RequestInfo(com.github.ambry.network.RequestInfo) PutResponse(com.github.ambry.protocol.PutResponse) BlobAll(com.github.ambry.messageformat.BlobAll) GetRequest(com.github.ambry.protocol.GetRequest) HashSet(java.util.HashSet) ResponseInfo(com.github.ambry.network.ResponseInfo) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) MetricRegistry(com.codahale.metrics.MetricRegistry) PutRequest(com.github.ambry.protocol.PutRequest) Http2ClientConfig(com.github.ambry.config.Http2ClientConfig) DataInputStream(java.io.DataInputStream) GetResponse(com.github.ambry.protocol.GetResponse) BlobIdFactory(com.github.ambry.commons.BlobIdFactory) Http2NetworkClient(com.github.ambry.network.http2.Http2NetworkClient) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) BlobId(com.github.ambry.commons.BlobId) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) Test(org.junit.Test)
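
Both busy-wait loops in putGetTest share one shape: poll, sleep briefly, and fail once a deadline passes. A generic sketch of that pattern is below; the Supplier stands in for networkClient.sendAndPoll and is not an ambry API:

import java.util.List;
import java.util.function.Supplier;

public class PollUntilSketch {
    // polls until the supplier yields a non-empty list or timeoutMs elapses
    static <T> List<T> pollUntilNonEmpty(Supplier<List<T>> poll, long timeoutMs, long sleepMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        List<T> result = poll.get();
        while (result.isEmpty()) {
            if (System.currentTimeMillis() >= deadline) {
                throw new IllegalStateException("no response within " + timeoutMs + " ms");
            }
            Thread.sleep(sleepMs);
            result = poll.get();
        }
        return result;
    }

    public static void main(String[] args) throws InterruptedException {
        int[] calls = {0};
        // toy supplier: empty twice, then a single response
        List<String> out = pollUntilNonEmpty(
                () -> ++calls[0] < 3 ? List.of() : List.of("response"), 6000, 30);
        System.out.println(out);
    }
}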

Aggregations

MockClusterMap (com.github.ambry.clustermap.MockClusterMap): 109 usages
Test (org.junit.Test): 78 usages
MockPartitionId (com.github.ambry.clustermap.MockPartitionId): 57 usages
VerifiableProperties (com.github.ambry.config.VerifiableProperties): 55 usages
Properties (java.util.Properties): 50 usages
ArrayList (java.util.ArrayList): 44 usages
PartitionId (com.github.ambry.clustermap.PartitionId): 42 usages
MockDataNodeId (com.github.ambry.clustermap.MockDataNodeId): 40 usages
HashMap (java.util.HashMap): 36 usages
Map (java.util.Map): 33 usages
BlobId (com.github.ambry.commons.BlobId): 32 usages
ClusterMap (com.github.ambry.clustermap.ClusterMap): 31 usages
MetricRegistry (com.codahale.metrics.MetricRegistry): 30 usages
Port (com.github.ambry.network.Port): 29 usages
List (java.util.List): 28 usages
DataNodeId (com.github.ambry.clustermap.DataNodeId): 26 usages
ReplicaId (com.github.ambry.clustermap.ReplicaId): 26 usages
BlobIdFactory (com.github.ambry.commons.BlobIdFactory): 26 usages
BlobProperties (com.github.ambry.messageformat.BlobProperties): 23 usages
DataInputStream (java.io.DataInputStream): 23 usages