Example 6 with RouterException

Use of com.github.ambry.router.RouterException in project ambry by linkedin.

From the class RejectingQuotaChargeCallback, the charge method:

@Override
public void charge(long chunkSize) throws QuotaException {
    try {
        Map<QuotaName, Double> requestCost = requestCostPolicy.calculateRequestQuotaCharge(restRequest, chunkSize)
            .entrySet()
            .stream()
            .collect(Collectors.toMap(entry -> QuotaName.valueOf(entry.getKey()), Map.Entry::getValue));
        QuotaAction quotaAction = quotaManager.chargeAndRecommend(restRequest, requestCost, false, true);
        if (QuotaUtils.shouldThrottle(quotaAction) && isQuotaEnforcedOnRequest) {
            if (quotaManager.getQuotaMode() == QuotaMode.THROTTLING && quotaManager.getQuotaConfig().throttleInProgressRequests) {
                throw new QuotaException("Exception while charging quota", new RouterException("RequestQuotaExceeded", RouterErrorCode.TooManyRequests), false);
            } else {
                LOGGER.debug("Quota exceeded for an in progress request.");
            }
        }
    } catch (Exception ex) {
        if (ex.getCause() instanceof RouterException && ((RouterException) ex.getCause()).getErrorCode().equals(RouterErrorCode.TooManyRequests)) {
            throw ex;
        }
        LOGGER.error("Unexpected exception while charging quota.", ex);
    }
}
Also used : Logger(org.slf4j.Logger) LoggerFactory(org.slf4j.LoggerFactory) Map(java.util.Map) Collectors(java.util.stream.Collectors) RestRequest(com.github.ambry.rest.RestRequest) RouterErrorCode(com.github.ambry.router.RouterErrorCode) RouterException(com.github.ambry.router.RouterException)
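
For context, here is a minimal caller-side sketch of the contract this charge method implements: if the thrown QuotaException wraps a RouterException with RouterErrorCode.TooManyRequests, the caller should reject the request. The helper name and the com.github.ambry.quota package locations are assumptions for illustration, not the project's own API.

import com.github.ambry.quota.QuotaChargeCallback; // package assumed for illustration
import com.github.ambry.quota.QuotaException;      // package assumed for illustration
import com.github.ambry.router.RouterErrorCode;
import com.github.ambry.router.RouterException;

/**
 * Hypothetical helper: charges {@code chunkSize} against quota and reports whether the caller should
 * reject the request because the quota was exceeded.
 */
static boolean chargeAndCheckRejected(QuotaChargeCallback quotaChargeCallback, long chunkSize) {
    try {
        quotaChargeCallback.charge(chunkSize);
        return false;
    } catch (QuotaException quotaException) {
        // Mirrors the check in the example above: only a TooManyRequests router error means "throttle".
        Throwable cause = quotaException.getCause();
        return cause instanceof RouterException
            && ((RouterException) cause).getErrorCode() == RouterErrorCode.TooManyRequests;
    }
}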

Example 7 with RouterException

Use of com.github.ambry.router.RouterException in project ambry by linkedin.

From the class ServerTestUtil, the undeleteCornerCasesTest method:

static void undeleteCornerCasesTest(MockCluster cluster, PortType portType, SSLConfig clientSSLConfig1, SSLConfig clientSSLConfig2, SSLConfig clientSSLConfig3, SSLSocketFactory clientSSLSocketFactory1, SSLSocketFactory clientSSLSocketFactory2, SSLSocketFactory clientSSLSocketFactory3, MockNotificationSystem notificationSystem, Properties routerProps, boolean testEncryption) {
    MockClusterMap clusterMap = cluster.getClusterMap();
    byte[] userMetadata = new byte[1000];
    byte[] data = new byte[31870];
    byte[] encryptionKey = new byte[100];
    short accountId = Utils.getRandomShort(TestUtils.RANDOM);
    short containerId = Utils.getRandomShort(TestUtils.RANDOM);
    BlobProperties properties = new BlobProperties(31870, "serviceid1", accountId, containerId, testEncryption, cluster.time.milliseconds());
    TestUtils.RANDOM.nextBytes(userMetadata);
    TestUtils.RANDOM.nextBytes(data);
    if (testEncryption) {
        TestUtils.RANDOM.nextBytes(encryptionKey);
    }
    short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
    Map<String, List<DataNodeId>> dataNodesPerDC = clusterMap.getDataNodes().stream().collect(Collectors.groupingBy(DataNodeId::getDatacenterName));
    Map<String, Pair<SSLConfig, SSLSocketFactory>> sslSettingPerDC = new HashMap<>();
    sslSettingPerDC.put("DC1", new Pair<>(clientSSLConfig1, clientSSLSocketFactory1));
    sslSettingPerDC.put("DC2", new Pair<>(clientSSLConfig2, clientSSLSocketFactory2));
    sslSettingPerDC.put("DC3", new Pair<>(clientSSLConfig3, clientSSLSocketFactory3));
    List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(MockClusterMap.DEFAULT_PARTITION_CLASS);
    DataNodeId dataNodeId = dataNodesPerDC.get("DC1").get(0);
    Router router = null;
    try {
        Properties routerProperties = getRouterProps("DC1");
        routerProperties.putAll(routerProps);
        VerifiableProperties routerVerifiableProps = new VerifiableProperties(routerProperties);
        AccountService accountService = new InMemAccountService(false, true);
        router = new NonBlockingRouterFactory(routerVerifiableProps, clusterMap, new MockNotificationSystem(clusterMap), getSSLFactoryIfRequired(routerVerifiableProps), accountService).getRouter();
        // channels to all datanodes
        List<ConnectedChannel> channels = new ArrayList<>();
        for (Map.Entry<String, List<DataNodeId>> entry : dataNodesPerDC.entrySet()) {
            Pair<SSLConfig, SSLSocketFactory> pair = sslSettingPerDC.get(entry.getKey());
            for (DataNodeId node : entry.getValue()) {
                ConnectedChannel connectedChannel = getBlockingChannelBasedOnPortType(portType, node, pair.getSecond(), pair.getFirst());
                connectedChannel.connect();
                channels.add(connectedChannel);
            }
        }
        // ////////////////////////////////////////////////////
        // Corner case 1: When only one datacenter has delete
        // ////////////////////////////////////////////////////
        BlobId blobId1 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
        ConnectedChannel channel = getBlockingChannelBasedOnPortType(portType, dataNodeId, clientSSLSocketFactory1, clientSSLConfig1);
        channel.connect();
        PutRequest putRequest = new PutRequest(1, "client1", blobId1, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
        DataInputStream putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
        PutResponse response = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response.getError());
        notificationSystem.awaitBlobCreations(blobId1.toString());
        // Now stop replication for this partition.
        PartitionId partitionId = blobId1.getPartition();
        controlReplicationForPartition(channels, partitionId, false);
        // Now send the delete to two data nodes in the same DC
        List<DataNodeId> toBeDeleteDataNodes = dataNodesPerDC.values().stream().findFirst().get();
        Pair<SSLConfig, SSLSocketFactory> pair = sslSettingPerDC.get(toBeDeleteDataNodes.get(0).getDatacenterName());
        ConnectedChannel channel1 = getBlockingChannelBasedOnPortType(portType, toBeDeleteDataNodes.get(0), pair.getSecond(), pair.getFirst());
        channel1.connect();
        ConnectedChannel channel2 = getBlockingChannelBasedOnPortType(portType, toBeDeleteDataNodes.get(1), pair.getSecond(), pair.getFirst());
        channel2.connect();
        DeleteRequest deleteRequest1 = new DeleteRequest(1, "deleteClient", blobId1, System.currentTimeMillis());
        DataInputStream stream = channel1.sendAndReceive(deleteRequest1).getInputStream();
        DeleteResponse deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        DeleteRequest deleteRequest2 = new DeleteRequest(1, "deleteClient", blobId1, deleteRequest1.getDeletionTimeInMs());
        stream = channel2.sendAndReceive(deleteRequest2).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        // Now send the undelete operation through the router; it should fail with a BlobNotDeleted error.
        Future<Void> future = router.undeleteBlob(blobId1.toString(), "service");
        try {
            future.get();
            fail("Undelete blob " + blobId1.toString() + " should fail");
        } catch (ExecutionException e) {
            assertTrue(e.getCause() instanceof RouterException);
            assertEquals(RouterErrorCode.BlobNotDeleted, ((RouterException) e.getCause()).getErrorCode());
        }
        // Check whether data node 1 or data node 2 has the undelete; if one does, the undelete will replicate.
        // Otherwise, the delete will replicate.
        List<PartitionRequestInfo> partitionRequestInfoList = getPartitionRequestInfoListFromBlobId(blobId1);
        boolean hasUndelete = false;
        for (ConnectedChannel connectedChannel : new ConnectedChannel[] { channel1, channel2 }) {
            GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
            stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
            GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
            assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
            MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
            hasUndelete = getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion() == (short) 1;
            if (hasUndelete) {
                break;
            }
        }
        releaseNettyBufUnderneathStream(stream);
        // Now restart the replication
        controlReplicationForPartition(channels, partitionId, true);
        if (hasUndelete) {
            notificationSystem.awaitBlobUndeletes(blobId1.toString());
        } else {
            notificationSystem.awaitBlobDeletions(blobId1.toString());
        }
        for (ConnectedChannel connectedChannel : channels) {
            GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
            stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
            GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
            releaseNettyBufUnderneathStream(stream);
            assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
            MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
            if (hasUndelete) {
                assertEquals((short) 1, getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion());
                assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
                assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
            } else {
                assertEquals((short) 0, getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion());
                assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
                assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
            }
        }
        // ///////////////////////////////////////////////////////////
        // Corner case 2: two data nodes have different life versions
        // //////////////////////////////////////////////////////////
        BlobId blobId2 = new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, clusterMap.getLocalDatacenterId(), properties.getAccountId(), properties.getContainerId(), partitionIds.get(0), false, BlobId.BlobDataType.DATACHUNK);
        putRequest = new PutRequest(1, "client1", blobId2, properties, ByteBuffer.wrap(userMetadata), Unpooled.wrappedBuffer(data), properties.getBlobSize(), BlobType.DataBlob, testEncryption ? ByteBuffer.wrap(encryptionKey) : null);
        putResponseStream = channel.sendAndReceive(putRequest).getInputStream();
        response = PutResponse.readFrom(putResponseStream);
        releaseNettyBufUnderneathStream(putResponseStream);
        assertEquals(ServerErrorCode.No_Error, response.getError());
        notificationSystem.awaitBlobCreations(blobId2.toString());
        // Now delete this blob on all servers.
        DeleteRequest deleteRequest = new DeleteRequest(1, "deleteClient", blobId2, System.currentTimeMillis());
        stream = channel.sendAndReceive(deleteRequest).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        notificationSystem.awaitBlobDeletions(blobId2.toString());
        // Now stop the replication
        partitionId = blobId2.getPartition();
        controlReplicationForPartition(channels, partitionId, false);
        // Now send the undelete to two data nodes in the same DC and then send delete
        UndeleteRequest undeleteRequest = new UndeleteRequest(1, "undeleteClient", blobId2, System.currentTimeMillis());
        stream = channel1.sendAndReceive(undeleteRequest).getInputStream();
        UndeleteResponse undeleteResponse = UndeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
        assertEquals((short) 1, undeleteResponse.getLifeVersion());
        undeleteRequest = new UndeleteRequest(1, "undeleteClient", blobId2, undeleteRequest.getOperationTimeMs());
        stream = channel2.sendAndReceive(undeleteRequest).getInputStream();
        undeleteResponse = UndeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, undeleteResponse.getError());
        assertEquals((short) 1, undeleteResponse.getLifeVersion());
        deleteRequest1 = new DeleteRequest(1, "deleteClient", blobId2, System.currentTimeMillis());
        stream = channel1.sendAndReceive(deleteRequest1).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        deleteRequest2 = new DeleteRequest(1, "deleteClient", blobId2, deleteRequest1.getDeletionTimeInMs());
        stream = channel2.sendAndReceive(deleteRequest2).getInputStream();
        deleteResponse = DeleteResponse.readFrom(stream);
        releaseNettyBufUnderneathStream(stream);
        assertEquals(ServerErrorCode.No_Error, deleteResponse.getError());
        // Now send the undelete operation through the router; it should fail with a LifeVersionConflict error.
        future = router.undeleteBlob(blobId2.toString(), "service");
        try {
            future.get();
            fail("Undelete blob " + blobId2.toString() + " should fail");
        } catch (ExecutionException e) {
            assertTrue(e.getCause() instanceof RouterException);
            assertEquals(RouterErrorCode.LifeVersionConflict, ((RouterException) e.getCause()).getErrorCode());
        }
        // Now restart the replication
        controlReplicationForPartition(channels, partitionId, true);
        notificationSystem.awaitBlobUndeletes(blobId2.toString());
        // Now that replication has resumed, the undelete at lifeVersion 2 will eventually be replicated to all servers.
        partitionRequestInfoList = getPartitionRequestInfoListFromBlobId(blobId2);
        for (ConnectedChannel connectedChannel : channels) {
            // Even if the notificationSystem acknowledged the undelete, it might have been triggered by the
            // undelete at lifeVersion 1, so check in a loop with a timeout.
            long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(10);
            while (true) {
                GetRequest getRequest = new GetRequest(1, "clientId1", MessageFormatFlags.BlobProperties, partitionRequestInfoList, GetOption.Include_All);
                stream = connectedChannel.sendAndReceive(getRequest).getInputStream();
                GetResponse getResponse = GetResponse.readFrom(stream, clusterMap);
                assertEquals(ServerErrorCode.No_Error, getResponse.getPartitionResponseInfoList().get(0).getErrorCode());
                MessageFormatRecord.deserializeBlobProperties(getResponse.getInputStream());
                if (getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).getLifeVersion() == 2) {
                    assertTrue(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isUndeleted());
                    assertFalse(getResponse.getPartitionResponseInfoList().get(0).getMessageInfoList().get(0).isDeleted());
                    break;
                } else {
                    Thread.sleep(1000);
                    if (System.currentTimeMillis() > deadline) {
                        throw new TimeoutException("Fail to get blob " + blobId2 + " at lifeversion 2 at " + connectedChannel.getRemoteHost());
                    }
                }
            }
        }
        releaseNettyBufUnderneathStream(stream);
        for (ConnectedChannel connectedChannel : channels) {
            connectedChannel.disconnect();
        }
        channel1.disconnect();
        channel2.disconnect();
        channel.disconnect();
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    } finally {
        if (router != null) {
            try {
                router.close();
            } catch (Exception e) {
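                // Ignored: best-effort cleanup of the router.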
            }
        }
    }
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) UndeleteRequest(com.github.ambry.protocol.UndeleteRequest) UndeleteResponse(com.github.ambry.protocol.UndeleteResponse) InMemAccountService(com.github.ambry.account.InMemAccountService) GetRequest(com.github.ambry.protocol.GetRequest) List(java.util.List) SSLSocketFactory(javax.net.ssl.SSLSocketFactory) TimeoutException(java.util.concurrent.TimeoutException) VerifiableProperties(com.github.ambry.config.VerifiableProperties) Router(com.github.ambry.router.Router) ConnectedChannel(com.github.ambry.network.ConnectedChannel) MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) DeleteResponse(com.github.ambry.protocol.DeleteResponse) DataNodeId(com.github.ambry.clustermap.DataNodeId) MockDataNodeId(com.github.ambry.clustermap.MockDataNodeId) AccountService(com.github.ambry.account.AccountService) Map(java.util.Map) ClusterMap(com.github.ambry.clustermap.ClusterMap) MockClusterMap(com.github.ambry.clustermap.MockClusterMap) BlobProperties(com.github.ambry.messageformat.BlobProperties) Properties(java.util.Properties) PutResponse(com.github.ambry.protocol.PutResponse) ExecutionException(java.util.concurrent.ExecutionException) Pair(com.github.ambry.utils.Pair) NonBlockingRouterFactory(com.github.ambry.router.NonBlockingRouterFactory) SSLConfig(com.github.ambry.config.SSLConfig) RouterException(com.github.ambry.router.RouterException) PutRequest(com.github.ambry.protocol.PutRequest) NettyByteBufDataInputStream(com.github.ambry.utils.NettyByteBufDataInputStream) DataInputStream(java.io.DataInputStream) PartitionRequestInfo(com.github.ambry.protocol.PartitionRequestInfo) GetResponse(com.github.ambry.protocol.GetResponse) IOException(java.io.IOException) MessageFormatException(com.github.ambry.messageformat.MessageFormatException) BlobId(com.github.ambry.commons.BlobId) DeleteRequest(com.github.ambry.protocol.DeleteRequest)
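
The two router-side failure checks in the test above repeat the same pattern: wait on the future, expect an ExecutionException, and assert that its cause is a RouterException with a specific error code. Below is a minimal sketch of how that could be factored into a helper inside the same test class; the method name is hypothetical and the JUnit asserts match those already used in the surrounding code.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import com.github.ambry.router.RouterErrorCode;
import com.github.ambry.router.RouterException;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

/**
 * Hypothetical test helper: asserts that the operation behind {@code future} fails with the given RouterErrorCode.
 */
static void assertFailsWithRouterErrorCode(Future<Void> future, RouterErrorCode expectedErrorCode)
    throws InterruptedException {
    try {
        future.get();
        fail("Operation should have failed with " + expectedErrorCode);
    } catch (ExecutionException e) {
        assertTrue("Cause should be a RouterException", e.getCause() instanceof RouterException);
        assertEquals(expectedErrorCode, ((RouterException) e.getCause()).getErrorCode());
    }
}

With such a helper, the two try/catch blocks above reduce to single calls such as assertFailsWithRouterErrorCode(router.undeleteBlob(blobId1.toString(), "service"), RouterErrorCode.BlobNotDeleted).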

Example 8 with RouterException

Use of com.github.ambry.router.RouterException in project ambry by linkedin.

From the class AmbryBlobStorageService, the submitResponse method:

/**
 * Submits the response and {@code responseBody} (and any {@code exception}) for the {@code restRequest} to the
 * {@code responseHandler}.
 * @param restRequest the {@link RestRequest} for which a response is ready.
 * @param restResponseChannel the {@link RestResponseChannel} over which the response can be sent.
 * @param responseBody the body of the response in the form of a {@link ReadableStreamChannel}.
 * @param exception any {@link Exception} that occurred during the handling of {@code restRequest}.
 */
void submitResponse(RestRequest restRequest, RestResponseChannel restResponseChannel, ReadableStreamChannel responseBody, Exception exception) {
    try {
        if (exception != null && exception instanceof RouterException) {
            exception = new RestServiceException(exception, RestServiceErrorCode.getRestServiceErrorCode(((RouterException) exception).getErrorCode()));
        }
        responseHandler.handleResponse(restRequest, restResponseChannel, responseBody, exception);
    } catch (Exception e) {
        frontendMetrics.responseSubmissionError.inc();
        if (exception != null) {
            logger.error("Error submitting response to response handler", e);
        } else {
            exception = e;
        }
        logger.error("Handling of request {} failed", restRequest.getUri(), exception);
        restResponseChannel.onResponseComplete(exception);
        if (responseBody != null) {
            try {
                responseBody.close();
            } catch (IOException ioe) {
                frontendMetrics.resourceReleaseError.inc();
                logger.error("Error closing ReadableStreamChannel", e);
            }
        }
    }
}
Also used : RestServiceException(com.github.ambry.rest.RestServiceException) RouterException(com.github.ambry.router.RouterException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException)
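
The RouterException-to-RestServiceException translation at the top of this method is a pattern that recurs across the frontend (the next example does the same thing). Here is a minimal sketch of that translation pulled out on its own; the helper name is hypothetical, and only calls already shown in the examples above are used.

import com.github.ambry.rest.RestServiceErrorCode;
import com.github.ambry.rest.RestServiceException;
import com.github.ambry.router.RouterException;

/**
 * Hypothetical helper: converts a RouterException into the equivalent RestServiceException so the REST
 * layer can report a meaningful error code; any other exception is passed through unchanged.
 */
static Exception toRestServiceException(Exception exception) {
    if (exception instanceof RouterException) {
        return new RestServiceException(exception,
            RestServiceErrorCode.getRestServiceErrorCode(((RouterException) exception).getErrorCode()));
    }
    return exception;
}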

Example 9 with RouterException

Use of com.github.ambry.router.RouterException in project ambry by linkedin.

From the class MockHeadCallback, the onCompletion method:

/**
 * If there was no exception, sets headers and submits response.
 * @param result The result of the request - a {@link GetBlobResult} object with the {@link BlobInfo} containing the
 *               blob properties and other headers of the blob, and the {@link ReadableStreamChannel} of blob data.
 *               This is non null if the request executed successfully.
 * @param exception The exception that was reported on execution of the request (if any).
 */
@Override
public void onCompletion(GetBlobResult result, Exception exception) {
    try {
        restResponseChannel.setHeader(RestUtils.Headers.DATE, new GregorianCalendar().getTime());
        if (exception == null && result != null) {
            setResponseHeaders(result.getBlobInfo());
        } else if (exception != null && exception instanceof RouterException) {
            exception = new RestServiceException(exception, RestServiceErrorCode.getRestServiceErrorCode(((RouterException) exception).getErrorCode()));
        }
    } catch (Exception e) {
        exception = exception == null ? e : exception;
    } finally {
        ReadableStreamChannel channel = result != null ? result.getBlobDataChannel() : null;
        mockRestRequestService.handleResponse(restRequest, restResponseChannel, channel, exception);
    }
}
Also used : RouterException(com.github.ambry.router.RouterException) ReadableStreamChannel(com.github.ambry.router.ReadableStreamChannel) GregorianCalendar(java.util.GregorianCalendar) IOException(java.io.IOException)
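
To see what the translation gives the client, here is a hedged demonstration sketch: a router-level BlobDoesNotExist becomes a RestServiceException whose code comes from RestServiceErrorCode.getRestServiceErrorCode. It assumes RestServiceException exposes a getErrorCode() accessor, and the method name demoRouterToRestTranslation is made up for illustration.

import com.github.ambry.rest.RestServiceErrorCode;
import com.github.ambry.rest.RestServiceException;
import com.github.ambry.router.RouterErrorCode;
import com.github.ambry.router.RouterException;

static void demoRouterToRestTranslation() {
    RouterException routerException = new RouterException("NotFound", RouterErrorCode.BlobDoesNotExist);
    RestServiceErrorCode mappedCode = RestServiceErrorCode.getRestServiceErrorCode(routerException.getErrorCode());
    RestServiceException restException = new RestServiceException(routerException, mappedCode);
    // The REST layer can now report mappedCode while keeping the original router failure as the cause.
    assert restException.getErrorCode() == mappedCode; // getErrorCode() accessor assumed
    assert restException.getCause() == routerException;
}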

Example 10 with RouterException

Use of com.github.ambry.router.RouterException in project ambry by linkedin.

From the class MockRouter, the deleteBlob method:

@Override
public Future<Void> deleteBlob(String blobId, String serviceId, Callback<Void> callback, QuotaChargeCallback quotaChargeCallback) {
    lock.lock();
    try {
        FutureResult<Void> future = new FutureResult<>();
        BlobInfoAndData blob = allBlobs.get(blobId);
        if (blob == null) {
            Exception e = new RouterException("NotFound", RouterErrorCode.BlobDoesNotExist);
            future.done(null, e);
            if (callback != null) {
                callback.onCompletion(null, e);
            }
            return future;
        }
        allBlobs.remove(blobId);
        future.done(null, null);
        if (callback != null) {
            callback.onCompletion(null, null);
        }
        return future;
    } finally {
        lock.unlock();
    }
}
Also used : RouterException(com.github.ambry.router.RouterException) FutureResult(com.github.ambry.router.FutureResult) IOException(java.io.IOException)
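
A hedged usage sketch of this mock: deleting an id that was never put should complete the future exceptionally with BlobDoesNotExist. Passing null for both callbacks matches what the method above tolerates; how the MockRouter instance and the blob id are obtained is left to the surrounding test, and the method name is made up for illustration.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

import com.github.ambry.router.RouterErrorCode;
import com.github.ambry.router.RouterException;

static void expectBlobDoesNotExist(MockRouter mockRouter) throws InterruptedException {
    Future<Void> deleteFuture = mockRouter.deleteBlob("unknown-blob-id", "test-service", null, null);
    try {
        deleteFuture.get();
        throw new AssertionError("Deleting an unknown blob should have failed");
    } catch (ExecutionException e) {
        // The mock reports the missing blob as RouterErrorCode.BlobDoesNotExist.
        RouterException routerException = (RouterException) e.getCause();
        assert routerException.getErrorCode() == RouterErrorCode.BlobDoesNotExist;
    }
}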

Aggregations

RouterException (com.github.ambry.router.RouterException): 13 usages
IOException (java.io.IOException): 8 usages
Map (java.util.Map): 4 usages
ExecutionException (java.util.concurrent.ExecutionException): 4 usages
Test (org.junit.Test): 4 usages
ClusterMap (com.github.ambry.clustermap.ClusterMap): 3 usages
BlobId (com.github.ambry.commons.BlobId): 3 usages
RestRequest (com.github.ambry.rest.RestRequest): 3 usages
GetBlobOptionsBuilder (com.github.ambry.router.GetBlobOptionsBuilder): 3 usages
HashMap (java.util.HashMap): 3 usages
AccountService (com.github.ambry.account.AccountService): 2 usages
ByteBufferReadableStreamChannel (com.github.ambry.commons.ByteBufferReadableStreamChannel): 2 usages
MockRestResponseChannel (com.github.ambry.rest.MockRestResponseChannel): 2 usages
RestResponseChannel (com.github.ambry.rest.RestResponseChannel): 2 usages
RestServiceException (com.github.ambry.rest.RestServiceException): 2 usages
RestUtilsTest (com.github.ambry.rest.RestUtilsTest): 2 usages
FutureResult (com.github.ambry.router.FutureResult): 2 usages
ReadableStreamChannel (com.github.ambry.router.ReadableStreamChannel): 2 usages
Router (com.github.ambry.router.Router): 2 usages
RouterErrorCode (com.github.ambry.router.RouterErrorCode): 2 usages