Use of com.github.ambry.rest.NettyClient.ResponseParts in project ambry by linkedin.
From the class FrontendIntegrationTest, method getNotModifiedBlobAndVerify:
/**
 * Gets the blob with blob ID {@code blobId} and verifies that the blob content is not returned, since the blob has
 * not been modified after the {@code If-Modified-Since} date sent with the request.
 * @param blobId the blob ID of the blob to GET.
 * @param getOption the {@link GetOption} to use while getting the blob ({@code null} for the default).
 * @param isPrivate {@code true} if the blob is private, {@code false} if not.
 * @throws Exception
 */
private void getNotModifiedBlobAndVerify(String blobId, GetOption getOption, boolean isPrivate) throws Exception {
  HttpHeaders headers = new DefaultHttpHeaders();
  if (getOption != null) {
    headers.add(RestUtils.Headers.GET_OPTION, getOption.toString());
  }
  headers.add(RestUtils.Headers.IF_MODIFIED_SINCE, new Date());
  FullHttpRequest httpRequest = buildRequest(HttpMethod.GET, blobId, headers, null);
  ResponseParts responseParts = nettyClient.sendRequest(httpRequest, null, null).get();
  HttpResponse response = getHttpResponse(responseParts);
  assertEquals("Unexpected response status", HttpResponseStatus.NOT_MODIFIED, response.status());
  assertNotNull("Date header should be set", response.headers().get(RestUtils.Headers.DATE));
  assertNotNull("Last-Modified header should be set", response.headers().get("Last-Modified"));
  assertNull("Content-Length should not be set", response.headers().get(RestUtils.Headers.CONTENT_LENGTH));
  assertNull("Accept-Ranges should not be set", response.headers().get(RestUtils.Headers.ACCEPT_RANGES));
  assertNull("Content-Range header should not be set", response.headers().get(RestUtils.Headers.CONTENT_RANGE));
  assertNull(RestUtils.Headers.BLOB_SIZE + " should have been null ",
      response.headers().get(RestUtils.Headers.BLOB_SIZE));
  assertNull("Content-Type should have been null", response.headers().get(RestUtils.Headers.CONTENT_TYPE));
  verifyCacheHeaders(isPrivate, response);
  assertNoContent(responseParts.queue);
}
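For context, a minimal caller sketch (not part of the original source; the blob ID, GetOption value, and isPrivate flag are assumed to come from earlier steps of the same test):

// Hypothetical usage inside FrontendIntegrationTest, after a blob has already been uploaded.
getNotModifiedBlobAndVerify(blobId, null, isPrivate);           // no explicit GetOption header
getNotModifiedBlobAndVerify(blobId, GetOption.None, isPrivate); // explicit GetOption header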
From the class FrontendIntegrationTest, method updateContainersAndVerify:
// accountApiTest() helpers
/**
 * Call the {@code POST /accounts/updateContainers} API to add or update containers in {@code account} and verify
 * that the update is reflected in the {@code AccountService}.
 * @param account the account in which to update containers.
 * @param containers the containers to update.
 * @throws Exception
 */
private void updateContainersAndVerify(Account account, Container... containers) throws Exception {
  byte[] containersUpdateJson = AccountCollectionSerde.serializeContainersInJson(Arrays.asList(containers));
  String accountName = account.getName();
  HttpHeaders headers = new DefaultHttpHeaders();
  headers.add(RestUtils.Headers.TARGET_ACCOUNT_NAME, accountName);
  FullHttpRequest request = buildRequest(HttpMethod.POST, Operations.ACCOUNTS_CONTAINERS, headers,
      ByteBuffer.wrap(containersUpdateJson));
  ResponseParts responseParts = nettyClient.sendRequest(request, null, null).get();
  HttpResponse response = getHttpResponse(responseParts);
  assertEquals("Unexpected response status", HttpResponseStatus.OK, response.status());
  // verify regular response header
  assertEquals("Unexpected account id in response header", account.getId(),
      Short.parseShort(response.headers().get(RestUtils.Headers.TARGET_ACCOUNT_ID)));
  assertEquals("Unexpected content type in response header", RestUtils.JSON_CONTENT_TYPE,
      response.headers().get(RestUtils.Headers.CONTENT_TYPE));
  verifyTrackingHeaders(response);
  ByteBuffer content = getContent(responseParts.queue, HttpUtil.getContentLength(response));
  Collection<Container> outputContainers = AccountCollectionSerde.containersFromInputStreamInJson(
      new ByteArrayInputStream(content.array()), account.getId());
  for (Container container : outputContainers) {
    assertEquals("Update not reflected in AccountService", container,
        ACCOUNT_SERVICE.getContainerByName(accountName, container.getName()));
  }
}
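A hedged usage sketch (not from the original source): the helper would typically be driven with a freshly created account and one or more new or modified containers; buildTestContainer below is a placeholder for whatever container-construction helper the real test uses.

// Hypothetical usage; buildTestContainer is an assumed helper, not part of this excerpt.
Account account = ACCOUNT_SERVICE.createAndAddRandomAccount();
Container newContainer = buildTestContainer(account.getId());
updateContainersAndVerify(account, newContainer);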
From the class FrontendIntegrationTest, method getAccounts:
/**
 * Call the {@code GET /accounts} API and deserialize the response.
 * @param accountName if non-null, fetch a single account by name instead of all accounts.
 * @param accountId if non-null (and {@code accountName} is null), fetch a single account by ID instead of all accounts.
 * @return the accounts fetched.
 * @throws Exception
 */
private Set<Account> getAccounts(String accountName, Short accountId) throws Exception {
  HttpHeaders headers = new DefaultHttpHeaders();
  if (accountName != null) {
    headers.add(RestUtils.Headers.TARGET_ACCOUNT_NAME, accountName);
  } else if (accountId != null) {
    headers.add(RestUtils.Headers.TARGET_ACCOUNT_ID, accountId);
  }
  FullHttpRequest request = buildRequest(HttpMethod.GET, Operations.ACCOUNTS, headers, null);
  ResponseParts responseParts = nettyClient.sendRequest(request, null, null).get();
  HttpResponse response = getHttpResponse(responseParts);
  assertEquals("Unexpected response status", HttpResponseStatus.OK, response.status());
  verifyTrackingHeaders(response);
  ByteBuffer content = getContent(responseParts.queue, HttpUtil.getContentLength(response));
  return new HashSet<>(
      AccountCollectionSerde.accountsFromInputStreamInJson(new ByteArrayInputStream(content.array())));
}
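A hedged usage sketch (not from the original source), assuming an Account instance named account is already in scope from earlier test setup; it shows the three ways the helper can be invoked.

// Hypothetical usage of the helper above.
Set<Account> allAccounts = getAccounts(null, null);          // all accounts
Set<Account> byName = getAccounts(account.getName(), null);  // single account by name
Set<Account> byId = getAccounts(null, account.getId());      // single account by id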
From the class FrontendIntegrationTest, method stitchBlobAndVerify:
/**
 * Stitch the given signed chunk IDs into a single blob via the {@code POST /stitch} API and verify the result via
 * GET, HEAD, TTL update, and delete operations.
 * @param account the {@link Account} to upload into.
 * @param container the {@link Container} to upload into.
 * @param signedChunkIds the list of signed chunk IDs to stitch together.
 * @param fullContentArray the content to compare the stitched blob against.
 * @param stitchedBlobSize the expected size of the stitched blob (checked against the POST response).
 * @throws Exception
 */
private void stitchBlobAndVerify(Account account, Container container, List<String> signedChunkIds,
    byte[] fullContentArray, long stitchedBlobSize) throws Exception {
  // stitchBlob
  HttpHeaders stitchHeaders = new DefaultHttpHeaders();
  setAmbryHeadersForPut(stitchHeaders, TTL_SECS, !container.isCacheable(), "stitcher", "video/mp4",
      "stitchedUploadTest", account.getName(), container.getName());
  HttpRequest httpRequest = buildRequest(HttpMethod.POST, Operations.STITCH, stitchHeaders,
      ByteBuffer.wrap(StitchRequestSerDe.toJson(signedChunkIds).toString().getBytes(StandardCharsets.UTF_8)));
  ResponseParts responseParts = nettyClient.sendRequest(httpRequest, null, null).get();
  String stitchedBlobId = verifyPostAndReturnBlobId(responseParts, stitchedBlobSize, true);
  HttpHeaders expectedGetHeaders = new DefaultHttpHeaders().add(stitchHeaders);
  // Test different request types on stitched blob ID
  // (getBlobInfo, getBlob, getBlob w/ range, head, updateBlobTtl, deleteBlob)
  expectedGetHeaders.add(RestUtils.Headers.BLOB_SIZE, fullContentArray.length);
  expectedGetHeaders.set(RestUtils.Headers.LIFE_VERSION, "0");
  getBlobInfoAndVerify(stitchedBlobId, GetOption.None, expectedGetHeaders, !container.isCacheable(),
      account.getName(), container.getName(), null);
  List<ByteRange> ranges = new ArrayList<>();
  ranges.add(null);
  ranges.add(ByteRanges.fromLastNBytes(ThreadLocalRandom.current().nextLong(fullContentArray.length + 1)));
  ranges.add(ByteRanges.fromStartOffset(ThreadLocalRandom.current().nextLong(fullContentArray.length)));
  long random1 = ThreadLocalRandom.current().nextLong(fullContentArray.length);
  long random2 = ThreadLocalRandom.current().nextLong(fullContentArray.length);
  ranges.add(ByteRanges.fromOffsetRange(Math.min(random1, random2), Math.max(random1, random2)));
  for (ByteRange range : ranges) {
    getBlobAndVerify(stitchedBlobId, range, GetOption.None, false, expectedGetHeaders, !container.isCacheable(),
        ByteBuffer.wrap(fullContentArray), account.getName(), container.getName());
    getHeadAndVerify(stitchedBlobId, range, GetOption.None, expectedGetHeaders, !container.isCacheable(),
        account.getName(), container.getName());
  }
  updateBlobTtlAndVerify(stitchedBlobId, expectedGetHeaders, !container.isCacheable(), account.getName(),
      container.getName(), null);
  // Delete stitched blob.
  deleteBlobAndVerify(stitchedBlobId);
  verifyOperationsAfterDelete(stitchedBlobId, expectedGetHeaders, !container.isCacheable(), account.getName(),
      container.getName(), ByteBuffer.wrap(fullContentArray), null);
}
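A hedged caller sketch (not from the original source): account and container are assumed to be in scope, the signed chunk IDs are assumed to come from a prior signed-chunk upload step (represented here by a placeholder helper), and the expected full content is the concatenation of the chunk contents.

// Hypothetical usage; uploadChunksAndGetSignedIds is an assumed helper, not part of this excerpt.
int chunkSize = 1024;
int numChunks = 4;
byte[] fullContent = TestUtils.getRandomBytes(chunkSize * numChunks);
List<String> signedChunkIds = uploadChunksAndGetSignedIds(fullContent, chunkSize);
stitchBlobAndVerify(account, container, signedChunkIds, fullContent, fullContent.length);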
From the class FrontendIntegrationTest, method multipartPostGetHeadUpdateDeleteUndeleteTest:
/**
 * Tests multipart POST and verifies it via GET, HEAD, TTL update, delete, and undelete operations. Also verifies
 * that a multipart POST larger than the configured maximum size is rejected.
 * @throws Exception
 */
@Test
public void multipartPostGetHeadUpdateDeleteUndeleteTest() throws Exception {
  Account refAccount = ACCOUNT_SERVICE.createAndAddRandomAccount();
  Container refContainer = refAccount.getContainerById(Container.DEFAULT_PUBLIC_CONTAINER_ID);
  doPostGetHeadUpdateDeleteUndeleteTest(0, refAccount, refContainer, refAccount.getName(),
      !refContainer.isCacheable(), refAccount.getName(), refContainer.getName(), true);
  doPostGetHeadUpdateDeleteUndeleteTest((int) FRONTEND_CONFIG.chunkedGetResponseThresholdInBytes * 3, refAccount,
      refContainer, refAccount.getName(), !refContainer.isCacheable(), refAccount.getName(),
      refContainer.getName(), true);
  // failure case
  // size of content being POSTed is higher than what is allowed via multipart/form-data
  long maxAllowedSizeBytes = new NettyConfig(FRONTEND_VERIFIABLE_PROPS).nettyMultipartPostMaxSizeBytes;
  ByteBuffer content = ByteBuffer.wrap(TestUtils.getRandomBytes((int) maxAllowedSizeBytes + 1));
  HttpHeaders headers = new DefaultHttpHeaders();
  setAmbryHeadersForPut(headers, TTL_SECS, !refContainer.isCacheable(), refAccount.getName(),
      "application/octet-stream", null, refAccount.getName(), refContainer.getName());
  HttpRequest httpRequest = RestTestUtils.createRequest(HttpMethod.POST, "/", headers);
  HttpPostRequestEncoder encoder = createEncoder(httpRequest, content, ByteBuffer.allocate(0));
  ResponseParts responseParts = nettyClient.sendRequest(encoder.finalizeRequest(), encoder, null).get();
  HttpResponse response = getHttpResponse(responseParts);
  assertEquals("Unexpected response status", HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
  assertTrue("No Date header", response.headers().getTimeMillis(HttpHeaderNames.DATE, -1) != -1);
  assertFalse("Channel should not be active", HttpUtil.isKeepAlive(response));
}
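The failure case above hinges on NettyConfig.nettyMultipartPostMaxSizeBytes. As a hedged sketch of where that limit comes from (the property key below is an assumption, not confirmed by this excerpt), the test properties could cap the multipart POST size like this:

// Hypothetical configuration sketch; the exact property key is assumed.
Properties properties = new Properties();
properties.setProperty("netty.multipart.post.max.size.bytes", Long.toString(10 * 1024));
long maxSize = new NettyConfig(new VerifiableProperties(properties)).nettyMultipartPostMaxSizeBytes;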