Use of com.amazonaws.services.s3.internal.MD5DigestCalculatingInputStream in the OpenSearch project (opensearch-project), taken from the testWriteLargeBlob method of the S3BlobContainerRetriesTests class. The test stands up a mock S3 HTTP endpoint that fails each multipart-upload request at least once and verifies that the blob container retries the initiate, upload-part, and complete requests until the large blob is fully written.
public void testWriteLargeBlob() throws Exception {
    final boolean useTimeout = rarely();
    final TimeValue readTimeout = useTimeout ? TimeValue.timeValueMillis(randomIntBetween(100, 500)) : null;
    final ByteSizeValue bufferSize = new ByteSizeValue(5, ByteSizeUnit.MB);
    final BlobContainer blobContainer = createBlobContainer(null, readTimeout, true, bufferSize);

    final int parts = randomIntBetween(1, 5);
    final long lastPartSize = randomLongBetween(10, 512);
    final long blobSize = (parts * bufferSize.getBytes()) + lastPartSize;

    // we want all requests to fail at least once
    final int nbErrors = 2;
    final CountDown countDownInitiate = new CountDown(nbErrors);
    // each of the (parts + 1) upload-part requests must fail once and succeed once
    final AtomicInteger countDownUploads = new AtomicInteger(nbErrors * (parts + 1));
    final CountDown countDownComplete = new CountDown(nbErrors);

    httpServer.createContext("/bucket/write_large_blob", exchange -> {
        final long contentLength = Long.parseLong(exchange.getRequestHeaders().getFirst("Content-Length"));

        if ("POST".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery().equals("uploads")) {
            // initiate multipart upload request
            if (countDownInitiate.countDown()) {
                byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
                    + "<InitiateMultipartUploadResult>\n"
                    + " <Bucket>bucket</Bucket>\n"
                    + " <Key>write_large_blob</Key>\n"
                    + " <UploadId>TEST</UploadId>\n"
                    + "</InitiateMultipartUploadResult>").getBytes(StandardCharsets.UTF_8);
                exchange.getResponseHeaders().add("Content-Type", "application/xml");
                exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length);
                exchange.getResponseBody().write(response);
                exchange.close();
                return;
            }
        } else if ("PUT".equals(exchange.getRequestMethod())
            && exchange.getRequestURI().getQuery().contains("uploadId=TEST")
            && exchange.getRequestURI().getQuery().contains("partNumber=")) {
            // upload part request
            MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody());
            BytesReference bytes = Streams.readFully(md5);
            assertThat((long) bytes.length(), anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes())));
            assertThat(contentLength, anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes())));
            // succeed on every other attempt so that each part fails at least once
            if (countDownUploads.decrementAndGet() % 2 == 0) {
                exchange.getResponseHeaders().add("ETag", Base16.encodeAsString(md5.getMd5Digest()));
                exchange.sendResponseHeaders(HttpStatus.SC_OK, -1);
                exchange.close();
                return;
            }
        } else if ("POST".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery().equals("uploadId=TEST")) {
            // complete multipart upload request
            if (countDownComplete.countDown()) {
                Streams.readFully(exchange.getRequestBody());
                byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
                    + "<CompleteMultipartUploadResult>\n"
                    + " <Bucket>bucket</Bucket>\n"
                    + " <Key>write_large_blob</Key>\n"
                    + "</CompleteMultipartUploadResult>").getBytes(StandardCharsets.UTF_8);
                exchange.getResponseHeaders().add("Content-Type", "application/xml");
                exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length);
                exchange.getResponseBody().write(response);
                exchange.close();
                return;
            }
        }

        // send an error back or let the request time out
        if (useTimeout == false) {
            if (randomBoolean() && contentLength > 0) {
                // read the body only partially, then drop the connection
                Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.toIntExact(contentLength - 1))]);
            } else {
                Streams.readFully(exchange.getRequestBody());
                exchange.sendResponseHeaders(
                    randomFrom(
                        HttpStatus.SC_INTERNAL_SERVER_ERROR,
                        HttpStatus.SC_BAD_GATEWAY,
                        HttpStatus.SC_SERVICE_UNAVAILABLE,
                        HttpStatus.SC_GATEWAY_TIMEOUT
                    ),
                    -1
                );
            }
            exchange.close();
        }
    });

    blobContainer.writeBlob("write_large_blob", new ZeroInputStream(blobSize), blobSize, false);

    assertThat(countDownInitiate.isCountedDown(), is(true));
    assertThat(countDownUploads.get(), equalTo(0));
    assertThat(countDownComplete.isCountedDown(), is(true));
}
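
For readers unfamiliar with the wrapped stream above: MD5DigestCalculatingInputStream is a decorator from the AWS SDK for Java v1 that updates an MD5 digest as bytes are read through it (note the internal package, so its API may change between SDK versions). Below is a minimal standalone sketch using only the calls that appear in the test; the class name, payload, and main method are illustrative, not from the OpenSearch code:

import com.amazonaws.services.s3.internal.MD5DigestCalculatingInputStream;
import com.amazonaws.util.Base16;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class Md5EtagSketch {
    public static void main(String[] args) throws Exception {
        byte[] payload = "example part body".getBytes(StandardCharsets.UTF_8);
        // Wrap the source stream; the decorator updates an MD5 digest
        // transparently as bytes are read through it.
        MD5DigestCalculatingInputStream md5In =
            new MD5DigestCalculatingInputStream(new ByteArrayInputStream(payload));
        byte[] buffer = new byte[8192];
        while (md5In.read(buffer) != -1) {
            // drain the stream so the digest covers every byte
        }
        // Base16 renders the 16-byte digest as hex, the same encoding the
        // mock server above uses for the ETag response header.
        String etag = Base16.encodeAsString(md5In.getMd5Digest());
        System.out.println(etag); // 32 hex characters
        md5In.close();
    }
}

This mirrors what the mock server does for each upload-part request: it drains the request body through the digesting stream and echoes the hex-encoded MD5 back as the ETag header, which is how S3 lets clients verify the integrity of an uploaded part.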