use of org.opensearch.common.lucene.store.ByteArrayIndexInput in project OpenSearch by opensearch-project.
From class AzureBlobContainerRetriesTests, method testWriteLargeBlob:
public void testWriteLargeBlob() throws Exception {
    // The request retry policy counts the first attempt as a retry, so we need to
    // account for that and increase the max retry count by one.
    final int maxRetries = randomIntBetween(3, 6);
    final int nbBlocks = randomIntBetween(1, 2);
    final byte[] data = randomBytes(BlobClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE * nbBlocks);
    // we want all requests to fail at least once
    final int nbErrors = 2;
    final AtomicInteger countDownUploads = new AtomicInteger(nbErrors * nbBlocks);
    final CountDown countDownComplete = new CountDown(nbErrors);
    final Map<String, BytesReference> blocks = new ConcurrentHashMap<>();
    httpServer.createContext("/container/write_large_blob", exchange -> {
        if ("PUT".equals(exchange.getRequestMethod())) {
            final Map<String, String> params = new HashMap<>();
            if (exchange.getRequestURI().getQuery() != null) {
                RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
            }
            final String blockId = params.get("blockid");
            if (Strings.hasText(blockId) && (countDownUploads.decrementAndGet() % 2 == 0)) {
                blocks.put(blockId, Streams.readFully(exchange.getRequestBody()));
                exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false");
                exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1);
                exchange.close();
                return;
            }
            final String complete = params.get("comp");
            if ("blocklist".equals(complete) && (countDownComplete.countDown())) {
                final String blockList = Streams.copyToString(new InputStreamReader(exchange.getRequestBody(), UTF_8));
                final List<String> blockUids = Arrays.stream(blockList.split("<Latest>"))
                    .filter(line -> line.contains("</Latest>"))
                    .map(line -> line.substring(0, line.indexOf("</Latest>")))
                    .collect(Collectors.toList());
                final ByteArrayOutputStream blob = new ByteArrayOutputStream();
                for (String blockUid : blockUids) {
                    BytesReference block = blocks.remove(blockUid);
                    assert block != null;
                    block.writeTo(blob);
                }
                assertArrayEquals(data, blob.toByteArray());
                exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false");
                exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1);
                exchange.close();
                return;
            }
        }
        if (randomBoolean()) {
            Streams.readFully(exchange.getRequestBody());
            AzureHttpHandler.sendError(exchange, randomFrom(RestStatus.INTERNAL_SERVER_ERROR, RestStatus.SERVICE_UNAVAILABLE));
        }
        exchange.close();
    });
    final BlobContainer blobContainer = createBlobContainer(maxRetries);
    try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", data), data.length)) {
        blobContainer.writeBlob("write_large_blob", stream, data.length, false);
    }
    assertThat(countDownUploads.get(), equalTo(0));
    assertThat(countDownComplete.isCountedDown(), is(true));
    assertThat(blocks.isEmpty(), is(true));
}
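Every snippet on this page feeds a byte[] into BlobContainer.writeBlob through the same two-step wrapping: ByteArrayIndexInput exposes the array as a Lucene IndexInput, and InputStreamIndexInput adapts that to a java.io.InputStream capped at the array's length. The following is a minimal, self-contained sketch of just that pattern; the class name and the import location of InputStreamIndexInput are assumptions based on the OpenSearch package layout, not code from these tests.

import java.io.InputStream;
import org.opensearch.common.lucene.store.ByteArrayIndexInput;
import org.opensearch.common.lucene.store.InputStreamIndexInput;

public final class ByteArrayAsStreamSketch {
    public static void main(String[] args) throws Exception {
        final byte[] data = { 1, 2, 3, 4 };
        // Wrap the array as an IndexInput, then adapt it to an InputStream
        // limited to data.length bytes, as the tests on this page do before writeBlob.
        try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", data), data.length)) {
            System.out.println(stream.readAllBytes().length); // prints 4
        }
    }
}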
use of org.opensearch.common.lucene.store.ByteArrayIndexInput in project OpenSearch by opensearch-project.
From class GoogleCloudStorageBlobContainerRetriesTests, method testWriteBlobWithReadTimeouts:
public void testWriteBlobWithReadTimeouts() {
    final byte[] bytes = randomByteArrayOfLength(randomIntBetween(10, 128));
    final TimeValue readTimeout = TimeValue.timeValueMillis(randomIntBetween(100, 500));
    final BlobContainer blobContainer = createBlobContainer(1, readTimeout, null, null);
    // HTTP server does not send a response
    httpServer.createContext("/upload/storage/v1/b/bucket/o", exchange -> {
        if (randomBoolean()) {
            if (randomBoolean()) {
                Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, bytes.length - 1)]);
            } else {
                Streams.readFully(exchange.getRequestBody());
            }
        }
    });
    Exception exception = expectThrows(StorageException.class, () -> {
        try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) {
            blobContainer.writeBlob("write_blob_timeout", stream, bytes.length, false);
        }
    });
    assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
    assertThat(exception.getCause(), instanceOf(SocketTimeoutException.class));
    assertThat(exception.getCause().getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
}
use of org.opensearch.common.lucene.store.ByteArrayIndexInput in project OpenSearch by opensearch-project.
From class GoogleCloudStorageBlobContainerRetriesTests, method testWriteLargeBlob:
public void testWriteLargeBlob() throws IOException {
    // See {@link BaseWriteChannel#DEFAULT_CHUNK_SIZE}
    final int defaultChunkSize = 60 * 256 * 1024;
    final int nbChunks = randomIntBetween(3, 5);
    final int lastChunkSize = randomIntBetween(1, defaultChunkSize - 1);
    final int totalChunks = nbChunks + 1;
    final byte[] data = randomBytes(defaultChunkSize * nbChunks + lastChunkSize);
    assertThat(data.length, greaterThan(GoogleCloudStorageBlobStore.LARGE_BLOB_THRESHOLD_BYTE_SIZE));
    logger.debug("resumable upload is composed of [{}] total chunks ([{}] chunks of length [{}] and last chunk of length [{}])",
        totalChunks, nbChunks, defaultChunkSize, lastChunkSize);
    // we want all requests to fail at least once
    final int nbErrors = 2;
    final AtomicInteger countInits = new AtomicInteger(nbErrors);
    final AtomicInteger countUploads = new AtomicInteger(nbErrors * totalChunks);
    final AtomicBoolean allow410Gone = new AtomicBoolean(randomBoolean());
    final AtomicBoolean allowReadTimeout = new AtomicBoolean(rarely());
    final int wrongChunk = randomIntBetween(1, totalChunks);
    final AtomicReference<String> sessionUploadId = new AtomicReference<>(UUIDs.randomBase64UUID());
    logger.debug("starting with resumable upload id [{}]", sessionUploadId.get());
    httpServer.createContext("/upload/storage/v1/b/bucket/o", safeHandler(exchange -> {
        final BytesReference requestBody = Streams.readFully(exchange.getRequestBody());
        final Map<String, String> params = new HashMap<>();
        RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
        assertThat(params.get("uploadType"), equalTo("resumable"));
        if ("POST".equals(exchange.getRequestMethod())) {
            assertThat(params.get("name"), equalTo("write_large_blob"));
            if (countInits.decrementAndGet() <= 0) {
                byte[] response = requestBody.utf8ToString().getBytes(UTF_8);
                exchange.getResponseHeaders().add("Content-Type", "application/json");
                exchange.getResponseHeaders().add("Location", httpServerUrl()
                    + "/upload/storage/v1/b/bucket/o?uploadType=resumable&upload_id=" + sessionUploadId.get());
                exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length);
                exchange.getResponseBody().write(response);
                return;
            }
            if (allowReadTimeout.get()) {
                assertThat(wrongChunk, greaterThan(0));
                return;
            }
        } else if ("PUT".equals(exchange.getRequestMethod())) {
            final String uploadId = params.get("upload_id");
            if (uploadId.equals(sessionUploadId.get()) == false) {
                logger.debug("session id [{}] is gone", uploadId);
                assertThat(wrongChunk, greaterThan(0));
                exchange.sendResponseHeaders(HttpStatus.SC_GONE, -1);
                return;
            }
            if (countUploads.get() == (wrongChunk * nbErrors)) {
                if (allowReadTimeout.compareAndSet(true, false)) {
                    assertThat(wrongChunk, greaterThan(0));
                    return;
                }
                if (allow410Gone.compareAndSet(true, false)) {
                    final String newUploadId = UUIDs.randomBase64UUID(random());
                    logger.debug("chunk [{}] gone, updating session ids [{} -> {}]", wrongChunk, sessionUploadId.get(), newUploadId);
                    sessionUploadId.set(newUploadId);
                    // we must reset the counters because the whole object upload will be retried
                    countInits.set(nbErrors);
                    countUploads.set(nbErrors * totalChunks);
                    exchange.sendResponseHeaders(HttpStatus.SC_GONE, -1);
                    return;
                }
            }
            final String range = exchange.getRequestHeaders().getFirst("Content-Range");
            assertTrue(Strings.hasLength(range));
            if (countUploads.decrementAndGet() % 2 == 0) {
                assertThat(Math.toIntExact(requestBody.length()), anyOf(equalTo(defaultChunkSize), equalTo(lastChunkSize)));
                final int rangeStart = getContentRangeStart(range);
                final int rangeEnd = getContentRangeEnd(range);
                assertThat(rangeEnd + 1 - rangeStart, equalTo(Math.toIntExact(requestBody.length())));
                assertThat(new BytesArray(data, rangeStart, rangeEnd - rangeStart + 1), is(requestBody));
                final Integer limit = getContentRangeLimit(range);
                if (limit != null) {
                    exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
                    return;
                } else {
                    exchange.getResponseHeaders().add("Range", String.format(Locale.ROOT, "bytes=%d/%d", rangeStart, rangeEnd));
                    exchange.getResponseHeaders().add("Content-Length", "0");
                    exchange.sendResponseHeaders(308 /* Resume Incomplete */, -1);
                    return;
                }
            }
        }
        if (randomBoolean()) {
            exchange.sendResponseHeaders(HttpStatus.SC_INTERNAL_SERVER_ERROR, -1);
        }
    }));
    final TimeValue readTimeout = allowReadTimeout.get() ? TimeValue.timeValueSeconds(3) : null;
    final BlobContainer blobContainer = createBlobContainer(nbErrors + 1, readTimeout, null, null);
    try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", data), data.length)) {
        blobContainer.writeBlob("write_large_blob", stream, data.length, false);
    }
    assertThat(countInits.get(), equalTo(0));
    assertThat(countUploads.get(), equalTo(0));
    assertThat(allow410Gone.get(), is(false));
}
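The test above leans on three helpers, getContentRangeStart, getContentRangeEnd and getContentRangeLimit, whose bodies are not shown on this page. As a hedged sketch only (the regex, the class name, and the null-for-"*" convention are assumptions, not the test's actual implementation), such helpers could parse the resumable-upload header "Content-Range: bytes <start>-<end>/<total|*>" like this:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class ContentRangeSketch {
    // Matches e.g. "bytes 0-15728639/*" or "bytes 15728640-15728700/15728701"
    private static final Pattern RANGE = Pattern.compile("bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)");

    static int getContentRangeStart(String header) {
        return Integer.parseInt(matcher(header).group(1));
    }

    static int getContentRangeEnd(String header) {
        return Integer.parseInt(matcher(header).group(2));
    }

    // Returns null when the client sent "*", i.e. the total upload size is not yet known.
    static Integer getContentRangeLimit(String header) {
        final String total = matcher(header).group(3);
        return "*".equals(total) ? null : Integer.valueOf(total);
    }

    private static Matcher matcher(String header) {
        final Matcher m = RANGE.matcher(header);
        if (m.matches() == false) {
            throw new IllegalArgumentException("unexpected Content-Range header: " + header);
        }
        return m;
    }
}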
use of org.opensearch.common.lucene.store.ByteArrayIndexInput in project OpenSearch by opensearch-project.
From class S3BlobContainerRetriesTests, method testWriteBlobWithReadTimeouts:
public void testWriteBlobWithReadTimeouts() {
    final byte[] bytes = randomByteArrayOfLength(randomIntBetween(10, 128));
    final TimeValue readTimeout = TimeValue.timeValueMillis(randomIntBetween(100, 500));
    final BlobContainer blobContainer = createBlobContainer(1, readTimeout, true, null);
    // HTTP server does not send a response
    httpServer.createContext("/bucket/write_blob_timeout", exchange -> {
        if (randomBoolean()) {
            if (randomBoolean()) {
                Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, bytes.length - 1)]);
            } else {
                Streams.readFully(exchange.getRequestBody());
            }
        }
    });
    Exception exception = expectThrows(IOException.class, () -> {
        try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) {
            blobContainer.writeBlob("write_blob_timeout", stream, bytes.length, false);
        }
    });
    assertThat(exception.getMessage().toLowerCase(Locale.ROOT),
        containsString("unable to upload object [write_blob_timeout] using a single upload"));
    assertThat(exception.getCause(), instanceOf(SdkClientException.class));
    assertThat(exception.getCause().getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
    assertThat(exception.getCause().getCause(), instanceOf(SocketTimeoutException.class));
    assertThat(exception.getCause().getCause().getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
}
use of org.opensearch.common.lucene.store.ByteArrayIndexInput in project OpenSearch by opensearch-project.
From class ChecksumBlobStoreFormat, method deserialize:
public T deserialize(String blobName, NamedXContentRegistry namedXContentRegistry, BytesReference bytes) throws IOException {
    final String resourceDesc = "ChecksumBlobStoreFormat.readBlob(blob=\"" + blobName + "\")";
    try {
        final IndexInput indexInput = bytes.length() > 0
            ? new ByteBuffersIndexInput(new ByteBuffersDataInput(Arrays.asList(BytesReference.toByteBuffers(bytes))), resourceDesc)
            : new ByteArrayIndexInput(resourceDesc, BytesRef.EMPTY_BYTES);
        CodecUtil.checksumEntireFile(indexInput);
        CodecUtil.checkHeader(indexInput, codec, VERSION, VERSION);
        long filePointer = indexInput.getFilePointer();
        long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer;
        try (XContentParser parser = XContentHelper.createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE,
            bytes.slice((int) filePointer, (int) contentSize), XContentType.SMILE)) {
            return reader.apply(parser);
        }
    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
        // we trick this into a dedicated exception with the original stacktrace
        throw new CorruptStateException(ex);
    }
}
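deserialize thus expects the blob to have a Lucene-style checksummed layout: a codec header, the SMILE-encoded payload, and a CRC32 footer. Below is a minimal sketch that writes and re-verifies such a blob with the same CodecUtil calls; the codec name "sketch-codec", the version constant, and the class name are illustrative assumptions, not values taken from ChecksumBlobStoreFormat.

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.ByteBuffersDataOutput;
import org.apache.lucene.store.ByteBuffersIndexOutput;
import org.opensearch.common.lucene.store.ByteArrayIndexInput;

public final class ChecksummedBlobSketch {
    private static final String CODEC = "sketch-codec"; // assumed codec name, for illustration
    private static final int VERSION = 1;               // assumed version

    // Write: codec header + payload + CRC32 footer, mirroring the layout deserialize reads.
    static byte[] write(byte[] payload) throws Exception {
        final ByteBuffersDataOutput data = new ByteBuffersDataOutput();
        try (ByteBuffersIndexOutput out = new ByteBuffersIndexOutput(data, "sketch", "sketch")) {
            CodecUtil.writeHeader(out, CODEC, VERSION);
            out.writeBytes(payload, payload.length); // the SMILE bytes in the real format
            CodecUtil.writeFooter(out);
        }
        return data.toArrayCopy();
    }

    // Verify: whole-file checksum first, then codec name and version, as deserialize does.
    static void verify(byte[] blob) throws Exception {
        final ByteArrayIndexInput in = new ByteArrayIndexInput("sketch", blob);
        CodecUtil.checksumEntireFile(in); // throws CorruptIndexException on corruption
        CodecUtil.checkHeader(in, CODEC, VERSION, VERSION);
    }
}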