Use of org.opensearch.common.util.concurrent.CountDown in project OpenSearch by opensearch-project.
The class AbstractBlobContainerRetriesTestCase, method testReadRangeBlobWithRetries:
public void testReadRangeBlobWithRetries() throws Exception {
    final int maxRetries = randomInt(5);
    final CountDown countDown = new CountDown(maxRetries + 1);
    final byte[] bytes = randomBlobContent();
    httpServer.createContext(downloadStorageEndpoint("read_range_blob_max_retries"), exchange -> {
        Streams.readFully(exchange.getRequestBody());
        if (countDown.countDown()) {
            final int rangeStart = getRangeStart(exchange);
            assertThat(rangeStart, lessThan(bytes.length));
            assertTrue(getRangeEnd(exchange).isPresent());
            final int rangeEnd = getRangeEnd(exchange).get();
            assertThat(rangeEnd, greaterThanOrEqualTo(rangeStart));
            // adapt range end to be compliant to https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
            final int effectiveRangeEnd = Math.min(bytes.length - 1, rangeEnd);
            final int length = (effectiveRangeEnd - rangeStart) + 1;
            exchange.getResponseHeaders().add("Content-Type", bytesContentType());
            exchange.sendResponseHeaders(HttpStatus.SC_OK, length);
            exchange.getResponseBody().write(bytes, rangeStart, length);
            exchange.close();
            return;
        }
        if (randomBoolean()) {
            exchange.sendResponseHeaders(
                randomFrom(
                    HttpStatus.SC_INTERNAL_SERVER_ERROR,
                    HttpStatus.SC_BAD_GATEWAY,
                    HttpStatus.SC_SERVICE_UNAVAILABLE,
                    HttpStatus.SC_GATEWAY_TIMEOUT
                ),
                -1
            );
        } else if (randomBoolean()) {
            sendIncompleteContent(exchange, bytes);
        }
        if (randomBoolean()) {
            exchange.close();
        }
    });
    final TimeValue readTimeout = TimeValue.timeValueMillis(between(100, 500));
    final BlobContainer blobContainer = createBlobContainer(maxRetries, readTimeout, null, null);
    final int position = randomIntBetween(0, bytes.length - 1);
    final int length = randomIntBetween(0, randomBoolean() ? bytes.length : Integer.MAX_VALUE);
    try (InputStream inputStream = blobContainer.readBlob("read_range_blob_max_retries", position, length)) {
        final int readLimit;
        final InputStream wrappedStream;
        if (randomBoolean()) {
            // read stream only partly
            readLimit = randomIntBetween(0, length);
            wrappedStream = Streams.limitStream(inputStream, readLimit);
        } else {
            readLimit = length;
            wrappedStream = inputStream;
        }
        final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(wrappedStream));
        logger.info(
            "maxRetries={}, position={}, length={}, readLimit={}, byteSize={}, bytesRead={}",
            maxRetries, position, length, readLimit, bytes.length, bytesRead.length
        );
        assertArrayEquals(Arrays.copyOfRange(bytes, position, Math.min(bytes.length, position + readLimit)), bytesRead);
        if (readLimit == 0 || (readLimit < length && readLimit == bytesRead.length)) {
            // we might have completed things based on an incomplete response, and we're happy with that
        } else {
            assertTrue(countDown.isCountedDown());
        }
    }
}
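A note on the idiom above: CountDown.countDown() decrements atomically and returns true only on the call that brings the counter to zero; further calls are no-ops that return false, and isCountedDown() reports whether zero has been reached. That is why a counter of maxRetries + 1 lets the handler reject exactly maxRetries attempts before serving the real range response. A minimal standalone sketch of these semantics (not part of the test; the class name and counter size are arbitrary):

import org.opensearch.common.util.concurrent.CountDown;

public class CountDownSemanticsDemo {
    public static void main(String[] args) {
        final CountDown countDown = new CountDown(2);
        System.out.println(countDown.countDown());     // false - one step left
        System.out.println(countDown.isCountedDown()); // false - not yet at zero
        System.out.println(countDown.countDown());     // true  - this call reached zero
        System.out.println(countDown.isCountedDown()); // true  - and it stays that way
        System.out.println(countDown.countDown());     // false - already at zero, no effect
    }
}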
Use of org.opensearch.common.util.concurrent.CountDown in project OpenSearch by opensearch-project.
The class QueryRewriteContext, method executeAsyncActions:
/**
 * Executes all registered async actions and notifies the listener once it's done. The value that is passed to the listener is always
 * <code>null</code>. The list of registered actions is cleared once this method returns.
 */
public void executeAsyncActions(ActionListener listener) {
    if (asyncActions.isEmpty()) {
        listener.onResponse(null);
    } else {
        CountDown countDown = new CountDown(asyncActions.size());
        ActionListener<?> internalListener = new ActionListener() {
            @Override
            public void onResponse(Object o) {
                if (countDown.countDown()) {
                    listener.onResponse(null);
                }
            }

            @Override
            public void onFailure(Exception e) {
                if (countDown.fastForward()) {
                    listener.onFailure(e);
                }
            }
        };
        // make a copy to prevent concurrent modification exception
        List<BiConsumer<Client, ActionListener<?>>> biConsumers = new ArrayList<>(asyncActions);
        asyncActions.clear();
        for (BiConsumer<Client, ActionListener<?>> action : biConsumers) {
            action.accept(client, internalListener);
        }
    }
}
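This is the classic fan-out/fan-in use of CountDown: every completing action counts down, only the call that reaches zero notifies the outer listener, and the first failure uses fastForward() to drain the counter so the error is reported exactly once while later completions are ignored. Below is a minimal sketch of the same idiom outside QueryRewriteContext; the names FanIn, runAll, tasks, executor and done are placeholders, and the org.opensearch.action.ActionListener import path is the one used on the OpenSearch 1.x line (later releases moved the interface):

import java.util.List;
import java.util.concurrent.Executor;

import org.opensearch.action.ActionListener;
import org.opensearch.common.util.concurrent.CountDown;

final class FanIn {
    /** Runs every task on the executor and notifies {@code done} exactly once. */
    static void runAll(List<Runnable> tasks, Executor executor, ActionListener<Void> done) {
        if (tasks.isEmpty()) {
            done.onResponse(null); // nothing to wait for, mirrors the asyncActions.isEmpty() branch
            return;
        }
        final CountDown countDown = new CountDown(tasks.size());
        for (Runnable task : tasks) {
            executor.execute(() -> {
                try {
                    task.run();
                    if (countDown.countDown()) {
                        done.onResponse(null); // the last task to finish completes the listener
                    }
                } catch (Exception e) {
                    if (countDown.fastForward()) {
                        done.onFailure(e); // only the first failure is propagated
                    }
                }
            });
        }
    }
}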
Use of org.opensearch.common.util.concurrent.CountDown in project OpenSearch by opensearch-project.
The class S3BlobContainerRetriesTests, method testWriteBlobWithRetries:
public void testWriteBlobWithRetries() throws Exception {
    final int maxRetries = randomInt(5);
    final CountDown countDown = new CountDown(maxRetries + 1);
    final byte[] bytes = randomBlobContent();
    httpServer.createContext("/bucket/write_blob_max_retries", exchange -> {
        if ("PUT".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getQuery() == null) {
            if (countDown.countDown()) {
                final BytesReference body = Streams.readFully(exchange.getRequestBody());
                if (Objects.deepEquals(bytes, BytesReference.toBytes(body))) {
                    exchange.sendResponseHeaders(HttpStatus.SC_OK, -1);
                } else {
                    exchange.sendResponseHeaders(HttpStatus.SC_BAD_REQUEST, -1);
                }
                exchange.close();
                return;
            }
            if (randomBoolean()) {
                if (randomBoolean()) {
                    Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.max(1, bytes.length - 1))]);
                } else {
                    Streams.readFully(exchange.getRequestBody());
                    exchange.sendResponseHeaders(
                        randomFrom(
                            HttpStatus.SC_INTERNAL_SERVER_ERROR,
                            HttpStatus.SC_BAD_GATEWAY,
                            HttpStatus.SC_SERVICE_UNAVAILABLE,
                            HttpStatus.SC_GATEWAY_TIMEOUT
                        ),
                        -1
                    );
                }
            }
            exchange.close();
        }
    });
    final BlobContainer blobContainer = createBlobContainer(maxRetries, null, true, null);
    try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) {
        blobContainer.writeBlob("write_blob_max_retries", stream, bytes.length, false);
    }
    assertThat(countDown.isCountedDown(), is(true));
}
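Here the countdown gates a PUT rather than a GET: until the counter runs out, the handler injects failures by reading only part of the request body, answering with a retryable 5xx status, or simply closing the exchange, and only the attempt that exhausts the counter validates the uploaded bytes. The closing isCountedDown() assertion then confirms that the S3 client retried through all of the injected failures before the write succeeded.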
Use of org.opensearch.common.util.concurrent.CountDown in project OpenSearch by opensearch-project.
The class TransportFieldCapabilitiesAction, method doExecute:
@Override
protected void doExecute(Task task, FieldCapabilitiesRequest request, final ActionListener<FieldCapabilitiesResponse> listener) {
    // retrieve the initial timestamp in case the action is a cross cluster search
    long nowInMillis = request.nowInMillis() == null ? System.currentTimeMillis() : request.nowInMillis();
    final ClusterState clusterState = clusterService.state();
    final Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices(
        request.indicesOptions(),
        request.indices(),
        idx -> indexNameExpressionResolver.hasIndexAbstraction(idx, clusterState)
    );
    final OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
    final String[] concreteIndices;
    if (localIndices == null) {
        // in the case we have one or more remote indices but no local we don't expand to all local indices and just do remote indices
        concreteIndices = Strings.EMPTY_ARRAY;
    } else {
        concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, localIndices);
    }
    final int totalNumRequest = concreteIndices.length + remoteClusterIndices.size();
    final CountDown completionCounter = new CountDown(totalNumRequest);
    final List<FieldCapabilitiesIndexResponse> indexResponses = Collections.synchronizedList(new ArrayList<>());
    final Runnable onResponse = () -> {
        if (completionCounter.countDown()) {
            if (request.isMergeResults()) {
                listener.onResponse(merge(indexResponses, request.includeUnmapped()));
            } else {
                listener.onResponse(new FieldCapabilitiesResponse(indexResponses));
            }
        }
    };
    if (totalNumRequest == 0) {
        listener.onResponse(new FieldCapabilitiesResponse(new String[0], Collections.emptyMap()));
    } else {
        ActionListener<FieldCapabilitiesIndexResponse> innerListener = new ActionListener<FieldCapabilitiesIndexResponse>() {
            @Override
            public void onResponse(FieldCapabilitiesIndexResponse result) {
                if (result.canMatch()) {
                    indexResponses.add(result);
                }
                onResponse.run();
            }

            @Override
            public void onFailure(Exception e) {
                // TODO we should somehow inform the user that we failed
                onResponse.run();
            }
        };
        for (String index : concreteIndices) {
            shardAction.execute(
                new FieldCapabilitiesIndexRequest(request.fields(), index, localIndices, request.indexFilter(), nowInMillis),
                innerListener
            );
        }
        // send us back all individual index results.
        for (Map.Entry<String, OriginalIndices> remoteIndices : remoteClusterIndices.entrySet()) {
            String clusterAlias = remoteIndices.getKey();
            OriginalIndices originalIndices = remoteIndices.getValue();
            Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias);
            FieldCapabilitiesRequest remoteRequest = new FieldCapabilitiesRequest();
            // we need to merge on this node
            remoteRequest.setMergeResults(false);
            remoteRequest.indicesOptions(originalIndices.indicesOptions());
            remoteRequest.indices(originalIndices.indices());
            remoteRequest.fields(request.fields());
            remoteRequest.indexFilter(request.indexFilter());
            remoteRequest.nowInMillis(nowInMillis);
            remoteClusterClient.fieldCaps(remoteRequest, ActionListener.wrap(response -> {
                for (FieldCapabilitiesIndexResponse res : response.getIndexResponses()) {
                    indexResponses.add(
                        new FieldCapabilitiesIndexResponse(
                            RemoteClusterAware.buildRemoteIndexName(clusterAlias, res.getIndexName()),
                            res.get(),
                            res.canMatch()
                        )
                    );
                }
                onResponse.run();
            }, failure -> onResponse.run()));
        }
    }
}
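In contrast to executeAsyncActions above, the failure path here does not fast-forward the counter: both onResponse and onFailure run the shared completion Runnable, so every per-index and per-cluster sub-request, successful or not, counts down, and the merged FieldCapabilitiesResponse is sent only once all of them have completed.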
Use of org.opensearch.common.util.concurrent.CountDown in project OpenSearch by opensearch-project.
The class GoogleCloudStorageBlobContainerRetriesTests, method testWriteBlobWithRetries:
public void testWriteBlobWithRetries() throws Exception {
    final int maxRetries = randomIntBetween(2, 10);
    final CountDown countDown = new CountDown(maxRetries);
    final byte[] bytes = randomBlobContent();
    httpServer.createContext("/upload/storage/v1/b/bucket/o", safeHandler(exchange -> {
        assertThat(exchange.getRequestURI().getQuery(), containsString("uploadType=multipart"));
        if (countDown.countDown()) {
            Optional<Tuple<String, BytesReference>> content = parseMultipartRequestBody(exchange.getRequestBody());
            assertThat(content.isPresent(), is(true));
            assertThat(content.get().v1(), equalTo("write_blob_max_retries"));
            if (Objects.deepEquals(bytes, BytesReference.toBytes(content.get().v2()))) {
                byte[] response = ("{\"bucket\":\"bucket\",\"name\":\"" + content.get().v1() + "\"}").getBytes(UTF_8);
                exchange.getResponseHeaders().add("Content-Type", "application/json");
                exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length);
                exchange.getResponseBody().write(response);
            } else {
                exchange.sendResponseHeaders(HttpStatus.SC_BAD_REQUEST, -1);
            }
            return;
        }
        if (randomBoolean()) {
            if (randomBoolean()) {
                Streams.readFully(exchange.getRequestBody(), new byte[randomIntBetween(1, Math.max(1, bytes.length - 1))]);
            } else {
                Streams.readFully(exchange.getRequestBody());
                exchange.sendResponseHeaders(HttpStatus.SC_INTERNAL_SERVER_ERROR, -1);
            }
        }
    }));
    final BlobContainer blobContainer = createBlobContainer(maxRetries, null, null, null);
    try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) {
        blobContainer.writeBlob("write_blob_max_retries", stream, bytes.length, false);
    }
    assertThat(countDown.isCountedDown(), is(true));
}