Use of org.opensearch.action.DocWriteRequest in the OpenSearch project (opensearch-project/OpenSearch).
Class RequestConverters, method bulk:
static Request bulk(BulkRequest bulkRequest) throws IOException {
    Request request = new Request(HttpPost.METHOD_NAME, "/_bulk");
    Params parameters = new Params();
    parameters.withTimeout(bulkRequest.timeout());
    parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy());
    parameters.withPipeline(bulkRequest.pipeline());
    parameters.withRouting(bulkRequest.routing());
    // Bulk API only supports newline delimited JSON or Smile. Before executing
    // the bulk, we need to check that all requests have the same content-type
    // and this content-type is supported by the Bulk API.
    XContentType bulkContentType = null;
    for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
        DocWriteRequest<?> action = bulkRequest.requests().get(i);
        DocWriteRequest.OpType opType = action.opType();
        if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
            bulkContentType = enforceSameContentType((IndexRequest) action, bulkContentType);
        } else if (opType == DocWriteRequest.OpType.UPDATE) {
            UpdateRequest updateRequest = (UpdateRequest) action;
            if (updateRequest.doc() != null) {
                bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType);
            }
            if (updateRequest.upsertRequest() != null) {
                bulkContentType = enforceSameContentType(updateRequest.upsertRequest(), bulkContentType);
            }
        }
    }
    if (bulkContentType == null) {
        bulkContentType = XContentType.JSON;
    }
    final byte separator = bulkContentType.xContent().streamSeparator();
    final ContentType requestContentType = createContentType(bulkContentType);
    ByteArrayOutputStream content = new ByteArrayOutputStream();
    for (DocWriteRequest<?> action : bulkRequest.requests()) {
        DocWriteRequest.OpType opType = action.opType();
        try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) {
            metadata.startObject();
            {
                metadata.startObject(opType.getLowercase());
                if (Strings.hasLength(action.index())) {
                    metadata.field("_index", action.index());
                }
                if (Strings.hasLength(action.id())) {
                    metadata.field("_id", action.id());
                }
                if (Strings.hasLength(action.routing())) {
                    metadata.field("routing", action.routing());
                }
                if (action.version() != Versions.MATCH_ANY) {
                    metadata.field("version", action.version());
                }
                VersionType versionType = action.versionType();
                if (versionType != VersionType.INTERNAL) {
                    if (versionType == VersionType.EXTERNAL) {
                        metadata.field("version_type", "external");
                    } else if (versionType == VersionType.EXTERNAL_GTE) {
                        metadata.field("version_type", "external_gte");
                    }
                }
                if (action.ifSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
                    metadata.field("if_seq_no", action.ifSeqNo());
                    metadata.field("if_primary_term", action.ifPrimaryTerm());
                }
                if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
                    IndexRequest indexRequest = (IndexRequest) action;
                    if (Strings.hasLength(indexRequest.getPipeline())) {
                        metadata.field("pipeline", indexRequest.getPipeline());
                    }
                } else if (opType == DocWriteRequest.OpType.UPDATE) {
                    UpdateRequest updateRequest = (UpdateRequest) action;
                    if (updateRequest.retryOnConflict() > 0) {
                        metadata.field("retry_on_conflict", updateRequest.retryOnConflict());
                    }
                    if (updateRequest.fetchSource() != null) {
                        metadata.field("_source", updateRequest.fetchSource());
                    }
                }
                metadata.endObject();
            }
            metadata.endObject();
            BytesRef metadataSource = BytesReference.bytes(metadata).toBytesRef();
            content.write(metadataSource.bytes, metadataSource.offset, metadataSource.length);
            content.write(separator);
        }
        BytesRef source = null;
        if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
            IndexRequest indexRequest = (IndexRequest) action;
            BytesReference indexSource = indexRequest.source();
            XContentType indexXContentType = indexRequest.getContentType();
            // EMPTY and THROW are fine here because we just call copyCurrentStructure,
            // which doesn't touch the registry or deprecation.
            try (
                XContentParser parser = XContentHelper.createParser(
                    NamedXContentRegistry.EMPTY,
                    DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
                    indexSource,
                    indexXContentType
                )
            ) {
                try (XContentBuilder builder = XContentBuilder.builder(bulkContentType.xContent())) {
                    builder.copyCurrentStructure(parser);
                    source = BytesReference.bytes(builder).toBytesRef();
                }
            }
        } else if (opType == DocWriteRequest.OpType.UPDATE) {
            source = XContentHelper.toXContent((UpdateRequest) action, bulkContentType, false).toBytesRef();
        }
        if (source != null) {
            content.write(source.bytes, source.offset, source.length);
            content.write(separator);
        }
    }
    request.addParameters(parameters.asMap());
    request.setEntity(new NByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType));
    return request;
}
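
To make the conversion concrete, here is a small, hypothetical driver (made-up index name, ids and documents; imports are omitted to match the excerpts, and it assumes it runs with package-level access to RequestConverters, as the test below does):

BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new IndexRequest("logs").id("1").source("{\"msg\":\"hello\"}", XContentType.JSON));
bulkRequest.add(new UpdateRequest("logs", "2").doc("{\"msg\":\"world\"}", XContentType.JSON));

Request request = RequestConverters.bulk(bulkRequest); // throws IOException
// request is a POST to /_bulk whose entity is newline-delimited JSON: one metadata line per
// action followed by its source (updates wrap the partial document in a "doc" object):
//   {"index":{"_index":"logs","_id":"1"}}
//   {"msg":"hello"}
//   {"update":{"_index":"logs","_id":"2"}}
//   {"doc":{"msg":"world"}}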

Use of org.opensearch.action.DocWriteRequest in the OpenSearch project (opensearch-project/OpenSearch).
Class RequestConvertersTests, method testBulk:
public void testBulk() throws IOException {
    Map<String, String> expectedParams = new HashMap<>();
    BulkRequest bulkRequest = new BulkRequest();
    if (randomBoolean()) {
        String timeout = randomTimeValue();
        bulkRequest.timeout(timeout);
        expectedParams.put("timeout", timeout);
    } else {
        expectedParams.put("timeout", BulkShardRequest.DEFAULT_TIMEOUT.getStringRep());
    }
    setRandomRefreshPolicy(bulkRequest::setRefreshPolicy, expectedParams);
    XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);
    int nbItems = randomIntBetween(10, 100);
    DocWriteRequest<?>[] requests = new DocWriteRequest<?>[nbItems];
    for (int i = 0; i < nbItems; i++) {
        String index = randomAlphaOfLength(5);
        String id = randomAlphaOfLength(5);
        BytesReference source = RandomObjects.randomSource(random(), xContentType);
        DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());
        DocWriteRequest<?> docWriteRequest;
        if (opType == DocWriteRequest.OpType.INDEX) {
            IndexRequest indexRequest = new IndexRequest(index).id(id).source(source, xContentType);
            docWriteRequest = indexRequest;
            if (randomBoolean()) {
                indexRequest.setPipeline(randomAlphaOfLength(5));
            }
        } else if (opType == DocWriteRequest.OpType.CREATE) {
            IndexRequest createRequest = new IndexRequest(index).id(id).source(source, xContentType).create(true);
            docWriteRequest = createRequest;
        } else if (opType == DocWriteRequest.OpType.UPDATE) {
            final UpdateRequest updateRequest = new UpdateRequest(index, id).doc(new IndexRequest().source(source, xContentType));
            docWriteRequest = updateRequest;
            if (randomBoolean()) {
                updateRequest.retryOnConflict(randomIntBetween(1, 5));
            }
            if (randomBoolean()) {
                randomizeFetchSourceContextParams(updateRequest::fetchSource, new HashMap<>());
            }
        } else if (opType == DocWriteRequest.OpType.DELETE) {
            docWriteRequest = new DeleteRequest(index, id);
        } else {
            throw new UnsupportedOperationException("optype [" + opType + "] not supported");
        }
        if (randomBoolean()) {
            docWriteRequest.routing(randomAlphaOfLength(10));
        }
        if (opType != DocWriteRequest.OpType.UPDATE && randomBoolean()) {
            docWriteRequest.setIfSeqNo(randomNonNegativeLong());
            docWriteRequest.setIfPrimaryTerm(randomLongBetween(1, 200));
        }
        requests[i] = docWriteRequest;
    }
    bulkRequest.add(requests);
    Request request = RequestConverters.bulk(bulkRequest);
    assertEquals("/_bulk", request.getEndpoint());
    assertEquals(expectedParams, request.getParameters());
    assertEquals(HttpPost.METHOD_NAME, request.getMethod());
    assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
    byte[] content = new byte[(int) request.getEntity().getContentLength()];
    try (InputStream inputStream = request.getEntity().getContent()) {
        Streams.readFully(inputStream, content);
    }
    BulkRequest parsedBulkRequest = new BulkRequest();
    parsedBulkRequest.add(content, 0, content.length, xContentType);
    assertEquals(bulkRequest.numberOfActions(), parsedBulkRequest.numberOfActions());
    for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
        DocWriteRequest<?> originalRequest = bulkRequest.requests().get(i);
        DocWriteRequest<?> parsedRequest = parsedBulkRequest.requests().get(i);
        assertEquals(originalRequest.opType(), parsedRequest.opType());
        assertEquals(originalRequest.index(), parsedRequest.index());
        assertEquals(originalRequest.id(), parsedRequest.id());
        assertEquals(originalRequest.routing(), parsedRequest.routing());
        assertEquals(originalRequest.version(), parsedRequest.version());
        assertEquals(originalRequest.versionType(), parsedRequest.versionType());
        assertEquals(originalRequest.ifSeqNo(), parsedRequest.ifSeqNo());
        assertEquals(originalRequest.ifPrimaryTerm(), parsedRequest.ifPrimaryTerm());
        DocWriteRequest.OpType opType = originalRequest.opType();
        if (opType == DocWriteRequest.OpType.INDEX) {
            IndexRequest indexRequest = (IndexRequest) originalRequest;
            IndexRequest parsedIndexRequest = (IndexRequest) parsedRequest;
            assertEquals(indexRequest.getPipeline(), parsedIndexRequest.getPipeline());
            assertToXContentEquivalent(indexRequest.source(), parsedIndexRequest.source(), xContentType);
        } else if (opType == DocWriteRequest.OpType.UPDATE) {
            UpdateRequest updateRequest = (UpdateRequest) originalRequest;
            UpdateRequest parsedUpdateRequest = (UpdateRequest) parsedRequest;
            assertEquals(updateRequest.retryOnConflict(), parsedUpdateRequest.retryOnConflict());
            assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource());
            if (updateRequest.doc() != null) {
                assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), xContentType);
            } else {
                assertNull(parsedUpdateRequest.doc());
            }
        }
    }
}

Use of org.opensearch.action.DocWriteRequest in the OpenSearch project (opensearch-project/OpenSearch).
Class TransportBulkAction, method prohibitAppendWritesInBackingIndices:
static void prohibitAppendWritesInBackingIndices(DocWriteRequest<?> writeRequest, Metadata metadata) {
    IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(writeRequest.index());
    if (indexAbstraction == null) {
        return;
    }
    if (indexAbstraction.getType() != IndexAbstraction.Type.CONCRETE_INDEX) {
        return;
    }
    if (indexAbstraction.getParentDataStream() == null) {
        return;
    }
    DataStream dataStream = indexAbstraction.getParentDataStream().getDataStream();
    // At this point the write op is targeting a backing index of a data stream directly,
    // so check whether the write op is append-only and, if so, fail.
    // (Updates and deletes are allowed to target a backing index.)
    DocWriteRequest.OpType opType = writeRequest.opType();
    // (Enforcing this inside the engine would require an engine-level change; for now this check is sufficient.)
    if (opType == DocWriteRequest.OpType.CREATE) {
        throw new IllegalArgumentException(
            "index request with op_type=create targeting backing indices is disallowed, "
                + "target corresponding data stream ["
                + dataStream.getName()
                + "] instead"
        );
    }
    if (opType == DocWriteRequest.OpType.INDEX
        && writeRequest.ifPrimaryTerm() == UNASSIGNED_PRIMARY_TERM
        && writeRequest.ifSeqNo() == UNASSIGNED_SEQ_NO) {
        throw new IllegalArgumentException(
            "index request with op_type=index and no if_primary_term and if_seq_no set "
                + "targeting backing indices is disallowed, target corresponding data stream ["
                + dataStream.getName()
                + "] instead"
        );
    }
}
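
A rough illustration of what this check rejects and allows (names are made up; the backing index is assumed to follow the usual .ds-<data-stream>-<generation> naming, and each request would be passed to prohibitAppendWritesInBackingIndices together with the cluster Metadata):

// Rejected: op_type=create aimed directly at a backing index of the "logs" data stream.
IndexRequest rejectedCreate = new IndexRequest(".ds-logs-000001").id("1")
    .opType(DocWriteRequest.OpType.CREATE)
    .source("{\"msg\":\"hello\"}", XContentType.JSON);

// Rejected: plain op_type=index against the backing index with no if_seq_no/if_primary_term,
// i.e. an append-only write rather than an optimistic-concurrency update.
IndexRequest rejectedIndex = new IndexRequest(".ds-logs-000001").id("1")
    .source("{\"msg\":\"hello\"}", XContentType.JSON);

// Allowed: the same write once if_seq_no/if_primary_term are supplied (it updates a specific
// document version), and, of course, append-only writes addressed to the data stream itself.
IndexRequest allowedUpdate = new IndexRequest(".ds-logs-000001").id("1")
    .setIfSeqNo(42)
    .setIfPrimaryTerm(1)
    .source("{\"msg\":\"hello\"}", XContentType.JSON);
IndexRequest allowedAppend = new IndexRequest("logs").opType(DocWriteRequest.OpType.CREATE)
    .source("{\"msg\":\"hello\"}", XContentType.JSON);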

Use of org.opensearch.action.DocWriteRequest in the OpenSearch project (opensearch-project/OpenSearch).
Class TransportBulkAction, method doInternalExecute:
protected void doInternalExecute(Task task, BulkRequest bulkRequest, String executorName, ActionListener<BulkResponse> listener) {
    final long startTime = relativeTime();
    final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());
    boolean hasIndexRequestsWithPipelines = false;
    final Metadata metadata = clusterService.state().getMetadata();
    final Version minNodeVersion = clusterService.state().getNodes().getMinNodeVersion();
    for (DocWriteRequest<?> actionRequest : bulkRequest.requests) {
        IndexRequest indexRequest = getIndexWriteRequest(actionRequest);
        if (indexRequest != null) {
            // Each index request needs to be evaluated, because this method also modifies the IndexRequest
            boolean indexRequestHasPipeline = IngestService.resolvePipelines(actionRequest, indexRequest, metadata);
            hasIndexRequestsWithPipelines |= indexRequestHasPipeline;
        }
        if (actionRequest instanceof IndexRequest) {
            IndexRequest ir = (IndexRequest) actionRequest;
            ir.checkAutoIdWithOpTypeCreateSupportedByVersion(minNodeVersion);
            if (ir.getAutoGeneratedTimestamp() != IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP) {
                throw new IllegalArgumentException("autoGeneratedTimestamp should not be set externally");
            }
        }
    }
    if (hasIndexRequestsWithPipelines) {
        // This method will be invoked again after ingest processing, with the bulk requests updated and
        // IngestService.NOOP_PIPELINE_NAME set on each request, so on the second pass this path is never taken.
        try {
            if (Assertions.ENABLED) {
                final boolean arePipelinesResolved = bulkRequest.requests()
                    .stream()
                    .map(TransportBulkAction::getIndexWriteRequest)
                    .filter(Objects::nonNull)
                    .allMatch(IndexRequest::isPipelineResolved);
                assert arePipelinesResolved : bulkRequest;
            }
            if (clusterService.localNode().isIngestNode()) {
                processBulkIndexIngestRequest(task, bulkRequest, executorName, listener);
            } else {
                ingestForwarder.forwardIngestRequest(BulkAction.INSTANCE, bulkRequest, listener);
            }
        } catch (Exception e) {
            listener.onFailure(e);
        }
        return;
    }
    final boolean includesSystem = includesSystem(bulkRequest, clusterService.state().metadata().getIndicesLookup(), systemIndices);
    if (includesSystem || needToCheck()) {
        // Attempt to create all the indices that we're going to need during the bulk before we start.
        // Step 1: collect all the indices in the request
        final Map<String, Boolean> indices = bulkRequest.requests.stream()
            .filter(
                request -> request.opType() != DocWriteRequest.OpType.DELETE
                    || request.versionType() == VersionType.EXTERNAL
                    || request.versionType() == VersionType.EXTERNAL_GTE
            )
            .collect(Collectors.toMap(DocWriteRequest::index, DocWriteRequest::isRequireAlias, (v1, v2) -> v1 || v2));
        /* Step 2: filter that to indices that don't exist and we can create. At the same time build a map of indices we can't create
         * that we'll use when we try to run the requests. */
        final Map<String, IndexNotFoundException> indicesThatCannotBeCreated = new HashMap<>();
        Set<String> autoCreateIndices = new HashSet<>();
        ClusterState state = clusterService.state();
        for (Map.Entry<String, Boolean> indexAndFlag : indices.entrySet()) {
            boolean shouldAutoCreate;
            final String index = indexAndFlag.getKey();
            try {
                shouldAutoCreate = shouldAutoCreate(index, state);
            } catch (IndexNotFoundException e) {
                shouldAutoCreate = false;
                indicesThatCannotBeCreated.put(index, e);
            }
            // We should only auto create if we are not requiring it to be an alias
            if (shouldAutoCreate && (indexAndFlag.getValue() == false)) {
                autoCreateIndices.add(index);
            }
        }
        // Step 3: create all the indices that are missing, if there are any missing. start the bulk after all the creates come back.
        if (autoCreateIndices.isEmpty()) {
            executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
        } else {
            final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size());
            for (String index : autoCreateIndices) {
                createIndex(index, bulkRequest.timeout(), minNodeVersion, new ActionListener<CreateIndexResponse>() {
                    @Override
                    public void onResponse(CreateIndexResponse result) {
                        if (counter.decrementAndGet() == 0) {
                            threadPool.executor(executorName).execute(new ActionRunnable<BulkResponse>(listener) {
                                @Override
                                protected void doRun() {
                                    executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
                                }
                            });
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        if (!(ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException)) {
                            // fail all requests involving this index, if create didn't work
                            for (int i = 0; i < bulkRequest.requests.size(); i++) {
                                DocWriteRequest<?> request = bulkRequest.requests.get(i);
                                if (request != null && setResponseFailureIfIndexMatches(responses, i, request, index, e)) {
                                    bulkRequest.requests.set(i, null);
                                }
                            }
                        }
                        if (counter.decrementAndGet() == 0) {
                            final ActionListener<BulkResponse> wrappedListener = ActionListener.wrap(listener::onResponse, inner -> {
                                inner.addSuppressed(e);
                                listener.onFailure(inner);
                            });
                            threadPool.executor(executorName).execute(new ActionRunnable<BulkResponse>(wrappedListener) {
                                @Override
                                protected void doRun() {
                                    executeBulk(task, bulkRequest, startTime, wrappedListener, responses, indicesThatCannotBeCreated);
                                }

                                @Override
                                public void onRejection(Exception rejectedException) {
                                    rejectedException.addSuppressed(e);
                                    super.onRejection(rejectedException);
                                }
                            });
                        }
                    }
                });
            }
        }
    } else {
        executeBulk(task, bulkRequest, startTime, listener, responses, emptyMap());
    }
}
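
One subtlety in Step 1 above is the merge function passed to Collectors.toMap: the same index can appear in many requests, and (v1, v2) -> v1 || v2 ORs the require_alias flags, so an index is kept out of auto-creation if any request insists on an alias. A stand-alone sketch of just that collection step (hypothetical data, plain JDK, no OpenSearch types):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class RequireAliasMergeDemo {
    public static void main(String[] args) {
        // Three hypothetical bulk items: two target "logs", one targets "metrics".
        List<Map.Entry<String, Boolean>> requests = List.of(
            Map.entry("logs", false),
            Map.entry("logs", true),      // this request requires "logs" to be an alias
            Map.entry("metrics", false)
        );
        // Same shape as Step 1: index name -> requireAlias, OR-ing the flags on collision.
        Map<String, Boolean> indices = requests.stream()
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue, (v1, v2) -> v1 || v2));
        System.out.println(indices); // e.g. {logs=true, metrics=false} -> "logs" is not auto-created
    }
}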

Use of org.opensearch.action.DocWriteRequest in the OpenSearch project (opensearch-project/OpenSearch).
Class TransportShardBulkAction, method performOnPrimary:
public static void performOnPrimary(
    BulkShardRequest request,
    IndexShard primary,
    UpdateHelper updateHelper,
    LongSupplier nowInMillisSupplier,
    MappingUpdatePerformer mappingUpdater,
    Consumer<ActionListener<Void>> waitForMappingUpdate,
    ActionListener<PrimaryResult<BulkShardRequest, BulkShardResponse>> listener,
    ThreadPool threadPool,
    String executorName
) {
    new ActionRunnable<PrimaryResult<BulkShardRequest, BulkShardResponse>>(listener) {

        private final Executor executor = threadPool.executor(executorName);

        private final BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(request, primary);

        @Override
        protected void doRun() throws Exception {
            while (context.hasMoreOperationsToExecute()) {
                if (executeBulkItemRequest(
                    context,
                    updateHelper,
                    nowInMillisSupplier,
                    mappingUpdater,
                    waitForMappingUpdate,
                    ActionListener.wrap(v -> executor.execute(this), this::onRejection)
                ) == false) {
                    // We are waiting for a mapping update on another thread; once it completes, the listener
                    // above re-submits this runnable to the executor, so we just break out here.
                    return;
                }
                // either completed and moved to next or reset
                assert context.isInitial();
            }
            // We're done, there's no more operations to execute so we resolve the wrapped listener
            finishRequest();
        }

        @Override
        public void onRejection(Exception e) {
            // We must finish the outstanding request. Finishing the outstanding request can include
            // refreshing and fsyncing. Therefore, we must force execution on the WRITE thread.
            executor.execute(new ActionRunnable<PrimaryResult<BulkShardRequest, BulkShardResponse>>(listener) {
                @Override
                protected void doRun() {
                    // Fail all operations after a bulk rejection hit an action that waited for a mapping update and finish the request
                    while (context.hasMoreOperationsToExecute()) {
                        context.setRequestToExecute(context.getCurrent());
                        final DocWriteRequest<?> docWriteRequest = context.getRequestToExecute();
                        onComplete(
                            exceptionToResult(e, primary, docWriteRequest.opType() == DocWriteRequest.OpType.DELETE, docWriteRequest.version()),
                            context,
                            null
                        );
                    }
                    finishRequest();
                }

                @Override
                public boolean isForceExecution() {
                    return true;
                }
            });
        }

        private void finishRequest() {
            ActionListener.completeWith(
                listener,
                () -> new WritePrimaryResult<>(
                    context.getBulkShardRequest(),
                    context.buildShardResponse(),
                    context.getLocationToSync(),
                    null,
                    context.getPrimary(),
                    logger
                )
            );
        }
    }.run();
}
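
The control flow above is a self-rescheduling runnable: an item either completes inline and the while loop advances, or it parks while waiting for a mapping update and the wrapped listener later re-submits the same runnable to the WRITE executor so doRun picks up where it left off; a rejection force-executes a task that fails the remaining items. A stripped-down sketch of just that scheduling pattern (plain JDK types, not the OpenSearch API):

import java.util.Queue;
import java.util.concurrent.Executor;

// Stripped-down sketch of the self-rescheduling loop used by performOnPrimary above.
final class ResumableLoop implements Runnable {
    private final Queue<String> items;   // operations still to execute
    private final Executor executor;     // stand-in for the WRITE thread pool

    ResumableLoop(Queue<String> items, Executor executor) {
        this.items = items;
        this.executor = executor;
    }

    @Override
    public void run() {
        while (!items.isEmpty()) {
            // Hand the item a "resume" callback that re-submits this runnable, mirroring
            // ActionListener.wrap(v -> executor.execute(this), ...) in the real code.
            boolean completedInline = process(items.peek(), () -> executor.execute(this));
            if (!completedInline) {
                // Parked (e.g. on a mapping update); the resume callback will reschedule us, so return.
                return;
            }
            items.poll(); // completed, move on to the next operation
        }
        finish(); // nothing left: resolve the listener / build the response
    }

    // Returns true if the item finished synchronously; otherwise it must invoke resume later.
    private boolean process(String item, Runnable resume) {
        return true;
    }

    private void finish() {}
}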