Use of org.elasticsearch.index.VersionType in project elasticsearch by elastic.
In class Request, method bulk:
static Request bulk(BulkRequest bulkRequest) throws IOException {
    Params parameters = Params.builder();
    parameters.withTimeout(bulkRequest.timeout());
    parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy());
    // Bulk API only supports newline delimited JSON or Smile. Before executing
    // the bulk, we need to check that all requests have the same content-type
    // and this content-type is supported by the Bulk API.
    XContentType bulkContentType = null;
    for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
        DocWriteRequest<?> request = bulkRequest.requests().get(i);
        DocWriteRequest.OpType opType = request.opType();
        if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
            bulkContentType = enforceSameContentType((IndexRequest) request, bulkContentType);
        } else if (opType == DocWriteRequest.OpType.UPDATE) {
            UpdateRequest updateRequest = (UpdateRequest) request;
            if (updateRequest.doc() != null) {
                bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType);
            }
            if (updateRequest.upsertRequest() != null) {
                bulkContentType = enforceSameContentType(updateRequest.upsertRequest(), bulkContentType);
            }
        }
    }
    if (bulkContentType == null) {
        bulkContentType = XContentType.JSON;
    }
    byte separator = bulkContentType.xContent().streamSeparator();
    ContentType requestContentType = ContentType.create(bulkContentType.mediaType());
    ByteArrayOutputStream content = new ByteArrayOutputStream();
    for (DocWriteRequest<?> request : bulkRequest.requests()) {
        DocWriteRequest.OpType opType = request.opType();
        try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) {
            metadata.startObject();
            {
                metadata.startObject(opType.getLowercase());
                if (Strings.hasLength(request.index())) {
                    metadata.field("_index", request.index());
                }
                if (Strings.hasLength(request.type())) {
                    metadata.field("_type", request.type());
                }
                if (Strings.hasLength(request.id())) {
                    metadata.field("_id", request.id());
                }
                if (Strings.hasLength(request.routing())) {
                    metadata.field("_routing", request.routing());
                }
                if (Strings.hasLength(request.parent())) {
                    metadata.field("_parent", request.parent());
                }
                if (request.version() != Versions.MATCH_ANY) {
                    metadata.field("_version", request.version());
                }
                VersionType versionType = request.versionType();
                if (versionType != VersionType.INTERNAL) {
                    if (versionType == VersionType.EXTERNAL) {
                        metadata.field("_version_type", "external");
                    } else if (versionType == VersionType.EXTERNAL_GTE) {
                        metadata.field("_version_type", "external_gte");
                    } else if (versionType == VersionType.FORCE) {
                        metadata.field("_version_type", "force");
                    }
                }
                if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
                    IndexRequest indexRequest = (IndexRequest) request;
                    if (Strings.hasLength(indexRequest.getPipeline())) {
                        metadata.field("pipeline", indexRequest.getPipeline());
                    }
                } else if (opType == DocWriteRequest.OpType.UPDATE) {
                    UpdateRequest updateRequest = (UpdateRequest) request;
                    if (updateRequest.retryOnConflict() > 0) {
                        metadata.field("_retry_on_conflict", updateRequest.retryOnConflict());
                    }
                    if (updateRequest.fetchSource() != null) {
                        metadata.field("_source", updateRequest.fetchSource());
                    }
                }
                metadata.endObject();
            }
            metadata.endObject();
            BytesRef metadataSource = metadata.bytes().toBytesRef();
            content.write(metadataSource.bytes, metadataSource.offset, metadataSource.length);
            content.write(separator);
        }
        BytesRef source = null;
        if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
            IndexRequest indexRequest = (IndexRequest) request;
            BytesReference indexSource = indexRequest.source();
            XContentType indexXContentType = indexRequest.getContentType();
            try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, indexSource, indexXContentType)) {
                try (XContentBuilder builder = XContentBuilder.builder(bulkContentType.xContent())) {
                    builder.copyCurrentStructure(parser);
                    source = builder.bytes().toBytesRef();
                }
            }
        } else if (opType == DocWriteRequest.OpType.UPDATE) {
            source = XContentHelper.toXContent((UpdateRequest) request, bulkContentType, false).toBytesRef();
        }
        if (source != null) {
            content.write(source.bytes, source.offset, source.length);
            content.write(separator);
        }
    }
    HttpEntity entity = new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType);
    return new Request(HttpPost.METHOD_NAME, "/_bulk", parameters.getParams(), entity);
}
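For orientation, here is a minimal, hypothetical caller of the method above. It builds a BulkRequest whose index action carries a non-default VersionType, which is what makes Request.bulk write a "_version_type" field into the action metadata line. The index, type, id, and document values are invented, and because Request is package-private in org.elasticsearch.client, the final conversion call is only shown in a comment.

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;

public class BulkConversionSketch {
    public static void main(String[] args) {
        BulkRequest bulk = new BulkRequest();
        // An index action with an external version: Request.bulk writes "_version"
        // and "_version_type": "external" into its metadata line.
        bulk.add(new IndexRequest("posts", "doc", "1")
                .source("{\"title\":\"one\"}", XContentType.JSON)
                .version(12L)
                .versionType(VersionType.EXTERNAL));
        // A delete action with the default INTERNAL version type: no "_version_type" field is written.
        bulk.add(new DeleteRequest("posts", "doc", "2"));

        // Request is package-private in org.elasticsearch.client, so the conversion itself can only be
        // invoked from within that package, e.g. Request request = Request.bulk(bulk);
        // The resulting entity is newline-delimited JSON posted to /_bulk.
        System.out.println("bulk actions: " + bulk.numberOfActions());
    }
}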
Use of org.elasticsearch.index.VersionType in project elasticsearch by elastic.
In class BulkRequest, method add:
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType,
        @Nullable String defaultRouting, @Nullable String[] defaultFields,
        @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline,
        @Nullable Object payload, boolean allowExplicitIndex, XContentType xContentType) throws IOException {
    XContent xContent = xContentType.xContent();
    int line = 0;
    int from = 0;
    int length = data.length();
    byte marker = xContent.streamSeparator();
    while (true) {
        int nextMarker = findNextMarker(marker, from, data, length);
        if (nextMarker == -1) {
            break;
        }
        line++;
        // EMPTY is safe here because we never call namedObject
        try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, data.slice(from, nextMarker - from))) {
            // move pointers
            from = nextMarker + 1;
            // Move to START_OBJECT
            XContentParser.Token token = parser.nextToken();
            if (token == null) {
                continue;
            }
            assert token == XContentParser.Token.START_OBJECT;
            // Move to FIELD_NAME, that's the action
            token = parser.nextToken();
            assert token == XContentParser.Token.FIELD_NAME;
            String action = parser.currentName();
            String index = defaultIndex;
            String type = defaultType;
            String id = null;
            String routing = defaultRouting;
            String parent = null;
            FetchSourceContext fetchSourceContext = defaultFetchSourceContext;
            String[] fields = defaultFields;
            String opType = null;
            long version = Versions.MATCH_ANY;
            VersionType versionType = VersionType.INTERNAL;
            int retryOnConflict = 0;
            String pipeline = defaultPipeline;
            // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id)
            // or START_OBJECT which will have another set of parameters
            token = parser.nextToken();
            if (token == XContentParser.Token.START_OBJECT) {
                String currentFieldName = null;
                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                    if (token == XContentParser.Token.FIELD_NAME) {
                        currentFieldName = parser.currentName();
                    } else if (token.isValue()) {
                        if ("_index".equals(currentFieldName)) {
                            if (!allowExplicitIndex) {
                                throw new IllegalArgumentException("explicit index in bulk is not allowed");
                            }
                            index = parser.text();
                        } else if ("_type".equals(currentFieldName)) {
                            type = parser.text();
                        } else if ("_id".equals(currentFieldName)) {
                            id = parser.text();
                        } else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
                            routing = parser.text();
                        } else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
                            parent = parser.text();
                        } else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
                            opType = parser.text();
                        } else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
                            version = parser.longValue();
                        } else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName)
                                || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
                            versionType = VersionType.fromString(parser.text());
                        } else if ("_retry_on_conflict".equals(currentFieldName) || "_retryOnConflict".equals(currentFieldName)) {
                            retryOnConflict = parser.intValue();
                        } else if ("pipeline".equals(currentFieldName)) {
                            pipeline = parser.text();
                        } else if ("fields".equals(currentFieldName)) {
                            throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected");
                        } else if ("_source".equals(currentFieldName)) {
                            fetchSourceContext = FetchSourceContext.fromXContent(parser);
                        } else {
                            throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]");
                        }
                    } else if (token == XContentParser.Token.START_ARRAY) {
                        if ("fields".equals(currentFieldName)) {
                            DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
                            List<Object> values = parser.list();
                            fields = values.toArray(new String[values.size()]);
                        } else {
                            throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
                        }
                    } else if (token == XContentParser.Token.START_OBJECT && "_source".equals(currentFieldName)) {
                        fetchSourceContext = FetchSourceContext.fromXContent(parser);
                    } else if (token != XContentParser.Token.VALUE_NULL) {
                        throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
                    }
                }
            } else if (token != XContentParser.Token.END_OBJECT) {
                throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " + XContentParser.Token.START_OBJECT + " or " + XContentParser.Token.END_OBJECT + " but found [" + token + "]");
            }
            if ("delete".equals(action)) {
                add(new DeleteRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType), payload);
            } else {
                nextMarker = findNextMarker(marker, from, data, length);
                if (nextMarker == -1) {
                    break;
                }
                line++;
                // order is important, we set parent after routing, so routing will be set to parent if not set explicitly
                // we use internalAdd so we don't fork here, this allows us not to copy over the big byte array to small chunks
                // of index request.
if ("index".equals(action)) {
if (opType == null) {
internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType).setPipeline(pipeline).source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload);
} else {
internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType).create("create".equals(opType)).setPipeline(pipeline).source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload);
}
} else if ("create".equals(action)) {
internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType).create(true).setPipeline(pipeline).source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload);
} else if ("update".equals(action)) {
UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).parent(parent).retryOnConflict(retryOnConflict).version(version).versionType(versionType).routing(routing).parent(parent);
// EMPTY is safe here because we never call namedObject
try (XContentParser sliceParser = xContent.createParser(NamedXContentRegistry.EMPTY, sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType))) {
updateRequest.fromXContent(sliceParser);
}
if (fetchSourceContext != null) {
updateRequest.fetchSource(fetchSourceContext);
}
if (fields != null) {
updateRequest.fields(fields);
}
IndexRequest upsertRequest = updateRequest.upsertRequest();
if (upsertRequest != null) {
upsertRequest.version(version);
upsertRequest.versionType(versionType);
}
IndexRequest doc = updateRequest.doc();
if (doc != null) {
doc.version(version);
doc.versionType(versionType);
}
internalAdd(updateRequest, payload);
}
// move pointers
from = nextMarker + 1;
}
}
}
return this;
}
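Below is a minimal sketch of the newline-delimited body this parser consumes, fed through the same public overload shown above with every default left null and explicit indices allowed. The index, type, and document values are invented. Note that every line, including the last one, must end with the stream separator (a newline for JSON), otherwise findNextMarker never finds it and the trailing action is skipped.

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;

public class BulkBodyParsingSketch {
    public static void main(String[] args) throws Exception {
        // Two actions in newline-delimited JSON: an index with an explicit external version, and a delete.
        String body =
                "{\"index\":{\"_index\":\"posts\",\"_type\":\"doc\",\"_id\":\"1\",\"_version\":3,\"_version_type\":\"external\"}}\n"
                + "{\"title\":\"one\"}\n"
                + "{\"delete\":{\"_index\":\"posts\",\"_type\":\"doc\",\"_id\":\"2\"}}\n";

        BulkRequest bulk = new BulkRequest();
        // Same overload as above; all defaults are left null and explicit indices are allowed.
        bulk.add(new BytesArray(body), null, null, null, null, null, null, null, true, XContentType.JSON);
        System.out.println("parsed actions: " + bulk.numberOfActions()); // 2
    }
}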
Use of org.elasticsearch.index.VersionType in project elasticsearch by elastic.
In class TransportShardBulkAction, method executeDeleteRequestOnReplica:
private static Engine.DeleteResult executeDeleteRequestOnReplica(DocWriteResponse primaryResponse, DeleteRequest request, IndexShard replica) throws IOException {
    final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
    final long version = primaryResponse.getVersion();
    assert versionType.validateVersionForWrites(version);
    final Engine.Delete delete = replica.prepareDeleteOnReplica(request.type(), request.id(),
            primaryResponse.getSeqNo(), request.primaryTerm(), version, versionType);
    return replica.delete(delete);
}
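Both replica-side helpers first translate the request's version type through versionTypeForReplicationAndRecovery() and then validate the version taken from the primary's response. The exact translation is an implementation detail of VersionType, so the sketch below (which only assumes an Elasticsearch jar of this vintage on the classpath) probes it at runtime instead of hard-coding expected values.

import org.elasticsearch.index.VersionType;

public class ReplicaVersionTypeProbe {
    public static void main(String[] args) {
        // Print how each client-facing version type is rewritten for the replica/recovery path.
        for (VersionType versionType : VersionType.values()) {
            System.out.println(versionType + " -> " + versionType.versionTypeForReplicationAndRecovery());
        }
    }
}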
Use of org.elasticsearch.index.VersionType in project elasticsearch by elastic.
In class TransportShardBulkAction, method executeIndexRequestOnReplica:
/**
 * Execute the given {@link IndexRequest} on a replica shard, throwing a
 * {@link RetryOnReplicaException} if the operation needs to be re-tried.
 */
public static Engine.IndexResult executeIndexRequestOnReplica(DocWriteResponse primaryResponse, IndexRequest request, IndexShard replica) throws IOException {
    final ShardId shardId = replica.shardId();
    SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(),
            request.type(), request.id(), request.source(), request.getContentType())
            .routing(request.routing()).parent(request.parent());
    final Engine.Index operation;
    final long version = primaryResponse.getVersion();
    final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
    assert versionType.validateVersionForWrites(version);
    final long seqNo = primaryResponse.getSeqNo();
    try {
        operation = replica.prepareIndexOnReplica(sourceToParse, seqNo, version, versionType, request.getAutoGeneratedTimestamp(), request.isRetry());
    } catch (MapperParsingException e) {
        return new Engine.IndexResult(e, version, seqNo);
    }
    Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
    if (update != null) {
        throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
    }
    return replica.index(operation);
}
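The replica re-parses the document from the original source bytes of the request. The standalone sketch below builds the same kind of SourceToParse outside of a shard, with invented index, type, id, and document values, to show the pieces executeIndexRequestOnReplica hands to prepareIndexOnReplica; routing and parent, which the method above copies from the request, are simply omitted here.

import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.SourceToParse;

public class ReplicaSourceToParseSketch {
    public static void main(String[] args) {
        // Mirrors the SourceToParse built in executeIndexRequestOnReplica, but with literal values
        // instead of an IndexRequest and a shard: origin REPLICA, index/type/id, the raw source
        // bytes, and the content type of those bytes.
        SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA,
                "posts", "doc", "1", new BytesArray("{\"title\":\"one\"}"), XContentType.JSON);
        System.out.println("prepared replica source: " + sourceToParse);
    }
}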
Use of org.elasticsearch.index.VersionType in project elasticsearch by elastic.
In class RequestTests, method testUpdate:
public void testUpdate() throws IOException {
    XContentType xContentType = randomFrom(XContentType.values());
    Map<String, String> expectedParams = new HashMap<>();
    String index = randomAsciiOfLengthBetween(3, 10);
    String type = randomAsciiOfLengthBetween(3, 10);
    String id = randomAsciiOfLengthBetween(3, 10);
    UpdateRequest updateRequest = new UpdateRequest(index, type, id);
    updateRequest.detectNoop(randomBoolean());
    if (randomBoolean()) {
        BytesReference source = RandomObjects.randomSource(random(), xContentType);
        updateRequest.doc(new IndexRequest().source(source, xContentType));
        boolean docAsUpsert = randomBoolean();
        updateRequest.docAsUpsert(docAsUpsert);
        if (docAsUpsert) {
            expectedParams.put("doc_as_upsert", "true");
        }
    } else {
        updateRequest.script(new Script("_value + 1"));
        updateRequest.scriptedUpsert(randomBoolean());
    }
    if (randomBoolean()) {
        BytesReference source = RandomObjects.randomSource(random(), xContentType);
        updateRequest.upsert(new IndexRequest().source(source, xContentType));
    }
    if (randomBoolean()) {
        String routing = randomAsciiOfLengthBetween(3, 10);
        updateRequest.routing(routing);
        expectedParams.put("routing", routing);
    }
    if (randomBoolean()) {
        String parent = randomAsciiOfLengthBetween(3, 10);
        updateRequest.parent(parent);
        expectedParams.put("parent", parent);
    }
    if (randomBoolean()) {
        String timeout = randomTimeValue();
        updateRequest.timeout(timeout);
        expectedParams.put("timeout", timeout);
    } else {
        expectedParams.put("timeout", ReplicationRequest.DEFAULT_TIMEOUT.getStringRep());
    }
    if (randomBoolean()) {
        WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
        updateRequest.setRefreshPolicy(refreshPolicy);
        if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
            expectedParams.put("refresh", refreshPolicy.getValue());
        }
    }
    if (randomBoolean()) {
        int waitForActiveShards = randomIntBetween(0, 10);
        updateRequest.waitForActiveShards(waitForActiveShards);
        expectedParams.put("wait_for_active_shards", String.valueOf(waitForActiveShards));
    }
    if (randomBoolean()) {
        long version = randomLong();
        updateRequest.version(version);
        if (version != Versions.MATCH_ANY) {
            expectedParams.put("version", Long.toString(version));
        }
    }
    if (randomBoolean()) {
        VersionType versionType = randomFrom(VersionType.values());
        updateRequest.versionType(versionType);
        if (versionType != VersionType.INTERNAL) {
            expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
        }
    }
    if (randomBoolean()) {
        int retryOnConflict = randomIntBetween(0, 5);
        updateRequest.retryOnConflict(retryOnConflict);
        if (retryOnConflict > 0) {
            expectedParams.put("retry_on_conflict", String.valueOf(retryOnConflict));
        }
    }
    if (randomBoolean()) {
        randomizeFetchSourceContextParams(updateRequest::fetchSource, expectedParams);
    }
    Request request = Request.update(updateRequest);
    assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.endpoint);
    assertEquals(expectedParams, request.params);
    assertEquals("POST", request.method);
    HttpEntity entity = request.entity;
    assertNotNull(entity);
    assertTrue(entity instanceof ByteArrayEntity);
    UpdateRequest parsedUpdateRequest = new UpdateRequest();
    XContentType entityContentType = XContentType.fromMediaTypeOrFormat(entity.getContentType().getValue());
    try (XContentParser parser = createParser(entityContentType.xContent(), entity.getContent())) {
        parsedUpdateRequest.fromXContent(parser);
    }
    assertEquals(updateRequest.scriptedUpsert(), parsedUpdateRequest.scriptedUpsert());
    assertEquals(updateRequest.docAsUpsert(), parsedUpdateRequest.docAsUpsert());
    assertEquals(updateRequest.detectNoop(), parsedUpdateRequest.detectNoop());
    assertEquals(updateRequest.fetchSource(), parsedUpdateRequest.fetchSource());
    assertEquals(updateRequest.script(), parsedUpdateRequest.script());
    if (updateRequest.doc() != null) {
        assertToXContentEquivalent(updateRequest.doc().source(), parsedUpdateRequest.doc().source(), xContentType);
    } else {
        assertNull(parsedUpdateRequest.doc());
    }
    if (updateRequest.upsertRequest() != null) {
        assertToXContentEquivalent(updateRequest.upsertRequest().source(), parsedUpdateRequest.upsertRequest().source(), xContentType);
    } else {
        assertNull(parsedUpdateRequest.upsertRequest());
    }
}
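As a concrete, non-randomized counterpart to the versionType branch of this test, the sketch below configures an UpdateRequest with an external version and a positive retry count; the index, type, id, and document values are invented. According to the assertions above, Request.update would then target POST /posts/doc/1/_update with version, version_type, and retry_on_conflict URL parameters.

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;

public class UpdateVersionTypeSketch {
    public static void main(String[] args) {
        // A non-INTERNAL version type plus a positive retryOnConflict, the combination the
        // randomized branches above may or may not pick on a given run.
        UpdateRequest updateRequest = new UpdateRequest("posts", "doc", "1")
                .doc(new IndexRequest().source("{\"title\":\"one\"}", XContentType.JSON))
                .version(7L)
                .versionType(VersionType.EXTERNAL)
                .retryOnConflict(2);
        // Per the assertions in testUpdate, Request.update(updateRequest) would use
        // POST /posts/doc/1/_update with version=7, version_type=external, retry_on_conflict=2.
        System.out.println(updateRequest);
    }
}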