Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.xcontent.XContentParser in project elasticsearch by elastic.
The class BulkRequest, method add.
public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType,
                       @Nullable String defaultRouting, @Nullable String[] defaultFields,
                       @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline,
                       @Nullable Object payload, boolean allowExplicitIndex, XContentType xContentType) throws IOException {
    XContent xContent = xContentType.xContent();
    int line = 0;
    int from = 0;
    int length = data.length();
    byte marker = xContent.streamSeparator();
    while (true) {
        int nextMarker = findNextMarker(marker, from, data, length);
        if (nextMarker == -1) {
            break;
        }
        line++;
        // EMPTY is safe here because we never call namedObject
        try (XContentParser parser = xContent.createParser(NamedXContentRegistry.EMPTY, data.slice(from, nextMarker - from))) {
            // move pointers
            from = nextMarker + 1;
            // Move to START_OBJECT
            XContentParser.Token token = parser.nextToken();
            if (token == null) {
                continue;
            }
            assert token == XContentParser.Token.START_OBJECT;
            // Move to FIELD_NAME, that's the action
            token = parser.nextToken();
            assert token == XContentParser.Token.FIELD_NAME;
            String action = parser.currentName();
            String index = defaultIndex;
            String type = defaultType;
            String id = null;
            String routing = defaultRouting;
            String parent = null;
            FetchSourceContext fetchSourceContext = defaultFetchSourceContext;
            String[] fields = defaultFields;
            String opType = null;
            long version = Versions.MATCH_ANY;
            VersionType versionType = VersionType.INTERNAL;
            int retryOnConflict = 0;
            String pipeline = defaultPipeline;
            // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id)
            // or START_OBJECT which will have another set of parameters
            token = parser.nextToken();
            if (token == XContentParser.Token.START_OBJECT) {
                String currentFieldName = null;
                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                    if (token == XContentParser.Token.FIELD_NAME) {
                        currentFieldName = parser.currentName();
                    } else if (token.isValue()) {
                        if ("_index".equals(currentFieldName)) {
                            if (!allowExplicitIndex) {
                                throw new IllegalArgumentException("explicit index in bulk is not allowed");
                            }
                            index = parser.text();
                        } else if ("_type".equals(currentFieldName)) {
                            type = parser.text();
                        } else if ("_id".equals(currentFieldName)) {
                            id = parser.text();
                        } else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
                            routing = parser.text();
                        } else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
                            parent = parser.text();
                        } else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
                            opType = parser.text();
                        } else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
                            version = parser.longValue();
                        } else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName)
                                || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
                            versionType = VersionType.fromString(parser.text());
                        } else if ("_retry_on_conflict".equals(currentFieldName) || "_retryOnConflict".equals(currentFieldName)) {
                            retryOnConflict = parser.intValue();
                        } else if ("pipeline".equals(currentFieldName)) {
                            pipeline = parser.text();
                        } else if ("fields".equals(currentFieldName)) {
                            throw new IllegalArgumentException("Action/metadata line [" + line
                                    + "] contains a simple value for parameter [fields] while a list is expected");
                        } else if ("_source".equals(currentFieldName)) {
                            fetchSourceContext = FetchSourceContext.fromXContent(parser);
                        } else {
                            throw new IllegalArgumentException("Action/metadata line [" + line
                                    + "] contains an unknown parameter [" + currentFieldName + "]");
                        }
                    } else if (token == XContentParser.Token.START_ARRAY) {
                        if ("fields".equals(currentFieldName)) {
                            DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
                            List<Object> values = parser.list();
                            fields = values.toArray(new String[values.size()]);
                        } else {
                            throw new IllegalArgumentException("Malformed action/metadata line [" + line
                                    + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
                        }
                    } else if (token == XContentParser.Token.START_OBJECT && "_source".equals(currentFieldName)) {
                        fetchSourceContext = FetchSourceContext.fromXContent(parser);
                    } else if (token != XContentParser.Token.VALUE_NULL) {
                        throw new IllegalArgumentException("Malformed action/metadata line [" + line
                                + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
                    }
                }
            } else if (token != XContentParser.Token.END_OBJECT) {
                throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected "
                        + XContentParser.Token.START_OBJECT + " or " + XContentParser.Token.END_OBJECT
                        + " but found [" + token + "]");
            }
            if ("delete".equals(action)) {
                add(new DeleteRequest(index, type, id).routing(routing).parent(parent)
                        .version(version).versionType(versionType), payload);
            } else {
                nextMarker = findNextMarker(marker, from, data, length);
                if (nextMarker == -1) {
                    break;
                }
                line++;
                // we use internalAdd so we don't fork here, this allows us not to copy over the big byte array to small chunks
                // of index request.
if ("index".equals(action)) {
if (opType == null) {
internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType).setPipeline(pipeline).source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload);
} else {
internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType).create("create".equals(opType)).setPipeline(pipeline).source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload);
}
} else if ("create".equals(action)) {
internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType).create(true).setPipeline(pipeline).source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload);
} else if ("update".equals(action)) {
UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).parent(parent).retryOnConflict(retryOnConflict).version(version).versionType(versionType).routing(routing).parent(parent);
// EMPTY is safe here because we never call namedObject
try (XContentParser sliceParser = xContent.createParser(NamedXContentRegistry.EMPTY, sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType))) {
updateRequest.fromXContent(sliceParser);
}
if (fetchSourceContext != null) {
updateRequest.fetchSource(fetchSourceContext);
}
if (fields != null) {
updateRequest.fields(fields);
}
IndexRequest upsertRequest = updateRequest.upsertRequest();
if (upsertRequest != null) {
upsertRequest.version(version);
upsertRequest.versionType(versionType);
}
IndexRequest doc = updateRequest.doc();
if (doc != null) {
doc.version(version);
doc.versionType(versionType);
}
internalAdd(updateRequest, payload);
}
// move pointers
from = nextMarker + 1;
}
}
}
return this;
}
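As a usage sketch (not taken from the Graylog or Elasticsearch sources): the method above is what ultimately parses a newline-delimited bulk body. Assuming the 5.x-era public overload add(BytesReference, String, String, XContentType) that delegates here, and using an invented index name and documents, a caller might drive it like this; imports are shown unshaded, whereas under Graylog's shading they would carry the org.graylog.shaded.elasticsearch7 prefix.

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;

public class BulkBodyExample {
    public static void main(String[] args) throws Exception {
        // Two actions: an index (action line plus source line) and a delete (action line only).
        // Index name "logs", type "doc", and the ids are hypothetical.
        String body =
                "{ \"index\" : { \"_index\" : \"logs\", \"_type\" : \"doc\", \"_id\" : \"1\" } }\n" +
                "{ \"message\" : \"hello\" }\n" +
                "{ \"delete\" : { \"_index\" : \"logs\", \"_type\" : \"doc\", \"_id\" : \"2\" } }\n";

        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.add(new BytesArray(body), null, null, XContentType.JSON);
        System.out.println(bulkRequest.numberOfActions()); // expect 2
    }
}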
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.xcontent.XContentParser in project elasticsearch by elastic.
The class RepositoriesMetaData, method fromXContent.
public static RepositoriesMetaData fromXContent(XContentParser parser) throws IOException {
    XContentParser.Token token;
    List<RepositoryMetaData> repository = new ArrayList<>();
    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
        if (token == XContentParser.Token.FIELD_NAME) {
            String name = parser.currentName();
            if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
                throw new ElasticsearchParseException("failed to parse repository [{}], expected object", name);
            }
            String type = null;
            Settings settings = Settings.EMPTY;
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    String currentFieldName = parser.currentName();
                    if ("type".equals(currentFieldName)) {
                        if (parser.nextToken() != XContentParser.Token.VALUE_STRING) {
                            throw new ElasticsearchParseException("failed to parse repository [{}], unknown type", name);
                        }
                        type = parser.text();
                    } else if ("settings".equals(currentFieldName)) {
                        if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
                            throw new ElasticsearchParseException("failed to parse repository [{}], incompatible params", name);
                        }
                        settings = Settings.builder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build();
                    } else {
                        throw new ElasticsearchParseException("failed to parse repository [{}], unknown field [{}]", name, currentFieldName);
                    }
                } else {
                    throw new ElasticsearchParseException("failed to parse repository [{}]", name);
                }
            }
            if (type == null) {
                throw new ElasticsearchParseException("failed to parse repository [{}], missing repository type", name);
            }
            repository.add(new RepositoryMetaData(name, type, settings));
        } else {
            throw new ElasticsearchParseException("failed to parse repositories");
        }
    }
    return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()]));
}
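For orientation, a small sketch of the JSON shape this method consumes and one way it might be invoked. The repository name, type, and settings are invented; note the method expects the parser to already be advanced onto the outer START_OBJECT, which the sketch does explicitly before calling it.

import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

public class RepositoriesParseExample {
    public static void main(String[] args) throws Exception {
        // Each top-level key is a repository name mapping to {"type": ..., "settings": {...}}.
        String json = "{\"my_backup\":{\"type\":\"fs\",\"settings\":{\"location\":\"/mnt/backups\"}}}";
        try (XContentParser parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, json)) {
            parser.nextToken(); // step onto the outer START_OBJECT; fromXContent loops until its matching END_OBJECT
            RepositoriesMetaData repositories = RepositoriesMetaData.fromXContent(parser);
            System.out.println(repositories.repositories().size()); // expect 1
        }
    }
}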
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.xcontent.XContentParser in project elasticsearch by elastic.
The class MetaDataStateFormat, method read.
/**
 * Reads the state from a given file and compares the expected version against the actual version of
 * the state.
 */
public final T read(NamedXContentRegistry namedXContentRegistry, Path file) throws IOException {
    try (Directory dir = newDirectory(file.getParent())) {
        try (IndexInput indexInput = dir.openInput(file.getFileName().toString(), IOContext.DEFAULT)) {
            // We checksum the entire file before we even go and parse it. If it's corrupted we barf right here.
            CodecUtil.checksumEntireFile(indexInput);
            final int fileVersion = CodecUtil.checkHeader(indexInput, STATE_FILE_CODEC, MIN_COMPATIBLE_STATE_FILE_VERSION,
                    STATE_FILE_VERSION);
            final XContentType xContentType = XContentType.values()[indexInput.readInt()];
            if (fileVersion == STATE_FILE_VERSION_ES_2X_AND_BELOW) {
                // format version 0, wrote a version that always came from the content state file and was never used
                indexInput.readLong(); // version currently unused
            }
            long filePointer = indexInput.getFilePointer();
            long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer;
            try (IndexInput slice = indexInput.slice("state_xcontent", filePointer, contentSize)) {
                try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(namedXContentRegistry,
                        new InputStreamIndexInput(slice, contentSize))) {
                    return fromXContent(parser);
                }
            }
        } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
            // we trick this into a dedicated exception with the original stacktrace
            throw new CorruptStateException(ex);
        }
    }
}
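To show how read is typically wired up, here is a hedged sketch loosely modeled on the way 5.x-era Elasticsearch defines its global-state format. The construction details and the on-disk path are assumptions for illustration, not the verbatim Elasticsearch code; production code would also pass the node's real NamedXContentRegistry so custom metadata sections can be resolved.

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.gateway.MetaDataStateFormat;

public class GlobalStateReadExample {

    // A concrete subclass supplies the toXContent/fromXContent halves; read(...) above
    // then handles the Lucene codec header, checksum, and parsing.
    static final MetaDataStateFormat<MetaData> FORMAT = new MetaDataStateFormat<MetaData>(XContentType.SMILE, "global-") {
        @Override
        public void toXContent(XContentBuilder builder, MetaData state) throws IOException {
            MetaData.Builder.toXContent(state, builder, ToXContent.EMPTY_PARAMS);
        }

        @Override
        public MetaData fromXContent(XContentParser parser) throws IOException {
            return MetaData.Builder.fromXContent(parser);
        }
    };

    public static void main(String[] args) throws IOException {
        // Hypothetical path to a state file written by a previous run of the node.
        Path file = Paths.get("/var/lib/elasticsearch/nodes/0/_state/global-3.st");
        MetaData state = FORMAT.read(NamedXContentRegistry.EMPTY, file);
        System.out.println(state.clusterUUID());
    }
}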
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.xcontent.XContentParser in project elasticsearch by elastic.
The class IndicesService, method buildAliasFilter.
public AliasFilter buildAliasFilter(ClusterState state, String index, String... expressions) {
    /* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch
     * of dependencies we pass in a function that can perform the parsing. */
    CheckedFunction<byte[], QueryBuilder, IOException> filterParser = bytes -> {
        try (XContentParser parser = XContentFactory.xContent(bytes).createParser(xContentRegistry, bytes)) {
            return new QueryParseContext(parser).parseInnerQueryBuilder();
        }
    };
    String[] aliases = indexNameExpressionResolver.filteringAliases(state, index, expressions);
    IndexMetaData indexMetaData = state.metaData().index(index);
    return new AliasFilter(ShardSearchRequest.parseAliasFilter(filterParser, indexMetaData, aliases), aliases);
}
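A hedged sketch of what a caller sees: resolving the filter that a filtering alias imposes on searches against its backing index. The alias and index names are invented, and the snippet assumes code that already holds the IndicesService and the current ClusterState.

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.search.internal.AliasFilter;

public class AliasFilterExample {
    // Hypothetical helper: fetch the combined alias filter for searches routed
    // through alias "recent-logs" against index "logs-2017".
    public static QueryBuilder resolveAliasFilter(IndicesService indicesService, ClusterState state) {
        AliasFilter aliasFilter = indicesService.buildAliasFilter(state, "logs-2017", "recent-logs");
        // getAliases() lists the filtering aliases that applied; getQueryBuilder()
        // is null when none of them define a filter.
        return aliasFilter.getQueryBuilder();
    }
}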
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.xcontent.XContentParser in project elasticsearch by elastic.
The class SearchModule, method registerScoreFunction.
private void registerScoreFunction(ScoreFunctionSpec<?> scoreFunction) {
    namedWriteables.add(new NamedWriteableRegistry.Entry(ScoreFunctionBuilder.class,
            scoreFunction.getName().getPreferredName(), scoreFunction.getReader()));
    // TODO remove funky contexts
    namedXContents.add(new NamedXContentRegistry.Entry(ScoreFunctionBuilder.class, scoreFunction.getName(),
            (XContentParser p, Object c) -> scoreFunction.getParser().fromXContent((QueryParseContext) c)));
}
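For context, a hedged sketch of the kind of spec this method receives. The shape mirrors how 5.x SearchModule registers the built-in random_score function, though the exact call site may differ slightly from the real source:

// Inside SearchModule's registration of built-ins: the spec carries the function's
// name, its StreamInput reader for the transport layer, and its XContent parser.
registerScoreFunction(new ScoreFunctionSpec<>(RandomScoreFunctionBuilder.NAME,
        RandomScoreFunctionBuilder::new, RandomScoreFunctionBuilder::fromXContent));

Plugins contribute the same kind of specs from outside the module, via SearchPlugin's getScoreFunctions() extension point, which is why both a NamedWriteableRegistry entry and a NamedXContentRegistry entry are recorded per function.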