Use of org.elasticsearch.common.xcontent.XContentParser in the elasticsearch project by elastic.
Class ScaledFloatFieldMapper, method parseCreateField.
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
    final boolean includeInAll = context.includeInAll(this.includeInAll, this);
    XContentParser parser = context.parser();
    Object value;
    Number numericValue = null;
    if (context.externalValueSet()) {
        value = context.externalValue();
    } else if (parser.currentToken() == Token.VALUE_NULL) {
        value = null;
    } else if (coerce.value() && parser.currentToken() == Token.VALUE_STRING && parser.textLength() == 0) {
        value = null;
    } else {
        try {
            numericValue = NumberFieldMapper.NumberType.DOUBLE.parse(parser, coerce.value());
        } catch (IllegalArgumentException e) {
            if (ignoreMalformed.value()) {
                return;
            } else {
                throw e;
            }
        }
        if (includeInAll) {
            // preserve formatting
            value = parser.textOrNull();
        } else {
            value = numericValue;
        }
    }
    if (value == null) {
        value = fieldType().nullValue();
    }
    if (value == null) {
        return;
    }
    if (numericValue == null) {
        numericValue = NumberFieldMapper.NumberType.DOUBLE.parse(value, false);
    }
    if (includeInAll) {
        context.allEntries().addText(fieldType().name(), value.toString(), fieldType().boost());
    }
    double doubleValue = numericValue.doubleValue();
    if (Double.isFinite(doubleValue) == false) {
        // since we encode to a long, we have no way to carry NaNs and infinities
        throw new IllegalArgumentException("[scaled_float] only supports finite values, but got [" + doubleValue + "]");
    }
    long scaledValue = Math.round(doubleValue * fieldType().getScalingFactor());
    boolean indexed = fieldType().indexOptions() != IndexOptions.NONE;
    boolean docValued = fieldType().hasDocValues();
    boolean stored = fieldType().stored();
    fields.addAll(NumberFieldMapper.NumberType.LONG.createFields(fieldType().name(), scaledValue, indexed, docValued, stored));
}
Class CompletionFieldMapper, method parse.
/**
 * Parses and indexes inputs
 *
 * Parsing:
 *  Acceptable format:
 *   "STRING" - interpreted as field value (input)
 *   "ARRAY" - each element can be one of "OBJECT" (see below)
 *   "OBJECT" - { "input": STRING|ARRAY, "weight": STRING|INT, "contexts": ARRAY|OBJECT }
 *
 * Indexing:
 *  if context mappings are defined, delegates to {@link ContextMappings#addField(ParseContext.Document, String, String, int, Map)}
 *  else adds inputs as a {@link org.apache.lucene.search.suggest.document.SuggestField}
 */
@Override
public Mapper parse(ParseContext context) throws IOException {
    // parse
    XContentParser parser = context.parser();
    Token token = parser.currentToken();
    Map<String, CompletionInputMetaData> inputMap = new HashMap<>(1);
    if (token == Token.VALUE_NULL) {
        throw new MapperParsingException("completion field [" + fieldType().name() + "] does not support null values");
    } else if (token == Token.START_ARRAY) {
        while ((token = parser.nextToken()) != Token.END_ARRAY) {
            parse(context, token, parser, inputMap);
        }
    } else {
        parse(context, token, parser, inputMap);
    }
    // index
    for (Map.Entry<String, CompletionInputMetaData> completionInput : inputMap.entrySet()) {
        String input = completionInput.getKey();
        // truncate input
        if (input.length() > maxInputLength) {
            int len = Math.min(maxInputLength, input.length());
            if (Character.isHighSurrogate(input.charAt(len - 1))) {
                assert input.length() >= len + 1 && Character.isLowSurrogate(input.charAt(len));
                len += 1;
            }
            input = input.substring(0, len);
        }
        CompletionInputMetaData metaData = completionInput.getValue();
        if (fieldType().hasContextMappings()) {
            fieldType().getContextMappings().addField(context.doc(), fieldType().name(), input, metaData.weight, metaData.contexts);
        } else {
            context.doc().add(new SuggestField(fieldType().name(), input, metaData.weight));
        }
    }
    multiFields.parse(this, context);
    return null;
}
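The surrogate check in the truncation step above prevents cutting a UTF-16 surrogate pair in half when an input exceeds maxInputLength. A small, self-contained sketch of the same logic (not Elasticsearch code; the input string is made up):

// Illustrative sketch of surrogate-safe truncation, pure JDK.
public final class TruncationSketch {
    public static void main(String[] args) {
        String input = "ab\uD83D\uDE00cd";  // "ab", an emoji encoded as a surrogate pair, then "cd"
        int maxInputLength = 3;             // a naive cut at 3 would split the pair
        int len = Math.min(maxInputLength, input.length());
        if (Character.isHighSurrogate(input.charAt(len - 1))) {
            len += 1;  // extend the cut so the low surrogate stays with its pair
        }
        System.out.println(input.substring(0, len));  // prints "ab" plus the intact emoji
    }
}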
Class DocumentParser, method parseDocument.
ParsedDocument parseDocument(SourceToParse source) throws MapperParsingException {
    validateType(source);
    final Mapping mapping = docMapper.mapping();
    final ParseContext.InternalParseContext context;
    try (XContentParser parser = XContentHelper.createParser(docMapperParser.getXContentRegistry(), source.source())) {
        context = new ParseContext.InternalParseContext(indexSettings.getSettings(), docMapperParser, docMapper, source, parser);
        validateStart(parser);
        internalParseDocument(mapping, context, parser);
        validateEnd(parser);
    } catch (Exception e) {
        throw wrapInMapperParsingException(source, e);
    }
    String remainingPath = context.path().pathAsText("");
    if (remainingPath.isEmpty() == false) {
        throw new IllegalStateException("found leftover path elements: " + remainingPath);
    }
    reverseOrder(context);
    ParsedDocument doc = parsedDocument(source, context, createDynamicUpdate(mapping, docMapper, context.getDynamicMappers()));
    return doc;
}
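All of these snippets obtain an XContentParser and pull tokens from it one at a time. A minimal sketch of driving the parser by hand with the same 5.x-era factory method used above, against a made-up JSON payload:

import java.io.IOException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;

// Illustrative only: walks the token stream of a one-field JSON object.
public final class ParserSketch {
    public static void main(String[] args) throws IOException {
        try (XContentParser parser = XContentHelper.createParser(
                NamedXContentRegistry.EMPTY, new BytesArray("{\"field\":\"value\"}"))) {
            parser.nextToken();                  // START_OBJECT
            parser.nextToken();                  // FIELD_NAME
            String name = parser.currentName();  // "field"
            parser.nextToken();                  // VALUE_STRING
            String value = parser.text();        // "value"
            System.out.println(name + " = " + value);
        }
    }
}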
Class DocumentParser, method parseObjectOrNested.
static void parseObjectOrNested(ParseContext context, ObjectMapper mapper, boolean atRoot) throws IOException {
    if (mapper.isEnabled() == false) {
        context.parser().skipChildren();
        return;
    }
    XContentParser parser = context.parser();
    XContentParser.Token token = parser.currentToken();
    if (token == XContentParser.Token.VALUE_NULL) {
        // the object is null ("obj1" : null), simply bail
        return;
    }
    String currentFieldName = parser.currentName();
    if (token.isValue()) {
        throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but found a concrete value");
    }
    ObjectMapper.Nested nested = mapper.nested();
    if (nested.isNested()) {
        context = nestedContext(context, mapper);
    }
    // update the default value of include_in_all if necessary
    Boolean includeInAll = mapper.includeInAll();
    if (includeInAll != null) {
        context = context.setIncludeInAllDefault(includeInAll);
    }
    // if we are at the end of the previous object, advance
    if (token == XContentParser.Token.END_OBJECT) {
        token = parser.nextToken();
    }
    if (token == XContentParser.Token.START_OBJECT) {
        // if we are just starting an OBJECT, advance, this is the object we are parsing, we need the name first
        token = parser.nextToken();
    }
    innerParseObject(context, mapper, parser, currentFieldName, token);
    // restore the enable path flag
    if (nested.isNested()) {
        nested(context, nested);
    }
}
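The disabled-mapper branch at the top of this method relies on skipChildren() consuming an entire subtree in one call. A sketch of that behavior in isolation (illustrative JSON and class name, not Elasticsearch code):

import java.io.IOException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;

// Illustrative only: skipChildren() jumps past a nested object.
public final class SkipChildrenSketch {
    public static void main(String[] args) throws IOException {
        try (XContentParser parser = XContentHelper.createParser(
                NamedXContentRegistry.EMPTY,
                new BytesArray("{\"disabled\":{\"a\":{\"b\":1}},\"after\":2}"))) {
            parser.nextToken();    // START_OBJECT of the document
            parser.nextToken();    // FIELD_NAME "disabled"
            parser.nextToken();    // START_OBJECT of the disabled subtree
            parser.skipChildren(); // consumes everything up to the matching END_OBJECT
            parser.nextToken();    // FIELD_NAME "after": the subtree was skipped entirely
            System.out.println(parser.currentName()); // prints "after"
        }
    }
}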
Class BlobStoreRepository, method getRepositoryData.
@Override
public RepositoryData getRepositoryData() {
    try {
        final long indexGen = latestIndexBlobId();
        final String snapshotsIndexBlobName = INDEX_FILE_PREFIX + Long.toString(indexGen);
        RepositoryData repositoryData;
        try (InputStream blob = snapshotsBlobContainer.readBlob(snapshotsIndexBlobName)) {
            BytesStreamOutput out = new BytesStreamOutput();
            Streams.copy(blob, out);
            // EMPTY is safe here because RepositoryData#fromXContent calls namedObject
            try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes())) {
                repositoryData = RepositoryData.snapshotsFromXContent(parser, indexGen);
            } catch (NotXContentException e) {
                logger.warn("[{}] index blob is not valid x-content [{} bytes]", snapshotsIndexBlobName, out.bytes().length());
                throw e;
            }
        }
        // now load the incompatible snapshot ids, if they exist
        try (InputStream blob = snapshotsBlobContainer.readBlob(INCOMPATIBLE_SNAPSHOTS_BLOB)) {
            BytesStreamOutput out = new BytesStreamOutput();
            Streams.copy(blob, out);
            try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes())) {
                repositoryData = repositoryData.incompatibleSnapshotsFromXContent(parser);
            }
        } catch (NoSuchFileException e) {
            logger.debug("[{}] Incompatible snapshots blob [{}] does not exist, the likely reason is that "
                + "there are no incompatible snapshots in the repository", metadata.name(), INCOMPATIBLE_SNAPSHOTS_BLOB);
        }
        return repositoryData;
    } catch (NoSuchFileException ex) {
        // repository doesn't have an index blob, it's a new blank repo
        return RepositoryData.EMPTY;
    } catch (IOException ioe) {
        throw new RepositoryException(metadata.name(), "could not read repository data from index blob", ioe);
    }
}
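The pattern used twice above, copying a blob into a BytesStreamOutput and handing the buffered bytes to a parser, also works standalone. A sketch with a made-up stand-in for the blob (illustrative only; a real caller would pass the parser to the relevant fromXContent method):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;

// Illustrative only: buffer a blob, then parse the bytes as x-content.
public final class BlobParserSketch {
    public static void main(String[] args) throws IOException {
        InputStream blob = new ByteArrayInputStream(
            "{\"snapshots\":[]}".getBytes(StandardCharsets.UTF_8));  // stand-in for readBlob(...)
        BytesStreamOutput out = new BytesStreamOutput();
        Streams.copy(blob, out);  // buffer the whole blob, as getRepositoryData does
        try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes())) {
            parser.nextToken();   // START_OBJECT; from here a fromXContent method would take over
            System.out.println(parser.currentToken());
        }
    }
}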