Use of org.elasticsearch.ElasticsearchParseException in project elasticsearch by elastic.
The class DateProcessorFactoryTests, method testParseMatchFormatsFailure.
public void testParseMatchFormatsFailure() throws Exception {
    DateProcessor.Factory factory = new DateProcessor.Factory();
    Map<String, Object> config = new HashMap<>();
    String sourceField = randomAsciiOfLengthBetween(1, 10);
    config.put("field", sourceField);
    config.put("formats", "dd/MM/yyyy");
    try {
        factory.create(null, null, config);
        fail("processor creation should have failed");
    } catch (ElasticsearchParseException e) {
        assertThat(e.getMessage(), containsString("[formats] property isn't a list, but of type [java.lang.String]"));
    }
}
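The try/fail/catch idiom above can also be written with expectThrows, which ESTestCase inherits from Lucene's test framework. A minimal sketch, assuming the same factory and config as in the test above:

    // Alternative assertion style: expectThrows returns the caught exception for inspection.
    ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class,
        () -> factory.create(null, null, config));
    assertThat(e.getMessage(), containsString("[formats] property isn't a list, but of type [java.lang.String]"));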
Use of org.elasticsearch.ElasticsearchParseException in project elasticsearch by elastic.
The class DateProcessorFactoryTests, method testMatchFieldIsMandatory.
public void testMatchFieldIsMandatory() throws Exception {
    DateProcessor.Factory factory = new DateProcessor.Factory();
    Map<String, Object> config = new HashMap<>();
    String targetField = randomAsciiOfLengthBetween(1, 10);
    config.put("target_field", targetField);
    config.put("formats", Collections.singletonList("dd/MM/yyyyy"));
    try {
        factory.create(null, null, config);
        fail("processor creation should have failed");
    } catch (ElasticsearchParseException e) {
        assertThat(e.getMessage(), containsString("[field] required property is missing"));
    }
}
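For contrast, a configuration that satisfies both checks exercised by these two tests — a present "field" and "formats" supplied as a list — creates the processor without throwing. A minimal sketch reusing the factory from above; the field name "initial_date" is just an illustrative placeholder:

    Map<String, Object> validConfig = new HashMap<>();
    validConfig.put("field", "initial_date");                              // the mandatory source field
    validConfig.put("formats", Collections.singletonList("dd/MM/yyyy"));   // must be a list, not a bare string
    DateProcessor processor = factory.create(null, null, validConfig);     // no ElasticsearchParseException expected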
Use of org.elasticsearch.ElasticsearchParseException in project elasticsearch by elastic.
The class RestMultiSearchTemplateAction, method parseRequest.
/**
 * Parses a {@link RestRequest} body and returns a {@link MultiSearchTemplateRequest}
 */
public static MultiSearchTemplateRequest parseRequest(RestRequest restRequest, boolean allowExplicitIndex) throws IOException {
    MultiSearchTemplateRequest multiRequest = new MultiSearchTemplateRequest();
    RestMultiSearchAction.parseMultiLineRequest(restRequest, multiRequest.indicesOptions(), allowExplicitIndex,
        (searchRequest, bytes) -> {
            try {
                SearchTemplateRequest searchTemplateRequest = RestSearchTemplateAction.parse(bytes);
                if (searchTemplateRequest.getScript() != null) {
                    searchTemplateRequest.setRequest(searchRequest);
                    multiRequest.add(searchTemplateRequest);
                } else {
                    throw new IllegalArgumentException("Malformed search template");
                }
            } catch (IOException e) {
                throw new ElasticsearchParseException("Exception when parsing search template request", e);
            }
        });
    return multiRequest;
}
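On the calling side, the ElasticsearchParseException thrown inside the lambda propagates out of parseRequest, so a handler can report a malformed template body to the client. A minimal sketch of a hypothetical caller, assuming a RestRequest named restRequest is in scope:

    try {
        MultiSearchTemplateRequest multiRequest = RestMultiSearchTemplateAction.parseRequest(restRequest, true);
        // dispatch multiRequest here
    } catch (ElasticsearchParseException e) {
        // at least one search template body in the multi-line request was malformed
    } catch (IOException e) {
        // the raw request body could not be read
    }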
Use of org.elasticsearch.ElasticsearchParseException in project elasticsearch by elastic.
The class UserAgentParser, method init.
private void init(InputStream regexStream) throws IOException {
    // EMPTY is safe here because we don't use namedObject
    XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML)
        .createParser(NamedXContentRegistry.EMPTY, regexStream);
    XContentParser.Token token = yamlParser.nextToken();
    if (token == XContentParser.Token.START_OBJECT) {
        token = yamlParser.nextToken();
        for (; token != null; token = yamlParser.nextToken()) {
            if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("user_agent_parsers")) {
                List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser);
                for (Map<String, String> map : parserConfigurations) {
                    uaPatterns.add(new UserAgentSubpattern(compilePattern(map.get("regex"), map.get("regex_flag")),
                        map.get("family_replacement"), map.get("v1_replacement"), map.get("v2_replacement"),
                        map.get("v3_replacement"), map.get("v4_replacement")));
                }
            } else if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("os_parsers")) {
                List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser);
                for (Map<String, String> map : parserConfigurations) {
                    osPatterns.add(new UserAgentSubpattern(compilePattern(map.get("regex"), map.get("regex_flag")),
                        map.get("os_replacement"), map.get("os_v1_replacement"), map.get("os_v2_replacement"),
                        map.get("os_v3_replacement"), map.get("os_v4_replacement")));
                }
            } else if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("device_parsers")) {
                List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser);
                for (Map<String, String> map : parserConfigurations) {
                    devicePatterns.add(new UserAgentSubpattern(compilePattern(map.get("regex"), map.get("regex_flag")),
                        map.get("device_replacement"), null, null, null, null));
                }
            }
        }
    }
    if (uaPatterns.isEmpty() && osPatterns.isEmpty() && devicePatterns.isEmpty()) {
        throw new ElasticsearchParseException("not a valid regular expression file");
    }
}
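The field names read above imply the shape of the regexes file this method expects: a top-level object with user_agent_parsers, os_parsers, and device_parsers sections, each a list of maps carrying a regex key plus the relevant replacement keys. A minimal sketch of such a file, expressed as a Java string purely for illustration (the regex values are made up):

    // Hypothetical minimal regexes YAML; only keys that init() reads are included.
    String minimalRegexes =
        "user_agent_parsers:\n" +
        "  - regex: '(Chrome)/(\\d+)\\.(\\d+)'\n" +
        "    family_replacement: 'Chrome'\n" +
        "os_parsers:\n" +
        "  - regex: '(Windows NT) (\\d+)\\.(\\d+)'\n" +
        "    os_replacement: 'Windows'\n" +
        "device_parsers:\n" +
        "  - regex: '(iPhone)'\n" +
        "    device_replacement: 'iPhone'\n";
    // A stream over content like this would populate uaPatterns, osPatterns, and devicePatterns;
    // an empty or unrelated file leaves all three empty and triggers the ElasticsearchParseException above.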
Use of org.elasticsearch.ElasticsearchParseException in project elasticsearch by elastic.
The class AttachmentProcessor, method execute.
@Override
public void execute(IngestDocument ingestDocument) {
    Map<String, Object> additionalFields = new HashMap<>();
    byte[] input = ingestDocument.getFieldValueAsBytes(field, ignoreMissing);
    if (input == null && ignoreMissing) {
        return;
    } else if (input == null) {
        throw new IllegalArgumentException("field [" + field + "] is null, cannot parse.");
    }
    try {
        Metadata metadata = new Metadata();
        String parsedContent = TikaImpl.parse(input, metadata, indexedChars);
        if (properties.contains(Property.CONTENT) && Strings.hasLength(parsedContent)) {
            // somehow tika seems to append a newline at the end automatically, let's remove that again
            additionalFields.put(Property.CONTENT.toLowerCase(), parsedContent.trim());
        }
        if (properties.contains(Property.LANGUAGE) && Strings.hasLength(parsedContent)) {
            LanguageIdentifier identifier = new LanguageIdentifier(parsedContent);
            String language = identifier.getLanguage();
            additionalFields.put(Property.LANGUAGE.toLowerCase(), language);
        }
        if (properties.contains(Property.DATE)) {
            String createdDate = metadata.get(TikaCoreProperties.CREATED);
            if (createdDate != null) {
                additionalFields.put(Property.DATE.toLowerCase(), createdDate);
            }
        }
        if (properties.contains(Property.TITLE)) {
            String title = metadata.get(TikaCoreProperties.TITLE);
            if (Strings.hasLength(title)) {
                additionalFields.put(Property.TITLE.toLowerCase(), title);
            }
        }
        if (properties.contains(Property.AUTHOR)) {
            String author = metadata.get("Author");
            if (Strings.hasLength(author)) {
                additionalFields.put(Property.AUTHOR.toLowerCase(), author);
            }
        }
        if (properties.contains(Property.KEYWORDS)) {
            String keywords = metadata.get("Keywords");
            if (Strings.hasLength(keywords)) {
                additionalFields.put(Property.KEYWORDS.toLowerCase(), keywords);
            }
        }
        if (properties.contains(Property.CONTENT_TYPE)) {
            String contentType = metadata.get(Metadata.CONTENT_TYPE);
            if (Strings.hasLength(contentType)) {
                additionalFields.put(Property.CONTENT_TYPE.toLowerCase(), contentType);
            }
        }
        if (properties.contains(Property.CONTENT_LENGTH)) {
            String contentLength = metadata.get(Metadata.CONTENT_LENGTH);
            long length;
            if (Strings.hasLength(contentLength)) {
                length = Long.parseLong(contentLength);
            } else {
                length = parsedContent.length();
            }
            additionalFields.put(Property.CONTENT_LENGTH.toLowerCase(), length);
        }
    } catch (Exception e) {
        throw new ElasticsearchParseException("Error parsing document in field [{}]", e, field);
    }
    ingestDocument.setFieldValue(targetField, additionalFields);
}
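Any Tika failure (corrupt bytes, an unsupported format, an extraction error) surfaces through the single catch block as an ElasticsearchParseException that names the configured source field. A minimal sketch of how a caller or test might observe that failure path; processor and a document carrying unparseable bytes are assumed placeholders, not names from the original:

    try {
        processor.execute(ingestDocument);   // 'processor' and 'ingestDocument' are hypothetical
    } catch (ElasticsearchParseException e) {
        // e.g. "Error parsing document in field [data]" when the configured field is "data"
        assertThat(e.getMessage(), containsString("Error parsing document in field"));
    }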