Example usage of org.elasticsearch.index.mapper.ParsedDocument in the elastic/elasticsearch project.
From the class PercolatorFieldMapperTests, method testImplicitlySetDefaultScriptLang:
// Verifies that when a percolator query contains a script with no explicit language,
// the default script language is filled in before the query is stored in the
// query-builder field. Covers both a top-level "script" query and a script nested
// inside a function_score's script_score function.
public void testImplicitlySetDefaultScriptLang() throws Exception {
addQueryMapping();
// Build a "script" query; randomly use the short form ("script": "return true")
// or the object form ({"inline": "return true"}) — neither specifies a language.
XContentBuilder query = jsonBuilder();
query.startObject();
query.startObject("script");
if (randomBoolean()) {
query.field("script", "return true");
} else {
query.startObject("script");
query.field("inline", "return true");
query.endObject();
}
query.endObject();
query.endObject();
// Index a document carrying the query so the percolator field mapper parses it
// and serializes the (rewritten) query builder into its binary field.
ParsedDocument doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject().rawField(fieldName, new BytesArray(query.string()), query.contentType()).endObject().bytes());
BytesRef querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue();
Map<String, Object> parsedQuery = XContentHelper.convertToMap(new BytesArray(querySource), true).v2();
// The stored query must carry the default script language.
assertEquals(Script.DEFAULT_SCRIPT_LANG, XContentMapValues.extractValue("script.script.lang", parsedQuery));
// Second case: the same language-less script nested inside function_score.
query = jsonBuilder();
query.startObject();
query.startObject("function_score");
query.startArray("functions");
query.startObject();
query.startObject("script_score");
if (randomBoolean()) {
query.field("script", "return true");
} else {
query.startObject("script");
query.field("inline", "return true");
query.endObject();
}
query.endObject();
query.endObject();
query.endArray();
query.endObject();
query.endObject();
doc = mapperService.documentMapper(typeName).parse("test", typeName, "1", XContentFactory.jsonBuilder().startObject().rawField(fieldName, new BytesArray(query.string()), query.contentType()).endObject().bytes());
querySource = doc.rootDoc().getFields(fieldType.queryBuilderField.name())[0].binaryValue();
parsedQuery = XContentHelper.convertToMap(new BytesArray(querySource), true).v2();
// extractValue returns a List here because "functions" is an array; the single
// function's script must also carry the default language.
assertEquals(Script.DEFAULT_SCRIPT_LANG, ((List) XContentMapValues.extractValue("function_score.functions.script_score.script.lang", parsedQuery)).get(0));
}
Example usage of org.elasticsearch.index.mapper.ParsedDocument in the elastic/elasticsearch project.
From the class SizeMappingTests, method testSizeEnabled:
// With the _size field enabled, parsing a document must index _size both as a
// stored field and as a point field (so it is retrievable and range-queryable).
public void testSizeEnabled() throws Exception {
    IndexService service = createIndex("test", Settings.EMPTY, "type", "_size", "enabled=true");
    DocumentMapper docMapper = service.mapperService().documentMapper("type");

    BytesReference source = XContentFactory.jsonBuilder().startObject().field("field", "value").endObject().bytes();
    ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", source, XContentType.JSON));

    // Scan every Lucene field produced for _size and record which flavors exist.
    boolean hasStoredField = false;
    boolean hasPointField = false;
    for (IndexableField sizeField : doc.rootDoc().getFields("_size")) {
        if (sizeField.fieldType().stored()) {
            hasStoredField = true;
        }
        if (sizeField.fieldType().pointDimensionCount() > 0) {
            hasPointField = true;
        }
    }
    assertTrue(hasStoredField);
    assertTrue(hasPointField);
}
Example usage of org.elasticsearch.index.mapper.ParsedDocument in the elastic/elasticsearch project.
From the class ShadowEngineTests, helper method testParsedDocument:
/**
 * Builds a {@link ParsedDocument} for engine tests, attaching the metadata
 * fields every indexed document needs (_uid, _version, sequence-number fields)
 * plus one point field.
 */
private ParsedDocument testParsedDocument(String id, String type, String routing, ParseContext.Document document, BytesReference source, Mapping mappingsUpdate) {
    Field uid = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
    Field version = new NumericDocValuesField("_version", 0);
    SeqNoFieldMapper.SequenceID sequenceID = SeqNoFieldMapper.SequenceID.emptySeqID();

    document.add(uid);
    document.add(version);
    document.add(sequenceID.seqNo);
    document.add(sequenceID.seqNoDocValue);
    document.add(sequenceID.primaryTerm);
    // Include a point field so that points report memory/disk usage.
    document.add(new LongPoint("point_field", 42));

    return new ParsedDocument(version, sequenceID, id, type, routing, Arrays.asList(document), source, XContentType.JSON, mappingsUpdate);
}
Example usage of org.elasticsearch.index.mapper.ParsedDocument in the elastic/elasticsearch project.
From the class ShadowEngineTests, method testFailEngineOnCorruption:
// Verifies that once the replica's directory starts throwing I/O errors, the
// shadow engine fails: refresh throws, and any subsequently acquired searcher
// operations end in AlreadyClosedException because the engine has shut down.
public void testFailEngineOnCorruption() throws IOException {
ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
primaryEngine.index(indexForDoc(doc));
primaryEngine.flush();
// Make every subsequent I/O operation on the replica's directory throw.
MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(replicaEngine.config().getStore().directory(), MockDirectoryWrapper.class);
leaf.setRandomIOExceptionRate(1.0);
leaf.setRandomIOExceptionRateOnOpen(1.0);
try {
replicaEngine.refresh("foo");
fail("exception expected");
} catch (Exception ex) {
// expected: refresh cannot open the failing directory
}
try {
Engine.Searcher searchResult = replicaEngine.acquireSearcher("test");
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1));
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1));
searchResult.close();
fail("exception expected");
} catch (AlreadyClosedException ex) {
// all is well
}
}
Example usage of org.elasticsearch.index.mapper.ParsedDocument in the elastic/elasticsearch project.
From the class ShadowEngineTests, method testShadowEngineCreationRetry:
// Verifies that a shadow engine started against an empty directory retries
// until a primary (InternalEngine) creates the index, after which the shadow
// engine creation succeeds.
public void testShadowEngineCreationRetry() throws Exception {
final Path srDir = createTempDir();
final Store srStore = createStore(srDir);
// Start from an empty directory so the shadow engine initially has no index.
Lucene.cleanLuceneIndex(srStore.directory());
final AtomicBoolean succeeded = new AtomicBoolean(false);
final CountDownLatch latch = new CountDownLatch(1);
// Create a shadow Engine, which will freak out because there is no
// index yet
Thread t = new Thread(new Runnable() {
@Override
public void run() {
try {
latch.await();
} catch (InterruptedException e) {
// ignore interruptions
}
// Blocks/retries until the primary below has created the index.
try (ShadowEngine srEngine = createShadowEngine(srStore)) {
succeeded.set(true);
} catch (Exception e) {
fail("should have been able to create the engine!");
}
}
});
t.start();
// count down latch
// now shadow engine should try to be created
latch.countDown();
// Create an InternalEngine, which creates the index so the shadow
// replica will handle it correctly
Store pStore = createStore(srDir);
InternalEngine pEngine = createInternalEngine(pStore, createTempDir("translog-primary"));
// create a document
ParseContext.Document document = testDocumentWithTextField();
document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE));
ParsedDocument doc = testParsedDocument("1", "test", null, document, B_1, null);
pEngine.index(indexForDoc(doc));
// Commit so the index becomes visible to the waiting shadow engine.
pEngine.flush(true, true);
t.join();
assertTrue("ShadowEngine should have been able to be created", succeeded.get());
// (shadow engine is already shut down in the try-with-resources)
IOUtils.close(srStore, pEngine, pStore);
}
Aggregations