use of org.elasticsearch.index.mapper.MappedFieldType in project crate by crate.
the class DoubleColumnReferenceTest method testFieldCacheExpression.
@Test
public void testFieldCacheExpression() throws Exception {
    MappedFieldType fieldType = DoubleFieldMapper.Defaults.FIELD_TYPE.clone();
    fieldType.setNames(new MappedFieldType.Names("d"));
    DoubleColumnReference doubleColumn = new DoubleColumnReference(column, fieldType);
    doubleColumn.startCollect(ctx);
    doubleColumn.setNextReader(readerContext);
    IndexSearcher searcher = new IndexSearcher(readerContext.reader());
    TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10);
    double d = 0.5;
    for (ScoreDoc doc : topDocs.scoreDocs) {
        doubleColumn.setNextDocId(doc.doc);
        assertThat(doubleColumn.value(), is(d));
        d++;
    }
}
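The expected values (0.5, 1.5, … for successive documents) come from the test's base-class fixture, which is not part of this snippet and which indexes the documents through the Elasticsearch mapping machinery. As a purely illustrative sketch, writing such a sequence with plain Lucene doc values would look roughly like this (class and method names are made up, not the actual crate base class):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.RAMDirectory;

// Hypothetical fixture sketch: index ten documents whose "d" doc value is i + 0.5,
// the arithmetic sequence the assertions above iterate over.
public class DoubleColumnFixtureSketch {

    static LeafReaderContext indexDoubles() throws Exception {
        RAMDirectory directory = new RAMDirectory();
        IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()));
        for (int i = 0; i < 10; i++) {
            Document doc = new Document();
            doc.add(new DoubleDocValuesField("d", i + 0.5)); // per-document value stored as a doc value
            writer.addDocument(doc);
        }
        writer.close(); // flushes and commits; everything lands in a single segment here
        DirectoryReader reader = DirectoryReader.open(directory);
        return reader.leaves().get(0); // the single leaf context a test like the one above would use
    }
}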
use of org.elasticsearch.index.mapper.MappedFieldType in project crate by crate.
the class FloatColumnReferenceTest method testFieldCacheExpression.
@Test
public void testFieldCacheExpression() throws Exception {
    MappedFieldType fieldType = FloatFieldMapper.Defaults.FIELD_TYPE.clone();
    fieldType.setNames(new MappedFieldType.Names(column));
    FloatColumnReference floatColumn = new FloatColumnReference(column, fieldType);
    floatColumn.startCollect(ctx);
    floatColumn.setNextReader(readerContext);
    IndexSearcher searcher = new IndexSearcher(readerContext.reader());
    TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10);
    float f = -0.5f;
    for (ScoreDoc doc : topDocs.scoreDocs) {
        floatColumn.setNextDocId(doc.doc);
        assertThat(floatColumn.value(), is(f));
        f++;
    }
}
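The float test mirrors the double test line for line; only the cloned default field type and the starting value (-0.5f, suggesting the fixture indexes i - 0.5) differ. The shared walk over the matched documents could be factored into one helper, sketched here using only the calls already shown above (treating the column references as crate LuceneCollectorExpression instances is an assumption, as are the fixture fields ctx and readerContext):

// Sketch of the iteration both tests share: bind the expression to the collector
// context and leaf reader, then compare each doc's value against an arithmetic
// sequence starting at `start`.
private void assertColumnValues(LuceneCollectorExpression<? extends Number> column, double start) throws Exception {
    column.startCollect(ctx);
    column.setNextReader(readerContext);
    IndexSearcher searcher = new IndexSearcher(readerContext.reader());
    TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 10);
    double expected = start;
    for (ScoreDoc doc : topDocs.scoreDocs) {
        column.setNextDocId(doc.doc);
        assertThat(column.value().doubleValue(), is(expected));
        expected++;
    }
}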
use of org.elasticsearch.index.mapper.MappedFieldType in project elasticsearch by elastic.
the class TermVectorsService method addGeneratedTermVectors.
private static Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetResult get, Fields termVectorsByField,
                                              TermVectorsRequest request, Set<String> selectedFields) throws IOException {
    /* only keep valid fields */
    Set<String> validFields = new HashSet<>();
    for (String field : selectedFields) {
        MappedFieldType fieldType = indexShard.mapperService().fullName(field);
        if (!isValidField(fieldType)) {
            continue;
        }
        // already retrieved, only if the analyzer hasn't been overridden at the field
        if (fieldType.storeTermVectors() &&
                (request.perFieldAnalyzer() == null || !request.perFieldAnalyzer().containsKey(field))) {
            continue;
        }
        validFields.add(field);
    }
    if (validFields.isEmpty()) {
        return termVectorsByField;
    }
    /* generate term vectors from fetched document fields */
    String[] getFields = validFields.toArray(new String[validFields.size() + 1]);
    getFields[getFields.length - 1] = SourceFieldMapper.NAME;
    GetResult getResult = indexShard.getService().get(get, request.id(), request.type(), getFields, null);
    Fields generatedTermVectors = generateTermVectors(indexShard, getResult.sourceAsMap(), getResult.getFields().values(),
            request.offsets(), request.perFieldAnalyzer(), validFields);
    /* merge with existing Fields */
    if (termVectorsByField == null) {
        return generatedTermVectors;
    } else {
        return mergeFields(termVectorsByField, generatedTermVectors);
    }
}
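The isValidField helper referenced above is not part of this snippet; in this version of Elasticsearch it boils down to requiring a mapped, indexed string field. A rough paraphrase of that check (not the verbatim source):

private static boolean isValidField(MappedFieldType fieldType) {
    // must be mapped and must be a string-typed field
    if (fieldType instanceof StringFieldType == false) {
        return false;
    }
    // and the field must actually be indexed
    if (fieldType.indexOptions() == IndexOptions.NONE) {
        return false;
    }
    return true;
}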
use of org.elasticsearch.index.mapper.MappedFieldType in project elasticsearch by elastic.
the class TermVectorsService method generateTermVectorsFromDoc.
private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVectorsRequest request) throws IOException {
    // parse the document, at the moment we do update the mapping, just like percolate
    ParsedDocument parsedDocument = parseDocument(indexShard, indexShard.shardId().getIndexName(), request.type(),
            request.doc(), request.xContentType());
    // select the right fields and generate term vectors
    ParseContext.Document doc = parsedDocument.rootDoc();
    Set<String> seenFields = new HashSet<>();
    Collection<GetField> getFields = new HashSet<>();
    for (IndexableField field : doc.getFields()) {
        MappedFieldType fieldType = indexShard.mapperService().fullName(field.name());
        if (!isValidField(fieldType)) {
            continue;
        }
        if (request.selectedFields() != null && !request.selectedFields().contains(field.name())) {
            continue;
        }
        if (seenFields.contains(field.name())) {
            continue;
        } else {
            seenFields.add(field.name());
        }
        String[] values = doc.getValues(field.name());
        getFields.add(new GetField(field.name(), Arrays.asList((Object[]) values)));
    }
    return generateTermVectors(indexShard, XContentHelper.convertToMap(parsedDocument.source(), true, request.xContentType()).v2(),
            getFields, request.offsets(), request.perFieldAnalyzer(), seenFields);
}
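generateTermVectors, which both methods above delegate to, is also not shown; conceptually it runs the collected field values through the (possibly per-field overridden) analyzer in an in-memory single-document index and returns that index's terms. A standalone sketch of that idea with plain Lucene (the analyzer choice and method name are illustrative, not the Elasticsearch implementation):

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.memory.MemoryIndex;

// Illustrative only: analyze a field's values into a one-document MemoryIndex and
// expose the result as a Fields instance, i.e. term vectors generated on the fly.
static Fields termVectorsFor(String field, Iterable<String> values, boolean withOffsets) throws Exception {
    Analyzer analyzer = new StandardAnalyzer();       // stand-in for the per-field analyzer
    MemoryIndex index = new MemoryIndex(withOffsets); // store offsets only if requested
    for (String value : values) {
        index.addField(field, value, analyzer);
    }
    return MultiFields.getFields(index.createSearcher().getIndexReader());
}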
use of org.elasticsearch.index.mapper.MappedFieldType in project elasticsearch by elastic.
the class ValuesSourceConfig method resolve.
/**
 * Resolve a {@link ValuesSourceConfig} given configuration parameters.
 */
public static <VS extends ValuesSource> ValuesSourceConfig<VS> resolve(QueryShardContext context, ValueType valueType,
        String field, Script script, Object missing, DateTimeZone timeZone, String format) {
    if (field == null) {
        if (script == null) {
            @SuppressWarnings("unchecked")
            ValuesSourceConfig<VS> config = new ValuesSourceConfig<>(ValuesSourceType.ANY);
            config.format(resolveFormat(null, valueType));
            return config;
        }
        ValuesSourceType valuesSourceType = valueType != null ? valueType.getValuesSourceType() : ValuesSourceType.ANY;
        if (valuesSourceType == ValuesSourceType.ANY) {
            // the specific value source type is undefined, but for scripts, we need to have a specific
            // value source type to know how to handle the script values, so we fallback on Bytes
            valuesSourceType = ValuesSourceType.BYTES;
        }
        ValuesSourceConfig<VS> config = new ValuesSourceConfig<>(valuesSourceType);
        config.missing(missing);
        config.timezone(timeZone);
        config.format(resolveFormat(format, valueType));
        config.script(createScript(script, context));
        config.scriptValueType(valueType);
        return config;
    }
    MappedFieldType fieldType = context.fieldMapper(field);
    if (fieldType == null) {
        ValuesSourceType valuesSourceType = valueType != null ? valueType.getValuesSourceType() : ValuesSourceType.ANY;
        ValuesSourceConfig<VS> config = new ValuesSourceConfig<>(valuesSourceType);
        config.missing(missing);
        config.timezone(timeZone);
        config.format(resolveFormat(format, valueType));
        config.unmapped(true);
        if (valueType != null) {
            // todo do we really need this for unmapped?
            config.scriptValueType(valueType);
        }
        return config;
    }
    IndexFieldData<?> indexFieldData = context.getForField(fieldType);
    ValuesSourceConfig<VS> config;
    if (valueType == null) {
        if (indexFieldData instanceof IndexNumericFieldData) {
            config = new ValuesSourceConfig<>(ValuesSourceType.NUMERIC);
        } else if (indexFieldData instanceof IndexGeoPointFieldData) {
            config = new ValuesSourceConfig<>(ValuesSourceType.GEOPOINT);
        } else {
            config = new ValuesSourceConfig<>(ValuesSourceType.BYTES);
        }
    } else {
        config = new ValuesSourceConfig<>(valueType.getValuesSourceType());
    }
    config.fieldContext(new FieldContext(field, indexFieldData, fieldType));
    config.missing(missing);
    config.timezone(timeZone);
    config.script(createScript(script, context));
    config.format(fieldType.docValueFormat(format, timeZone));
    return config;
}
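A typical call site is an aggregator factory that already knows the target field and value type; an illustrative call (the context variable and field name are placeholders, not taken from the source above):

// Resolve a numeric values source for a hypothetical "price" field:
// no script, no missing value, no time zone, no custom format.
ValuesSourceConfig<ValuesSource.Numeric> config =
        ValuesSourceConfig.resolve(queryShardContext, ValueType.DOUBLE, "price", null, null, null, null);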