Use of org.opensearch.search.aggregations.support.ValuesSourceType in project OpenSearch by opensearch-project. The following snippet is from the class AggregatorTestCase, method writeTestDoc.
/**
* Helper method to write a single document with a single value specific to the requested fieldType.
*
* Throws an exception if it encounters an unknown field type, to prevent new ones from sneaking in without
* being tested.
*/
private void writeTestDoc(MappedFieldType fieldType, String fieldName, RandomIndexWriter iw) throws IOException {
    String typeName = fieldType.typeName();
    ValuesSourceType vst = fieldToVST(fieldType);
    Document doc = new Document();
    String json;

    if (vst.equals(CoreValuesSourceType.NUMERIC)) {
        long v;
        if (typeName.equals(NumberFieldMapper.NumberType.DOUBLE.typeName())) {
            double d = Math.abs(randomDouble());
            v = NumericUtils.doubleToSortableLong(d);
            json = "{ \"" + fieldName + "\" : \"" + d + "\" }";
        } else if (typeName.equals(NumberFieldMapper.NumberType.FLOAT.typeName())) {
            float f = Math.abs(randomFloat());
            v = NumericUtils.floatToSortableInt(f);
            json = "{ \"" + fieldName + "\" : \"" + f + "\" }";
        } else if (typeName.equals(NumberFieldMapper.NumberType.HALF_FLOAT.typeName())) {
            // Generate a random float that respects the limits of half float
            float f = Math.abs((randomFloat() * 2 - 1) * 65504);
            v = HalfFloatPoint.halfFloatToSortableShort(f);
            json = "{ \"" + fieldName + "\" : \"" + f + "\" }";
        } else {
            // smallest numeric is a byte so we select the smallest
            v = Math.abs(randomByte());
            json = "{ \"" + fieldName + "\" : \"" + v + "\" }";
        }
        doc.add(new SortedNumericDocValuesField(fieldName, v));
    } else if (vst.equals(CoreValuesSourceType.BYTES)) {
        if (typeName.equals(BinaryFieldMapper.CONTENT_TYPE)) {
            doc.add(new BinaryFieldMapper.CustomBinaryDocValuesField(fieldName, new BytesRef("a").bytes));
        } else {
            doc.add(new SortedSetDocValuesField(fieldName, new BytesRef("a")));
        }
        json = "{ \"" + fieldName + "\" : \"a\" }";
    } else if (vst.equals(CoreValuesSourceType.DATE)) {
        // positive integer because date_nanos gets unhappy with large longs
        long v = Math.abs(randomInt());
        doc.add(new SortedNumericDocValuesField(fieldName, v));
        json = "{ \"" + fieldName + "\" : \"" + v + "\" }";
    } else if (vst.equals(CoreValuesSourceType.BOOLEAN)) {
        long v = randomBoolean() ? 0 : 1;
        doc.add(new SortedNumericDocValuesField(fieldName, v));
        json = "{ \"" + fieldName + "\" : \"" + (v == 0 ? "false" : "true") + "\" }";
    } else if (vst.equals(CoreValuesSourceType.IP)) {
        InetAddress ip = randomIp(randomBoolean());
        json = "{ \"" + fieldName + "\" : \"" + NetworkAddress.format(ip) + "\" }";
        doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(InetAddressPoint.encode(ip))));
    } else if (vst.equals(CoreValuesSourceType.RANGE)) {
        Object start;
        Object end;
        RangeType rangeType;
        if (typeName.equals(RangeType.DOUBLE.typeName())) {
            start = randomDouble();
            end = RangeType.DOUBLE.nextUp(start);
            rangeType = RangeType.DOUBLE;
        } else if (typeName.equals(RangeType.FLOAT.typeName())) {
            start = randomFloat();
            end = RangeType.FLOAT.nextUp(start);
            rangeType = RangeType.FLOAT;
        } else if (typeName.equals(RangeType.IP.typeName())) {
            boolean v4 = randomBoolean();
            start = randomIp(v4);
            end = RangeType.IP.nextUp(start);
            rangeType = RangeType.IP;
        } else if (typeName.equals(RangeType.LONG.typeName())) {
            start = randomLong();
            end = RangeType.LONG.nextUp(start);
            rangeType = RangeType.LONG;
        } else if (typeName.equals(RangeType.INTEGER.typeName())) {
            start = randomInt();
            end = RangeType.INTEGER.nextUp(start);
            rangeType = RangeType.INTEGER;
        } else if (typeName.equals(RangeType.DATE.typeName())) {
            start = randomNonNegativeLong();
            end = RangeType.DATE.nextUp(start);
            rangeType = RangeType.DATE;
        } else {
            throw new IllegalStateException("Unknown type of range [" + typeName + "]");
        }
        final RangeFieldMapper.Range range = new RangeFieldMapper.Range(rangeType, start, end, true, true);
        doc.add(new BinaryDocValuesField(fieldName, rangeType.encodeRanges(Collections.singleton(range))));
        json = "{ \"" + fieldName + "\" : { \n"
            + " \"gte\" : \"" + start + "\",\n"
            + " \"lte\" : \"" + end + "\"\n"
            + " }}";
    } else if (vst.equals(CoreValuesSourceType.GEOPOINT)) {
        double lat = randomDouble();
        double lon = randomDouble();
        doc.add(new LatLonDocValuesField(fieldName, lat, lon));
        json = "{ \"" + fieldName + "\" : \"[" + lon + "," + lat + "]\" }";
    } else {
        throw new IllegalStateException("Unknown field type [" + typeName + "]");
    }

    doc.add(new StoredField("_source", new BytesRef(json)));
    iw.addDocument(doc);
}
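
The fieldToVST helper called above is not part of this snippet. A minimal sketch of what it could look like, assuming the ValuesSourceType is resolved through the field's fielddata builder (the empty index name and the throwing lookup supplier are placeholders, and the exact fielddataBuilder/build signatures vary across OpenSearch versions):

// Sketch only: resolve a field's ValuesSourceType by building its fielddata.
// The search lookup is never consulted here, so it simply throws if touched.
private ValuesSourceType fieldToVST(MappedFieldType fieldType) {
    return fieldType.fielddataBuilder("", () -> {
        throw new UnsupportedOperationException();
    }).build(null, null).getValuesSourceType();
}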
Use of org.opensearch.search.aggregations.support.ValuesSourceType in project OpenSearch by opensearch-project. The following snippet is from the class AggregatorTestCase, method testSupportedFieldTypes.
/**
* This test will validate that an aggregator succeeds or fails to run against all the field types
* that are registered in {@link IndicesModule} (e.g. all the core field types). An aggregator
* is provided by the implementor class, and it is executed against each field type in turn. If
* an exception is thrown when the field is supported, that will fail the test. Similarly, if
* an exception _is not_ thrown when a field is unsupported, that will also fail the test.
*
* Exception types/messages are not currently checked, just presence/absence of an exception.
*/
public void testSupportedFieldTypes() throws IOException {
    MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry();
    Settings settings = Settings.builder().put("index.version.created", Version.CURRENT.id).build();
    String fieldName = "typeTestFieldName";
    List<ValuesSourceType> supportedVSTypes = getSupportedValuesSourceTypes();
    List<String> unsupportedMappedFieldTypes = unsupportedMappedFieldTypes();

    if (supportedVSTypes.isEmpty()) {
        // If the test says it doesn't support any VStypes, it has not been converted yet so skip
        return;
    }

    for (Map.Entry<String, Mapper.TypeParser> mappedType : mapperRegistry.getMapperParsers().entrySet()) {
        // Some field types should not be tested, or require more work and are not ready yet
        if (TYPE_TEST_DENYLIST.contains(mappedType.getKey())) {
            continue;
        }

        Map<String, Object> source = new HashMap<>();
        source.put("type", mappedType.getKey());
        // Text is the only field type that doesn't support doc values; it relies on fielddata instead
        if (mappedType.getKey().equals(TextFieldMapper.CONTENT_TYPE) == false) {
            source.put("doc_values", "true");
        }

        Mapper.Builder builder = mappedType.getValue().parse(fieldName, source, new MockParserContext());
        FieldMapper mapper = (FieldMapper) builder.build(new BuilderContext(settings, new ContentPath()));
        MappedFieldType fieldType = mapper.fieldType();

        // Non-aggregatable fields are not testable (they will throw an error on all aggs anyway), so skip
        if (fieldType.isAggregatable() == false) {
            continue;
        }

        try (Directory directory = newDirectory()) {
            RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
            writeTestDoc(fieldType, fieldName, indexWriter);
            indexWriter.close();

            try (IndexReader indexReader = DirectoryReader.open(directory)) {
                IndexSearcher indexSearcher = newIndexSearcher(indexReader);
                AggregationBuilder aggregationBuilder = createAggBuilderForTypeTest(fieldType, fieldName);
                ValuesSourceType vst = fieldToVST(fieldType);

                // TODO in the future we can make this more explicit with expectThrows(), when the exceptions are standardized
                AssertionError failure = null;
                try {
                    searchAndReduce(indexSearcher, new MatchAllDocsQuery(), aggregationBuilder, fieldType);
                    if (supportedVSTypes.contains(vst) == false || unsupportedMappedFieldTypes.contains(fieldType.typeName())) {
                        failure = new AssertionError(
                            "Aggregator ["
                                + aggregationBuilder.getType()
                                + "] should not support field type ["
                                + fieldType.typeName()
                                + "] but executing against the field did not throw an exception"
                        );
                    }
                } catch (Exception | AssertionError e) {
                    if (supportedVSTypes.contains(vst) && unsupportedMappedFieldTypes.contains(fieldType.typeName()) == false) {
                        failure = new AssertionError(
                            "Aggregator ["
                                + aggregationBuilder.getType()
                                + "] supports field type ["
                                + fieldType.typeName()
                                + "] but executing against the field threw an exception: ["
                                + e.getMessage()
                                + "]",
                            e
                        );
                    }
                }
                if (failure != null) {
                    throw failure;
                }
            }
        }
    }
}
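
getSupportedValuesSourceTypes(), unsupportedMappedFieldTypes(), and createAggBuilderForTypeTest() are the hooks a concrete aggregator test overrides to opt into this check. A minimal sketch of such overrides, using the min aggregation purely as an illustration (the supported-type list must be matched to the aggregator actually under test):

// Illustrative overrides in a hypothetical subclass of AggregatorTestCase.
@Override
protected List<ValuesSourceType> getSupportedValuesSourceTypes() {
    return Arrays.asList(CoreValuesSourceType.NUMERIC, CoreValuesSourceType.DATE, CoreValuesSourceType.BOOLEAN);
}

@Override
protected AggregationBuilder createAggBuilderForTypeTest(MappedFieldType fieldType, String fieldName) {
    return new MinAggregationBuilder("min_agg").field(fieldName);
}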