use of org.elasticsearch.Version in project elasticsearch by elastic.
In the class GeoDistanceSortBuilderIT, the method testManyToManyGeoPointsWithDifferentFormats:
public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionException, InterruptedException, IOException {
    /**   q     d1       d2
     * |4  o|   x    |   x
     * |    |        |
     * |3  o|  x     |  x
     * |    |        |
     * |2  o| x      | x
     * |    |        |
     * |1  o|x       |x
     * |______________________
     * 1   2   3   4   5   6
     */
    Version version = randomBoolean() ? Version.CURRENT
            : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
    Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
    assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point"));
    XContentBuilder d1Builder = jsonBuilder();
    GeoPoint[] d1Points = { new GeoPoint(2.5, 1), new GeoPoint(2.75, 2), new GeoPoint(3, 3), new GeoPoint(3.25, 4) };
    createShuffeldJSONArray(d1Builder, d1Points);
    XContentBuilder d2Builder = jsonBuilder();
    GeoPoint[] d2Points = { new GeoPoint(4.5, 1), new GeoPoint(4.75, 2), new GeoPoint(5, 3), new GeoPoint(5.25, 4) };
    createShuffeldJSONArray(d2Builder, d2Points);
    indexRandom(true,
            client().prepareIndex("index", "type", "d1").setSource(d1Builder),
            client().prepareIndex("index", "type", "d2").setSource(d2Builder));
    List<String> qHashes = new ArrayList<>();
    List<GeoPoint> qPoints = new ArrayList<>();
    createQPoints(qHashes, qPoints);
    GeoDistanceSortBuilder geoDistanceSortBuilder = null;
    // Feed the four query points to the sort builder in random order, randomly mixing
    // the geohash and GeoPoint representations of each point.
    for (int i = 0; i < 4; i++) {
        int at = randomInt(3 - i);
        if (randomBoolean()) {
            if (geoDistanceSortBuilder == null) {
                geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, qHashes.get(at));
            } else {
                geoDistanceSortBuilder.geohashes(qHashes.get(at));
            }
        } else {
            if (geoDistanceSortBuilder == null) {
                geoDistanceSortBuilder = new GeoDistanceSortBuilder(LOCATION_FIELD, qPoints.get(at));
            } else {
                geoDistanceSortBuilder.points(qPoints.get(at));
            }
        }
        qHashes.remove(at);
        qPoints.remove(at);
    }
    SearchResponse searchResponse = client().prepareSearch().setQuery(matchAllQuery())
            .addSort(geoDistanceSortBuilder.sortMode(SortMode.MIN).order(SortOrder.ASC))
            .execute().actionGet();
    assertOrderedSearchHits(searchResponse, "d1", "d2");
    assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0],
            closeTo(GeoDistance.ARC.calculate(2.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1));
    assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0],
            closeTo(GeoDistance.ARC.calculate(4.5, 1, 2, 1, DistanceUnit.METERS), 1.e-1));
    searchResponse = client().prepareSearch().setQuery(matchAllQuery())
            .addSort(geoDistanceSortBuilder.sortMode(SortMode.MAX).order(SortOrder.ASC))
            .execute().actionGet();
    assertOrderedSearchHits(searchResponse, "d1", "d2");
    assertThat((Double) searchResponse.getHits().getAt(0).getSortValues()[0],
            closeTo(GeoDistance.ARC.calculate(3.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1));
    assertThat((Double) searchResponse.getHits().getAt(1).getSortValues()[0],
            closeTo(GeoDistance.ARC.calculate(5.25, 4, 2, 1, DistanceUnit.METERS), 1.e-1));
}
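The expected sort values in the assertions are plain arc distances; as a worked check outside the test, a small sketch using only the GeoDistance and DistanceUnit calls already shown above:

    // The MIN sort value expected for d1: distance from d1's closest point (lat 2.5, lon 1)
    // to the query point at (lat 2, lon 1) that the assertions above compare against.
    double expectedMinD1 = GeoDistance.ARC.calculate(2.5, 1, 2, 1, DistanceUnit.METERS);
    // About 55.6 km, since 0.5 degrees of latitude is roughly 55.6 km along a meridian.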
use of org.elasticsearch.Version in project elasticsearch by elastic.
In the class TCPTransportTests, the method testCompressRequest:
public void testCompressRequest() throws IOException {
    final boolean compressed = randomBoolean();
    final AtomicBoolean called = new AtomicBoolean(false);
    Req request = new Req(randomRealisticUnicodeOfLengthBetween(10, 100));
    ThreadPool threadPool = new TestThreadPool(TCPTransportTests.class.getName());
    try {
        TcpTransport transport = new TcpTransport("test",
                Settings.builder().put("transport.tcp.compress", compressed).build(),
                threadPool, new BigArrays(Settings.EMPTY, null), null, null, null) {

            @Override
            protected InetSocketAddress getLocalAddress(Object o) {
                return null;
            }

            @Override
            protected Object bind(String name, InetSocketAddress address) throws IOException {
                return null;
            }

            @Override
            protected void closeChannels(List channel) throws IOException {
            }

            // sendMessage is overridden to decode the wire format produced by the transport
            // and to assert the request id, the version and the compression flag.
            @Override
            protected void sendMessage(Object o, BytesReference reference, Runnable sendListener) throws IOException {
                StreamInput streamIn = reference.streamInput();
                streamIn.skip(TcpHeader.MARKER_BYTES_SIZE);
                int len = streamIn.readInt();
                long requestId = streamIn.readLong();
                assertEquals(42, requestId);
                byte status = streamIn.readByte();
                Version version = Version.fromId(streamIn.readInt());
                assertEquals(Version.CURRENT, version);
                assertEquals(compressed, TransportStatus.isCompress(status));
                called.compareAndSet(false, true);
                if (compressed) {
                    final int bytesConsumed = TcpHeader.HEADER_SIZE;
                    streamIn = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed))
                            .streamInput(streamIn);
                }
                threadPool.getThreadContext().readHeaders(streamIn);
                assertEquals("foobar", streamIn.readString());
                Req readReq = new Req("");
                readReq.readFrom(streamIn);
                assertEquals(request.value, readReq.value);
            }

            @Override
            protected NodeChannels connectToChannels(DiscoveryNode node, ConnectionProfile profile) throws IOException {
                return new NodeChannels(node, new Object[profile.getNumConnections()], profile);
            }

            @Override
            protected boolean isOpen(Object o) {
                return false;
            }

            @Override
            public long serverOpen() {
                return 0;
            }

            @Override
            public NodeChannels getConnection(DiscoveryNode node) {
                return new NodeChannels(node, new Object[MockTcpTransport.LIGHT_PROFILE.getNumConnections()],
                        MockTcpTransport.LIGHT_PROFILE);
            }
        };
        DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT);
        Transport.Connection connection = transport.getConnection(node);
        connection.sendRequest(42, "foobar", request, TransportRequestOptions.EMPTY);
        assertTrue(called.get());
    } finally {
        ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
    }
}
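The version assertion works because a Version crosses the wire as its integer id; a minimal round-trip sketch (Version.id, the public id field, is the only member not already used above):

    // Sketch: serialize a Version as its numeric id, restore it with Version.fromId().
    int wireId = Version.CURRENT.id;
    Version restored = Version.fromId(wireId);
    assert restored.equals(Version.CURRENT);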
use of org.elasticsearch.Version in project elasticsearch by elastic.
In the class PercolateQueryBuilder, the method doToQuery:
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
    // Call nowInMillis() so that this query becomes un-cacheable since we
    // can't be sure that it doesn't use now or scripts
    context.nowInMillis();
    if (indexedDocumentIndex != null || indexedDocumentType != null || indexedDocumentId != null) {
        throw new IllegalStateException("query builder must be rewritten first");
    }
    if (document == null) {
        throw new IllegalStateException("no document to percolate");
    }
    MapperService mapperService = context.getMapperService();
    DocumentMapperForType docMapperForType = mapperService.documentMapperWithAutoCreate(documentType);
    DocumentMapper docMapper = docMapperForType.getDocumentMapper();
    ParsedDocument doc = docMapper.parse(source(context.index().getName(), documentType, "_temp_id", document, documentXContentType));
    FieldNameAnalyzer fieldNameAnalyzer = (FieldNameAnalyzer) docMapper.mappers().indexAnalyzer();
    // Need this custom impl because FieldNameAnalyzer is strict and the percolator sometimes isn't when
    // 'index.percolator.map_unmapped_fields_as_string' is enabled:
    Analyzer analyzer = new DelegatingAnalyzerWrapper(Analyzer.PER_FIELD_REUSE_STRATEGY) {
        @Override
        protected Analyzer getWrappedAnalyzer(String fieldName) {
            Analyzer analyzer = fieldNameAnalyzer.analyzers().get(fieldName);
            if (analyzer != null) {
                return analyzer;
            } else {
                return context.getIndexAnalyzers().getDefaultIndexAnalyzer();
            }
        }
    };
    final IndexSearcher docSearcher;
    if (doc.docs().size() > 1) {
        assert docMapper.hasNestedObjects();
        docSearcher = createMultiDocumentSearcher(analyzer, doc);
    } else {
        MemoryIndex memoryIndex = MemoryIndex.fromDocument(doc.rootDoc(), analyzer, true, false);
        docSearcher = memoryIndex.createSearcher();
        docSearcher.setQueryCache(null);
    }
    Version indexVersionCreated = context.getIndexSettings().getIndexVersionCreated();
    boolean mapUnmappedFieldsAsString = context.getIndexSettings().getValue(PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING);
    // We have to make a copy of the QueryShardContext here so we can have an unfrozen version for parsing the legacy
    // percolator queries
    QueryShardContext percolateShardContext = new QueryShardContext(context);
    MappedFieldType fieldType = context.fieldMapper(field);
    if (fieldType == null) {
        throw new QueryShardException(context, "field [" + field + "] does not exist");
    }
    if (!(fieldType instanceof PercolatorFieldMapper.FieldType)) {
        throw new QueryShardException(context, "expected field [" + field + "] to be of type [percolator], but is of type [" + fieldType.typeName() + "]");
    }
    PercolatorFieldMapper.FieldType pft = (PercolatorFieldMapper.FieldType) fieldType;
    PercolateQuery.QueryStore queryStore = createStore(pft, percolateShardContext, mapUnmappedFieldsAsString);
    return pft.percolateQuery(documentType, queryStore, document, docSearcher);
}
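indexVersionCreated is fetched because, as the comment about legacy percolator queries hints, behaviour is commonly gated on the version an index was created with; an illustrative sketch of that gating pattern (the branch and the V_5_0_0_alpha1 constant are chosen for illustration and are not taken from this method):

    // Illustrative only: gate legacy handling on the index's created-on version.
    Version created = context.getIndexSettings().getIndexVersionCreated();
    if (created.before(Version.V_5_0_0_alpha1)) {
        // queries were indexed by a pre-5.0 node: take the legacy percolator path
    } else {
        // queries are stored in the current percolator format
    }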
use of org.elasticsearch.Version in project elasticsearch by elastic.
In the class PercolatorFieldMapperTests, the method testEmptyName:
public void testEmptyName() throws Exception {
    // after 5.x: an empty field name is rejected at mapping parse time
    String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
            .startObject("properties").startObject("").field("type", "percolator").endObject()
            .endObject().endObject().endObject().string();
    DocumentMapperParser parser = mapperService.documentMapperParser();
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> parser.parse("type1", new CompressedXContent(mapping)));
    assertThat(e.getMessage(), containsString("name cannot be empty string"));
    // before 5.x: the same mapping is accepted for indices created on 2.x nodes
    Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
    Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build();
    DocumentMapperParser parser2x = createIndex("test_old", oldIndexSettings).mapperService().documentMapperParser();
    DocumentMapper defaultMapper = parser2x.parse("type1", new CompressedXContent(mapping));
    assertEquals(mapping, defaultMapper.mappingSource().string());
}
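A minimal sketch of what randomVersionBetween guarantees here, using only version constants that already appear in the test: the picked version always falls inside the inclusive 2.x range, which is why the lenient pre-5.x mapping rules apply.

    // Sketch: the randomly chosen version is always within [V_2_0_0, V_2_3_5].
    Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5);
    assert oldVersion.onOrAfter(Version.V_2_0_0) && oldVersion.onOrBefore(Version.V_2_3_5);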
use of org.elasticsearch.Version in project elasticsearch by elastic.
In the class RemoteScrollableHitSourceTests, the method sourceWithMockedClient:
private RemoteScrollableHitSource sourceWithMockedClient(boolean mockRemoteVersion, CloseableHttpAsyncClient httpClient) throws Exception {
    HttpAsyncClientBuilder clientBuilder = mock(HttpAsyncClientBuilder.class);
    when(clientBuilder.build()).thenReturn(httpClient);
    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
            .setHttpClientConfigCallback(httpClientBuilder -> clientBuilder).build();
    TestRemoteScrollableHitSource hitSource = new TestRemoteScrollableHitSource(restClient) {
        @Override
        void lookupRemoteVersion(Consumer<Version> onVersion) {
            if (mockRemoteVersion) {
                onVersion.accept(Version.CURRENT);
            } else {
                super.lookupRemoteVersion(onVersion);
            }
        }
    };
    if (mockRemoteVersion) {
        hitSource.remoteVersion = Version.CURRENT;
    }
    return hitSource;
}
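A hedged usage sketch of this helper (the local variable names are assumptions): with mockRemoteVersion set to true, the version lookup is short-circuited, so no request for the remote version ever reaches the mocked HTTP client.

    // Sketch: the returned hit source already carries Version.CURRENT as its remote version,
    // and lookupRemoteVersion(...) hands that back without going through the RestClient.
    RemoteScrollableHitSource source = sourceWithMockedClient(true, httpClient);
    // Passing false instead would fall through to super.lookupRemoteVersion(...), which
    // really asks the remote cluster for its version via the mocked client.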