Use of org.elasticsearch.common.util.BigArrays in project elasticsearch by elastic.
The GeoCentroidAggregator class, getLeafCollector method:
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final MultiGeoPointValues values = valuesSource.geoPointValues(ctx);
    return new LeafBucketCollectorBase(sub, values) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            centroids = bigArrays.grow(centroids, bucket + 1);
            counts = bigArrays.grow(counts, bucket + 1);
            values.setDocument(doc);
            final int valueCount = values.count();
            if (valueCount > 0) {
                double[] pt = new double[2];
                // get the previously accumulated number of counts
                long prevCounts = counts.get(bucket);
                // increment by the number of points for this document
                counts.increment(bucket, valueCount);
                // get the previous GeoPoint if a moving avg was computed
                if (prevCounts > 0) {
                    final long mortonCode = centroids.get(bucket);
                    pt[0] = GeoPointField.decodeLongitude(mortonCode);
                    pt[1] = GeoPointField.decodeLatitude(mortonCode);
                }
                // update the moving average
                for (int i = 0; i < valueCount; ++i) {
                    GeoPoint value = values.valueAt(i);
                    pt[0] = pt[0] + (value.getLon() - pt[0]) / ++prevCounts;
                    pt[1] = pt[1] + (value.getLat() - pt[1]) / prevCounts;
                }
                // TODO: we do not need to interleave the lat and lon bits here
                // should we just store them contiguously?
                centroids.set(bucket, GeoPointField.encodeLatLon(pt[1], pt[0]));
            }
        }
    };
}
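The update loop is a standard incremental mean: each new point x moves the running estimate m by (x - m) / n, so the centroid is maintained in constant memory per bucket without retaining the points themselves. Below is a minimal standalone sketch of the same update, for illustration only; RunningCentroid and its helpers are hypothetical names, not Elasticsearch code.

import java.util.Arrays;
import java.util.List;

// Hypothetical standalone illustration of the running-mean centroid update.
public class RunningCentroid {
    static double[] centroid(List<double[]> lonLatPoints) {
        double lon = 0, lat = 0;
        long n = 0;
        for (double[] p : lonLatPoints) {
            n++;                       // number of points seen so far
            lon += (p[0] - lon) / n;   // incremental mean: m += (x - m) / n
            lat += (p[1] - lat) / n;
        }
        return new double[] { lon, lat };
    }

    public static void main(String[] args) {
        List<double[]> pts = Arrays.asList(
            new double[] { 0, 0 }, new double[] { 10, 30 }, new double[] { 20, 60 });
        // prints [10.0, 30.0]: the mean longitude and latitude
        System.out.println(Arrays.toString(centroid(pts)));
    }
}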
Use of org.elasticsearch.common.util.BigArrays in project elasticsearch by elastic.
The TransportClient class, buildTemplate method:
private static ClientTemplate buildTemplate(Settings providedSettings, Settings defaultSettings,
        Collection<Class<? extends Plugin>> plugins, HostFailureListener failureListner) {
    if (Node.NODE_NAME_SETTING.exists(providedSettings) == false) {
        providedSettings = Settings.builder().put(providedSettings).put(Node.NODE_NAME_SETTING.getKey(), "_client_").build();
    }
    final PluginsService pluginsService = newPluginService(providedSettings, plugins);
    final Settings settings = Settings.builder().put(defaultSettings).put(pluginsService.updatedSettings()).build();
    final List<Closeable> resourcesToClose = new ArrayList<>();
    final ThreadPool threadPool = new ThreadPool(settings);
    resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
    final NetworkService networkService = new NetworkService(settings, Collections.emptyList());
    try {
        final List<Setting<?>> additionalSettings = new ArrayList<>();
        final List<String> additionalSettingsFilter = new ArrayList<>();
        additionalSettings.addAll(pluginsService.getPluginSettings());
        additionalSettingsFilter.addAll(pluginsService.getPluginSettingsFilter());
        for (final ExecutorBuilder<?> builder : threadPool.builders()) {
            additionalSettings.addAll(builder.getRegisteredSettings());
        }
        SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter);
        SearchModule searchModule = new SearchModule(settings, true, pluginsService.filterPlugins(SearchPlugin.class));
        List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
        entries.addAll(NetworkModule.getNamedWriteables());
        entries.addAll(searchModule.getNamedWriteables());
        entries.addAll(ClusterModule.getNamedWriteables());
        entries.addAll(pluginsService.filterPlugins(Plugin.class).stream()
                .flatMap(p -> p.getNamedWriteables().stream())
                .collect(Collectors.toList()));
        NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
        NamedXContentRegistry xContentRegistry = new NamedXContentRegistry(Stream.of(
                searchModule.getNamedXContents().stream(),
                pluginsService.filterPlugins(Plugin.class).stream().flatMap(p -> p.getNamedXContent().stream())
        ).flatMap(Function.identity()).collect(toList()));
        ModulesBuilder modules = new ModulesBuilder();
        // plugin modules must be added here, before others or we can get crazy injection errors...
        for (Module pluginModule : pluginsService.createGuiceModules()) {
            modules.add(pluginModule);
        }
        modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
        ActionModule actionModule = new ActionModule(true, settings, null, settingsModule.getIndexScopedSettings(),
                settingsModule.getClusterSettings(), settingsModule.getSettingsFilter(), threadPool,
                pluginsService.filterPlugins(ActionPlugin.class), null, null);
        modules.add(actionModule);
        CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
                settingsModule.getClusterSettings());
        resourcesToClose.add(circuitBreakerService);
        BigArrays bigArrays = new BigArrays(settings, circuitBreakerService);
        resourcesToClose.add(bigArrays);
        modules.add(settingsModule);
        NetworkModule networkModule = new NetworkModule(settings, true, pluginsService.filterPlugins(NetworkPlugin.class),
                threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry,
                networkService, null);
        final Transport transport = networkModule.getTransportSupplier().get();
        final TransportService transportService = new TransportService(settings, transport, threadPool,
                networkModule.getTransportInterceptor(),
                boundTransportAddress -> DiscoveryNode.createLocal(settings,
                        new TransportAddress(TransportAddress.META_ADDRESS, 0), UUIDs.randomBase64UUID()), null);
        modules.add(b -> {
            b.bind(BigArrays.class).toInstance(bigArrays);
            b.bind(PluginsService.class).toInstance(pluginsService);
            b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService);
            b.bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
            b.bind(Transport.class).toInstance(transport);
            b.bind(TransportService.class).toInstance(transportService);
            b.bind(NetworkService.class).toInstance(networkService);
        });
        Injector injector = modules.createInjector();
        final TransportClientNodesService nodesService = new TransportClientNodesService(settings, transportService,
                threadPool, failureListner == null ? (t, e) -> {} : failureListner);
        final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService,
                actionModule.getActions().values().stream().map(x -> x.getAction()).collect(Collectors.toList()));
        List<LifecycleComponent> pluginLifecycleComponents = new ArrayList<>();
        pluginLifecycleComponents.addAll(pluginsService.getGuiceServiceClasses().stream()
                .map(injector::getInstance).collect(Collectors.toList()));
        resourcesToClose.addAll(pluginLifecycleComponents);
        transportService.start();
        transportService.acceptIncomingRequests();
        ClientTemplate transportClient = new ClientTemplate(injector, pluginLifecycleComponents, nodesService, proxy,
                namedWriteableRegistry);
        resourcesToClose.clear();
        return transportClient;
    } finally {
        IOUtils.closeWhileHandlingException(resourcesToClose);
    }
}
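Note the cleanup idiom around the try block: every partially constructed resource (the thread pool terminator, the circuit breaker service, the BigArrays instance, plugin lifecycle components) is registered in resourcesToClose, the list is cleared just before the successful return, and the finally block closes whatever remains. A failure anywhere during construction therefore releases everything built so far, while a successful build hands ownership to the returned ClientTemplate. The following is a hedged, self-contained sketch of that idiom only; BuildWithCleanup and its placeholder resources are hypothetical, not Elasticsearch code.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Hypothetical sketch of the "clear on success, close on failure" idiom above.
public class BuildWithCleanup {
    static AutoCloseable build() {
        final List<AutoCloseable> resourcesToClose = new ArrayList<>();
        try {
            AutoCloseable threadPoolLike = () -> System.out.println("terminating pool");
            resourcesToClose.add(threadPoolLike);
            AutoCloseable arraysLike = () -> System.out.println("releasing arrays");
            resourcesToClose.add(arraysLike);
            // ... any construction step here may throw ...
            AutoCloseable result = () -> {
                arraysLike.close();
                threadPoolLike.close();
            };
            resourcesToClose.clear(); // success: ownership transfers to the returned handle
            return result;
        } finally {
            // no-op on success (the list was cleared); on failure, releases
            // everything constructed so far, in reverse order
            Collections.reverse(resourcesToClose);
            for (AutoCloseable r : resourcesToClose) {
                try {
                    r.close();
                } catch (Exception e) {
                    // swallow, mirroring IOUtils.closeWhileHandlingException
                }
            }
        }
    }

    public static void main(String[] args) throws Exception {
        build().close(); // prints "releasing arrays" then "terminating pool"
    }
}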
Use of org.elasticsearch.common.util.BigArrays in project elasticsearch by elastic.
The StatsAggregator class, getLeafCollector method:
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);
    return new LeafBucketCollectorBase(sub, values) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            if (bucket >= counts.size()) {
                final long from = counts.size();
                final long overSize = BigArrays.overSize(bucket + 1);
                counts = bigArrays.resize(counts, overSize);
                sums = bigArrays.resize(sums, overSize);
                mins = bigArrays.resize(mins, overSize);
                maxes = bigArrays.resize(maxes, overSize);
                mins.fill(from, overSize, Double.POSITIVE_INFINITY);
                maxes.fill(from, overSize, Double.NEGATIVE_INFINITY);
            }
            values.setDocument(doc);
            final int valuesCount = values.count();
            counts.increment(bucket, valuesCount);
            double sum = 0;
            double min = mins.get(bucket);
            double max = maxes.get(bucket);
            for (int i = 0; i < valuesCount; i++) {
                double value = values.valueAt(i);
                sum += value;
                min = Math.min(min, value);
                max = Math.max(max, value);
            }
            sums.increment(bucket, sum);
            mins.set(bucket, min);
            maxes.set(bucket, max);
        }
    };
}
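Unlike the centroid collector, this one resizes its arrays by hand: BigArrays.overSize computes an over-allocated capacity for amortized growth, and the newly exposed slots of mins and maxes are filled with positive and negative infinity, the identity elements of min and max. Zero-filled slots (the default for a freshly grown array) would silently corrupt the statistics, which the hypothetical sketch below illustrates; it is not Elasticsearch code.

// Hypothetical illustration of why mins/maxes are filled with infinities.
public class SentinelFill {
    public static void main(String[] args) {
        double[] values = { 3.5, 7.2, 1.9 };

        double minWrong = 0.0;                       // a zero-initialized slot
        double minRight = Double.POSITIVE_INFINITY;  // identity element for min
        for (double v : values) {
            minWrong = Math.min(minWrong, v);
            minRight = Math.min(minRight, v);
        }
        System.out.println(minWrong);  // 0.0 -- wrong: the default leaked into the result
        System.out.println(minRight);  // 1.9 -- correct
    }
}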
Use of org.elasticsearch.common.util.BigArrays in project elasticsearch by elastic.
The AvgAggregator class, getLeafCollector method:
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx);
    return new LeafBucketCollectorBase(sub, values) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            counts = bigArrays.grow(counts, bucket + 1);
            sums = bigArrays.grow(sums, bucket + 1);
            values.setDocument(doc);
            final int valueCount = values.count();
            counts.increment(bucket, valueCount);
            double sum = 0;
            for (int i = 0; i < valueCount; i++) {
                sum += values.valueAt(i);
            }
            sums.increment(bucket, sum);
        }
    };
}
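The collector stores only a running count and sum per bucket; the average itself is derived later, when the aggregation result is built, as sum / count. A trivial hypothetical sketch of that derivation (LazyAverage is not an Elasticsearch class):

// Hypothetical illustration: the mean is derived lazily from the running
// count and sum, which is all the collector above has to store.
public class LazyAverage {
    static double average(long count, double sum) {
        return count == 0 ? Double.NaN : sum / count;
    }

    public static void main(String[] args) {
        System.out.println(average(4, 10.0)); // 2.5
        System.out.println(average(0, 0.0));  // NaN: an empty bucket has no average
    }
}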
Use of org.elasticsearch.common.util.BigArrays in project elasticsearch by elastic.
The MaxAggregator class, getLeafCollector method:
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx);
    final NumericDoubleValues values = MultiValueMode.MAX.select(allValues, Double.NEGATIVE_INFINITY);
    return new LeafBucketCollectorBase(sub, allValues) {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            if (bucket >= maxes.size()) {
                long from = maxes.size();
                maxes = bigArrays.grow(maxes, bucket + 1);
                maxes.fill(from, maxes.size(), Double.NEGATIVE_INFINITY);
            }
            final double value = values.get(doc);
            double max = maxes.get(bucket);
            max = Math.max(max, value);
            maxes.set(bucket, max);
        }
    };
}
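Here the multi-valued doc values are collapsed up front: MultiValueMode.MAX.select returns a per-document view exposing only the largest value of each document, with Double.NEGATIVE_INFINITY as the missing-value default, so collect needs a single comparison instead of an inner loop. A hypothetical sketch of that per-document selection (MaxSelect is not an Elasticsearch class):

// Hypothetical illustration of collapsing a multi-valued field to its
// per-document maximum, as MultiValueMode.MAX.select does above.
public class MaxSelect {
    static double selectMax(double[] docValues, double missing) {
        double max = missing; // Double.NEGATIVE_INFINITY in the aggregator above
        for (double v : docValues) {
            max = Math.max(max, v);
        }
        return max;
    }

    public static void main(String[] args) {
        System.out.println(selectMax(new double[] { 1.5, 9.0, 4.2 }, Double.NEGATIVE_INFINITY)); // 9.0
        System.out.println(selectMax(new double[] {}, Double.NEGATIVE_INFINITY));                // -Infinity
    }
}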