Use of org.xbib.elasticsearch.common.metrics.ElasticsearchIngestMetric in project elasticsearch-jdbc by jprante.
From the class ClientTests, method testClient:
@Test
public void testClient() throws IOException {
    // disable DNS caching for failover
    Security.setProperty("networkaddress.cache.ttl", "0");
    Settings settings = Settings.settingsBuilder()
            .putArray("elasticsearch.host", new String[] { "localhost:9300", "localhost:9301" })
            .put("transport.type", "org.elasticsearch.transport.netty.FoundNettyTransport")
            .put("transport.found.ssl-ports", 9443)
            .build();
    Integer maxbulkactions = settings.getAsInt("max_bulk_actions", 10000);
    Integer maxconcurrentbulkrequests = settings.getAsInt("max_concurrent_bulk_requests",
            Runtime.getRuntime().availableProcessors() * 2);
    ByteSizeValue maxvolume = settings.getAsBytesSize("max_bulk_volume", ByteSizeValue.parseBytesSizeValue("10m", ""));
    TimeValue flushinterval = settings.getAsTime("flush_interval", TimeValue.timeValueSeconds(5));
    Settings.Builder clientSettings = Settings.settingsBuilder()
            .put("cluster.name", settings.get("elasticsearch.cluster", "elasticsearch"))
            .putArray("host", settings.getAsArray("elasticsearch.host"))
            .put("port", settings.getAsInt("elasticsearch.port", 9300))
            .put("sniff", settings.getAsBoolean("elasticsearch.sniff", false))
            .put("autodiscover", settings.getAsBoolean("elasticsearch.autodiscover", false))
            // prevents lookup of names.txt (we don't have it) and marks this node as "feeder";
            // see also module load skipping in JDBCRiverPlugin
            .put("name", "feeder")
            // do not ignore the cluster name setting
            .put("client.transport.ignore_cluster_name", false)
            // ping timeout
            .put("client.transport.ping_timeout", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(10)))
            // for sniff sampling
            .put("client.transport.nodes_sampler_interval", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(5)))
            // pointing to a non-existing folder disables loading of site plugins
            .put("path.plugins", ".dontexist")
            .put("path.home", System.getProperty("path.home"));
    if (settings.get("transport.type") != null) {
        clientSettings.put("transport.type", settings.get("transport.type"));
    }
    // copy found.no transport settings
    Settings foundTransportSettings = settings.getAsSettings("transport.found");
    if (foundTransportSettings != null) {
        Map<String, String> foundTransportSettingsMap = foundTransportSettings.getAsMap();
        for (Map.Entry<String, String> entry : foundTransportSettingsMap.entrySet()) {
            clientSettings.put("transport.found." + entry.getKey(), entry.getValue());
        }
    }
    try {
        ClientAPI clientAPI = ClientBuilder.builder()
                .put(settings)
                .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxbulkactions)
                .put(ClientBuilder.MAX_CONCURRENT_REQUESTS, maxconcurrentbulkrequests)
                .put(ClientBuilder.MAX_VOLUME_PER_REQUEST, maxvolume)
                .put(ClientBuilder.FLUSH_INTERVAL, flushinterval)
                .setMetric(new ElasticsearchIngestMetric())
                .toBulkTransportClient();
    } catch (NoNodeAvailableException e) {
        // ok, no node is expected to be available in this test
    }
}
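The test only exercises client construction: since no node listens on localhost:9300 or localhost:9301 while the unit test runs, toBulkTransportClient() is expected to fail with NoNodeAvailableException, which the test swallows. As a minimal sketch (not part of the project's test code, and assuming the same imports as ClientTests), the construction step could be factored into a helper that attaches an ElasticsearchIngestMetric and returns null when no node is reachable; it uses only the ClientBuilder calls shown above:

    // Hypothetical helper, not part of ClientTests: builds a bulk transport client with an
    // ElasticsearchIngestMetric attached, or returns null when no node can be reached.
    static ClientAPI buildBulkClientOrNull(Settings settings,
                                           Integer maxbulkactions,
                                           Integer maxconcurrentbulkrequests,
                                           ByteSizeValue maxvolume,
                                           TimeValue flushinterval) {
        try {
            return ClientBuilder.builder()
                    .put(settings)
                    .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxbulkactions)
                    .put(ClientBuilder.MAX_CONCURRENT_REQUESTS, maxconcurrentbulkrequests)
                    .put(ClientBuilder.MAX_VOLUME_PER_REQUEST, maxvolume)
                    .put(ClientBuilder.FLUSH_INTERVAL, flushinterval)
                    .setMetric(new ElasticsearchIngestMetric())
                    .toBulkTransportClient();
        } catch (NoNodeAvailableException e) {
            // no cluster reachable, which is exactly the situation this test expects
            return null;
        }
    }

With such a helper, the test body would reduce to a single call whose null result signals the expected "no node available" case.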
Use of org.xbib.elasticsearch.common.metrics.ElasticsearchIngestMetric in project elasticsearch-jdbc by jprante.
From the class ClientTests, method testFoundSettings:
@Test
public void testFoundSettings() throws IOException {
    // disable DNS caching for failover
    Security.setProperty("networkaddress.cache.ttl", "0");
    Settings settings = Settings.settingsBuilder()
            .putArray("elasticsearch.host", new String[] { "localhost:9300", "localhost:9301" })
            .put("transport.type", "org.elasticsearch.transport.netty.FoundNettyTransport")
            .put("transport.found.ssl-ports", 9443)
            .build();
    Integer maxbulkactions = settings.getAsInt("max_bulk_actions", 10000);
    Integer maxconcurrentbulkrequests = settings.getAsInt("max_concurrent_bulk_requests",
            Runtime.getRuntime().availableProcessors() * 2);
    ByteSizeValue maxvolume = settings.getAsBytesSize("max_bulk_volume", ByteSizeValue.parseBytesSizeValue("10m", ""));
    TimeValue flushinterval = settings.getAsTime("flush_interval", TimeValue.timeValueSeconds(5));
    Settings.Builder clientSettings = Settings.settingsBuilder()
            .put("cluster.name", settings.get("elasticsearch.cluster", "elasticsearch"))
            .putArray("host", settings.getAsArray("elasticsearch.host"))
            .put("port", settings.getAsInt("elasticsearch.port", 9300))
            .put("sniff", settings.getAsBoolean("elasticsearch.sniff", false))
            .put("autodiscover", settings.getAsBoolean("elasticsearch.autodiscover", false))
            // prevents lookup of names.txt (we don't have it) and marks this node as "feeder";
            // see also module load skipping in JDBCRiverPlugin
            .put("name", "feeder")
            .put("client.transport.ignore_cluster_name", false) // do not ignore the cluster name setting
            .put("client.transport.ping_timeout", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(5))) // ping timeout
            .put("client.transport.nodes_sampler_interval", settings.getAsTime("elasticsearch.timeout", TimeValue.timeValueSeconds(5))) // for sniff sampling
            .put("path.plugins", ".dontexist") // a non-existing folder disables loading of site plugins
            .put("path.home", System.getProperty("path.home"));
    try {
        ClientAPI clientAPI = ClientBuilder.builder()
                .put(settings)
                .put(ClientBuilder.MAX_ACTIONS_PER_REQUEST, maxbulkactions)
                .put(ClientBuilder.MAX_CONCURRENT_REQUESTS, maxconcurrentbulkrequests)
                .put(ClientBuilder.MAX_VOLUME_PER_REQUEST, maxvolume)
                .put(ClientBuilder.FLUSH_INTERVAL, flushinterval)
                .setMetric(new ElasticsearchIngestMetric())
                .toBulkTransportClient();
    } catch (NoNodeAvailableException e) {
        // ok, no node is expected to be available in this test
    }
}
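The main difference from testClient is that testFoundSettings does not copy the transport.type override or the transport.found.* settings into clientSettings. As an illustrative sketch only, that copy step could be expressed as a small helper; it uses just the Settings calls already present in testClient (getAsSettings, getAsMap, put), and the helper name is hypothetical, not part of the project:

    // Illustrative helper (not in ClientTests): copies every key under the given prefix from the
    // source settings into the target builder, re-applying the prefix, so that "ssl-ports" under
    // "transport.found" becomes "transport.found.ssl-ports" again.
    static void copyPrefixedSettings(Settings source, Settings.Builder target, String prefix) {
        Settings group = source.getAsSettings(prefix);
        if (group != null) {
            for (Map.Entry<String, String> entry : group.getAsMap().entrySet()) {
                target.put(prefix + "." + entry.getKey(), entry.getValue());
            }
        }
    }

The loop in testClient does exactly this inline for the "transport.found" prefix; with the helper it would read copyPrefixedSettings(settings, clientSettings, "transport.found").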