Use of org.elasticsearch.common.transport.InetSocketTransportAddress in project crate by crate.
The class NodeStatsContextFieldResolverTest, method testPSQLPortResolution:
@Test
public void testPSQLPortResolution() throws IOException {
    NodeInfo nodeInfo = mock(NodeInfo.class);
    when(nodeService.info()).thenReturn(nodeInfo);
    NodeStats stats = mock(NodeStats.class);
    when(nodeService.stats()).thenReturn(stats);
    when(stats.getNode()).thenReturn(mock(DiscoveryNode.class));

    // bind the PostgreSQL transport to port 5432 on the local host
    InetSocketTransportAddress inetAddress = new InetSocketTransportAddress(Inet4Address.getLocalHost(), 5432);
    BoundTransportAddress boundAddress = new BoundTransportAddress(new TransportAddress[] { inetAddress }, inetAddress);
    when(postgresNetty.boundAddress()).thenReturn(boundAddress);

    // resolve only the port column and verify that the psql port is reported
    NodeStatsContext context = resolver.forTopColumnIdents(ImmutableSet.of(SysNodesTableInfo.Columns.PORT));
    assertThat(context.isComplete(), is(true));
    assertThat(context.port().get("psql"), is(5432));
}
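For reference, a minimal sketch of how a port can be read back from the BoundTransportAddress constructed above, using only the Elasticsearch transport API (variable names reuse the test's; this is not Crate's resolver implementation):

    // The publish address is the second constructor argument above.
    TransportAddress publish = boundAddress.publishAddress();
    // InetSocketTransportAddress wraps a java.net.InetSocketAddress, so the port is available directly.
    int psqlPort = ((InetSocketTransportAddress) publish).address().getPort();   // 5432 in this test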
Use of org.elasticsearch.common.transport.InetSocketTransportAddress in project storm by apache.
The class TransportAddresses, method iterator:
@Override
public Iterator<InetSocketTransportAddress> iterator() {
    List<InetSocketTransportAddress> result = new LinkedList<>();
    // transform each configured node entry into a transport address
    for (String node : nodes) {
        InetSocketTransportAddress transportAddress = transformToInetAddress(node);
        result.add(transportAddress);
    }
    return result.iterator();
}
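The helper transformToInetAddress is not shown in the excerpt. A minimal sketch of such a helper, assuming the configured nodes use a host:port format and java.net.InetAddress is available (the body below is illustrative, not Storm's actual implementation):

    // Hypothetical helper: parse a "host:port" string into an InetSocketTransportAddress.
    private InetSocketTransportAddress transformToInetAddress(String node) {
        String[] hostAndPort = node.split(":");
        if (hostAndPort.length != 2) {
            throw new IllegalArgumentException("Expected host:port but got: " + node);
        }
        try {
            return new InetSocketTransportAddress(
                    InetAddress.getByName(hostAndPort[0]),
                    Integer.parseInt(hostAndPort[1]));
        } catch (UnknownHostException e) {
            throw new IllegalArgumentException("Unknown host: " + hostAndPort[0], e);
        }
    }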
Use of org.elasticsearch.common.transport.InetSocketTransportAddress in project sonarqube by SonarSource.
The class EsServerHolder, method reset:
private void reset() {
    TransportClient client = TransportClient.builder()
            .settings(Settings.builder()
                    .put("network.bind_host", "localhost")
                    .put("cluster.name", clusterName)
                    .build())
            .build();
    client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getLoopbackAddress(), port));
    // wait for node to be ready
    client.admin().cluster().prepareHealth().setWaitForGreenStatus().get();
    // delete the indices created by previous tests
    DeleteIndexResponse response = client.admin().indices().prepareDelete("_all").get();
    if (!response.isAcknowledged()) {
        throw new IllegalStateException("Fail to delete all indices");
    }
    client.close();
}
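Note that the client above is only closed on the success path. If stricter cleanup is wanted, the same calls can be wrapped in a try/finally; a sketch, reusing the client variable from the method above:

    try {
        client.admin().cluster().prepareHealth().setWaitForGreenStatus().get();
        if (!client.admin().indices().prepareDelete("_all").get().isAcknowledged()) {
            throw new IllegalStateException("Fail to delete all indices");
        }
    } finally {
        client.close();   // release the transport client even if the cleanup fails
    }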
Use of org.elasticsearch.common.transport.InetSocketTransportAddress in project YCSB by brianfrankcooper.
The class ElasticsearchClient, method init:
/**
 * Initialize any state for this DB. Called once per DB instance; there is one
 * DB instance per client thread.
 */
@Override
public void init() throws DBException {
    final Properties props = getProperties();
    // Check whether the transport client needs to be used (to connect to
    // remote elasticsearch nodes)
    remoteMode = Boolean.parseBoolean(props.getProperty("es.remote", "false"));
    final String pathHome = props.getProperty("path.home");
    // when running in embedded mode, require path.home
    if (!remoteMode && (pathHome == null || pathHome.isEmpty())) {
        throw new IllegalArgumentException("path.home must be specified when running in embedded mode");
    }
    this.indexKey = props.getProperty("es.index.key", DEFAULT_INDEX_KEY);
    int numberOfShards = parseIntegerProperty(props, "es.number_of_shards", NUMBER_OF_SHARDS);
    int numberOfReplicas = parseIntegerProperty(props, "es.number_of_replicas", NUMBER_OF_REPLICAS);
    boolean newdb = Boolean.parseBoolean(props.getProperty("es.newdb", "false"));
    Builder settings = Settings.settingsBuilder()
            .put("cluster.name", DEFAULT_CLUSTER_NAME)
            .put("node.local", Boolean.toString(!remoteMode))
            .put("path.home", pathHome);
    // if the properties file contains user-defined elasticsearch properties,
    // add them to the settings (they will overwrite the defaults).
    settings.put(props);
    final String clusterName = settings.get("cluster.name");
    System.err.println("Elasticsearch starting node = " + clusterName);
    System.err.println("Elasticsearch node path.home = " + settings.get("path.home"));
    System.err.println("Elasticsearch Remote Mode = " + remoteMode);
    // Remote mode support for connecting to a remote elasticsearch cluster
    if (remoteMode) {
        settings.put("client.transport.sniff", true)
                .put("client.transport.ignore_cluster_name", false)
                .put("client.transport.ping_timeout", "30s")
                .put("client.transport.nodes_sampler_interval", "30s");
        // Default to localhost:9300
        String[] nodeList = props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST).split(",");
        System.out.println("Elasticsearch Remote Hosts = " + props.getProperty("es.hosts.list", DEFAULT_REMOTE_HOST));
        TransportClient tClient = TransportClient.builder().settings(settings).build();
        for (String h : nodeList) {
            String[] nodes = h.split(":");
            try {
                tClient.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(nodes[0]), Integer.parseInt(nodes[1])));
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("Unable to parse port number.", e);
            } catch (UnknownHostException e) {
                throw new IllegalArgumentException("Unable to Identify host.", e);
            }
        }
        client = tClient;
    } else {
        // Start a local node only if transport client mode is disabled
        node = nodeBuilder().clusterName(clusterName).settings(settings).node();
        node.start();
        client = node.client();
    }
    final boolean exists = client.admin().indices().exists(Requests.indicesExistsRequest(indexKey)).actionGet().isExists();
    // drop the existing index when a fresh database is requested
    if (exists && newdb) {
        client.admin().indices().prepareDelete(indexKey).execute().actionGet();
    }
    if (!exists || newdb) {
        client.admin().indices().create(new CreateIndexRequest(indexKey)
                .settings(Settings.builder()
                        .put("index.number_of_shards", numberOfShards)
                        .put("index.number_of_replicas", numberOfReplicas)
                        .put("index.mapping._id.indexed", true)))
                .actionGet();
    }
    // wait for the cluster (or the embedded node) to reach green health before the workload starts
    client.admin().cluster().health(new ClusterHealthRequest().waitForGreenStatus()).actionGet();
}
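Based on the property names read above, a minimal sketch of the configuration a caller might supply to run this binding against a remote cluster (a java.util.Properties fragment; all values are illustrative, not defaults of the binding):

    Properties props = new Properties();
    props.setProperty("es.remote", "true");                            // use the TransportClient instead of an embedded node
    props.setProperty("es.hosts.list", "10.0.0.1:9300,10.0.0.2:9300"); // comma-separated host:port pairs
    props.setProperty("cluster.name", "my-es-cluster");                // overrides the default cluster name via settings.put(props)
    props.setProperty("es.index.key", "usertable");                    // index used by the workload
    props.setProperty("es.newdb", "true");                             // drop and recreate the index if it already exists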
Use of org.elasticsearch.common.transport.InetSocketTransportAddress in project flink by apache.
The class ElasticsearchSinkExample, method main:
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<String> source = env.generateSequence(0, 20).map(new MapFunction<Long, String>() {
        @Override
        public String map(Long value) throws Exception {
            return "message #" + value;
        }
    });
    Map<String, String> userConfig = new HashMap<>();
    userConfig.put("cluster.name", "elasticsearch");
    // This instructs the sink to emit after every element, otherwise they would be buffered
    userConfig.put(ElasticsearchSink.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
    List<TransportAddress> transports = new ArrayList<>();
    transports.add(new InetSocketTransportAddress(InetAddress.getByName("127.0.0.1"), 9300));
    source.addSink(new ElasticsearchSink<>(userConfig, transports, new ElasticsearchSinkFunction<String>() {
        @Override
        public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
            indexer.add(createIndexRequest(element));
        }
    }));
    env.execute("Elasticsearch Sink Example");
}
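The helper createIndexRequest is not shown in the excerpt. A minimal sketch consistent with the example above, assuming org.elasticsearch.action.index.IndexRequest and org.elasticsearch.client.Requests are imported (the index and type names are illustrative):

    private static IndexRequest createIndexRequest(String element) {
        Map<String, Object> json = new HashMap<>();
        json.put("data", element);
        // index each generated message as a document in a hypothetical "my-index" index
        return Requests.indexRequest()
                .index("my-index")
                .type("my-type")
                .source(json);
    }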