Use of org.elasticsearch.client.Client in the elasticsearch project by elastic.
From the class SearchPreferenceIT, method testNodesOnlyRandom:
public void testNodesOnlyRandom() throws Exception {
    // this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data
    assertAcked(prepareCreate("test").setSettings(
        Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(1, maximumNumberOfReplicas()))));
    ensureGreen();
    client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet();
    refresh();
    final Client client = internalCluster().smartClient();
    // multiple wildcards to cover the multi-parameter use case
    SearchRequestBuilder request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference("_only_nodes:*,nodes*");
    assertSearchOnRandomNodes(request);
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference("_only_nodes:*");
    assertSearchOnRandomNodes(request);
    // collect the ids, names and hosts of every node in the cluster
    ArrayList<String> allNodeIds = new ArrayList<>();
    ArrayList<String> allNodeNames = new ArrayList<>();
    ArrayList<String> allNodeHosts = new ArrayList<>();
    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().execute().actionGet();
    for (NodeStats node : nodeStats.getNodes()) {
        allNodeIds.add(node.getNode().getId());
        allNodeNames.add(node.getNode().getName());
        allNodeHosts.add(node.getHostname());
    }
    // restrict the search to the full list of node ids
    String node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeIds.toArray());
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
    assertSearchOnRandomNodes(request);
    // restrict the search to the full list of node names
    node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeNames.toArray());
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
    assertSearchOnRandomNodes(request);
    // restrict the search to the full list of node hosts
    node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeHosts.toArray());
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
    assertSearchOnRandomNodes(request);
    node_expr = "_only_nodes:" + Strings.arrayToCommaDelimitedString(allNodeHosts.toArray());
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
    assertSearchOnRandomNodes(request);
    // mix of valid and invalid nodes
    node_expr = "_only_nodes:*,invalidnode";
    request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference(node_expr);
    assertSearchOnRandomNodes(request);
}
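For reference, a minimal sketch (not part of the test) of how the same _only_nodes preference can be applied with any org.elasticsearch.client.Client instance. The index name "test", the class OnlyNodesPreferenceExample, and the nodeList parameter are illustrative assumptions, not code from the elasticsearch repository.

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

public class OnlyNodesPreferenceExample {
    // Routes the search only to shard copies on the nodes matched by nodeList
    // (a comma separated list of node ids, names, or host patterns).
    static SearchResponse searchOnlyOnNodes(Client client, String nodeList) {
        return client.prepareSearch("test")
                .setQuery(matchAllQuery())
                .setPreference("_only_nodes:" + nodeList)
                .get();
    }
}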
Use of org.elasticsearch.client.Client in the elasticsearch project by elastic.
From the class SearchPreferenceIT, method testNoPreferenceRandom:
public void testNoPreferenceRandom() throws Exception {
    // this test needs at least a replica to make sure two consecutive searches go to two different copies of the same data
    assertAcked(prepareCreate("test").setSettings(
        Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_REPLICAS, between(1, maximumNumberOfReplicas()))));
    ensureGreen();
    client().prepareIndex("test", "type1").setSource("field1", "value1").execute().actionGet();
    refresh();
    final Client client = internalCluster().smartClient();
    SearchResponse searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet();
    String firstNodeId = searchResponse.getHits().getAt(0).getShard().getNodeId();
    searchResponse = client.prepareSearch("test").setQuery(matchAllQuery()).execute().actionGet();
    String secondNodeId = searchResponse.getHits().getAt(0).getShard().getNodeId();
    assertThat(firstNodeId, not(equalTo(secondNodeId)));
}
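As a contrast to the no-preference round-robin behaviour exercised above, here is a minimal sketch of pinning repeated searches to the same shard copies by passing an arbitrary but stable preference string, such as a session id. It assumes a connected Client and an existing "test" index; the class and method names are made up for illustration.

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

public class StickyPreferenceExample {
    // The same preference string always resolves to the same shard copies,
    // so consecutive requests for one session see consistent results.
    static SearchResponse searchForSession(Client client, String sessionId) {
        return client.prepareSearch("test")
                .setQuery(matchAllQuery())
                .setPreference(sessionId)
                .get();
    }
}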
Use of org.elasticsearch.client.Client in the elasticsearch project by elastic.
From the class SharedClusterSnapshotRestoreIT, method testSnapshotMoreThanOnce:
public void testSnapshotMoreThanOnce() throws ExecutionException, InterruptedException {
    Client client = client();
    logger.info("--> creating repository");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(
        Settings.builder()
            .put("location", randomRepoPath())
            .put("compress", randomBoolean())
            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
    // only one shard
    assertAcked(prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)));
    ensureGreen();
    logger.info("--> indexing");
    final int numdocs = randomIntBetween(10, 100);
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test", "doc", Integer.toString(i)).setSource("foo", "bar" + i);
    }
    indexRandom(true, builders);
    flushAndRefresh();
    assertNoFailures(client().admin().indices().prepareForceMerge("test").setFlush(true).setMaxNumSegments(1).get());

    // first snapshot: the files of the single merged segment all have to be processed
    CreateSnapshotResponse createSnapshotResponseFirst = client.admin().cluster().prepareCreateSnapshot("test-repo", "test")
        .setWaitForCompletion(true).setIndices("test").get();
    assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponseFirst.getSnapshotInfo().successfulShards(),
        equalTo(createSnapshotResponseFirst.getSnapshotInfo().totalShards()));
    assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test").get()
        .getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
    {
        SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo")
            .setSnapshots("test").get().getSnapshots().get(0);
        List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
        for (SnapshotIndexShardStatus status : shards) {
            assertThat(status.getStats().getProcessedFiles(), greaterThan(1));
        }
    }

    // second snapshot of the unchanged index: incremental, so no files need to be processed
    CreateSnapshotResponse createSnapshotResponseSecond = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-1")
        .setWaitForCompletion(true).setIndices("test").get();
    assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponseSecond.getSnapshotInfo().successfulShards(),
        equalTo(createSnapshotResponseSecond.getSnapshotInfo().totalShards()));
    assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-1").get()
        .getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
    {
        SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo")
            .setSnapshots("test-1").get().getSnapshots().get(0);
        List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
        for (SnapshotIndexShardStatus status : shards) {
            assertThat(status.getStats().getProcessedFiles(), equalTo(0));
        }
    }

    // third snapshot after deleting one document: only the changed files are processed
    client().prepareDelete("test", "doc", "1").get();
    CreateSnapshotResponse createSnapshotResponseThird = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-2")
        .setWaitForCompletion(true).setIndices("test").get();
    assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(),
        equalTo(createSnapshotResponseThird.getSnapshotInfo().totalShards()));
    assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-2").get()
        .getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
    {
        SnapshotStatus snapshotStatus = client.admin().cluster().prepareSnapshotStatus("test-repo")
            .setSnapshots("test-2").get().getSnapshots().get(0);
        List<SnapshotIndexShardStatus> shards = snapshotStatus.getShards();
        for (SnapshotIndexShardStatus status : shards) {
            // we flush before the snapshot such that we have to process the segments_N files plus the .del file
            assertThat(status.getStats().getProcessedFiles(), equalTo(2));
        }
    }
}
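A minimal sketch of the register-then-snapshot flow this test exercises, outside the test framework. The repository name "backups", the location "/mnt/backups", the index name "test", and the class name are assumptions for illustration; snapshots into the same fs repository are incremental, which is why the second snapshot above processes zero files.

import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

public class IncrementalSnapshotExample {
    static CreateSnapshotResponse snapshotTestIndex(Client client, String snapshotName) {
        // register (or update) a shared filesystem repository; the path is an assumption
        client.admin().cluster().preparePutRepository("backups")
                .setType("fs")
                .setSettings(Settings.builder().put("location", "/mnt/backups"))
                .get();
        // repeated snapshots of the same index into this repository only copy files that changed
        return client.admin().cluster().prepareCreateSnapshot("backups", snapshotName)
                .setWaitForCompletion(true)
                .setIndices("test")
                .get();
    }
}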
Use of org.elasticsearch.client.Client in the elasticsearch project by elastic.
From the class SharedClusterSnapshotRestoreIT, method testRestoreWithDifferentMappingsAndSettings:
public void testRestoreWithDifferentMappingsAndSettings() throws Exception {
    Client client = client();
    logger.info("--> creating repository");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(
        Settings.builder()
            .put("location", randomRepoPath())
            .put("compress", randomBoolean())
            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
    logger.info("--> create index with foo type");
    assertAcked(prepareCreate("test-idx", 2, Settings.builder()
        .put(indexSettings())
        .put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))
        .put("refresh_interval", 10, TimeUnit.SECONDS)));
    NumShards numShards = getNumShards("test-idx");
    assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("foo").setSource("baz", "type=text"));
    ensureGreen();
    logger.info("--> snapshot it");
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
        .setWaitForCompletion(true).setIndices("test-idx").get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
        equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
    logger.info("--> delete the index and recreate it with bar type");
    cluster().wipeIndices("test-idx");
    assertAcked(prepareCreate("test-idx", 2, Settings.builder()
        .put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries)
        .put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))
        .put("refresh_interval", 5, TimeUnit.SECONDS)));
    assertAcked(client().admin().indices().preparePutMapping("test-idx").setType("bar").setSource("baz", "type=text"));
    ensureGreen();
    logger.info("--> close index");
    client.admin().indices().prepareClose("test-idx").get();
    logger.info("--> restore all indices from the snapshot");
    RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
        .setWaitForCompletion(true).execute().actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    logger.info("--> assert that old mapping is restored");
    ImmutableOpenMap<String, MappingMetaData> mappings = client().admin().cluster().prepareState().get().getState()
        .getMetaData().getIndices().get("test-idx").getMappings();
    assertThat(mappings.get("foo"), notNullValue());
    assertThat(mappings.get("bar"), nullValue());
    logger.info("--> assert that old settings are restored");
    GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings("test-idx").execute().actionGet();
    // the original 10 second refresh_interval is stored and reported as "10000ms"
    assertThat(getSettingsResponse.getSetting("test-idx", "index.refresh_interval"), equalTo("10000ms"));
}
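A minimal sketch of the close-then-restore sequence shown above; the index, repository, and snapshot names and the class name are placeholders. The target index has to be closed before the restore, and the restore brings back the mappings and settings captured in the snapshot.

import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.client.Client;

public class CloseAndRestoreExample {
    static RestoreSnapshotResponse restoreOverExistingIndex(Client client) {
        // an open index cannot be restored in place, so close it first
        client.admin().indices().prepareClose("test-idx").get();
        return client.admin().cluster().prepareRestoreSnapshot("backups", "nightly-1")
                .setWaitForCompletion(true)
                .setIndices("test-idx")
                .get();
    }
}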
Use of org.elasticsearch.client.Client in the elasticsearch project by elastic.
From the class SharedClusterSnapshotRestoreIT, method testDeleteSnapshotWithMissingMetadata:
public void testDeleteSnapshotWithMissingMetadata() throws Exception {
    Client client = client();
    Path repo = randomRepoPath();
    logger.info("--> creating repository at {}", repo.toAbsolutePath());
    assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(
        Settings.builder()
            .put("location", repo)
            .put("compress", false)
            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
    createIndex("test-idx-1", "test-idx-2");
    logger.info("--> indexing some data");
    indexRandom(true,
        client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
        client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"));
    logger.info("--> creating snapshot");
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")
        .setWaitForCompletion(true).setIndices("test-idx-*").get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
        equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
    logger.info("--> delete index metadata and shard metadata");
    // remove the snapshot's top-level metadata file directly from the repository directory
    Path metadata = repo.resolve("meta-" + createSnapshotResponse.getSnapshotInfo().snapshotId().getUUID() + ".dat");
    Files.delete(metadata);
    logger.info("--> delete snapshot");
    client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get();
    logger.info("--> make sure snapshot doesn't exist");
    assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class);
}
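A minimal sketch (repository and snapshot names are caller-supplied; the class name is made up) of checking whether a snapshot still exists after a delete: as the assertThrows above shows, asking for a missing snapshot raises SnapshotMissingException.

import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.snapshots.SnapshotMissingException;

public class SnapshotExistsExample {
    static boolean snapshotExists(Client client, String repository, String snapshot) {
        try {
            GetSnapshotsResponse response = client.admin().cluster()
                    .prepareGetSnapshots(repository)
                    .addSnapshots(snapshot)
                    .get();
            return response.getSnapshots().isEmpty() == false;
        } catch (SnapshotMissingException e) {
            // the repository exists but the requested snapshot does not
            return false;
        }
    }
}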