Use of org.elasticsearch.action.index.IndexResponse in project elasticsearch by elastic.
Class DiscoveryWithServiceDisruptionsIT, method testRejoinDocumentExistsInAllShardCopies:
/**
 * Test that a document which is indexed on the majority side of a partition, is available from the minority side,
 * once the partition is healed
 */
public void testRejoinDocumentExistsInAllShardCopies() throws Exception {
    List<String> nodes = startCluster(3);
    assertAcked(prepareCreate("test")
        .setSettings(Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2))
        .get());
    ensureGreen("test");
    nodes = new ArrayList<>(nodes);
    Collections.shuffle(nodes, random());
    String isolatedNode = nodes.get(0);
    String notIsolatedNode = nodes.get(1);
    TwoPartitions partitions = isolateNode(isolatedNode);
    NetworkDisruption scheme = addRandomDisruptionType(partitions);
    scheme.startDisrupting();
    ensureStableCluster(2, notIsolatedNode);
    assertFalse(client(notIsolatedNode).admin().cluster().prepareHealth("test").setWaitForYellowStatus().get().isTimedOut());
    IndexResponse indexResponse = internalCluster().client(notIsolatedNode).prepareIndex("test", "type").setSource("field", "value").get();
    assertThat(indexResponse.getVersion(), equalTo(1L));
    logger.info("Verifying if document exists via node[{}]", notIsolatedNode);
    GetResponse getResponse = internalCluster().client(notIsolatedNode).prepareGet("test", "type", indexResponse.getId()).setPreference("_local").get();
    assertThat(getResponse.isExists(), is(true));
    assertThat(getResponse.getVersion(), equalTo(1L));
    assertThat(getResponse.getId(), equalTo(indexResponse.getId()));
    scheme.stopDisrupting();
    ensureStableCluster(3);
    ensureGreen("test");
    for (String node : nodes) {
        logger.info("Verifying if document exists after isolating node[{}] via node[{}]", isolatedNode, node);
        getResponse = internalCluster().client(node).prepareGet("test", "type", indexResponse.getId()).setPreference("_local").get();
        assertThat(getResponse.isExists(), is(true));
        assertThat(getResponse.getVersion(), equalTo(1L));
        assertThat(getResponse.getId(), equalTo(indexResponse.getId()));
    }
}
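The isolateNode and addRandomDisruptionType helpers belong to the surrounding DiscoveryWithServiceDisruptionsIT class and are not reproduced on this page. As a rough sketch of what addRandomDisruptionType typically does, assuming the NetworkDisconnect and NetworkUnresponsive link-disruption types nested in the org.elasticsearch.test.disruption.NetworkDisruption test utility (the real helper may differ):

// Sketch only: picks a random link-disruption flavour and registers it with the test cluster.
private NetworkDisruption addRandomDisruptionType(TwoPartitions partitions) {
    final NetworkDisruption.NetworkLinkDisruptionType disruptionType = randomBoolean()
        ? new NetworkDisruption.NetworkDisconnect()
        : new NetworkDisruption.NetworkUnresponsive();
    NetworkDisruption disruption = new NetworkDisruption(partitions, disruptionType);
    // Registering the scheme lets the test framework tear it down even if the test fails.
    setDisruptionScheme(disruption);
    return disruption;
}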
Use of org.elasticsearch.action.index.IndexResponse in project elasticsearch by elastic.
Class DocumentActionsIT, method testIndexActions:
public void testIndexActions() throws Exception {
    createIndex();
    NumShards numShards = getNumShards(getConcreteIndexName());
    logger.info("Running Cluster Health");
    ensureGreen();
    logger.info("Indexing [type1/1]");
    IndexResponse indexResponse = client().prepareIndex().setIndex("test").setType("type1").setId("1").setSource(source("1", "test")).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
    assertThat(indexResponse.getIndex(), equalTo(getConcreteIndexName()));
    assertThat(indexResponse.getId(), equalTo("1"));
    assertThat(indexResponse.getType(), equalTo("type1"));
    logger.info("Refreshing");
    RefreshResponse refreshResponse = refresh();
    assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    logger.info("--> index exists?");
    assertThat(indexExists(getConcreteIndexName()), equalTo(true));
    logger.info("--> index exists?, fake index");
    assertThat(indexExists("test1234565"), equalTo(false));
    logger.info("Clearing cache");
    ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().clearCache(clearIndicesCacheRequest("test").recycler(true).fieldDataCache(true).queryCache(true)).actionGet();
    assertNoFailures(clearIndicesCacheResponse);
    assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    logger.info("Force Merging");
    waitForRelocation(ClusterHealthStatus.GREEN);
    ForceMergeResponse mergeResponse = forceMerge();
    assertThat(mergeResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    GetResponse getResult;
    logger.info("Get [type1/1]");
    for (int i = 0; i < 5; i++) {
        getResult = client().prepareGet("test", "type1", "1").setOperationThreaded(false).execute().actionGet();
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
        assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
        assertThat("cycle(map) #" + i, (String) getResult.getSourceAsMap().get("name"), equalTo("test"));
        getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(true)).actionGet();
        assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
    }
    logger.info("Get [type1/1] with script");
    for (int i = 0; i < 5; i++) {
        getResult = client().prepareGet("test", "type1", "1").setStoredFields("name").execute().actionGet();
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
        assertThat(getResult.isExists(), equalTo(true));
        assertThat(getResult.getSourceAsBytes(), nullValue());
        assertThat(getResult.getField("name").getValues().get(0).toString(), equalTo("test"));
    }
    logger.info("Get [type1/2] (should be empty)");
    for (int i = 0; i < 5; i++) {
        getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
        assertThat(getResult.isExists(), equalTo(false));
    }
    logger.info("Delete [type1/1]");
    DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "1").execute().actionGet();
    assertThat(deleteResponse.getIndex(), equalTo(getConcreteIndexName()));
    assertThat(deleteResponse.getId(), equalTo("1"));
    assertThat(deleteResponse.getType(), equalTo("type1"));
    logger.info("Refreshing");
    client().admin().indices().refresh(refreshRequest("test")).actionGet();
    logger.info("Get [type1/1] (should be empty)");
    for (int i = 0; i < 5; i++) {
        getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
        assertThat(getResult.isExists(), equalTo(false));
    }
    logger.info("Index [type1/1]");
    client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
    logger.info("Index [type1/2]");
    client().index(indexRequest("test").type("type1").id("2").source(source("2", "test2"))).actionGet();
    logger.info("Flushing");
    FlushResponse flushResult = client().admin().indices().prepareFlush("test").execute().actionGet();
    assertThat(flushResult.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    assertThat(flushResult.getFailedShards(), equalTo(0));
    logger.info("Refreshing");
    client().admin().indices().refresh(refreshRequest("test")).actionGet();
    logger.info("Get [type1/1] and [type1/2]");
    for (int i = 0; i < 5; i++) {
        getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
        assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
        getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
        String ste1 = getResult.getSourceAsString();
        String ste2 = source("2", "test2").string();
        assertThat("cycle #" + i, ste1, equalTo(ste2));
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
    }
    logger.info("Count");
    // check count
    for (int i = 0; i < 5; i++) {
        // test successful
        SearchResponse countResponse = client().prepareSearch("test").setSize(0).setQuery(termQuery("_type", "type1")).execute().actionGet();
        assertNoFailures(countResponse);
        assertThat(countResponse.getHits().getTotalHits(), equalTo(2L));
        assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
        assertThat(countResponse.getFailedShards(), equalTo(0));
        // count with no query is a match all one
        countResponse = client().prepareSearch("test").setSize(0).execute().actionGet();
        assertThat("Failures " + countResponse.getShardFailures(), countResponse.getShardFailures() == null ? 0 : countResponse.getShardFailures().length, equalTo(0));
        assertThat(countResponse.getHits().getTotalHits(), equalTo(2L));
        assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
        assertThat(countResponse.getFailedShards(), equalTo(0));
    }
}
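The source(id, name) helper called throughout this test is defined elsewhere in DocumentActionsIT. A minimal sketch of what it plausibly builds: the "name" field is implied by the assertions on getSourceAsMap().get("name") and the stored field "name", while the extra "id" field is an assumption.

// Sketch only: builds the JSON source used for indexing and for the equality assertions above.
private XContentBuilder source(String id, String nameValue) throws IOException {
    return XContentFactory.jsonBuilder()
        .startObject()
        .field("id", id)
        .field("name", nameValue)
        .endObject();
}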
Use of org.elasticsearch.action.index.IndexResponse in project elasticsearch by elastic.
Class ShardInfoIT, method testIndexAndDelete:
public void testIndexAndDelete() throws Exception {
    prepareIndex(1);
    IndexResponse indexResponse = client().prepareIndex("idx", "type").setSource("{}", XContentType.JSON).get();
    assertShardInfo(indexResponse);
    DeleteResponse deleteResponse = client().prepareDelete("idx", "type", indexResponse.getId()).get();
    assertShardInfo(deleteResponse);
}
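The prepareIndex(int) and assertShardInfo helpers are part of ShardInfoIT and not shown here. A minimal sketch of the kind of check assertShardInfo presumably performs, using only the ReplicationResponse.ShardInfo accessors (the real helper also compares against the number of shard copies that prepareIndex sets up):

// Sketch only: every DocWriteResponse (index, delete, update) carries per-request shard bookkeeping.
private void assertShardInfo(DocWriteResponse response) {
    ReplicationResponse.ShardInfo shardInfo = response.getShardInfo();
    assertThat(shardInfo.getFailed(), equalTo(0));
    assertThat(shardInfo.getSuccessful(), greaterThanOrEqualTo(1));
    assertThat(shardInfo.getTotal(), greaterThanOrEqualTo(shardInfo.getSuccessful()));
}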
Use of org.elasticsearch.action.index.IndexResponse in project elasticsearch by elastic.
Class IndexWithShadowReplicasIT, method testPrimaryRelocationWithConcurrentIndexing:
public void testPrimaryRelocationWithConcurrentIndexing() throws Exception {
    Path dataPath = createTempDir();
    Settings nodeSettings = nodeSettings(dataPath);
    String node1 = internalCluster().startNode(nodeSettings);
    final String IDX = "test";
    Settings idxSettings = Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
        .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
        .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
        .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
        .build();
    prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
    // Node1 has the primary, now node2 has the replica
    String node2 = internalCluster().startNode(nodeSettings);
    ensureGreen(IDX);
    flushAndRefresh(IDX);
    String node3 = internalCluster().startNode(nodeSettings);
    final AtomicInteger counter = new AtomicInteger(0);
    final CountDownLatch started = new CountDownLatch(1);
    final int numPhase1Docs = scaledRandomIntBetween(25, 200);
    final int numPhase2Docs = scaledRandomIntBetween(25, 200);
    final CountDownLatch phase1finished = new CountDownLatch(1);
    final CountDownLatch phase2finished = new CountDownLatch(1);
    final CopyOnWriteArrayList<Exception> exceptions = new CopyOnWriteArrayList<>();
    Thread thread = new Thread() {
        @Override
        public void run() {
            started.countDown();
            while (counter.get() < (numPhase1Docs + numPhase2Docs)) {
                try {
                    final IndexResponse indexResponse = client().prepareIndex(IDX, "doc", Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get();
                    assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
                } catch (Exception e) {
                    exceptions.add(e);
                }
                final int docCount = counter.get();
                if (docCount == numPhase1Docs) {
                    phase1finished.countDown();
                }
            }
            logger.info("--> stopping indexing thread");
            phase2finished.countDown();
        }
    };
    thread.start();
    started.await();
    // wait for a certain number of documents to be indexed
    phase1finished.await();
    logger.info("--> excluding {} from allocation", node1);
    // now prevent primary from being allocated on node 1 move to node_3
    Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build();
    client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet();
    // wait for more documents to be indexed post-recovery, also waits for
    // indexing thread to stop
    phase2finished.await();
    ExceptionsHelper.rethrowAndSuppress(exceptions);
    ensureGreen(IDX);
    thread.join();
    logger.info("--> performing query");
    flushAndRefresh();
    SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
    assertHitCount(resp, counter.get());
    assertHitCount(resp, numPhase1Docs + numPhase2Docs);
}
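The nodeSettings(Path) helper used at the top of this test is not shown on this page; the next example below inlines an equivalent settings builder. A hedged sketch of what the helper likely returns:

// Sketch only: shared-filesystem node settings mirroring the inline builder in the next example.
private Settings nodeSettings(Path dataPath) {
    return Settings.builder()
        // allow several nodes to use the same custom data path without per-node lock ids
        .put("node.add_lock_id_to_custom_path", false)
        // point every node at the shared directory that backs the shadow replicas
        .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath)
        .build();
}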
Use of org.elasticsearch.action.index.IndexResponse in project elasticsearch by elastic.
Class IndexWithShadowReplicasIT, method testPrimaryRelocationWhereRecoveryFails:
public void testPrimaryRelocationWhereRecoveryFails() throws Exception {
    Path dataPath = createTempDir();
    Settings nodeSettings = Settings.builder()
        .put("node.add_lock_id_to_custom_path", false)
        .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath)
        .build();
    String node1 = internalCluster().startNode(nodeSettings);
    final String IDX = "test";
    Settings idxSettings = Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
        .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
        .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
        .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
        .build();
    prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
    // Node1 has the primary, now node2 has the replica
    String node2 = internalCluster().startNode(nodeSettings);
    ensureGreen(IDX);
    flushAndRefresh(IDX);
    String node3 = internalCluster().startNode(nodeSettings);
    final AtomicInteger counter = new AtomicInteger(0);
    final CountDownLatch started = new CountDownLatch(1);
    final int numPhase1Docs = scaledRandomIntBetween(25, 200);
    final int numPhase2Docs = scaledRandomIntBetween(25, 200);
    final int numPhase3Docs = scaledRandomIntBetween(25, 200);
    final CountDownLatch phase1finished = new CountDownLatch(1);
    final CountDownLatch phase2finished = new CountDownLatch(1);
    final CountDownLatch phase3finished = new CountDownLatch(1);
    final AtomicBoolean keepFailing = new AtomicBoolean(true);
    MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, node1));
    mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, node3),
        new MockTransportService.DelegateTransport(mockTransportService.original()) {
            @Override
            protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException {
                if (keepFailing.get() && action.equals(PeerRecoveryTargetService.Actions.TRANSLOG_OPS)) {
                    logger.info("--> failing translog ops");
                    throw new ElasticsearchException("failing on purpose");
                }
                super.sendRequest(connection, requestId, action, request, options);
            }
        });
    Thread thread = new Thread() {
        @Override
        public void run() {
            started.countDown();
            while (counter.get() < (numPhase1Docs + numPhase2Docs + numPhase3Docs)) {
                final IndexResponse indexResponse = client().prepareIndex(IDX, "doc", Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get();
                assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
                final int docCount = counter.get();
                if (docCount == numPhase1Docs) {
                    phase1finished.countDown();
                } else if (docCount == (numPhase1Docs + numPhase2Docs)) {
                    phase2finished.countDown();
                }
            }
            logger.info("--> stopping indexing thread");
            phase3finished.countDown();
        }
    };
    thread.start();
    started.await();
    // wait for a certain number of documents to be indexed
    phase1finished.await();
    logger.info("--> excluding {} from allocation", node1);
    // now prevent primary from being allocated on node 1 move to node_3
    Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build();
    client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet();
    // wait for more documents to be indexed post-recovery, also waits for
    // indexing thread to stop
    phase2finished.await();
    // stop failing
    keepFailing.set(false);
    // wait for more docs to be indexed
    phase3finished.await();
    ensureGreen(IDX);
    thread.join();
    logger.info("--> performing query");
    flushAndRefresh();
    SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
    assertHitCount(resp, counter.get());
}
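Both relocation tests coordinate the main test thread with the background indexing thread purely through an AtomicInteger document counter and one CountDownLatch per phase. A stripped-down, framework-free sketch of that pattern (illustrative only, not taken from the Elasticsearch code base):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class PhasedIndexingSketch {

    public static void main(String[] args) throws InterruptedException {
        final int numPhase1Docs = 50;
        final int numPhase2Docs = 50;
        final AtomicInteger counter = new AtomicInteger(0);
        final CountDownLatch phase1finished = new CountDownLatch(1);
        final CountDownLatch phase2finished = new CountDownLatch(1);
        Thread indexer = new Thread(() -> {
            while (counter.get() < numPhase1Docs + numPhase2Docs) {
                // stands in for one successful index request
                int docCount = counter.incrementAndGet();
                if (docCount == numPhase1Docs) {
                    // enough documents for phase 1; let the main thread trigger the relocation
                    phase1finished.countDown();
                }
            }
            // all documents indexed; let the main thread continue to its assertions
            phase2finished.countDown();
        });
        indexer.start();
        phase1finished.await();   // main thread: apply the allocation exclusion here
        phase2finished.await();   // main thread: wait for post-recovery indexing to finish
        indexer.join();
        System.out.println("indexed " + counter.get() + " docs across both phases");
    }
}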