use of org.elasticsearch.action.admin.indices.refresh.RefreshResponse in project elasticsearch by elastic.
the class UpdateMappingIntegrationIT method testDynamicUpdates.
public void testDynamicUpdates() throws Exception {
    client().admin().indices().prepareCreate("test")
        .setSettings(Settings.builder()
            .put("index.number_of_shards", 1)
            .put("index.number_of_replicas", 0)
            .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE))
        .execute().actionGet();
    client().admin().cluster().prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForGreenStatus()
        .execute().actionGet();
    int recCount = randomIntBetween(200, 600);
    int numberOfTypes = randomIntBetween(1, 5);
    List<IndexRequestBuilder> indexRequests = new ArrayList<>();
    for (int rec = 0; rec < recCount; rec++) {
        String type = "type" + (rec % numberOfTypes);
        String fieldName = "field_" + type + "_" + rec;
        indexRequests.add(client().prepareIndex("test", type, Integer.toString(rec)).setSource(fieldName, "some_value"));
    }
    indexRandom(true, indexRequests);
    logger.info("checking all the documents are there");
    RefreshResponse refreshResponse = client().admin().indices().prepareRefresh().execute().actionGet();
    assertThat(refreshResponse.getFailedShards(), equalTo(0));
    SearchResponse response = client().prepareSearch("test").setSize(0).execute().actionGet();
    assertThat(response.getHits().getTotalHits(), equalTo((long) recCount));
    logger.info("checking all the fields are in the mappings");
    for (int rec = 0; rec < recCount; rec++) {
        String type = "type" + (rec % numberOfTypes);
        String fieldName = "field_" + type + "_" + rec;
        assertConcreteMappingsOnAll("test", type, fieldName);
    }
}
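The test above refreshes and checks the failed-shard count before searching, so every indexed document is guaranteed to be visible. A minimal sketch of that refresh-then-verify step on its own, assuming the same ESIntegTestCase-style client() and Hamcrest matchers as the test:

    RefreshResponse refreshResponse = client().admin().indices()
        .prepareRefresh("test")
        .execute().actionGet();
    // Refresh is a broadcast operation, so the response carries per-shard counts.
    assertThat(refreshResponse.getFailedShards(), equalTo(0));
    assertThat(refreshResponse.getSuccessfulShards(), equalTo(refreshResponse.getTotalShards()));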
use of org.elasticsearch.action.admin.indices.refresh.RefreshResponse in project elasticsearch by elastic.
the class RestRefreshAction method prepareRequest.
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    RefreshRequest refreshRequest = new RefreshRequest(Strings.splitStringByCommaToArray(request.param("index")));
    refreshRequest.indicesOptions(IndicesOptions.fromRequest(request, refreshRequest.indicesOptions()));
    return channel -> client.admin().indices().refresh(refreshRequest, new RestBuilderListener<RefreshResponse>(channel) {

        @Override
        public RestResponse buildResponse(RefreshResponse response, XContentBuilder builder) throws Exception {
            builder.startObject();
            buildBroadcastShardsHeader(builder, request, response);
            builder.endObject();
            return new BytesRestResponse(response.getStatus(), builder);
        }
    });
}
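The handler above only serializes the standard broadcast-shards header ("_shards" with total, successful and failed counts) and derives the HTTP status from RefreshResponse.getStatus(). A minimal sketch of issuing the same refresh from Java client code with an asynchronous listener; the index name "my-index", the client variable and the logger are assumptions for illustration:

    RefreshRequest refreshRequest = new RefreshRequest("my-index");
    client.admin().indices().refresh(refreshRequest, new ActionListener<RefreshResponse>() {

        @Override
        public void onResponse(RefreshResponse response) {
            // getStatus() is the same value RestRefreshAction uses for the HTTP status code.
            logger.info("refresh status [{}], failed shards [{}]", response.getStatus(), response.getFailedShards());
        }

        @Override
        public void onFailure(Exception e) {
            logger.warn("refresh failed", e);
        }
    });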
use of org.elasticsearch.action.admin.indices.refresh.RefreshResponse in project elasticsearch by elastic.
the class DocumentActionsIT method testIndexActions.
public void testIndexActions() throws Exception {
    createIndex();
    NumShards numShards = getNumShards(getConcreteIndexName());
    logger.info("Running Cluster Health");
    ensureGreen();
    logger.info("Indexing [type1/1]");
    IndexResponse indexResponse = client().prepareIndex()
        .setIndex("test").setType("type1").setId("1")
        .setSource(source("1", "test"))
        .setRefreshPolicy(RefreshPolicy.IMMEDIATE)
        .get();
    assertThat(indexResponse.getIndex(), equalTo(getConcreteIndexName()));
    assertThat(indexResponse.getId(), equalTo("1"));
    assertThat(indexResponse.getType(), equalTo("type1"));
    logger.info("Refreshing");
    RefreshResponse refreshResponse = refresh();
    assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    logger.info("--> index exists?");
    assertThat(indexExists(getConcreteIndexName()), equalTo(true));
    logger.info("--> index exists?, fake index");
    assertThat(indexExists("test1234565"), equalTo(false));
    logger.info("Clearing cache");
    ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices()
        .clearCache(clearIndicesCacheRequest("test").recycler(true).fieldDataCache(true).queryCache(true))
        .actionGet();
    assertNoFailures(clearIndicesCacheResponse);
    assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    logger.info("Force Merging");
    waitForRelocation(ClusterHealthStatus.GREEN);
    ForceMergeResponse mergeResponse = forceMerge();
    assertThat(mergeResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    GetResponse getResult;
    logger.info("Get [type1/1]");
    for (int i = 0; i < 5; i++) {
        getResult = client().prepareGet("test", "type1", "1").setOperationThreaded(false).execute().actionGet();
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
        assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
        assertThat("cycle(map) #" + i, (String) getResult.getSourceAsMap().get("name"), equalTo("test"));
        getResult = client().get(getRequest("test").type("type1").id("1").operationThreaded(true)).actionGet();
        assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
    }
    logger.info("Get [type1/1] with script");
    for (int i = 0; i < 5; i++) {
        getResult = client().prepareGet("test", "type1", "1").setStoredFields("name").execute().actionGet();
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
        assertThat(getResult.isExists(), equalTo(true));
        assertThat(getResult.getSourceAsBytes(), nullValue());
        assertThat(getResult.getField("name").getValues().get(0).toString(), equalTo("test"));
    }
    logger.info("Get [type1/2] (should be empty)");
    for (int i = 0; i < 5; i++) {
        getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
        assertThat(getResult.isExists(), equalTo(false));
    }
    logger.info("Delete [type1/1]");
    DeleteResponse deleteResponse = client().prepareDelete("test", "type1", "1").execute().actionGet();
    assertThat(deleteResponse.getIndex(), equalTo(getConcreteIndexName()));
    assertThat(deleteResponse.getId(), equalTo("1"));
    assertThat(deleteResponse.getType(), equalTo("type1"));
    logger.info("Refreshing");
    client().admin().indices().refresh(refreshRequest("test")).actionGet();
    logger.info("Get [type1/1] (should be empty)");
    for (int i = 0; i < 5; i++) {
        getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
        assertThat(getResult.isExists(), equalTo(false));
    }
    logger.info("Index [type1/1]");
    client().index(indexRequest("test").type("type1").id("1").source(source("1", "test"))).actionGet();
    logger.info("Index [type1/2]");
    client().index(indexRequest("test").type("type1").id("2").source(source("2", "test2"))).actionGet();
    logger.info("Flushing");
    FlushResponse flushResult = client().admin().indices().prepareFlush("test").execute().actionGet();
    assertThat(flushResult.getSuccessfulShards(), equalTo(numShards.totalNumShards));
    assertThat(flushResult.getFailedShards(), equalTo(0));
    logger.info("Refreshing");
    client().admin().indices().refresh(refreshRequest("test")).actionGet();
    logger.info("Get [type1/1] and [type1/2]");
    for (int i = 0; i < 5; i++) {
        getResult = client().get(getRequest("test").type("type1").id("1")).actionGet();
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
        assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string()));
        getResult = client().get(getRequest("test").type("type1").id("2")).actionGet();
        String ste1 = getResult.getSourceAsString();
        String ste2 = source("2", "test2").string();
        assertThat("cycle #" + i, ste1, equalTo(ste2));
        assertThat(getResult.getIndex(), equalTo(getConcreteIndexName()));
    }
    logger.info("Count");
    // check count
    for (int i = 0; i < 5; i++) {
        // test successful
        SearchResponse countResponse = client().prepareSearch("test").setSize(0).setQuery(termQuery("_type", "type1")).execute().actionGet();
        assertNoFailures(countResponse);
        assertThat(countResponse.getHits().getTotalHits(), equalTo(2L));
        assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
        assertThat(countResponse.getFailedShards(), equalTo(0));
        // count with no query is a match all one
        countResponse = client().prepareSearch("test").setSize(0).execute().actionGet();
        assertThat("Failures " + countResponse.getShardFailures(),
            countResponse.getShardFailures() == null ? 0 : countResponse.getShardFailures().length, equalTo(0));
        assertThat(countResponse.getHits().getTotalHits(), equalTo(2L));
        assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
        assertThat(countResponse.getFailedShards(), equalTo(0));
    }
}
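The "Count" loop above uses the size-0 search idiom: no hits are fetched, only the total hit count is computed, which serves in place of a dedicated count request. Distilled into a minimal sketch (index name "test" as in the test; a match-all query is used here for illustration):

    SearchResponse countResponse = client().prepareSearch("test")
        .setSize(0)    // fetch no hits, only the total
        .setQuery(QueryBuilders.matchAllQuery())
        .get();
    long totalDocs = countResponse.getHits().getTotalHits();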
use of org.elasticsearch.action.admin.indices.refresh.RefreshResponse in project elasticsearch by elastic.
the class SearchWithRandomIOExceptionsIT method testRandomDirectoryIOExceptions.
public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException {
    String mapping = XContentFactory.jsonBuilder()
        .startObject()
            .startObject("type")
                .startObject("properties")
                    .startObject("test").field("type", "keyword").endObject()
                .endObject()
            .endObject()
        .endObject().string();
    final double exceptionRate;
    final double exceptionOnOpenRate;
    if (frequently()) {
        if (randomBoolean()) {
            if (randomBoolean()) {
                exceptionOnOpenRate = 1.0 / between(5, 100);
                exceptionRate = 0.0d;
            } else {
                exceptionRate = 1.0 / between(5, 100);
                exceptionOnOpenRate = 0.0d;
            }
        } else {
            exceptionOnOpenRate = 1.0 / between(5, 100);
            exceptionRate = 1.0 / between(5, 100);
        }
    } else {
        // rarely no exception
        exceptionRate = 0d;
        exceptionOnOpenRate = 0d;
    }
    final boolean createIndexWithoutErrors = randomBoolean();
    int numInitialDocs = 0;
    if (createIndexWithoutErrors) {
        Settings.Builder settings = Settings.builder().put("index.number_of_replicas", numberOfReplicas());
        logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
        client().admin().indices().prepareCreate("test")
            .setSettings(settings)
            .addMapping("type", mapping, XContentType.JSON)
            .execute().actionGet();
        numInitialDocs = between(10, 100);
        ensureGreen();
        for (int i = 0; i < numInitialDocs; i++) {
            client().prepareIndex("test", "type", "init" + i).setSource("test", "init").get();
        }
        client().admin().indices().prepareRefresh("test").execute().get();
        client().admin().indices().prepareFlush("test").execute().get();
        client().admin().indices().prepareClose("test").execute().get();
        client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder()
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate)
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate));
        client().admin().indices().prepareOpen("test").execute().get();
    } else {
        Settings.Builder settings = Settings.builder()
            .put("index.number_of_replicas", randomIntBetween(0, 1))
            .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
            .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate)
            // we cannot expect that the index will be valid
            .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate);
        logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
        client().admin().indices().prepareCreate("test")
            .setSettings(settings)
            .addMapping("type", mapping, XContentType.JSON)
            .execute().actionGet();
    }
    ClusterHealthResponse clusterHealthResponse = client().admin().cluster()
        .health(Requests.clusterHealthRequest().waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))).get();
    final int numDocs;
    final boolean expectAllShardsFailed;
    if (clusterHealthResponse.isTimedOut()) {
        /* Some seeds just won't let you create the index at all and we enter a ping-pong mode
         * trying one node after another etc. That is ok, but we need to make sure we don't wait
         * forever when indexing documents, so we set numDocs = 1 and expect all shards to fail
         * when we search below. */
        logger.info("ClusterHealth timed out - only index one doc and expect searches to fail");
        numDocs = 1;
        expectAllShardsFailed = true;
    } else {
        numDocs = between(10, 100);
        expectAllShardsFailed = false;
    }
    int numCreated = 0;
    boolean[] added = new boolean[numDocs];
    for (int i = 0; i < numDocs; i++) {
        added[i] = false;
        try {
            IndexResponse indexResponse = client().prepareIndex("test", "type", Integer.toString(i))
                .setTimeout(TimeValue.timeValueSeconds(1))
                .setSource("test", English.intToEnglish(i))
                .get();
            if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) {
                numCreated++;
                added[i] = true;
            }
        } catch (ElasticsearchException ex) {
            // expected from time to time: the injected random IO exceptions can fail individual index requests
        }
    }
    ESIntegTestCase.NumShards numShards = getNumShards("test");
    logger.info("Start Refresh");
    // don't assert on failures here
    final RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get();
    final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
    logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ",
        refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length,
        refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
    final int numSearches = scaledRandomIntBetween(10, 20);
    // we don't check anything here, really just making sure we don't leave any open files or a broken index behind.
    for (int i = 0; i < numSearches; i++) {
        try {
            int docToQuery = between(0, numDocs - 1);
            int expectedResults = added[docToQuery] ? 1 : 0;
            logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
            SearchResponse searchResponse = client().prepareSearch().setTypes("type")
                .setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery)))
                .setSize(expectedResults).get();
            logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries);
            if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) {
                assertResultsAndLogOnFailure(expectedResults, searchResponse);
            }
            // check match all
            searchResponse = client().prepareSearch().setTypes("type")
                .setQuery(QueryBuilders.matchAllQuery())
                .setSize(numCreated + numInitialDocs)
                .addSort("_uid", SortOrder.ASC).get();
            logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries);
            if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) {
                assertResultsAndLogOnFailure(numCreated + numInitialDocs, searchResponse);
            }
        } catch (SearchPhaseExecutionException ex) {
            logger.info("SearchPhaseException: [{}]", ex.getMessage());
            // if a scheduled refresh or flush fails all shards we see all shards failed here
            if (!(expectAllShardsFailed || refreshResponse.getSuccessfulShards() == 0 || ex.getMessage().contains("all shards failed"))) {
                throw ex;
            }
        }
    }
    if (createIndexWithoutErrors) {
        // check the index still contains the records that we indexed without errors
        client().admin().indices().prepareClose("test").execute().get();
        client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder()
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), 0)
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), 0));
        client().admin().indices().prepareOpen("test").execute().get();
        ensureGreen();
        SearchResponse searchResponse = client().prepareSearch().setTypes("type")
            .setQuery(QueryBuilders.matchQuery("test", "init")).get();
        assertNoFailures(searchResponse);
        assertHitCount(searchResponse, numInitialDocs);
    }
}
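Note the pattern above: instead of asserting that the refresh succeeded, the test records whether it partially failed and only asserts exact hit counts when every shard both refreshed and searched successfully. Distilled into a minimal sketch, assuming the same test-style client():

    RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().actionGet();
    boolean refreshFailed = refreshResponse.getFailedShards() != 0
        || refreshResponse.getShardFailures().length != 0;
    // When refreshFailed is true, some shards may still serve a pre-refresh view,
    // so exact hit counts cannot be asserted reliably.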
use of org.elasticsearch.action.admin.indices.refresh.RefreshResponse in project elasticsearch by elastic.
the class AbstractAsyncBulkByScrollAction method refreshAndFinish.
/**
 * Start terminating a request that finished non-catastrophically by refreshing the modified indices and then proceeding to
 * {@link #finishHim(Exception, List, List, boolean)}.
 */
void refreshAndFinish(List<Failure> indexingFailures, List<SearchFailure> searchFailures, boolean timedOut) {
    if (task.isCancelled() || false == mainRequest.isRefresh() || destinationIndices.isEmpty()) {
        finishHim(null, indexingFailures, searchFailures, timedOut);
        return;
    }
    RefreshRequest refresh = new RefreshRequest();
    refresh.indices(destinationIndices.toArray(new String[destinationIndices.size()]));
    client.admin().indices().refresh(refresh, new ActionListener<RefreshResponse>() {

        @Override
        public void onResponse(RefreshResponse response) {
            finishHim(null, indexingFailures, searchFailures, timedOut);
        }

        @Override
        public void onFailure(Exception e) {
            finishHim(e);
        }
    });
}