Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.index.IndexResponse in project elasticsearch by elastic: class SearchServiceTests, method testSearchWhileIndexDeleted.
public void testSearchWhileIndexDeleted() throws IOException, InterruptedException {
    createIndex("index");
    client().prepareIndex("index", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
    SearchService service = getInstanceFromNode(SearchService.class);
    IndicesService indicesService = getInstanceFromNode(IndicesService.class);
    IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
    IndexShard indexShard = indexService.getShard(0);
    AtomicBoolean running = new AtomicBoolean(true);
    CountDownLatch startGun = new CountDownLatch(1);
    Semaphore semaphore = new Semaphore(Integer.MAX_VALUE);
    final Thread thread = new Thread() {
        @Override
        public void run() {
            startGun.countDown();
            while (running.get()) {
                service.afterIndexRemoved(indexService.index(), indexService.getIndexSettings(), DELETED);
                if (randomBoolean()) {
                    // occasionally fire an asynchronous index request; the permit acquired below is
                    // released by the listener once the request completes, successfully or not.
                    try {
                        semaphore.acquire();
                    } catch (InterruptedException e) {
                        throw new AssertionError(e);
                    }
                    client().prepareIndex("index", "type")
                        .setSource("field", "value")
                        .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values()))
                        .execute(new ActionListener<IndexResponse>() {
                            @Override
                            public void onResponse(IndexResponse indexResponse) {
                                semaphore.release();
                            }

                            @Override
                            public void onFailure(Exception e) {
                                semaphore.release();
                            }
                        });
                }
            }
        }
    };
    thread.start();
    startGun.await();
    try {
        final int rounds = scaledRandomIntBetween(100, 10000);
        for (int i = 0; i < rounds; i++) {
            try {
                QuerySearchResultProvider querySearchResultProvider = service.executeQueryPhase(
                    new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT, new SearchSourceBuilder(),
                        new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f),
                    new SearchTask(123L, "", "", "", null));
                IntArrayList intCursors = new IntArrayList(1);
                intCursors.add(0);
                ShardFetchRequest req = new ShardFetchRequest(querySearchResultProvider.id(), intCursors, null);
                service.executeFetchPhase(req, new SearchTask(123L, "", "", "", null));
            } catch (AlreadyClosedException ex) {
                throw ex;
            } catch (IllegalStateException ex) {
                assertEquals("search context is already closed can't increment refCount current count [0]", ex.getMessage());
            } catch (SearchContextMissingException ex) {
                // that's fine
            }
        }
    } finally {
        running.set(false);
        thread.join();
        semaphore.acquire(Integer.MAX_VALUE);
    }
}
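The Semaphore(Integer.MAX_VALUE) plus ActionListener pattern above is what lets the test drain every in-flight asynchronous index request before it finishes (the final semaphore.acquire(Integer.MAX_VALUE) in the finally block). A self-contained sketch of the same idea using plain CompletableFutures instead of the Elasticsearch client (the InFlightTracker class is illustrative and not part of the test):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;

// Illustrative helper: every tracked future holds one semaphore permit until it completes,
// so drain() blocks until all in-flight work has finished.
class InFlightTracker {
    private static final int MAX_PERMITS = Integer.MAX_VALUE;
    private final Semaphore semaphore = new Semaphore(MAX_PERMITS);

    // Acquire a permit before the work starts; give it back when the future completes either way.
    <T> CompletableFuture<T> track(CompletableFuture<T> future) throws InterruptedException {
        semaphore.acquire();
        return future.whenComplete((result, error) -> semaphore.release());
    }

    // Block until every tracked future has completed, mirroring semaphore.acquire(Integer.MAX_VALUE) in the test.
    void drain() throws InterruptedException {
        semaphore.acquire(MAX_PERMITS);
        semaphore.release(MAX_PERMITS);
    }
}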
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.index.IndexResponse in project elasticsearch by elastic: class SearchWithRandomIOExceptionsIT, method testRandomDirectoryIOExceptions.
public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException {
    String mapping = XContentFactory.jsonBuilder().startObject()
        .startObject("type")
            .startObject("properties")
                .startObject("test")
                    .field("type", "keyword")
                .endObject()
            .endObject()
        .endObject()
    .endObject().string();
    final double exceptionRate;
    final double exceptionOnOpenRate;
    if (frequently()) {
        if (randomBoolean()) {
            if (randomBoolean()) {
                exceptionOnOpenRate = 1.0 / between(5, 100);
                exceptionRate = 0.0d;
            } else {
                exceptionRate = 1.0 / between(5, 100);
                exceptionOnOpenRate = 0.0d;
            }
        } else {
            exceptionOnOpenRate = 1.0 / between(5, 100);
            exceptionRate = 1.0 / between(5, 100);
        }
    } else {
        // rarely no exception
        exceptionRate = 0d;
        exceptionOnOpenRate = 0d;
    }
    final boolean createIndexWithoutErrors = randomBoolean();
    int numInitialDocs = 0;
    if (createIndexWithoutErrors) {
        Settings.Builder settings = Settings.builder().put("index.number_of_replicas", numberOfReplicas());
        logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
        client().admin().indices().prepareCreate("test")
            .setSettings(settings)
            .addMapping("type", mapping, XContentType.JSON)
            .execute().actionGet();
        numInitialDocs = between(10, 100);
        ensureGreen();
        for (int i = 0; i < numInitialDocs; i++) {
            client().prepareIndex("test", "type", "init" + i).setSource("test", "init").get();
        }
        client().admin().indices().prepareRefresh("test").execute().get();
        client().admin().indices().prepareFlush("test").execute().get();
        client().admin().indices().prepareClose("test").execute().get();
        client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder()
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate)
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate))
            .get();
        client().admin().indices().prepareOpen("test").execute().get();
    } else {
        Settings.Builder settings = Settings.builder()
            .put("index.number_of_replicas", randomIntBetween(0, 1))
            .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
            .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate)
            // we cannot expect that the index will be valid
            .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate);
        logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
        client().admin().indices().prepareCreate("test")
            .setSettings(settings)
            .addMapping("type", mapping, XContentType.JSON)
            .execute().actionGet();
    }
    ClusterHealthResponse clusterHealthResponse = client().admin().cluster()
        .health(Requests.clusterHealthRequest().waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))).get();
    final int numDocs;
    final boolean expectAllShardsFailed;
    if (clusterHealthResponse.isTimedOut()) {
        /* Some seeds just won't let you create the index at all and we enter a ping-pong mode,
         * trying one node after another etc. That is ok, but we need to make sure we don't wait
         * forever when indexing documents, so we set numDocs = 1 and expect all shards to fail
         * when we search below. */
logger.info("ClusterHealth timed out - only index one doc and expect searches to fail");
numDocs = 1;
expectAllShardsFailed = true;
} else {
numDocs = between(10, 100);
expectAllShardsFailed = false;
}
int numCreated = 0;
boolean[] added = new boolean[numDocs];
for (int i = 0; i < numDocs; i++) {
added[i] = false;
try {
IndexResponse indexResponse = client().prepareIndex("test", "type", Integer.toString(i)).setTimeout(TimeValue.timeValueSeconds(1)).setSource("test", English.intToEnglish(i)).get();
if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) {
numCreated++;
added[i] = true;
}
        } catch (ElasticsearchException ex) {
            // ignored: random IO exceptions are expected while indexing
        }
    }
    ESIntegTestCase.NumShards numShards = getNumShards("test");
    logger.info("Start Refresh");
    // don't assert on failures here
    final RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get();
    final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
    logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ",
        refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length,
        refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
    final int numSearches = scaledRandomIntBetween(10, 20);
    // we don't check anything here really just making sure we don't leave any open files or a broken index behind.
    for (int i = 0; i < numSearches; i++) {
        try {
            int docToQuery = between(0, numDocs - 1);
            int expectedResults = added[docToQuery] ? 1 : 0;
            logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
            SearchResponse searchResponse = client().prepareSearch()
                .setTypes("type")
                .setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery)))
                .setSize(expectedResults)
                .get();
            logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries);
            if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) {
                assertResultsAndLogOnFailure(expectedResults, searchResponse);
            }
            // check match all
            searchResponse = client().prepareSearch()
                .setTypes("type")
                .setQuery(QueryBuilders.matchAllQuery())
                .setSize(numCreated + numInitialDocs)
                .addSort("_uid", SortOrder.ASC)
                .get();
            logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries);
            if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) {
                assertResultsAndLogOnFailure(numCreated + numInitialDocs, searchResponse);
            }
        } catch (SearchPhaseExecutionException ex) {
            logger.info("SearchPhaseException: [{}]", ex.getMessage());
            // if a scheduled refresh or flush fails all shards we see all shards failed here
            if (!(expectAllShardsFailed || refreshResponse.getSuccessfulShards() == 0 || ex.getMessage().contains("all shards failed"))) {
                throw ex;
            }
        }
    }
    if (createIndexWithoutErrors) {
        // check the index still contains the records that we indexed without errors
        client().admin().indices().prepareClose("test").execute().get();
        client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder()
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), 0)
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), 0))
            .get();
        client().admin().indices().prepareOpen("test").execute().get();
        ensureGreen();
        SearchResponse searchResponse = client().prepareSearch()
            .setTypes("type")
            .setQuery(QueryBuilders.matchQuery("test", "init"))
            .get();
        assertNoFailures(searchResponse);
        assertHitCount(searchResponse, numInitialDocs);
    }
}
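Both loops above decide what a search may legitimately return by recording which index requests actually succeeded, via IndexResponse.getResult(). A stripped-down helper one might write next to the test in the same style, assuming the same 5.x-era transport client API (the method name, index, and type are placeholders):

import java.util.List;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;

// Sketch: index a batch of values and report how many documents were newly created,
// using the same IndexResponse.getResult() check as the test above.
static int countCreated(Client client, List<String> values) {
    int created = 0;
    for (int i = 0; i < values.size(); i++) {
        try {
            IndexResponse response = client.prepareIndex("test", "type", Integer.toString(i))
                .setSource("test", values.get(i))
                .get();
            if (response.getResult() == DocWriteResponse.Result.CREATED) {
                created++;
            }
        } catch (ElasticsearchException e) {
            // injected IO errors may prevent this document from being indexed; skip it
        }
    }
    return created;
}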
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.index.IndexResponse in project graylog2-server by Graylog2: class CountsTest, method totalReturnsZeroWithNoIndices.
@Test
public void totalReturnsZeroWithNoIndices() throws Exception {
    for (int i = 0; i < 10; i++) {
        final IndexResponse indexResponse = client.prepareIndex()
            .setIndex(INDEX_NAME_1)
            .setRefresh(true)
            .setType("test")
            .setSource("foo", "bar", "counter", i)
            .execute().get();
        assumeTrue(indexResponse.isCreated());
    }
    // Simulate no indices for the second index set.
    when(indexSet2.getManagedIndices()).thenReturn(new String[0]);
    assertThat(counts.total(indexSet1)).isEqualTo(10L);
    assertThat(counts.total(indexSet2)).isEqualTo(0L);
    // Simulate no indices for all index sets.
    when(indexSetRegistry.getManagedIndices()).thenReturn(new String[0]);
    assertThat(counts.total()).isEqualTo(0L);
}
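Note that indexResponse.isCreated() comes from the older 2.x-era client API this test was written against; on the 5.x+ IndexResponse used by the other examples on this page, the same check is usually expressed through getResult() or status(). A minimal sketch, not part of the Graylog test:

// Rough 5.x+ equivalent of the isCreated() check above.
boolean wasCreated(IndexResponse response) {
    return response.getResult() == DocWriteResponse.Result.CREATED;
    // or, equivalently for a newly created document: response.status() == RestStatus.CREATED
}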
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.index.IndexResponse in project fess by codelibs: class FessEsClient, method store.
public boolean store(final String index, final String type, final Object obj) {
    final FessConfig fessConfig = ComponentUtil.getFessConfig();
    @SuppressWarnings("unchecked")
    final Map<String, Object> source = obj instanceof Map ? (Map<String, Object>) obj : BeanUtil.copyBeanToNewMap(obj);
    final String id = (String) source.remove(fessConfig.getIndexFieldId());
    final Long version = (Long) source.remove(fessConfig.getIndexFieldVersion());
    IndexResponse response;
    try {
        if (id == null) {
            // create
            response = client.prepareIndex(index, type)
                .setSource(new DocMap(source))
                .setRefreshPolicy(RefreshPolicy.IMMEDIATE)
                .setOpType(OpType.CREATE)
                .execute().actionGet(fessConfig.getIndexIndexTimeout());
        } else {
            // create or update
            final IndexRequestBuilder builder = client.prepareIndex(index, type, id)
                .setSource(new DocMap(source))
                .setRefreshPolicy(RefreshPolicy.IMMEDIATE)
                .setOpType(OpType.INDEX);
            if (version != null && version.longValue() > 0) {
                builder.setVersion(version);
            }
            response = builder.execute().actionGet(fessConfig.getIndexIndexTimeout());
        }
        final Result result = response.getResult();
        return result == Result.CREATED || result == Result.UPDATED;
    } catch (final ElasticsearchException e) {
        throw new FessEsClientException("Failed to store: " + obj, e);
    }
}
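A minimal usage sketch of store(), assuming an already configured FessEsClient; the index name, type, and field names below are placeholders rather than values taken from Fess:

// Hypothetical call site; "sample_index", "doc", and the field names are illustrative only.
final Map<String, Object> source = new HashMap<>();
source.put("title", "Getting started");
source.put("content", "Stored through FessEsClient.store()");
final boolean stored = fessEsClient.store("sample_index", "doc", source);
// true only when the IndexResponse result was CREATED or UPDATED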
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.index.IndexResponse in project metron by apache: class ElasticsearchDao, method update.
@Override
public void update(Document update, Optional<String> index) throws IOException {
    String indexPostfix = ElasticsearchUtils.getIndexFormat(accessConfig.getGlobalConfigSupplier().get()).format(new Date());
    String sensorType = update.getSensorType();
    String indexName = getIndexName(update, index, indexPostfix);
    IndexRequest indexRequest = buildIndexRequest(update, sensorType, indexName);
    try {
        IndexResponse response = client.index(indexRequest).get();
        ShardInfo shardInfo = response.getShardInfo();
        int failed = shardInfo.getFailed();
        if (failed > 0) {
            throw new IOException("ElasticsearchDao index failed: " + Arrays.toString(shardInfo.getFailures()));
        }
    } catch (Exception e) {
        throw new IOException(e.getMessage(), e);
    }
}
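The shard-level failure check inside update() can be pulled out into a small guard; a sketch of that check in isolation (the class and method names here are ours, not Metron's):

import java.io.IOException;
import java.util.Arrays;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.replication.ReplicationResponse;

final class ShardWriteChecks {
    private ShardWriteChecks() {}

    // Fail fast when any shard copy rejected the write, mirroring the check in update() above.
    static void ensureNoShardFailures(IndexResponse response) throws IOException {
        ReplicationResponse.ShardInfo shardInfo = response.getShardInfo();
        if (shardInfo.getFailed() > 0) {
            throw new IOException("index failed: " + Arrays.toString(shardInfo.getFailures()));
        }
    }
}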