Use of org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.
The class RestFlushAction, method prepareRequest.
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index")));
    flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions()));
    flushRequest.force(request.paramAsBoolean("force", flushRequest.force()));
    flushRequest.waitIfOngoing(request.paramAsBoolean("wait_if_ongoing", flushRequest.waitIfOngoing()));
    return channel -> client.admin().indices().flush(flushRequest, new RestBuilderListener<FlushResponse>(channel) {

        @Override
        public RestResponse buildResponse(FlushResponse response, XContentBuilder builder) throws Exception {
            builder.startObject();
            buildBroadcastShardsHeader(builder, request, response);
            builder.endObject();
            return new BytesRestResponse(OK, builder);
        }
    });
}
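This handler simply maps the index, force, and wait_if_ongoing request parameters onto a FlushRequest and executes it through the node client. For comparison, here is a minimal sketch of issuing the same flush directly from Java client code (the index name and flag values are illustrative, not taken from the handler):

// Illustrative only: builds the same request the REST layer builds above.
FlushRequest flushRequest = new FlushRequest("my-index")
        .force(false)          // the "force" query parameter
        .waitIfOngoing(true);  // the "wait_if_ongoing" query parameter
client.admin().indices().flush(flushRequest, new ActionListener<FlushResponse>() {
    @Override
    public void onResponse(FlushResponse response) {
        // e.g. check that response.getFailedShards() == 0
    }

    @Override
    public void onFailure(Exception e) {
        // handle the failure
    }
});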
Use of org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.
The class IndexShardTests, method testSearcherWrapperWorksWithGlobalOrdinals.
public void testSearcherWrapperWorksWithGlobalOrdinals() throws IOException {
    IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {

        @Override
        public DirectoryReader wrap(DirectoryReader reader) throws IOException {
            return new FieldMaskingReader("foo", reader);
        }

        @Override
        public IndexSearcher wrap(IndexSearcher searcher) throws EngineException {
            return searcher;
        }
    };
    Settings settings = Settings.builder()
        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .build();
    IndexMetaData metaData = IndexMetaData.builder("test")
        .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\", \"fielddata\": true }}}")
        .settings(settings)
        .primaryTerm(0, 1)
        .build();
    IndexShard shard = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, wrapper);
    recoverShardFromStore(shard);
    indexDoc(shard, "test", "0", "{\"foo\" : \"bar\"}");
    shard.refresh("created segment 1");
    indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}");
    shard.refresh("created segment 2");
    // test that global ordinals are evicted
    MappedFieldType foo = shard.mapperService().fullName("foo");
    IndexFieldData.Global ifd = shard.indexFieldDataService().getForField(foo);
    FieldDataStats before = shard.fieldData().stats("foo");
    assertThat(before.getMemorySizeInBytes(), equalTo(0L));
    FieldDataStats after = null;
    try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
        assertThat("we have to have more than one segment", searcher.getDirectoryReader().leaves().size(), greaterThan(1));
        ifd.loadGlobal(searcher.getDirectoryReader());
        after = shard.fieldData().stats("foo");
        assertEquals(after.getEvictions(), before.getEvictions());
        // if a field doesn't exist, an empty IndexFieldData is returned and that isn't cached:
        assertThat(after.getMemorySizeInBytes(), equalTo(0L));
    }
    assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions());
    assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), after.getMemorySizeInBytes());
    shard.flush(new FlushRequest().force(true).waitIfOngoing(true));
    shard.refresh("test");
    assertEquals(shard.fieldData().stats("foo").getMemorySizeInBytes(), before.getMemorySizeInBytes());
    assertEquals(shard.fieldData().stats("foo").getEvictions(), before.getEvictions());
    closeShards(shard);
}
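The test relies on FieldMaskingReader, a test helper that hides the "foo" field from the wrapped reader, which is why loading global ordinals for "foo" allocates no field data. As a rough sketch (assuming Lucene's test-framework FieldFilterLeafReader; the real helper in the Elasticsearch test sources may differ in detail), such a wrapper could look like:

// Sketch of a field-masking reader; details assumed, not the actual helper.
class FieldMaskingReader extends FilterDirectoryReader {

    private final String field;

    FieldMaskingReader(String field, DirectoryReader in) throws IOException {
        super(in, new SubReaderWrapper() {
            @Override
            public LeafReader wrap(LeafReader reader) {
                // negate = true: hide the named field from this reader's consumers
                return new FieldFilterLeafReader(reader, Collections.singleton(field), true);
            }
        });
        this.field = field;
    }

    @Override
    protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
        return new FieldMaskingReader(field, in);
    }
}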
Use of org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.
The class IndexShardTests, method testDocStats.
public void testDocStats() throws IOException {
    IndexShard indexShard = null;
    try {
        indexShard = newStartedShard();
        // index at least two documents so we have docs to delete
        final long numDocs = randomIntBetween(2, 32);
        // delete strictly more than numDocs/10 documents, otherwise the fraction of deleted docs
        // stays at or below 10% and forceMerge will refuse to expunge them
        // (see the worked example after this test)
        final long numDocsToDelete = randomIntBetween((int) Math.ceil(Math.nextUp(numDocs / 10.0)), Math.toIntExact(numDocs));
        for (int i = 0; i < numDocs; i++) {
            final String id = Integer.toString(i);
            final ParsedDocument doc = testParsedDocument(id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null);
            final Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
            final Engine.IndexResult result = indexShard.index(index);
            assertThat(result.getVersion(), equalTo(1L));
        }
        indexShard.refresh("test");
        {
            final DocsStats docsStats = indexShard.docStats();
            assertThat(docsStats.getCount(), equalTo(numDocs));
            assertThat(docsStats.getDeleted(), equalTo(0L));
        }
        final List<Integer> ids = randomSubsetOf(Math.toIntExact(numDocsToDelete), IntStream.range(0, Math.toIntExact(numDocs)).boxed().collect(Collectors.toList()));
        for (final Integer i : ids) {
            final String id = Integer.toString(i);
            final ParsedDocument doc = testParsedDocument(id, "test", null, new ParseContext.Document(), new BytesArray("{}"), null);
            final Engine.Index index = new Engine.Index(new Term("_uid", doc.uid()), doc, SequenceNumbersService.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false);
            final Engine.IndexResult result = indexShard.index(index);
            assertThat(result.getVersion(), equalTo(2L));
        }
        // flush the buffered deletes
        final FlushRequest flushRequest = new FlushRequest();
        flushRequest.force(false);
        flushRequest.waitIfOngoing(false);
        indexShard.flush(flushRequest);
        indexShard.refresh("test");
        {
            final DocsStats docStats = indexShard.docStats();
            assertThat(docStats.getCount(), equalTo(numDocs));
            // Lucene will delete a segment if all docs are deleted from it; this means that we lose the deletes when deleting all docs
            assertThat(docStats.getDeleted(), equalTo(numDocsToDelete == numDocs ? 0 : numDocsToDelete));
        }
        // merge them away
        final ForceMergeRequest forceMergeRequest = new ForceMergeRequest();
        forceMergeRequest.onlyExpungeDeletes(randomBoolean());
        forceMergeRequest.maxNumSegments(1);
        indexShard.forceMerge(forceMergeRequest);
        indexShard.refresh("test");
        {
            final DocsStats docStats = indexShard.docStats();
            assertThat(docStats.getCount(), equalTo(numDocs));
            assertThat(docStats.getDeleted(), equalTo(0L));
        }
    } finally {
        closeShards(indexShard);
    }
}
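A note on the lower bound used for numDocsToDelete: Lucene's default merge policy only expunges deletes from segments whose deleted-doc ratio exceeds roughly 10%, and for a numDocs such as 20, numDocs / 10.0 is exactly 2.0, so Math.ceil alone would permit picking just 2 deletes, exactly 10% and not enough. Math.nextUp bumps the quotient to the next representable double so the ceiling lands strictly above 10%. A quick worked check of the arithmetic (illustrative, not part of the test):

// Worked example of the lower bound.
long numDocs = 20;
int lowerBound = (int) Math.ceil(Math.nextUp(numDocs / 10.0));
System.out.println(lowerBound); // prints 3, i.e. strictly more than 10% of 20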
Use of org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.
The class IndicesRequestIT, method testFlush.
public void testFlush() {
    String[] indexShardActions = new String[] { TransportShardFlushAction.NAME, TransportShardFlushAction.NAME + "[r]", TransportShardFlushAction.NAME + "[p]" };
    interceptTransportActions(indexShardActions);
    FlushRequest flushRequest = new FlushRequest(randomIndicesOrAliases());
    internalCluster().coordOnlyNodeClient().admin().indices().flush(flushRequest).actionGet();
    clearInterceptedActions();
    String[] indices = new IndexNameExpressionResolver(Settings.EMPTY).concreteIndexNames(client().admin().cluster().prepareState().get().getState(), flushRequest);
    assertIndicesSubset(Arrays.asList(indices), indexShardActions);
}
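The [p] and [r] suffixes denote the primary and replica variants of the per-shard transport flush action, so the assertion verifies that shard-level flushes were sent only to shards of the indices the request actually resolved to. A small sketch of the resolution step on its own (the index pattern is illustrative):

// Illustrative: expand wildcards/aliases in a FlushRequest to concrete index names.
ClusterState state = client().admin().cluster().prepareState().get().getState();
IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY);
String[] concrete = resolver.concreteIndexNames(state, new FlushRequest("logs-*"));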
Use of org.elasticsearch.action.admin.indices.flush.FlushRequest in project elasticsearch by elastic.
The class RecoveryDuringReplicationTests, method testRecoveryOfDisconnectedReplica.
public void testRecoveryOfDisconnectedReplica() throws Exception {
    try (ReplicationGroup shards = createGroup(1)) {
        shards.startAll();
        int docs = shards.indexDocs(randomInt(50));
        shards.flush();
        shards.getPrimary().updateGlobalCheckpointOnPrimary();
        final IndexShard originalReplica = shards.getReplicas().get(0);
        long replicaCommittedLocalCheckpoint = docs - 1;
        boolean replicaHasDocsSinceLastFlushedCheckpoint = false;
        for (int i = 0; i < randomInt(2); i++) {
            final int indexedDocs = shards.indexDocs(randomInt(5));
            docs += indexedDocs;
            if (indexedDocs > 0) {
                replicaHasDocsSinceLastFlushedCheckpoint = true;
            }
            final boolean flush = randomBoolean();
            if (flush) {
                originalReplica.flush(new FlushRequest());
                replicaHasDocsSinceLastFlushedCheckpoint = false;
                replicaCommittedLocalCheckpoint = docs - 1;
            }
            final boolean sync = randomBoolean();
            if (sync) {
                shards.getPrimary().updateGlobalCheckpointOnPrimary();
            }
        }
        shards.removeReplica(originalReplica);
        final int missingOnReplica = shards.indexDocs(randomInt(5));
        docs += missingOnReplica;
        replicaHasDocsSinceLastFlushedCheckpoint |= missingOnReplica > 0;
        if (randomBoolean()) {
            shards.getPrimary().updateGlobalCheckpointOnPrimary();
        }
        final boolean flushPrimary = randomBoolean();
        if (flushPrimary) {
            shards.flush();
        }
        originalReplica.close("disconnected", false);
        IOUtils.close(originalReplica.store());
        final IndexShard recoveredReplica = shards.addReplicaWithExistingPath(originalReplica.shardPath(), originalReplica.routingEntry().currentNodeId());
        shards.recoverReplica(recoveredReplica);
        if (flushPrimary && replicaHasDocsSinceLastFlushedCheckpoint) {
            // replica has something to catch up with, but since we flushed the primary, we should fall back to full recovery
            assertThat(recoveredReplica.recoveryState().getIndex().fileDetails(), not(empty()));
        } else {
            assertThat(recoveredReplica.recoveryState().getIndex().fileDetails(), empty());
            assertThat(recoveredReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(Math.toIntExact(docs - (replicaCommittedLocalCheckpoint + 1))));
        }
        docs += shards.indexDocs(randomInt(5));
        shards.assertAllEqual(docs);
    }
}
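The branch at the end of the test encodes the recovery decision being verified: if the primary flushed after the replica fell behind, the operations the replica is missing may no longer be replayable from the primary, so recovery must copy segment files; otherwise an operations-based recovery replays everything above the replica's last committed local checkpoint. A schematic sketch of that decision (the helper names are hypothetical, not Elasticsearch API):

// Schematic only; the helpers below are hypothetical stand-ins.
boolean opsBasedPossible = primaryRetainsHistoryAbove(replicaCommittedLocalCheckpoint);
if (opsBasedPossible) {
    // replay ops (replicaCommittedLocalCheckpoint + 1) .. (docs - 1) from the translog
    replayTranslogFrom(replicaCommittedLocalCheckpoint + 1);
} else {
    // the needed history is gone: fall back to copying segment files
    fileBasedRecovery();
}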