Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
Class SyncedFlushService, method performPreSyncedFlush.
private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) {
    IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
    FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
    LOGGER.trace("{} performing pre sync flush", request.shardId());
    indexShard.flush(flushRequest);
    final CommitStats commitStats = indexShard.commitStats();
    final Engine.CommitId commitId = commitStats.getRawCommitId();
    LOGGER.trace("{} pre sync flush done. commit id {}, num docs {}", request.shardId(), commitId, commitStats.getNumDocs());
    return new PreSyncedFlushResponse(commitId, commitStats.getNumDocs(), commitStats.syncId());
}
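The synced-flush coordination built on top of this response hinges on the raw commit id each copy reports: a sync id may only be written when every copy of the shard sits on the identical Lucene commit. A minimal, standalone sketch of that comparison follows; the class and method names are illustrative, not crate's API.

import java.util.Map;

// Illustrative only: the real coordination lives inside SyncedFlushService.
final class CommitIdCheck {
    // Given each copy's raw commit id (rendered as a string, keyed by the
    // copy's allocation id), a synced flush may only proceed when every
    // copy reports exactly the same commit.
    static boolean sameCommitOnAllCopies(Map<String, String> commitIdByAllocationId) {
        return commitIdByAllocationId.values().stream().distinct().count() == 1;
    }
}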
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
Class InternalTestCluster, method assertSameDocIdsOnShards.
/**
 * Asserts that all shards with the same shardId have the same document ids.
 */
public void assertSameDocIdsOnShards() throws Exception {
    assertBusy(() -> {
        ClusterState state = client().admin().cluster().prepareState().get().getState();
        for (ObjectObjectCursor<String, IndexRoutingTable> indexRoutingTable : state.routingTable().indicesRouting()) {
            for (IntObjectCursor<IndexShardRoutingTable> indexShardRoutingTable : indexRoutingTable.value.shards()) {
                ShardRouting primaryShardRouting = indexShardRoutingTable.value.primaryShard();
                IndexShard primaryShard = getShardOrNull(state, primaryShardRouting);
                if (primaryShard == null) {
                    continue;
                }
                final List<DocIdSeqNoAndSource> docsOnPrimary;
                try {
                    docsOnPrimary = IndexShardTestCase.getDocIdAndSeqNos(primaryShard);
                } catch (AlreadyClosedException ex) {
                    continue;
                }
                for (ShardRouting replicaShardRouting : indexShardRoutingTable.value.replicaShards()) {
                    IndexShard replicaShard = getShardOrNull(state, replicaShardRouting);
                    if (replicaShard == null) {
                        continue;
                    }
                    final List<DocIdSeqNoAndSource> docsOnReplica;
                    try {
                        docsOnReplica = IndexShardTestCase.getDocIdAndSeqNos(replicaShard);
                    } catch (AlreadyClosedException ex) {
                        continue;
                    }
                    assertThat("out of sync shards: primary=[" + primaryShardRouting + "] num_docs_on_primary=[" + docsOnPrimary.size()
                               + "] vs replica=[" + replicaShardRouting + "] num_docs_on_replica=[" + docsOnReplica.size() + "]",
                               docsOnReplica, equalTo(docsOnPrimary));
                }
            }
        }
    });
}
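Since assertSameDocIdsOnShards is public, integration tests can invoke it directly on the test cluster handle as a convergence check. A hedged usage sketch, assuming a test base class that exposes the cluster via internalCluster() (the test name and elided setup steps are assumptions for illustration):

public void testPrimaryAndReplicaHoldSameDocs() throws Exception {
    // ... index documents, trigger replica recovery or relocation, wait for green ...
    // then verify every replica converged on exactly the primary's document ids,
    // sequence numbers, and sources
    internalCluster().assertSameDocIdsOnShards();
}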
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
Class InternalTestCluster, method assertSameSyncIdSameDocs.
private void assertSameSyncIdSameDocs() {
    Map<String, Long> docsOnShards = new HashMap<>();
    final Collection<NodeAndClient> nodesAndClients = nodes.values();
    for (NodeAndClient nodeAndClient : nodesAndClients) {
        IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
        for (IndexService indexService : indexServices) {
            for (IndexShard indexShard : indexService) {
                try {
                    CommitStats commitStats = indexShard.commitStats();
                    String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID);
                    if (syncId != null) {
                        long liveDocsOnShard = commitStats.getNumDocs();
                        if (docsOnShards.get(syncId) != null) {
                            assertThat("sync id is equal but number of docs does not match on node " + nodeAndClient.name
                                       + ". expected " + docsOnShards.get(syncId) + " but got " + liveDocsOnShard,
                                       docsOnShards.get(syncId), equalTo(liveDocsOnShard));
                        } else {
                            docsOnShards.put(syncId, liveDocsOnShard);
                        }
                    }
                } catch (AlreadyClosedException e) {
                    // ignore: the engine is closed or the shard is recovering
                }
            }
        }
    }
}
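The invariant this assertion enforces can be stated in isolation: once two shard copies carry the same sync id, their commits must contain the same number of live documents. A self-contained sketch of that check, with illustrative names that are not part of crate:

import java.util.HashMap;
import java.util.Map;

// Standalone illustration of the invariant assertSameSyncIdSameDocs enforces.
final class SyncIdInvariant {
    private final Map<String, Long> docsBySyncId = new HashMap<>();

    // Record the live-doc count seen for a sync id; fail if a second copy
    // with the same sync id reports a different count.
    void record(String syncId, long liveDocs) {
        Long seen = docsBySyncId.putIfAbsent(syncId, liveDocs);
        if (seen != null && seen != liveDocs) {
            throw new AssertionError("sync id " + syncId + " maps to both "
                    + seen + " and " + liveDocs + " live docs");
        }
    }
}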
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
Class InternalTestCluster, method assertSeqNos.
public void assertSeqNos() throws Exception {
    assertBusy(() -> {
        final ClusterState state = clusterService().state();
        for (ObjectObjectCursor<String, IndexRoutingTable> indexRoutingTable : state.routingTable().indicesRouting()) {
            for (IntObjectCursor<IndexShardRoutingTable> indexShardRoutingTable : indexRoutingTable.value.shards()) {
                ShardRouting primaryShardRouting = indexShardRoutingTable.value.primaryShard();
                final IndexShard primaryShard = getShardOrNull(state, primaryShardRouting);
                if (primaryShard == null) {
                    // just ignore - shard movement
                    continue;
                }
                final SeqNoStats primarySeqNoStats;
                final ObjectLongMap<String> syncGlobalCheckpoints;
                try {
                    primarySeqNoStats = primaryShard.seqNoStats();
                    syncGlobalCheckpoints = primaryShard.getInSyncGlobalCheckpoints();
                } catch (AlreadyClosedException ex) {
                    // shard is closed - just ignore
                    continue;
                }
                assertThat(primaryShardRouting + " should have set the global checkpoint",
                           primarySeqNoStats.getGlobalCheckpoint(), not(equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)));
                for (ShardRouting replicaShardRouting : indexShardRoutingTable.value.replicaShards()) {
                    final IndexShard replicaShard = getShardOrNull(state, replicaShardRouting);
                    if (replicaShard == null) {
                        // just ignore - shard movement
                        continue;
                    }
                    final SeqNoStats seqNoStats;
                    try {
                        seqNoStats = replicaShard.seqNoStats();
                    } catch (AlreadyClosedException e) {
                        // shard is closed - just ignore
                        continue;
                    }
                    assertThat(replicaShardRouting + " seq_no_stats mismatch", seqNoStats, equalTo(primarySeqNoStats));
                    // the primary's local knowledge of the replica's global checkpoint must equal the global checkpoint on the replica itself
                    assertThat(replicaShardRouting + " global checkpoint syncs mismatch", seqNoStats.getGlobalCheckpoint(),
                               equalTo(syncGlobalCheckpoints.get(replicaShardRouting.allocationId().getId())));
                }
            }
        }
    }, 60, TimeUnit.SECONDS);
}
Use of org.elasticsearch.index.shard.IndexShard in project crate by crate.
Class IndexServiceTests, method testRefreshActuallyWorks.
public void testRefreshActuallyWorks() throws Exception {
    execute("create table test (x int, data text) clustered into 1 shards");
    var indexService = getIndexService("test");
    var indexName = indexService.index().getName();
    ensureGreen(indexName);
    IndexService.AsyncRefreshTask refreshTask = indexService.getRefreshTask();
    assertEquals(1000, refreshTask.getInterval().millis());
    assertTrue(indexService.getRefreshTask().mustReschedule());
    IndexShard shard = indexService.getShard(0);
    execute("insert into test (x, data) values (1, 'foo')");
    // now disable the refresh
    client().admin().indices().prepareUpdateSettings(indexName)
        .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)).get();
    // when we update the settings we reschedule the existing task AND fire off an async refresh so that everything
    // written before the change becomes visible; this is why we wait for the refresh task to be closed (unscheduled)
    // and for the first doc to be visible
    assertTrue(refreshTask.isClosed());
    refreshTask = indexService.getRefreshTask();
    assertBusy(() -> {
        // this doc becomes visible due to the forced refresh we run on updateMetadata when the interval changes
        try (Engine.Searcher searcher = shard.acquireSearcher(indexName)) {
            TopDocs search = searcher.search(new MatchAllDocsQuery(), 10);
            assertEquals(1, search.totalHits.value);
        }
    });
    assertFalse(refreshTask.isClosed());
    // refresh every millisecond
    execute("insert into test (x, data) values (2, 'foo')");
    client().admin().indices().prepareUpdateSettings(indexName)
        .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms")).get();
    assertTrue(refreshTask.isClosed());
    assertBusy(() -> {
        // this doc also becomes visible due to the forced refresh on the interval change
        try (Engine.Searcher searcher = shard.acquireSearcher(indexName)) {
            TopDocs search = searcher.search(new MatchAllDocsQuery(), 10);
            assertEquals(2, search.totalHits.value);
        }
    });
    execute("insert into test (x, data) values (3, 'foo')");
    assertBusy(() -> {
        // this doc becomes visible due to the scheduled 1ms refresh
        try (Engine.Searcher searcher = shard.acquireSearcher(indexName)) {
            TopDocs search = searcher.search(new MatchAllDocsQuery(), 10);
            assertEquals(3, search.totalHits.value);
        }
    });
}
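For what it's worth, in a crate test the same interval changes could plausibly be driven through SQL instead of the indices admin client, assuming CrateDB's refresh_interval table setting (given in milliseconds) maps onto INDEX_REFRESH_INTERVAL_SETTING. This is a sketch of the alternative, not what the test above does:

// Assumption: refresh_interval is CrateDB's table-level counterpart of the
// index refresh interval setting, expressed in milliseconds.
execute("alter table test set (refresh_interval = 0)"); // turn off periodic refresh
execute("alter table test set (refresh_interval = 1)"); // refresh every millisecond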