Use of org.elasticsearch.index.shard.IndexShard in project elasticsearch by elastic.
From class IndexWithShadowReplicasIT, method testIndexWithFewDocuments:
@TestLogging("org.elasticsearch.gateway:TRACE")
public void testIndexWithFewDocuments() throws Exception {
    final Path dataPath = createTempDir();
    Settings nodeSettings = nodeSettings(dataPath);
    internalCluster().startNodes(3, nodeSettings);
    final String IDX = "test";
    Settings idxSettings = Settings.builder()
        .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
        .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
        .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
        .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
        .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
        .build();
    prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
    ensureGreen(IDX);

    // The primary should fail and the replica will need to replay the
    // translog; that is what this test exercises.
    client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
    client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
    IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get();
    assertEquals(2, indicesStatsResponse.getIndex(IDX).getPrimaries().getTranslog().estimatedNumberOfOperations());
    assertEquals(2, indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations());
    Index index = resolveIndex(IDX);
    for (IndicesService service : internalCluster().getInstances(IndicesService.class)) {
        IndexService indexService = service.indexService(index);
        if (indexService != null) {
            IndexShard shard = indexService.getShard(0);
            TranslogStats translogStats = shard.translogStats();
            assertTrue(translogStats != null || shard instanceof ShadowIndexShard);
            if (translogStats != null) {
                assertEquals(2, translogStats.estimatedNumberOfOperations());
            }
        }
    }

    // Check that we can get doc 1 and 2, because we are doing realtime
    // gets and getting from the primary
    GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
    GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
    assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
    assertThat(gResp2.getSource().get("foo"), equalTo("bar"));

    flushAndRefresh(IDX);
    client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get();
    client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get();
    refresh();

    // Check that we can get doc 1 and 2 without realtime
    gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(false).get();
    gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(false).get();
    assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
    assertThat(gResp2.getSource().get("foo"), equalTo("bar"));

    logger.info("--> restarting all nodes");
    if (randomBoolean()) {
        logger.info("--> rolling restart");
        internalCluster().rollingRestart();
    } else {
        logger.info("--> full restart");
        internalCluster().fullRestart();
    }
    client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
    ensureGreen(IDX);
    flushAndRefresh(IDX);

    logger.info("--> performing query");
    SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
    assertHitCount(resp, 4);

    logger.info("--> deleting index");
    assertAcked(client().admin().indices().prepareDelete(IDX));
}
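The interesting part here is the translog accounting: the 1 PB flush threshold prevents any implicit flush, so both indexed operations must still show up in the translog stats. Below is a minimal sketch of reading those stats outside the test framework; it assumes a connected 5.x Client named client, and the helper name and index name are illustrative, not part of the original test.

import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.client.Client;

// Hypothetical helper: returns the estimated number of translog operations
// held by the primary shards of `index`, mirroring the assertions above.
static long primaryTranslogOps(Client client, String index) {
    IndicesStatsResponse stats = client.admin().indices()
        .prepareStats(index)
        .clear()            // drop the default stat groups
        .setTranslog(true)  // request only translog stats
        .get();
    return stats.getIndex(index).getPrimaries().getTranslog().estimatedNumberOfOperations();
}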
Use of org.elasticsearch.index.shard.IndexShard in project elasticsearch by elastic.
From class TextFieldMapperTests, method testDefaultPositionIncrementGap:
public void testDefaultPositionIncrementGap() throws IOException {
    String mapping = XContentFactory.jsonBuilder().startObject()
        .startObject("type")
            .startObject("properties")
                .startObject("field")
                    .field("type", "text")
                .endObject()
            .endObject()
        .endObject()
        .endObject().string();
    DocumentMapper mapper = indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
    assertEquals(mapping, mapper.mappingSource().toString());

    ParsedDocument doc = mapper.parse("test", "type", "1",
        XContentFactory.jsonBuilder().startObject().array("field", new String[] { "a", "b" }).endObject().bytes());
    IndexableField[] fields = doc.rootDoc().getFields("field");
    assertEquals(2, fields.length);
    assertEquals("a", fields[0].stringValue());
    assertEquals("b", fields[1].stringValue());

    IndexShard shard = indexService.getShard(0);
    shard.index(new Engine.Index(new Term("_uid", doc.uid()), doc));
    shard.refresh("test");
    try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
        LeafReader leaf = searcher.getDirectoryReader().leaves().get(0).reader();
        TermsEnum terms = leaf.terms("field").iterator();
        assertTrue(terms.seekExact(new BytesRef("b")));
        PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS);
        assertEquals(0, postings.nextDoc());
        assertEquals(TextFieldMapper.Defaults.POSITION_INCREMENT_GAP + 1, postings.nextPosition());
    }
}
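The final assertion hinges on the analyzer inserting a position increment gap between the two array values: "a" sits at position 0, so "b" lands at gap + 1. Here is a plain-Lucene sketch of the same mechanism, independent of the Elasticsearch test fixtures; it assumes Lucene 6.x (the version bundled with Elasticsearch 5.x) and hard-codes the gap at 100, which mirrors the default that TextFieldMapper.Defaults.POSITION_INCREMENT_GAP advertises.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;

public class PositionGapSketch {
    public static void main(String[] args) throws Exception {
        // Analyzer that inserts a gap of 100 positions between the values
        // of a multi-valued field (100 is an assumption matching the
        // Elasticsearch default for text fields).
        Analyzer analyzer = new Analyzer() {
            @Override
            protected TokenStreamComponents createComponents(String fieldName) {
                return new TokenStreamComponents(new WhitespaceTokenizer());
            }

            @Override
            public int getPositionIncrementGap(String fieldName) {
                return 100;
            }
        };
        try (RAMDirectory dir = new RAMDirectory()) {
            try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer))) {
                Document doc = new Document();
                doc.add(new TextField("field", "a", Field.Store.NO)); // first value: "a" at position 0
                doc.add(new TextField("field", "b", Field.Store.NO)); // second value: "b" at position 101
                writer.addDocument(doc);
            }
            try (DirectoryReader reader = DirectoryReader.open(dir)) {
                LeafReader leaf = reader.leaves().get(0).reader();
                TermsEnum terms = leaf.terms("field").iterator();
                terms.seekExact(new BytesRef("b"));
                PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS);
                postings.nextDoc();
                System.out.println(postings.nextPosition()); // prints 101, i.e. gap + 1
            }
        }
    }
}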
Use of org.elasticsearch.index.shard.IndexShard in project elasticsearch by elastic.
From class IndexLevelReplicationTests, method testAppendWhileRecovering:
public void testAppendWhileRecovering() throws Exception {
    try (ReplicationGroup shards = createGroup(0)) {
        shards.startAll();
        IndexShard replica = shards.addReplica();
        CountDownLatch latch = new CountDownLatch(2);
        int numDocs = randomIntBetween(100, 200);
        // just append one to the translog so we can assert below
        shards.appendDocs(1);
        Thread thread = new Thread() {
            @Override
            public void run() {
                try {
                    latch.countDown();
                    latch.await();
                    shards.appendDocs(numDocs - 1);
                } catch (Exception e) {
                    throw new AssertionError(e);
                }
            }
        };
        thread.start();
        Future<Void> future = shards.asyncRecoverReplica(replica,
            (indexShard, node) -> new RecoveryTarget(indexShard, node, recoveryListener, version -> {
        }) {
            @Override
            public void cleanFiles(int totalTranslogOps, Store.MetadataSnapshot sourceMetaData) throws IOException {
                super.cleanFiles(totalTranslogOps, sourceMetaData);
                latch.countDown();
                try {
                    latch.await();
                } catch (InterruptedException e) {
                    throw new AssertionError(e);
                }
            }
        });
        future.get();
        thread.join();
        shards.assertAllEqual(numDocs);

        Engine engine = IndexShardTests.getEngineFromShard(replica);
        assertEquals("expected no version lookups", 0, InternalEngineTests.getNumVersionLookups((InternalEngine) engine));
        for (IndexShard shard : shards) {
            engine = IndexShardTests.getEngineFromShard(shard);
            assertEquals(0, InternalEngineTests.getNumIndexVersionsLookups((InternalEngine) engine));
            assertEquals(0, InternalEngineTests.getNumVersionLookups((InternalEngine) engine));
        }
    }
}
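The CountDownLatch(2) implements a two-party rendezvous: the indexing thread and the recovery's cleanFiles step each count down and then wait, so the bulk of the appends is guaranteed to start only after the file-based phase of recovery has finished, and those documents must therefore reach the replica through operation replay. A stripped-down, self-contained sketch of that idiom follows; the class and thread names are illustrative.

import java.util.concurrent.CountDownLatch;

public class RendezvousSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch latch = new CountDownLatch(2);
        Thread indexer = new Thread(() -> {
            try {
                latch.countDown();   // "indexer is ready"
                latch.await();       // block until recovery reaches its barrier point
                System.out.println("appending remaining docs");
            } catch (InterruptedException e) {
                throw new AssertionError(e);
            }
        });
        indexer.start();
        // ... file-copy work would happen here (cleanFiles in the test) ...
        latch.countDown();           // "recovery finished copying files"
        latch.await();               // proceed once the indexer is also ready
        indexer.join();
    }
}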
Use of org.elasticsearch.index.shard.IndexShard in project elasticsearch by elastic.
From class IndexLevelReplicationTests, method testInheritMaxValidAutoIDTimestampOnRecovery:
public void testInheritMaxValidAutoIDTimestampOnRecovery() throws Exception {
    try (ReplicationGroup shards = createGroup(0)) {
        shards.startAll();
        final IndexRequest indexRequest = new IndexRequest(index.getName(), "type").source("{}", XContentType.JSON);
        // force an update of the timestamp
        indexRequest.onRetry();
        final IndexResponse response = shards.index(indexRequest);
        assertEquals(DocWriteResponse.Result.CREATED, response.getResult());
        if (randomBoolean()) {
            // let's check that this also happens if no translog record is replicated
            shards.flush();
        }
        IndexShard replica = shards.addReplica();
        shards.recoverReplica(replica);
        SegmentsStats segmentsStats = replica.segmentStats(false);
        SegmentsStats primarySegmentStats = shards.getPrimary().segmentStats(false);
        assertNotEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, primarySegmentStats.getMaxUnsafeAutoIdTimestamp());
        assertEquals(primarySegmentStats.getMaxUnsafeAutoIdTimestamp(), segmentsStats.getMaxUnsafeAutoIdTimestamp());
        assertNotEquals(Long.MAX_VALUE, segmentsStats.getMaxUnsafeAutoIdTimestamp());
    }
}
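The request is deliberately marked as a retry: a retried auto-ID request may be a duplicate, so the engine records its timestamp as "unsafe", and the test then asserts that a replica inherits that value on recovery whether or not a translog record was replicated. Below is a small sketch of that setup, assuming the same 5.x IndexRequest API used above; the index name and helper are illustrative.

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.xcontent.XContentType;

// Hypothetical helper mirroring the test setup: build an auto-ID index
// request and mark it as retried, which forces the engine to record its
// auto-generated-ID timestamp as unsafe.
static IndexRequest retriedAutoIdRequest(String index) {
    IndexRequest request = new IndexRequest(index, "type") // no explicit id: Elasticsearch auto-generates one
        .source("{}", XContentType.JSON);
    request.onRetry(); // simulate a client retry of the same request
    return request;
}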
Use of org.elasticsearch.index.shard.IndexShard in project elasticsearch by elastic.
From class IndexLevelReplicationTests, method testCheckpointsAdvance:
public void testCheckpointsAdvance() throws Exception {
    try (ReplicationGroup shards = createGroup(randomInt(3))) {
        shards.startPrimary();
        int numDocs = 0;
        int startedShards;
        do {
            numDocs += shards.indexDocs(randomInt(20));
            startedShards = shards.startReplicas(randomIntBetween(1, 2));
        } while (startedShards > 0);
        if (numDocs == 0 || randomBoolean()) {
            // in the case we have no indexing, we simulate the background global checkpoint sync
            shards.getPrimary().updateGlobalCheckpointOnPrimary();
        }
        for (IndexShard shard : shards) {
            final SeqNoStats shardStats = shard.seqNoStats();
            final ShardRouting shardRouting = shard.routingEntry();
            logger.debug("seq_no stats for {}: {}", shardRouting,
                XContentHelper.toString(shardStats, new ToXContent.MapParams(Collections.singletonMap("pretty", "false"))));
            assertThat(shardRouting + " local checkpoint mismatch", shardStats.getLocalCheckpoint(), equalTo(numDocs - 1L));
            assertThat(shardRouting + " global checkpoint mismatch", shardStats.getGlobalCheckpoint(), equalTo(numDocs - 1L));
            assertThat(shardRouting + " max seq no mismatch", shardStats.getMaxSeqNo(), equalTo(numDocs - 1L));
        }
    }
}
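The loop asserts that, once the global checkpoint has been synced, all three sequence-number markers converge on numDocs - 1 on every shard copy. A minimal helper expressing that invariant, assuming an IndexShard obtained as in the examples above; the helper name is illustrative.

import org.elasticsearch.index.seqno.SeqNoStats;
import org.elasticsearch.index.shard.IndexShard;

// Hypothetical invariant check: after a global checkpoint sync, a quiesced
// shard's checkpoints all agree on the last assigned sequence number.
static void assertCheckpointsConverged(IndexShard shard, long numDocs) {
    SeqNoStats stats = shard.seqNoStats();
    assert stats.getLocalCheckpoint() == numDocs - 1;  // highest seq_no processed locally, no gaps
    assert stats.getGlobalCheckpoint() == numDocs - 1; // highest seq_no completed on all in-sync copies
    assert stats.getMaxSeqNo() == numDocs - 1;         // highest seq_no ever issued
}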