Use of org.elasticsearch.index.IndexService in project elasticsearch by elastic.
From the class SyncedFlushSingleNodeTests, method testFailAfterIntermediateCommit:
public void testFailAfterIntermediateCommit() throws InterruptedException {
    createIndex("test");
    client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
    IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
    IndexShard shard = test.getShardOrNull(0);
    SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
    final ShardId shardId = shard.shardId();
    final ClusterState state = getInstanceFromNode(ClusterService.class).state();
    final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
    final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
    assertEquals("exactly one active shard", 1, activeShards.size());
    Map<String, Engine.CommitId> commitIds = SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
    assertEquals("exactly one commit id", 1, commitIds.size());
    if (randomBoolean()) {
        client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON).get();
    }
    client().admin().indices().prepareFlush("test").setForce(true).get();
    String syncId = UUIDs.base64UUID();
    final SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
    flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
    listener.latch.await();
    assertNull(listener.error);
    ShardsSyncedFlushResult syncedFlushResult = listener.result;
    assertNotNull(syncedFlushResult);
    assertEquals(0, syncedFlushResult.successfulShards());
    assertEquals(1, syncedFlushResult.totalShards());
    assertEquals(syncId, syncedFlushResult.syncId());
    assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
    assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
    assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
}
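Both synced-flush tests in this class start from the same single-node lookup chain to get from the injected IndicesService down to a concrete shard. A minimal sketch of just that shared part, assuming the same ESSingleNodeTestCase-style helpers (createIndex, getInstanceFromNode, resolveIndex) used above:

    // Sketch of the shared setup pattern (fragment, same test context as above):
    // resolve the IndexService for the test index and grab its only shard.
    IndexService indexService = getInstanceFromNode(IndicesService.class)
            .indexService(resolveIndex("test"));
    IndexShard primary = indexService.getShardOrNull(0);   // null if shard 0 is not allocated on this node
    SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
    ShardId shardId = primary.shardId();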
Use of org.elasticsearch.index.IndexService in project elasticsearch by elastic.
From the class SyncedFlushSingleNodeTests, method testFailWhenCommitIsMissing:
public void testFailWhenCommitIsMissing() throws InterruptedException {
    createIndex("test");
    client().prepareIndex("test", "test", "1").setSource("{}", XContentType.JSON).get();
    IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
    IndexShard shard = test.getShardOrNull(0);
    SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
    final ShardId shardId = shard.shardId();
    final ClusterState state = getInstanceFromNode(ClusterService.class).state();
    final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
    final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
    assertEquals("exactly one active shard", 1, activeShards.size());
    Map<String, Engine.CommitId> commitIds = SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
    assertEquals("exactly one commit id", 1, commitIds.size());
    // wipe it...
    commitIds.clear();
    String syncId = UUIDs.base64UUID();
    SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
    flushService.sendSyncRequests(syncId, activeShards, state, commitIds, shardId, shardRoutingTable.size(), listener);
    listener.latch.await();
    assertNull(listener.error);
    ShardsSyncedFlushResult syncedFlushResult = listener.result;
    assertNotNull(syncedFlushResult);
    assertEquals(0, syncedFlushResult.successfulShards());
    assertEquals(1, syncedFlushResult.totalShards());
    assertEquals(syncId, syncedFlushResult.syncId());
    assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
    assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
    assertEquals("no commit id from pre-sync flush", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
}
Use of org.elasticsearch.index.IndexService in project elasticsearch by elastic.
From the class TransportClusterStatsAction, method nodeOperation:
@Override
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
    NodeInfo nodeInfo = nodeService.info(true, true, false, true, false, true, false, true, false, false);
    NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, true, true, true, false, true, false, false, false, false, false, false);
    List<ShardStats> shardsStats = new ArrayList<>();
    for (IndexService indexService : indicesService) {
        for (IndexShard indexShard : indexService) {
            if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
                // only report on fully started shards
                shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(),
                        new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS),
                        indexShard.commitStats(), indexShard.seqNoStats()));
            }
        }
    }
    ClusterHealthStatus clusterStatus = null;
    if (clusterService.state().nodes().isLocalNodeElectedMaster()) {
        clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus();
    }
    return new ClusterStatsNodeResponse(nodeInfo.getNode(), clusterStatus, nodeInfo, nodeStats,
            shardsStats.toArray(new ShardStats[shardsStats.size()]));
}
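The per-node shard walk above works because IndicesService is iterable over its IndexService instances and each IndexService is iterable over its IndexShards. A minimal sketch of just that iteration and the active-shard filter, using only calls that already appear in nodeOperation:

    // Fragment: visit every fully started shard on this node.
    List<ShardStats> shardsStats = new ArrayList<>();
    for (IndexService indexService : indicesService) {       // IndicesService iterates its IndexService instances
        for (IndexShard indexShard : indexService) {          // IndexService iterates its shards
            if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
                // collect whatever per-shard information is needed here
            }
        }
    }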
Use of org.elasticsearch.index.IndexService in project elasticsearch by elastic.
From the class TransportAnalyzeAction, method shardOperation:
@Override
protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId) {
    try {
        final IndexService indexService;
        if (shardId != null) {
            indexService = indicesService.indexServiceSafe(shardId.getIndex());
        } else {
            indexService = null;
        }
        String field = null;
        Analyzer analyzer = null;
        if (request.field() != null) {
            if (indexService == null) {
                throw new IllegalArgumentException("No index provided, and trying to analyzer based on a specific field which requires the index parameter");
            }
            MappedFieldType fieldType = indexService.mapperService().fullName(request.field());
            if (fieldType != null) {
                if (fieldType.tokenized()) {
                    analyzer = fieldType.indexAnalyzer();
                } else if (fieldType instanceof KeywordFieldMapper.KeywordFieldType) {
                    analyzer = ((KeywordFieldMapper.KeywordFieldType) fieldType).normalizer();
                    if (analyzer == null) {
                        // this will be KeywordAnalyzer
                        analyzer = fieldType.indexAnalyzer();
                    }
                } else {
                    throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are only supported on tokenized fields");
                }
                field = fieldType.name();
            }
        }
        if (field == null) {
            if (indexService != null) {
                field = indexService.getIndexSettings().getDefaultField();
            } else {
                field = AllFieldMapper.NAME;
            }
        }
        final AnalysisRegistry analysisRegistry = indicesService.getAnalysis();
        return analyze(request, field, analyzer, indexService != null ? indexService.getIndexAnalyzers() : null, analysisRegistry, environment);
    } catch (IOException e) {
        throw new ElasticsearchException("analysis failed", e);
    }
}
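For orientation, this shardOperation is the server side of the _analyze API. A hedged usage sketch of the client-side call that reaches it, assuming the 5.x IndicesAdminClient builder API (the index name "test", the sample text, and the field "title" are illustrative only):

    // Sketch: client-side analyze request against a mapped field.
    AnalyzeResponse response = client().admin().indices()
            .prepareAnalyze("test", "quick brown fox")   // hypothetical index and sample text
            .setField("title")                           // hypothetical mapped field
            .get();
    for (AnalyzeResponse.AnalyzeToken token : response.getTokens()) {
        System.out.println(token.getTerm());
    }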
Use of org.elasticsearch.index.IndexService in project elasticsearch by elastic.
From the class MetaDataMappingServiceTests, method testMappingClusterStateUpdateDoesntChangeExistingIndices:
public void testMappingClusterStateUpdateDoesntChangeExistingIndices() throws Exception {
    final IndexService indexService = createIndex("test", client().admin().indices().prepareCreate("test").addMapping("type"));
    final CompressedXContent currentMapping = indexService.mapperService().documentMapper("type").mappingSource();
    final MetaDataMappingService mappingService = getInstanceFromNode(MetaDataMappingService.class);
    final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
    // TODO - it will be nice to get a random mapping generator
    final PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest().type("type");
    request.source("{ \"properties\" { \"field\": { \"type\": \"string\" }}}");
    mappingService.putMappingExecutor.execute(clusterService.state(), Collections.singletonList(request));
    assertThat(indexService.mapperService().documentMapper("type").mappingSource(), equalTo(currentMapping));
}
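The assertion hinges on capturing the mapping source through the IndexService before the cluster-state update and comparing it afterwards. A minimal sketch of just that comparison, reusing the accessors from the test above:

    // Fragment: the in-memory DocumentMapper reachable via the IndexService
    // must not change when the update is applied through MetaDataMappingService.
    CompressedXContent before = indexService.mapperService().documentMapper("type").mappingSource();
    // ... apply the PutMappingClusterStateUpdateRequest via mappingService.putMappingExecutor ...
    CompressedXContent after = indexService.mapperService().documentMapper("type").mappingSource();
    assertThat(after, equalTo(before));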