Use of org.elasticsearch.index.IndexService in project crate by crate: the class DocLevelExpressionsTest, method prepare().
@Before
public void prepare() throws Exception {
    // disable fielddata caching for the test index so lookups always go through fielddata directly
    Settings settings = Settings.builder().put("index.fielddata.cache", "none").build();
    IndexService indexService = createIndex("test", settings);
    ifd = indexService.fieldData();
    writer = new IndexWriter(
        new RAMDirectory(),
        new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()));
    // populate the in-memory index so the reader has at least one leaf
    insertValues(writer);
    DirectoryReader directoryReader = DirectoryReader.open(writer, true);
    readerContext = directoryReader.leaves().get(0);
    ctx = new CollectorContext(ifd, null);
}
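insertValues(writer) is a helper defined elsewhere in the test class; a minimal sketch of what it could look like, assuming plain Lucene documents (org.apache.lucene.document.Document / StringField) with a single keyword field whose name, values and count are illustrative only:

private void insertValues(IndexWriter writer) throws Exception {
    // hypothetical helper: field name "value" and 10 documents are assumptions
    for (int i = 0; i < 10; i++) {
        Document doc = new Document();
        doc.add(new StringField("value", "val_" + i, Field.Store.NO));
        writer.addDocument(doc);
    }
    // flush the pending documents into a segment; the NRT reader opened above would also see them
    writer.commit();
}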
Use of org.elasticsearch.index.IndexService in project elasticsearch by elastic: the class InternalTestCluster, method assertSameSyncIdSameDocs().
private void assertSameSyncIdSameDocs() {
    Map<String, Long> docsOnShards = new HashMap<>();
    final Collection<NodeAndClient> nodesAndClients = nodes.values();
    for (NodeAndClient nodeAndClient : nodesAndClients) {
        IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
        for (IndexService indexService : indexServices) {
            for (IndexShard indexShard : indexService) {
                CommitStats commitStats = indexShard.commitStats();
                // commitStats is null if the engine is closed or if the shard is recovering
                if (commitStats != null) {
                    String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID);
                    if (syncId != null) {
                        long liveDocsOnShard = commitStats.getNumDocs();
                        if (docsOnShards.get(syncId) != null) {
                            assertThat("sync id is equal but number of docs does not match on node "
                                    + nodeAndClient.name + ". expected " + docsOnShards.get(syncId)
                                    + " but got " + liveDocsOnShard,
                                docsOnShards.get(syncId), equalTo(liveDocsOnShard));
                        } else {
                            docsOnShards.put(syncId, liveDocsOnShard);
                        }
                    }
                }
            }
        }
    }
}
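The sync id compared above is only written during a synced flush; a minimal sketch of how an integration test might trigger one before this assertion runs (the index name "test" is an assumption):

// hypothetical usage: a synced flush stamps the same sync id into the commit user
// data of every shard copy, which is what the doc-count comparison above relies on
client().admin().indices().prepareSyncedFlush("test").get();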
Use of org.elasticsearch.index.IndexService in project crate by crate: the class ArrayMapperTest, method mapper().
/**
 * Create an index with the given type and mapping and validate DocumentMapper serialization.
 */
private DocumentMapper mapper(String indexName, String type, String mapping) throws IOException {
    // we serialize and deserialize the mapping to make sure serialization works just fine
    client().admin().indices().prepareCreate(indexName)
        .addMapping(type, mapping)
        .setSettings(Settings.builder().put("number_of_replicas", 0).build())
        .execute().actionGet();
    client().admin().cluster().prepareHealth(indexName)
        .setWaitForGreenStatus()
        .setWaitForRelocatingShards(0)
        .setWaitForEvents(Priority.LANGUID)
        .execute().actionGet();
    IndicesService instanceFromNode = internalCluster().getInstance(IndicesService.class);
    IndexService indexService = instanceFromNode.indexServiceSafe(indexName);
    DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
    DocumentMapper defaultMapper = parser.parse(type, new CompressedXContent(mapping));
    XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
    builder.startObject();
    defaultMapper.toXContent(builder, ToXContent.EMPTY_PARAMS);
    builder.endObject();
    String rebuildMapping = builder.string();
    return parser.parse(type, new CompressedXContent(rebuildMapping));
}
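A minimal sketch of how this helper might be invoked from a test; the index, type and column names and the array/inner mapping layout are assumptions for illustration:

// hypothetical usage: round-trip an array-typed column mapping through mapper(...)
String mapping = XContentFactory.jsonBuilder()
    .startObject()
        .startObject("properties")
            .startObject("tags")
                .field("type", "array")
                .startObject("inner")
                    .field("type", "string")
                .endObject()
            .endObject()
        .endObject()
    .endObject()
    .string();
DocumentMapper docMapper = mapper("test", "default", mapping);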
Use of org.elasticsearch.index.IndexService in project crate by crate: the class TransportShardUpsertAction, method processRequestItemsOnReplica().
@Override
protected void processRequestItemsOnReplica(ShardId shardId, ShardUpsertRequest request) {
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    IndexShard indexShard = indexService.shardSafe(shardId.id());
    for (ShardUpsertRequest.Item item : request.items()) {
        if (item.source() == null) {
            if (logger.isTraceEnabled()) {
                logger.trace("[{} (R)] Document with id {}, has no source, primary operation must have failed",
                    indexShard.shardId(), item.id());
            }
            continue;
        }
        shardIndexOperationOnReplica(request, item, indexShard);
    }
}
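In later Elasticsearch versions IndexService#shardSafe(int) was replaced; a minimal sketch of the same lookup against a newer API, assuming IndexService#getShard(int) is available and throws for unknown shard ids:

// equivalent lookup on newer Elasticsearch versions (getShard replacing shardSafe is an assumption)
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());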
Use of org.elasticsearch.index.IndexService in project crate by crate: the class LuceneShardCollectorProvider, method getOrderedCollector().
@Override
public OrderedDocCollector getOrderedCollector(RoutedCollectPhase phase,
                                               SharedShardContext sharedShardContext,
                                               JobCollectContext jobCollectContext,
                                               boolean requiresRepeat) {
    RoutedCollectPhase collectPhase = phase.normalize(shardNormalizer, null);
    CollectorContext collectorContext;
    InputFactory.Context<? extends LuceneCollectorExpression<?>> ctx;
    Engine.Searcher searcher = null;
    LuceneQueryBuilder.Context queryContext;
    try {
        searcher = sharedShardContext.acquireSearcher();
        IndexService indexService = sharedShardContext.indexService();
        queryContext = luceneQueryBuilder.convert(
            collectPhase.whereClause(),
            indexService.mapperService(),
            indexService.fieldData(),
            indexService.cache());
        jobCollectContext.addSearcher(sharedShardContext.readerId(), searcher);
        ctx = docInputFactory.extractImplementations(collectPhase);
        collectorContext = getCollectorContext(sharedShardContext.readerId(), ctx);
    } catch (Throwable t) {
        // release the searcher if setup fails before the collector takes ownership of it
        if (searcher != null) {
            searcher.close();
        }
        throw t;
    }
    int batchSize = collectPhase.shardQueueSize(localNodeId);
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("[{}][{}] creating LuceneOrderedDocCollector. Expected number of rows to be collected: {}",
            sharedShardContext.indexShard().routingEntry().currentNodeId(),
            sharedShardContext.indexShard().shardId(),
            batchSize);
    }
    // indexShard, fieldTypeLookup, localNodeId and docInputFactory are fields of the surrounding class
    return new LuceneOrderedDocCollector(
        indexShard.shardId(),
        searcher.searcher(),
        queryContext.query(),
        queryContext.minScore(),
        Symbols.containsColumn(collectPhase.toCollect(), DocSysColumns.SCORE),
        batchSize,
        fieldTypeLookup,
        collectorContext,
        collectPhase.orderBy(),
        LuceneSortGenerator.generateLuceneSort(collectorContext, collectPhase.orderBy(), docInputFactory, fieldTypeLookup),
        ctx.topLevelInputs(),
        ctx.expressions());
}