use of org.elasticsearch.index.IndexService in project crate by crate.
the class TransportShardDeleteActionTest method prepare.
@Before
public void prepare() throws Exception {
IndicesService indicesService = mock(IndicesService.class);
IndexService indexService = mock(IndexService.class);
when(indicesService.indexServiceSafe(TABLE_IDENT.indexName())).thenReturn(indexService);
indexShard = mock(IndexShard.class);
when(indexService.shardSafe(0)).thenReturn(indexShard);
transportShardDeleteAction = new TransportShardDeleteAction(
    Settings.EMPTY,
    mock(TransportService.class),
    mock(MappingUpdatedAction.class),
    mock(IndexNameExpressionResolver.class),
    mock(ClusterService.class),
    indicesService,
    mock(ThreadPool.class),
    mock(ShardStateAction.class),
    mock(ActionFilters.class));
}
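The setup above stubs a two-level lookup chain so the action under test resolves a canned shard. Here is a minimal, self-contained sketch of the same Mockito idiom; the Registry/Service/Shard names are hypothetical stand-ins, not crate's types:
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class MockChainSketch {
    interface Shard {}
    interface Service { Shard shardSafe(int id); }
    interface Registry { Service serviceSafe(String name); }

    public static void main(String[] args) {
        Registry registry = mock(Registry.class);
        Service service = mock(Service.class);
        Shard shard = mock(Shard.class);
        // Wire the chain registry -> service -> shard, mirroring
        // indicesService -> indexService -> indexShard in prepare() above.
        when(registry.serviceSafe("my_index")).thenReturn(service);
        when(service.shardSafe(0)).thenReturn(shard);
        // Any collaborator handed this registry now resolves the canned shard.
        System.out.println(registry.serviceSafe("my_index").shardSafe(0) == shard); // true
    }
}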
use of org.elasticsearch.index.IndexService in project crate by crate.
the class TransportShardUpsertActionTest method prepare.
@Before
public void prepare() throws Exception {
Functions functions = getFunctions();
bindGeneratedColumnTable(functions);
IndicesService indicesService = mock(IndicesService.class);
IndexService indexService = mock(IndexService.class);
when(indicesService.indexServiceSafe(TABLE_IDENT.indexName())).thenReturn(indexService);
when(indicesService.indexServiceSafe(PARTITION_INDEX)).thenReturn(indexService);
indexShard = mock(IndexShard.class);
when(indexService.shardSafe(0)).thenReturn(indexShard);
// Avoid null pointer exceptions
DocTableInfo tableInfo = mock(DocTableInfo.class);
Schemas schemas = mock(Schemas.class);
when(tableInfo.columns()).thenReturn(Collections.<Reference>emptyList());
when(schemas.getWritableTable(any(TableIdent.class))).thenReturn(tableInfo);
transportShardUpsertAction = new TestingTransportShardUpsertAction(
    Settings.EMPTY,
    mock(ThreadPool.class),
    mock(ClusterService.class),
    mock(TransportService.class),
    mock(ActionFilters.class),
    indicesService,
    mock(JobContextService.class),
    mock(ShardStateAction.class),
    functions,
    schemas,
    mock(MappingUpdatedAction.class),
    mock(IndexNameExpressionResolver.class));
}
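Note that the getWritableTable stub uses the any(TableIdent.class) argument matcher, so it answers for every table identifier rather than for one literal argument. A minimal sketch of that idiom; the Schemas/Ident names below are illustrative stand-ins, not crate's classes:
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class MatcherStubSketch {
    static class Ident { final String name; Ident(String name) { this.name = name; } }
    interface Schemas { String getWritableTable(Ident ident); }

    public static void main(String[] args) {
        Schemas schemas = mock(Schemas.class);
        // any(Ident.class) matches every argument, so both lookups below hit the stub.
        when(schemas.getWritableTable(any(Ident.class))).thenReturn("users");
        System.out.println(schemas.getWritableTable(new Ident("a"))); // users
        System.out.println(schemas.getWritableTable(new Ident("b"))); // users
    }
}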
use of org.elasticsearch.index.IndexService in project elasticsearch by elastic.
the class SearchServiceTests method testSearchWhileIndexDeleted.
public void testSearchWhileIndexDeleted() throws IOException, InterruptedException {
createIndex("index");
client().prepareIndex("index", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
SearchService service = getInstanceFromNode(SearchService.class);
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
IndexShard indexShard = indexService.getShard(0);
AtomicBoolean running = new AtomicBoolean(true);
CountDownLatch startGun = new CountDownLatch(1);
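// seed with effectively unlimited permits; each async index request below takes one
// and the final acquire(Integer.MAX_VALUE) waits for all of them to be returned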
Semaphore semaphore = new Semaphore(Integer.MAX_VALUE);
final Thread thread = new Thread() {
@Override
public void run() {
startGun.countDown();
while (running.get()) {
service.afterIndexRemoved(indexService.index(), indexService.getIndexSettings(), DELETED);
if (randomBoolean()) {
// here we trigger some refreshes to ensure the IndexReader goes out of scope, so that we
// hit an AlreadyClosedException if we access a search context in a non-sane way.
try {
semaphore.acquire();
} catch (InterruptedException e) {
throw new AssertionError(e);
}
client().prepareIndex("index", "type").setSource("field", "value").setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())).execute(new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
semaphore.release();
}
@Override
public void onFailure(Exception e) {
semaphore.release();
}
});
}
}
}
};
thread.start();
startGun.await();
try {
final int rounds = scaledRandomIntBetween(100, 10000);
for (int i = 0; i < rounds; i++) {
try {
QuerySearchResultProvider querySearchResultProvider = service.executeQueryPhase(
    new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
        new SearchSourceBuilder(), new String[0], false,
        new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f),
    new SearchTask(123L, "", "", "", null));
IntArrayList intCursors = new IntArrayList(1);
intCursors.add(0);
ShardFetchRequest req = new ShardFetchRequest(querySearchResultProvider.id(), intCursors, null);
service.executeFetchPhase(req, new SearchTask(123L, "", "", "", null));
} catch (AlreadyClosedException ex) {
throw ex;
} catch (IllegalStateException ex) {
assertEquals("search context is already closed can't increment refCount current count [0]", ex.getMessage());
} catch (SearchContextMissingException ex) {
// that's fine
}
}
} finally {
running.set(false);
thread.join();
semaphore.acquire(Integer.MAX_VALUE);
}
}
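The Semaphore seeded with Integer.MAX_VALUE acts as a completion barrier: each async index request takes one permit and its listener returns it in both onResponse and onFailure, so the acquire(Integer.MAX_VALUE) in the finally block can only succeed once every in-flight callback has fired. A stripped-down sketch of the same pattern, using CompletableFuture in place of the client call (not from the source):
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Semaphore;

public class SemaphoreBarrierSketch {
    public static void main(String[] args) throws InterruptedException {
        Semaphore semaphore = new Semaphore(Integer.MAX_VALUE);
        for (int i = 0; i < 100; i++) {
            semaphore.acquire(); // one permit per in-flight task
            CompletableFuture.runAsync(() -> {
                // simulated async work (the index request in the test above)
            }).whenComplete((result, error) -> semaphore.release());
        }
        // Draining all permits blocks until every callback has released its permit.
        semaphore.acquire(Integer.MAX_VALUE);
        System.out.println("all async tasks completed");
    }
}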
use of org.elasticsearch.index.IndexService in project elasticsearch by elastic.
the class SearchServiceTests method testCloseSearchContextOnRewriteException.
public void testCloseSearchContextOnRewriteException() {
createIndex("index");
client().prepareIndex("index", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
SearchService service = getInstanceFromNode(SearchService.class);
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
IndexShard indexShard = indexService.getShard(0);
final int activeContexts = service.getActiveContexts();
final int activeRefs = indexShard.store().refCount();
expectThrows(SearchPhaseExecutionException.class, () -> client().prepareSearch("index").setQuery(new FailOnRewriteQueryBuilder()).get());
assertEquals(activeContexts, service.getActiveContexts());
assertEquals(activeRefs, indexShard.store().refCount());
}
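expectThrows here comes from the Lucene/Elasticsearch test base classes: it runs the lambda, asserts that the expected exception type is thrown, and returns the caught instance. A simplified re-implementation to show the contract (not the actual library code):
public class ExpectThrowsSketch {
    // Simplified stand-in for the expectThrows helper used by the test base class.
    static <T extends Throwable> T expectThrows(Class<T> expectedType, Runnable runnable) {
        try {
            runnable.run();
        } catch (Throwable t) {
            if (expectedType.isInstance(t)) {
                return expectedType.cast(t);
            }
            throw new AssertionError("unexpected exception type: " + t.getClass(), t);
        }
        throw new AssertionError("expected " + expectedType.getSimpleName() + " was not thrown");
    }

    public static void main(String[] args) {
        NumberFormatException e =
            expectThrows(NumberFormatException.class, () -> Integer.parseInt("not a number"));
        System.out.println("caught as expected: " + e.getMessage());
    }
}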
use of org.elasticsearch.index.IndexService in project elasticsearch by elastic.
the class NestedAggregatorTests method testResetRootDocId.
public void testResetRootDocId() throws Exception {
Directory directory = newDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(null);
iwc.setMergePolicy(NoMergePolicy.INSTANCE);
RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, iwc);
List<Document> documents = new ArrayList<>();
// 1 segment with 1 root document and 3 nested sub docs
Document document = new Document();
document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
documents.add(document);
document = new Document();
document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
documents.add(document);
document = new Document();
document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
documents.add(document);
document = new Document();
document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
documents.add(document);
indexWriter.addDocuments(documents);
indexWriter.commit();
documents.clear();
// 1 segment with:
// 1 document, with 1 nested subdoc
document = new Document();
document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
documents.add(document);
document = new Document();
document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
documents.add(document);
indexWriter.addDocuments(documents);
documents.clear();
// and 1 document, with 1 nested subdoc
document = new Document();
document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "__nested_field", TypeFieldMapper.Defaults.FIELD_TYPE));
documents.add(document);
document = new Document();
document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
documents.add(document);
indexWriter.addDocuments(documents);
indexWriter.commit();
indexWriter.close();
IndexService indexService = createIndex("test");
DirectoryReader directoryReader = DirectoryReader.open(directory);
directoryReader = ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(indexService.index(), 0));
IndexSearcher searcher = new IndexSearcher(directoryReader);
indexService.mapperService().merge("test",
    new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("test", "nested_field", "type=nested").string()),
    MapperService.MergeReason.MAPPING_UPDATE, false);
SearchContext context = createSearchContext(indexService);
AggregatorFactories.Builder builder = AggregatorFactories.builder();
NestedAggregationBuilder factory = new NestedAggregationBuilder("test", "nested_field");
builder.addAggregator(factory);
AggregatorFactories factories = builder.build(context, null);
context.aggregations(new SearchContextAggregations(factories));
Aggregator[] aggs = factories.createTopLevelAggregators();
BucketCollector collector = BucketCollector.wrap(Arrays.asList(aggs));
collector.preCollection();
// A regular search always excludes nested docs, so we use Queries.newNonNestedFilter() here (otherwise MatchAllDocsQuery would be sufficient)
// We exclude the root doc with uid type#2; this triggers the bug if we don't reset the root doc when we process a new segment, because
// root doc type#3 and root doc type#1 have the same segment docid
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(Queries.newNonNestedFilter(), Occur.MUST);
bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), Occur.MUST_NOT);
searcher.search(new ConstantScoreQuery(bq.build()), collector);
collector.postCollection();
Nested nested = (Nested) aggs[0].buildAggregation(0);
// The bug manifests if 6 docs are returned: because currentRootDoc isn't reset, the previous child docs from the first segment are emitted as hits.
assertThat(nested.getDocCount(), equalTo(4L));
directoryReader.close();
directory.close();
}
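Throughout this test, each call to addDocuments indexes a block: the nested sub-documents come first and the root document is last in the list, which gives the contiguous doc-id layout the nested aggregator relies on. A minimal standalone illustration of block indexing; this assumes a recent Lucene version (ByteBuffersDirectory) and uses illustrative field names:
import java.util.Arrays;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;

public class BlockIndexingSketch {
    public static void main(String[] args) throws Exception {
        try (ByteBuffersDirectory dir = new ByteBuffersDirectory();
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
            Document child = new Document();
            child.add(new StringField("type", "__nested", Store.NO));
            Document parent = new Document();
            parent.add(new StringField("type", "root", Store.NO));
            // addDocuments writes the block contiguously: children first, parent last,
            // so the parent's doc id is always the highest within its block.
            writer.addDocuments(Arrays.asList(child, parent));
            writer.commit();
        }
    }
}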