Usage example of com.intellij.util.text.StringSearcher from the JetBrains intellij-community project: class PsiSearchHelperImpl, method distributePrimitives.
/**
 * Partitions the primitive word-search requests gathered by each collector into three buckets:
 * requests over a {@link LocalSearchScope} go into {@code locals}, index-backed global requests are
 * grouped in {@code globals} by their id-index word entries (so identical word sets share one index
 * pass), and custom search actions are wrapped as deferred {@link Computable}s in {@code customs}.
 * For every global request a ready-to-use per-element processor is also stored in
 * {@code localProcessors}, keyed by the request itself.
 */
private static void distributePrimitives(@NotNull Map<SearchRequestCollector, Processor<PsiReference>> collectors, @NotNull Set<RequestWithProcessor> locals, @NotNull MultiMap<Set<IdIndexEntry>, RequestWithProcessor> globals, @NotNull List<Computable<Boolean>> customs, @NotNull Map<RequestWithProcessor, Processor<PsiElement>> localProcessors, @NotNull ProgressIndicator progress) {
  // Phase 1: route every primitive request to the local or global bucket and defer custom actions.
  collectors.forEach((collector, refProcessor) -> {
    for (PsiSearchRequest request : collector.takeSearchRequests()) {
      SearchScope requestScope = request.searchScope;
      if (requestScope instanceof LocalSearchScope) {
        registerRequest(locals, request, refProcessor);
      }
      else {
        // Group global requests by the index entries of their word so equal keys merge.
        Set<IdIndexEntry> wordEntries = new HashSet<>(getWordEntries(request.word, request.caseSensitive));
        registerRequest(globals.getModifiable(wordEntries), request, refProcessor);
      }
    }
    for (Processor<Processor<PsiReference>> customAction : collector.takeCustomSearchActions()) {
      // Deferred: each custom action runs later with the collector's reference processor.
      customs.add(() -> customAction.process(refProcessor));
    }
  });

  // Phase 2: pre-build a per-element occurrence processor for every global request.
  for (Map.Entry<Set<IdIndexEntry>, Collection<RequestWithProcessor>> bucket : globals.entrySet()) {
    for (RequestWithProcessor requestWithProcessor : bucket.getValue()) {
      PsiSearchRequest request = requestWithProcessor.request;
      StringSearcher searcher = new StringSearcher(request.word, request.caseSensitive, true, false);
      BulkOccurrenceProcessor occurrenceProcessor = adaptProcessor(request, requestWithProcessor.refProcessor);
      Processor<PsiElement> elementProcessor = localProcessor(occurrenceProcessor, progress, searcher);
      // A request must never be mapped to two different processors.
      assert !localProcessors.containsKey(requestWithProcessor) || localProcessors.get(requestWithProcessor) == elementProcessor;
      localProcessors.put(requestWithProcessor, elementProcessor);
    }
  }
}
Usage example of com.intellij.util.text.StringSearcher from the JetBrains intellij-community project: class PsiSearchHelperImpl, method processUsagesInNonJavaFiles.
// Searches plain-text (non-code) files for textual occurrences of the fully qualified name
// {@code qName} and feeds each occurrence to {@code processor}. Candidate files are found via
// the word index using only the short name (the segment after the last '.'/'$'), but the actual
// text match is done against the full qualified name. Returns false if the processor aborted.
@Override
public boolean processUsagesInNonJavaFiles(@Nullable final PsiElement originalElement, @NotNull String qName, @NotNull final PsiNonJavaFileReferenceProcessor processor, @NotNull final GlobalSearchScope initialScope) {
if (qName.isEmpty()) {
throw new IllegalArgumentException("Cannot search for elements with empty text. Element: " + originalElement + "; " + (originalElement == null ? null : originalElement.getClass()));
}
final ProgressIndicator progress = getOrCreateIndicator();
// The short (right-most) name segment is what the word index can look up.
int dotIndex = qName.lastIndexOf('.');
int dollarIndex = qName.lastIndexOf('$');
int maxIndex = Math.max(dotIndex, dollarIndex);
final String wordToSearch = maxIndex >= 0 ? qName.substring(maxIndex + 1) : qName;
final GlobalSearchScope theSearchScope = ReadAction.compute(() -> {
// For project elements, library sources are excluded from the scope
// (presumably because plain-text usages in libraries are not editable/interesting — confirm).
if (originalElement != null && myManager.isInProject(originalElement) && initialScope.isSearchInLibraries()) {
return initialScope.intersectWith(GlobalSearchScope.projectScope(myManager.getProject()));
}
return initialScope;
});
// Candidate files: any file containing the short name as a plain-text word.
PsiFile[] files = ReadAction.compute(() -> CacheManager.SERVICE.getInstance(myManager.getProject()).getFilesWithWord(wordToSearch, UsageSearchContext.IN_PLAIN_TEXT, theSearchScope, true));
// The searcher matches the FULL qualified name, case-sensitively.
final StringSearcher searcher = new StringSearcher(qName, true, true, false);
progress.pushState();
// Boxed flag so the occurrence lambda can signal abortion to the outer loop.
final Ref<Boolean> cancelled = Ref.create(Boolean.FALSE);
try {
progress.setText(PsiBundle.message("psi.search.in.non.java.files.progress"));
final SearchScope useScope = originalElement == null ? null : ReadAction.compute(() -> getUseScope(originalElement));
final int patternLength = qName.length();
for (int i = 0; i < files.length; i++) {
progress.checkCanceled();
final PsiFile psiFile = files[i];
// Binary files have no meaningful character content to scan.
if (psiFile instanceof PsiBinaryFile)
continue;
final CharSequence text = ReadAction.compute(() -> psiFile.getViewProvider().getContents());
LowLevelSearchUtil.processTextOccurrences(text, 0, text.length(), searcher, progress, index -> {
// Report the occurrence unless there is a real PSI reference at that offset AND the
// file lies inside the element's (scope-intersected) use scope — in that case the
// occurrence is already covered by regular reference search and is skipped here.
boolean isReferenceOK = ReadAction.compute(() -> {
PsiReference referenceAt = psiFile.findReferenceAt(index);
return referenceAt == null || useScope == null || !PsiSearchScopeUtil.isInScope(useScope.intersectWith(initialScope), psiFile);
});
if (isReferenceOK && !processor.process(psiFile, index, index + patternLength)) {
cancelled.set(Boolean.TRUE);
return false;
}
return true;
});
if (cancelled.get())
break;
progress.setFraction((double) (i + 1) / files.length);
}
} finally {
// Always restore the indicator state pushed above, even on cancellation/exception.
progress.popState();
}
return !cancelled.get();
}
Usage example of com.intellij.util.text.StringSearcher from the JetBrains intellij-community project: class LowLevelSearchUtil, method getTextOccurrences.
// Returns the offsets of all occurrences of the searcher's pattern within [startOffset, endOffset)
// of {@code text}, using a per-(text, searcher) cache. The cached int[] layout is:
// [0] = start of the scanned region, [1] = end of the scanned region, [2..] = occurrence offsets.
// If the cached region does not cover the requested range, the union of both ranges is rescanned
// and the cache entry replaced.
@NotNull
private static int[] getTextOccurrences(@NotNull CharSequence text, int startOffset, int endOffset, @NotNull StringSearcher searcher, @Nullable ProgressIndicator progress) {
if (endOffset > text.length()) {
throw new IllegalArgumentException("end: " + endOffset + " > length: " + text.length());
}
Map<StringSearcher, int[]> cachedMap = cache.get(text);
int[] cachedOccurrences = cachedMap == null ? null : cachedMap.get(searcher);
// The cache hit is usable only if its scanned region fully covers the requested range.
boolean hasCachedOccurrences = cachedOccurrences != null && cachedOccurrences[0] <= startOffset && cachedOccurrences[1] >= endOffset;
if (!hasCachedOccurrences) {
TIntArrayList occurrences = new TIntArrayList();
// Merge the requested range with any previously scanned range so the cache only grows.
int newStart = Math.min(startOffset, cachedOccurrences == null ? startOffset : cachedOccurrences[0]);
int newEnd = Math.max(endOffset, cachedOccurrences == null ? endOffset : cachedOccurrences[1]);
// Slots 0 and 1 record the scanned region bounds (see layout description above).
occurrences.add(newStart);
occurrences.add(newEnd);
for (int index = newStart; index < newEnd; index++) {
if (progress != null)
progress.checkCanceled();
//noinspection AssignmentToForLoopParameter
// scan() returns the next match at or after 'index', or a negative value if none remain.
index = searcher.scan(text, index, newEnd);
if (index < 0)
break;
// Presumably rejects matches embedded inside a longer identifier — see checkJavaIdentifier.
if (checkJavaIdentifier(text, 0, text.length(), searcher, index)) {
occurrences.add(index);
}
}
cachedOccurrences = occurrences.toNativeArray();
if (cachedMap == null) {
// Racing threads may both create the map; cacheOrGet keeps exactly one.
cachedMap = ConcurrencyUtil.cacheOrGet(cache, text, ContainerUtil.createConcurrentSoftMap());
}
cachedMap.put(searcher, cachedOccurrences);
}
// Filter the cached (possibly wider) occurrence list down to the requested range.
TIntArrayList offsets = new TIntArrayList(cachedOccurrences.length - 2);
for (int i = 2; i < cachedOccurrences.length; i++) {
int occurrence = cachedOccurrences[i];
// Occurrences are ascending, so once a match would overrun endOffset we can stop.
if (occurrence > endOffset - searcher.getPatternLength())
break;
if (occurrence >= startOffset) {
offsets.add(occurrence);
}
}
return offsets.toNativeArray();
}
Usage example of com.intellij.util.text.StringSearcher from the JetBrains intellij-community project: class FindManagerImpl, method findInCommentsAndLiterals.
// Finds the next match of {@code model} restricted to comment and/or string-literal tokens of
// {@code file}, starting from {@code offset} in the direction given by the model. Lexes the file
// with a syntax highlighter, caches the lexer/token-set/searcher state on the model between calls,
// and returns NOT_FOUND_RESULT when nothing matches.
@NotNull
private FindResult findInCommentsAndLiterals(@NotNull CharSequence text, char[] textArray, int offset, @NotNull FindModel model, @NotNull final VirtualFile file) {
// The cached search data lives on the (shared) model, so access is serialized on it.
synchronized (model) {
FileType ftype = file.getFileType();
Language lang = null;
if (ftype instanceof LanguageFileType) {
lang = ((LanguageFileType) ftype).getLanguage();
}
// Reuse cached lexer/token data unless the file or the find settings changed.
CommentsLiteralsSearchData data = model.getUserData(ourCommentsLiteralsSearchDataKey);
if (data == null || !Comparing.equal(data.lastFile, file) || !data.model.equals(model)) {
SyntaxHighlighter highlighter = getHighlighter(file, lang);
if (highlighter == null) {
// no syntax highlighter -> no search
return NOT_FOUND_RESULT;
}
TokenSet tokensOfInterest = TokenSet.EMPTY;
Set<Language> relevantLanguages;
if (lang != null) {
final Language finalLang = lang;
// Collect every language embedded in the file (templates etc.); fall back to the base language.
relevantLanguages = ApplicationManager.getApplication().runReadAction(new Computable<Set<Language>>() {
@Override
public Set<Language> compute() {
THashSet<Language> result = new THashSet<>();
FileViewProvider viewProvider = PsiManager.getInstance(myProject).findViewProvider(file);
if (viewProvider != null) {
result.addAll(viewProvider.getLanguages());
}
if (result.isEmpty()) {
result.add(finalLang);
}
return result;
}
});
for (Language relevantLanguage : relevantLanguages) {
tokensOfInterest = addTokenTypesForLanguage(model, relevantLanguage, tokensOfInterest);
}
if (model.isInStringLiteralsOnly()) {
// TODO: xml does not have string literals defined so we add XmlAttributeValue element type as convenience
// Lex a tiny synthetic XML snippet to discover the attribute-value token type.
final Lexer xmlLexer = getHighlighter(null, Language.findLanguageByID("XML")).getHighlightingLexer();
final String marker = "xxx";
xmlLexer.start("<a href=\"" + marker + "\" />");
while (!marker.equals(xmlLexer.getTokenText())) {
xmlLexer.advance();
if (xmlLexer.getTokenType() == null)
break;
}
IElementType convenienceXmlAttrType = xmlLexer.getTokenType();
if (convenienceXmlAttrType != null) {
tokensOfInterest = TokenSet.orSet(tokensOfInterest, TokenSet.create(convenienceXmlAttrType));
}
}
} else {
relevantLanguages = ContainerUtil.newHashSet();
// Custom (user-defined) file types expose fixed comment/string token types.
if (ftype instanceof AbstractFileType) {
if (model.isInCommentsOnly()) {
tokensOfInterest = TokenSet.create(CustomHighlighterTokenType.LINE_COMMENT, CustomHighlighterTokenType.MULTI_LINE_COMMENT);
}
if (model.isInStringLiteralsOnly()) {
tokensOfInterest = TokenSet.orSet(tokensOfInterest, TokenSet.create(CustomHighlighterTokenType.STRING, CustomHighlighterTokenType.SINGLE_QUOTED_STRING));
}
}
}
// Exactly one of matcher (regex mode) / searcher (plain mode) is non-null.
Matcher matcher = model.isRegularExpressions() ? compileRegExp(model, "") : null;
StringSearcher searcher = matcher != null ? null : new StringSearcher(model.getStringToFind(), model.isCaseSensitive(), true);
SyntaxHighlighterOverEditorHighlighter highlighterAdapter = new SyntaxHighlighterOverEditorHighlighter(highlighter, file, myProject);
data = new CommentsLiteralsSearchData(file, relevantLanguages, highlighterAdapter, tokensOfInterest, searcher, matcher, model.clone());
data.highlighter.restart(text);
model.putUserData(ourCommentsLiteralsSearchDataKey, data);
}
// Resume lexing from the last known-safe offset when searching forward; otherwise restart.
int initialStartOffset = model.isForward() && data.startOffset < offset ? data.startOffset : 0;
data.highlighter.resetPosition(initialStartOffset);
final Lexer lexer = data.highlighter.getHighlightingLexer();
IElementType tokenType;
TokenSet tokens = data.tokensOfInterest;
// lastGoodOffset tracks the most recent position where the lexer state was 0,
// i.e. a position lexing can safely be resumed from on the next call.
int lastGoodOffset = 0;
boolean scanningForward = model.isForward();
FindResultImpl prevFindResult = NOT_FOUND_RESULT;
while ((tokenType = lexer.getTokenType()) != null) {
if (lexer.getState() == 0)
lastGoodOffset = lexer.getTokenStart();
final TextAttributesKey[] keys = data.highlighter.getTokenHighlights(tokenType);
// A token is searched if its type is of interest, or its highlighting marks it as a
// string/comment matching the model's restriction.
if (tokens.contains(tokenType) || (model.isInStringLiteralsOnly() && ChunkExtractor.isHighlightedAsString(keys)) || (model.isInCommentsOnly() && ChunkExtractor.isHighlightedAsComment(keys))) {
int start = lexer.getTokenStart();
int end = lexer.getTokenEnd();
if (model.isInStringLiteralsOnly()) {
// skip literal quotes itself from matching
char c = text.charAt(start);
if (c == '"' || c == '\'') {
while (start < end && c == text.charAt(start)) {
++start;
if (c == text.charAt(end - 1) && start < end)
--end;
}
}
}
// Iterate over matches inside this token until one qualifies or the token is exhausted.
while (true) {
FindResultImpl findResult = null;
if (data.searcher != null) {
// Plain-text mode.
int matchStart = data.searcher.scan(text, textArray, start, end);
if (matchStart != -1 && matchStart >= start) {
final int matchEnd = matchStart + model.getStringToFind().length();
if (matchStart >= offset || !scanningForward)
findResult = new FindResultImpl(matchStart, matchEnd);
else {
// Match lies before the caret while searching forward — skip past it.
start = matchEnd;
continue;
}
}
} else if (start <= end) {
// Regex mode; the bombed sequence allows cancellation of runaway patterns.
data.matcher.reset(StringPattern.newBombedCharSequence(text.subSequence(start, end)));
if (data.matcher.find()) {
final int matchEnd = start + data.matcher.end();
int matchStart = start + data.matcher.start();
if (matchStart >= offset || !scanningForward) {
findResult = new FindResultImpl(matchStart, matchEnd);
} else {
int diff = 0;
if (start == end) {
// Zero-width match: force progress to avoid an infinite loop.
diff = scanningForward ? 1 : -1;
}
start = matchEnd + diff;
continue;
}
}
}
if (findResult != null) {
if (scanningForward) {
data.startOffset = lastGoodOffset;
return findResult;
} else {
// Backward search: keep the last match strictly before 'offset'.
if (findResult.getEndOffset() >= offset)
return prevFindResult;
prevFindResult = findResult;
start = findResult.getEndOffset();
continue;
}
}
break;
}
} else {
// Token from an embedded language not seen before: extend the token set lazily.
Language tokenLang = tokenType.getLanguage();
if (tokenLang != lang && tokenLang != Language.ANY && !data.relevantLanguages.contains(tokenLang)) {
tokens = addTokenTypesForLanguage(model, tokenLang, tokens);
data.tokensOfInterest = tokens;
data.relevantLanguages.add(tokenLang);
}
}
lexer.advance();
}
return prevFindResult;
}
}
Usage example of com.intellij.util.text.StringSearcher from the JetBrains intellij-community project: class LowLevelSearchUtilTest, method testProcessTextOccurrencesNeverScansBeyondStartEndOffsetIfNeverAskedTo.
// Performance regression test: processTextOccurrences over a tiny window in the middle of a huge
// text must stay cheap, proving the scan never walks outside [startOffset, endOffset).
public void testProcessTextOccurrencesNeverScansBeyondStartEndOffsetIfNeverAskedTo() {
  StringSearcher searcher = new StringSearcher("xxx", true, true);
  // Seeded with one dummy element so remove(0) is valid on the first iteration.
  TIntArrayList hits = new TIntArrayList(new int[] { -1 });
  CharSequence text = StringUtil.repeat("xxx z ", 1000000);
  PlatformTestUtil.startPerformanceTest("processTextOccurrences", 100, () -> {
    for (int attempt = 0; attempt < 10000; attempt++) {
      hits.remove(0);
      // An 8-char window around the midpoint; sliding by attempt % 20 varies alignment.
      int windowStart = text.length() / 2 + attempt % 20;
      int windowEnd = windowStart + 8;
      boolean completed = LowLevelSearchUtil.processTextOccurrences(text, windowStart, windowEnd, searcher, null, offset -> {
        hits.add(offset);
        return true;
      });
      assertTrue(completed);
      // Exactly one "xxx" fits in any such 8-char window of the repeated "xxx z " pattern.
      assertEquals(windowStart + "," + windowEnd, 1, hits.size());
    }
  }).cpuBound().assertTiming();
}
Aggregations