Example usage of com.intellij.usages.impl.SyntaxHighlighterOverEditorHighlighter in the intellij-community project by JetBrains: the findInCommentsAndLiterals method of the FindManagerImpl class.
/**
 * Finds the next occurrence of {@code model}'s search string in {@code text}, restricted to
 * tokens that are comments and/or string literals (per {@code model.isInCommentsOnly()} /
 * {@code model.isInStringLiteralsOnly()}), by running the file's syntax-highlighting lexer
 * over the text. Lexer state and the token set of interest are cached on the model
 * (CommentsLiteralsSearchData) so consecutive forward searches over the same file can resume
 * incrementally instead of re-lexing from offset 0.
 *
 * @param text      the document text to search
 * @param textArray the backing char array of {@code text}, or null (passed to StringSearcher)
 * @param offset    the caret offset the search starts from
 * @param model     the find settings; also carries the cached search data as user data
 * @param file      the file being searched, used to pick the highlighter and languages
 * @return the next match, or NOT_FOUND_RESULT if none
 */
@NotNull
private FindResult findInCommentsAndLiterals(@NotNull CharSequence text, char[] textArray, int offset, @NotNull FindModel model, @NotNull final VirtualFile file) {
// The cached search data lives on the model itself, so serialize access per model.
synchronized (model) {
FileType ftype = file.getFileType();
Language lang = null;
if (ftype instanceof LanguageFileType) {
lang = ((LanguageFileType) ftype).getLanguage();
}
// Reuse the cached lexer/token data only if it was built for the same file and an equal model.
CommentsLiteralsSearchData data = model.getUserData(ourCommentsLiteralsSearchDataKey);
if (data == null || !Comparing.equal(data.lastFile, file) || !data.model.equals(model)) {
SyntaxHighlighter highlighter = getHighlighter(file, lang);
if (highlighter == null) {
// no syntax highlighter -> no search
return NOT_FOUND_RESULT;
}
// Collect the token types that count as comments/literals for every language in the file.
TokenSet tokensOfInterest = TokenSet.EMPTY;
Set<Language> relevantLanguages;
if (lang != null) {
final Language finalLang = lang;
// PSI access requires a read action; gather all languages present in the file's view
// provider (covers multi-language/templated files), falling back to the file-type language.
relevantLanguages = ApplicationManager.getApplication().runReadAction(new Computable<Set<Language>>() {
@Override
public Set<Language> compute() {
THashSet<Language> result = new THashSet<>();
FileViewProvider viewProvider = PsiManager.getInstance(myProject).findViewProvider(file);
if (viewProvider != null) {
result.addAll(viewProvider.getLanguages());
}
if (result.isEmpty()) {
result.add(finalLang);
}
return result;
}
});
for (Language relevantLanguage : relevantLanguages) {
tokensOfInterest = addTokenTypesForLanguage(model, relevantLanguage, tokensOfInterest);
}
if (model.isInStringLiteralsOnly()) {
// TODO: xml does not have string literals defined so we add XmlAttributeValue element type as convenience
// Lex a tiny synthetic XML snippet and advance until the lexer sits on the marker
// attribute value; the token type observed there is the XML attribute-value type.
final Lexer xmlLexer = getHighlighter(null, Language.findLanguageByID("XML")).getHighlightingLexer();
final String marker = "xxx";
xmlLexer.start("<a href=\"" + marker + "\" />");
while (!marker.equals(xmlLexer.getTokenText())) {
xmlLexer.advance();
if (xmlLexer.getTokenType() == null)
break;
}
IElementType convenienceXmlAttrType = xmlLexer.getTokenType();
if (convenienceXmlAttrType != null) {
tokensOfInterest = TokenSet.orSet(tokensOfInterest, TokenSet.create(convenienceXmlAttrType));
}
}
} else {
// No language for this file type: only AbstractFileType (user-defined custom
// highlighting) is searchable, via the generic custom-highlighter token types.
relevantLanguages = ContainerUtil.newHashSet();
if (ftype instanceof AbstractFileType) {
if (model.isInCommentsOnly()) {
tokensOfInterest = TokenSet.create(CustomHighlighterTokenType.LINE_COMMENT, CustomHighlighterTokenType.MULTI_LINE_COMMENT);
}
if (model.isInStringLiteralsOnly()) {
tokensOfInterest = TokenSet.orSet(tokensOfInterest, TokenSet.create(CustomHighlighterTokenType.STRING, CustomHighlighterTokenType.SINGLE_QUOTED_STRING));
}
}
}
// Exactly one of matcher/searcher is non-null: regex mode uses a Matcher, plain-text
// mode a StringSearcher.
Matcher matcher = model.isRegularExpressions() ? compileRegExp(model, "") : null;
StringSearcher searcher = matcher != null ? null : new StringSearcher(model.getStringToFind(), model.isCaseSensitive(), true);
SyntaxHighlighterOverEditorHighlighter highlighterAdapter = new SyntaxHighlighterOverEditorHighlighter(highlighter, file, myProject);
data = new CommentsLiteralsSearchData(file, relevantLanguages, highlighterAdapter, tokensOfInterest, searcher, matcher, model.clone());
data.highlighter.restart(text);
model.putUserData(ourCommentsLiteralsSearchDataKey, data);
}
// A forward search may resume lexing from the last known safe restart offset before
// 'offset'; otherwise re-lex from the start of the text.
int initialStartOffset = model.isForward() && data.startOffset < offset ? data.startOffset : 0;
data.highlighter.resetPosition(initialStartOffset);
final Lexer lexer = data.highlighter.getHighlightingLexer();
IElementType tokenType;
TokenSet tokens = data.tokensOfInterest;
int lastGoodOffset = 0;
boolean scanningForward = model.isForward();
FindResultImpl prevFindResult = NOT_FOUND_RESULT;
while ((tokenType = lexer.getTokenType()) != null) {
// Lexer state 0 marks a position lexing can safely restart from; remember it so the
// next forward search can resume here (see data.startOffset below).
if (lexer.getState() == 0)
lastGoodOffset = lexer.getTokenStart();
final TextAttributesKey[] keys = data.highlighter.getTokenHighlights(tokenType);
// A token is searchable if its type is in the interest set, or its highlighting marks it
// as a string/comment (catches token types not registered in the interest set).
if (tokens.contains(tokenType) || (model.isInStringLiteralsOnly() && ChunkExtractor.isHighlightedAsString(keys)) || (model.isInCommentsOnly() && ChunkExtractor.isHighlightedAsComment(keys))) {
int start = lexer.getTokenStart();
int end = lexer.getTokenEnd();
if (model.isInStringLiteralsOnly()) {
// skip literal quotes itself from matching
char c = text.charAt(start);
if (c == '"' || c == '\'') {
while (start < end && c == text.charAt(start)) {
++start;
if (c == text.charAt(end - 1) && start < end)
--end;
}
}
}
// Scan for matches inside the token's (quote-trimmed) range [start, end).
while (true) {
FindResultImpl findResult = null;
if (data.searcher != null) {
// Plain-text search path.
int matchStart = data.searcher.scan(text, textArray, start, end);
if (matchStart != -1 && matchStart >= start) {
final int matchEnd = matchStart + model.getStringToFind().length();
if (matchStart >= offset || !scanningForward)
findResult = new FindResultImpl(matchStart, matchEnd);
else {
// Forward search but the match is before the requested offset: keep scanning.
start = matchEnd;
continue;
}
}
} else if (start <= end) {
// Regex search over the token's subsequence; the "bombed" sequence aborts
// pathologically slow matching.
data.matcher.reset(StringPattern.newBombedCharSequence(text.subSequence(start, end)));
if (data.matcher.find()) {
final int matchEnd = start + data.matcher.end();
int matchStart = start + data.matcher.start();
if (matchStart >= offset || !scanningForward) {
findResult = new FindResultImpl(matchStart, matchEnd);
} else {
// A zero-width match on an empty range would loop forever; nudge by one.
int diff = 0;
if (start == end) {
diff = scanningForward ? 1 : -1;
}
start = matchEnd + diff;
continue;
}
}
}
if (findResult != null) {
if (scanningForward) {
// Record the safe restart point for the next search, then return the first match.
data.startOffset = lastGoodOffset;
return findResult;
} else {
// Backward search: remember the last match that ends before 'offset' and keep going.
if (findResult.getEndOffset() >= offset)
return prevFindResult;
prevFindResult = findResult;
start = findResult.getEndOffset();
continue;
}
}
break;
}
} else {
// Token belongs to a language not yet seen (e.g. embedded/injected code): lazily
// extend the token set of interest and remember the language.
Language tokenLang = tokenType.getLanguage();
if (tokenLang != lang && tokenLang != Language.ANY && !data.relevantLanguages.contains(tokenLang)) {
tokens = addTokenTypesForLanguage(model, tokenLang, tokens);
data.tokensOfInterest = tokens;
data.relevantLanguages.add(tokenLang);
}
}
lexer.advance();
}
// Text exhausted: for a backward search this is the last match found, otherwise NOT_FOUND_RESULT.
return prevFindResult;
}
}
Example usage of com.intellij.usages.impl.SyntaxHighlighterOverEditorHighlighter in the intellij-community project by JetBrains: the createTextChunks method of the ChunkExtractor class.
/**
 * Splits the text of {@code chars} between {@code start} and {@code end} into
 * syntax-highlighted {@link TextChunk}s, appending them to {@code result}.
 * The range is truncated at the first newline, so chunks never span more than one line.
 *
 * @param usageInfo2UsageAdapter the usage whose range is being rendered
 * @param chars                  the document text
 * @param start                  start offset of the range (must be <= end)
 * @param end                    end offset of the range; shrunk to the first '\n' if any
 * @param selectUsageWithBold    whether the usage itself should be rendered bold
 * @param result                 output list the chunks are appended to
 * @return {@code result} as an array
 */
@NotNull
public TextChunk[] createTextChunks(@NotNull UsageInfo2UsageAdapter usageInfo2UsageAdapter, @NotNull CharSequence chars, int start, int end, boolean selectUsageWithBold, @NotNull List<TextChunk> result) {
final Lexer lexer = myHighlighter.getHighlightingLexer();
final SyntaxHighlighterOverEditorHighlighter highlighter = myHighlighter;
LOG.assertTrue(start <= end);
// Limit the range to a single line.
int i = StringUtil.indexOf(chars, '\n', start, end);
if (i != -1)
end = i;
// If the document changed since the highlighter last ran, re-lex from scratch; otherwise,
// if the lexer is exhausted or already past 'start', rewind it to the beginning.
if (myDocumentStamp != myDocument.getModificationStamp()) {
highlighter.restart(chars);
myDocumentStamp = myDocument.getModificationStamp();
} else if (lexer.getTokenType() == null || lexer.getTokenStart() > start) {
// todo restart from nearest position with initial state
highlighter.resetPosition(0);
}
boolean isBeginning = true;
for (; lexer.getTokenType() != null; lexer.advance()) {
int hiStart = lexer.getTokenStart();
int hiEnd = lexer.getTokenEnd();
// Tokens are processed in order, so once a token starts at/after 'end' we are done.
if (hiStart >= end)
break;
// Clip the token to the requested range; skip tokens entirely outside it.
hiStart = Math.max(hiStart, start);
hiEnd = Math.min(hiEnd, end);
if (hiStart >= hiEnd) {
continue;
}
// Drop leading whitespace-only tokens so chunks start at the first visible text.
if (isBeginning) {
String text = chars.subSequence(hiStart, hiEnd).toString();
if (text.trim().isEmpty())
continue;
}
isBeginning = false;
IElementType tokenType = lexer.getTokenType();
TextAttributesKey[] tokenHighlights = highlighter.getTokenHighlights(tokenType);
processIntersectingRange(usageInfo2UsageAdapter, chars, hiStart, hiEnd, tokenHighlights, selectUsageWithBold, result);
}
return result.toArray(new TextChunk[result.size()]);
}
Aggregations