Use of com.github._1c_syntax.bsl.parser.Tokenizer in the project sonar-bsl-plugin-community by 1c-syntax.
The class BSLHighlighter, method saveHighlighting:
public void saveHighlighting(InputFile inputFile, DocumentContext documentContext) {
  Set<HighlightingData> highlightingData = new HashSet<>(documentContext.getTokens().size());

  // populate bsl highlight data
  documentContext.getTokens().forEach(token ->
    highlightToken(token, highlightingData, getTypeOfTextBSL(token.getType()))
  );

  // compute and populate sdbl highlight data
  Map<Integer, List<Token>> queryTokens = documentContext.getQueries().stream()
    .map(Tokenizer::getTokens)
    .flatMap(Collection::stream)
    .collect(Collectors.groupingBy(Token::getLine));

  Map<Integer, Set<HighlightingData>> highlightingDataSDBL = new HashMap<>(queryTokens.size());
  queryTokens.values().stream()
    .flatMap(Collection::stream)
    .forEach(token -> highlightToken(
      token,
      highlightingDataSDBL.computeIfAbsent(token.getLine(), BSLHighlighter::newHashSet),
      getTypeOfTextSDBL(token.getType())
    ));

  // find bsl strings to check overlap with sdbl tokens
  Set<HighlightingData> strings = highlightingData.stream()
    .filter(data -> data.getType() == TypeOfText.STRING)
    .collect(Collectors.toSet());

  strings.forEach((HighlightingData string) -> {
    Range stringRange = string.getRange();

    // find overlapping tokens
    Set<HighlightingData> dataOfCurrentLine = highlightingDataSDBL.get(stringRange.getStart().getLine());
    if (Objects.isNull(dataOfCurrentLine)) {
      return;
    }
    List<HighlightingData> currentTokens = dataOfCurrentLine.stream()
      .filter(sdblData -> Ranges.containsRange(stringRange, sdblData.getRange()))
      .sorted(Comparator.comparing(data -> data.getRange().getStart().getCharacter()))
      .collect(Collectors.toList());
    if (currentTokens.isEmpty()) {
      return;
    }

    // disable current bsl token
    string.setActive(false);

    // split current bsl token to parts excluding sdbl tokens
    Position start = stringRange.getStart();
    int line = start.getLine();
    int startChar;
    int endChar = start.getCharacter();

    for (HighlightingData currentToken : currentTokens) {
      startChar = endChar;
      endChar = currentToken.getRange().getStart().getCharacter();
      TypeOfText typeOfText = string.getType();
      if (startChar < endChar) {
        // add string part
        highlightingData.add(new HighlightingData(line, startChar, endChar, typeOfText));
      }
      endChar = currentToken.getRange().getEnd().getCharacter();
    }

    // add final string part
    startChar = endChar;
    endChar = string.getRange().getEnd().getCharacter();
    TypeOfText typeOfText = string.getType();
    if (startChar < endChar) {
      highlightingData.add(new HighlightingData(line, startChar, endChar, typeOfText));
    }
  });

  // merge collected bsl tokens with sdbl tokens
  highlightingDataSDBL.values().forEach(highlightingData::addAll);

  if (highlightingData.stream().filter(HighlightingData::isActive).findAny().isEmpty()) {
    return;
  }

  // save only active tokens
  NewHighlighting highlighting = context.newHighlighting().onFile(inputFile);
  highlightingData.stream()
    .filter(HighlightingData::isActive)
    .forEach(data -> highlighting.highlight(
      data.getRange().getStart().getLine(),
      data.getRange().getStart().getCharacter(),
      data.getRange().getEnd().getLine(),
      data.getRange().getEnd().getCharacter(),
      data.getType()
    ));
  highlighting.save();
}
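For illustration, a minimal standalone sketch of the splitting step above: carving a BSL string-literal highlight into the fragments left between the embedded SDBL tokens. The Segment record and splitAround helper are hypothetical names used only in this sketch, not part of the plugin; ranges are half-open column offsets on a single line.

import java.util.ArrayList;
import java.util.List;

final class StringSplitSketch {

  // Hypothetical stand-in for a single-line highlight range: half-open [start, end) in columns.
  record Segment(int start, int end) {}

  // Mirrors the loop in saveHighlighting: returns the fragments of the string literal
  // [stringStart, stringEnd) that are not covered by the (sorted) embedded SDBL tokens.
  static List<Segment> splitAround(int stringStart, int stringEnd, List<Segment> sdblTokens) {
    List<Segment> parts = new ArrayList<>();
    int cursor = stringStart;
    for (Segment token : sdblTokens) {
      if (cursor < token.start()) {
        parts.add(new Segment(cursor, token.start())); // string part before the SDBL token
      }
      cursor = token.end();
    }
    if (cursor < stringEnd) {
      parts.add(new Segment(cursor, stringEnd));       // trailing string part
    }
    return parts;
  }

  public static void main(String[] args) {
    // Embedded query tokens at columns [10, 17) and [23, 25) inside a string spanning [9, 35)
    System.out.println(splitAround(9, 35, List.of(new Segment(10, 17), new Segment(23, 25))));
    // -> [Segment[start=9, end=10], Segment[start=17, end=23], Segment[start=25, end=35]]
  }
}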
Use of com.github._1c_syntax.bsl.parser.Tokenizer in the project bsl-language-server by 1c-syntax.
The class DocumentContext, method rebuild:
public void rebuild(String content, int version) {
  computeLock.lock();

  boolean versionMatches = version == this.version && version != 0;
  boolean contentWasCleared = this.content == null;

  if (versionMatches && !contentWasCleared) {
    clearDependantData();
    computeLock.unlock();
    return;
  }

  clearSecondaryData();
  symbolTree.clear();
  this.content = content;
  tokenizer = new BSLTokenizer(content);
  this.version = version;

  computeLock.unlock();
}
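A simplified sketch of the same guard, assuming nothing beyond what the snippet shows; it is not the real DocumentContext. A matching non-zero version with intact content only clears derived data, anything else re-tokenizes. Field and helper names here are placeholders, and the lock is released in a finally block purely for safety in this sketch.

import java.util.concurrent.locks.ReentrantLock;

final class RebuildSketch {

  private final ReentrantLock computeLock = new ReentrantLock();
  private String content;
  private int version;
  private Object tokenizer; // stand-in for BSLTokenizer in this sketch

  void rebuild(String newContent, int newVersion) {
    computeLock.lock();
    try {
      boolean versionMatches = newVersion == this.version && newVersion != 0;
      boolean contentWasCleared = this.content == null;

      if (versionMatches && !contentWasCleared) {
        clearDependantData();          // same version: refresh only derived data
        return;
      }

      clearSecondaryData();            // new version or dropped content: rebuild from scratch
      this.content = newContent;
      this.tokenizer = new Object();   // real code: new BSLTokenizer(newContent)
      this.version = newVersion;
    } finally {
      computeLock.unlock();
    }
  }

  private void clearDependantData() { /* drop caches computed from the current token stream */ }
  private void clearSecondaryData() { /* drop other derived structures */ }
}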
Use of com.github._1c_syntax.bsl.parser.Tokenizer in the project bsl-language-server by 1c-syntax.
The class CommentedCodeDiagnostic, method isTextParsedAsCode:
private boolean isTextParsedAsCode(String text) {
  if (!codeRecognizer.meetsCondition(text)) {
    return false;
  }

  BSLTokenizer tokenizer = new BSLTokenizer(uncomment(text));
  final List<Token> tokens = tokenizer.getTokens();

  // If there are fewer than two tokens, there is nothing to analyze - treat it as code
  if (tokens.size() >= MINIMAL_TOKEN_COUNT) {
    List<Integer> tokenTypes = tokens.stream()
      .map(Token::getType)
      .filter(t -> t != BSLParser.WHITE_SPACE)
      .collect(Collectors.toList());

    // If two identifiers follow one another, this is not code
    for (int i = 0; i < tokenTypes.size() - 1; i++) {
      if (tokenTypes.get(i) == BSLParser.IDENTIFIER && tokenTypes.get(i + 1) == BSLParser.IDENTIFIER) {
        return false;
      }
    }
  }
  return true;
}
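A hedged usage sketch of the same heuristic in isolation, using only the calls visible above (BSLTokenizer, getTokens, BSLParser.IDENTIFIER and BSLParser.WHITE_SPACE); the codeRecognizer pre-check and the uncomment helper are omitted, the Token type is assumed to be ANTLR's, and the imports assume the bsl-parser package named in the headers.

import com.github._1c_syntax.bsl.parser.BSLParser;
import com.github._1c_syntax.bsl.parser.BSLTokenizer;
import org.antlr.v4.runtime.Token;

import java.util.List;
import java.util.stream.Collectors;

final class CommentHeuristicSketch {

  // Returns true if the (already uncommented) text contains two identifiers in a row,
  // which the diagnostic above treats as evidence of natural-language prose rather than code.
  static boolean looksLikeProse(String uncommentedText) {
    List<Token> tokens = new BSLTokenizer(uncommentedText).getTokens();
    List<Integer> types = tokens.stream()
      .map(Token::getType)
      .filter(t -> t != BSLParser.WHITE_SPACE)
      .collect(Collectors.toList());
    for (int i = 0; i < types.size() - 1; i++) {
      if (types.get(i) == BSLParser.IDENTIFIER && types.get(i + 1) == BSLParser.IDENTIFIER) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    System.out.println(looksLikeProse("проверка статуса документа"));   // adjacent identifiers: expected true
    System.out.println(looksLikeProse("Сумма = Цена * Количество;"));   // operators between identifiers: expected false
  }
}

The underlying idea is that real code rarely places two identifiers side by side without an operator, keyword, or punctuation between them, so adjacent identifiers are read as prose.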