Use of org.sonar.api.batch.sensor.cpd.NewCpdTokens in project sonar-web by SonarSource.
The class HtmlTokensVisitor, method highlightAndDuplicate():
private void highlightAndDuplicate() {
  if (!getHtmlSourceCode().shouldComputeMetric()) {
    return;
  }
  NewHighlighting highlighting = context.newHighlighting();
  InputFile inputFile = getHtmlSourceCode().inputFile();
  highlighting.onFile(inputFile);
  NewCpdTokens cpdTokens = context.newCpdTokens();
  cpdTokens.onFile(inputFile);
  String fileContent;
  try {
    fileContent = inputFile.contents();
  } catch (IOException e) {
    throw new IllegalStateException("Cannot read " + inputFile, e);
  }
  for (Token token : HtmlLexer.create(context.fileSystem().encoding()).lex(fileContent)) {
    TokenType tokenType = token.getType();
    // Every token except EOF is registered for copy-paste detection.
    if (!tokenType.equals(GenericTokenType.EOF)) {
      TokenLocation tokenLocation = new TokenLocation(token);
      cpdTokens.addToken(tokenLocation.startLine(), tokenLocation.startCharacter(), tokenLocation.endLine(), tokenLocation.endCharacter(), token.getValue());
    }
    if (tokenType.equals(HtmlTokenType.DOCTYPE)) {
      highlight(highlighting, token, TypeOfText.STRUCTURED_COMMENT);
    } else if (tokenType.equals(HtmlTokenType.EXPRESSION)) {
      highlight(highlighting, token, TypeOfText.ANNOTATION);
    } else if (tokenType.equals(HtmlTokenType.TAG)) {
      highlight(highlighting, token, TypeOfText.KEYWORD);
    } else if (tokenType.equals(HtmlTokenType.ATTRIBUTE)) {
      TokenLocation tokenLocation = new TokenLocation(token);
      // The '+ 1' skips the '=' preceding the attribute value, so only the value is highlighted as a string.
      highlighting.highlight(tokenLocation.startLine(), tokenLocation.startCharacter() + /* = */ 1, tokenLocation.endLine(), tokenLocation.endCharacter(), TypeOfText.STRING);
    }
    // Trivia (e.g. comments attached to the token) are highlighted separately.
    for (Trivia trivia : token.getTrivia()) {
      highlight(highlighting, trivia.getToken(), TypeOfText.COMMENT);
    }
  }
  highlighting.save();
  cpdTokens.save();
}
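The highlight(...) helper called for DOCTYPE, EXPRESSION, TAG and trivia tokens is not part of this excerpt. Judging from the ATTRIBUTE branch, which inlines the same logic, a minimal sketch of it would look like this (an assumption based on the calls above, not verified project source):

private static void highlight(NewHighlighting highlighting, Token token, TypeOfText typeOfText) {
  // Convert the lexer token to line/column coordinates and register it
  // with the requested highlighting type.
  TokenLocation tokenLocation = new TokenLocation(token);
  highlighting.highlight(tokenLocation.startLine(), tokenLocation.startCharacter(), tokenLocation.endLine(), tokenLocation.endCharacter(), typeOfText);
}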
Use of org.sonar.api.batch.sensor.cpd.NewCpdTokens in project sonar-web by SonarSource.
The class WebTokensVisitor, method highlightAndDuplicate():
private void highlightAndDuplicate() {
  NewHighlighting highlighting = context.newHighlighting();
  InputFile inputFile = getWebSourceCode().inputFile();
  highlighting.onFile(inputFile);
  NewCpdTokens cpdTokens = context.newCpdTokens();
  cpdTokens.onFile(inputFile);
  for (Token token : WebLexer.create(context.fileSystem().encoding()).lex(inputFile.file())) {
    TokenType tokenType = token.getType();
    if (!tokenType.equals(GenericTokenType.EOF)) {
      TokenLocation tokenLocation = new TokenLocation(token);
      cpdTokens.addToken(tokenLocation.startLine(), tokenLocation.startCharacter(), tokenLocation.endLine(), tokenLocation.endCharacter(), token.getValue());
    }
    if (tokenType.equals(WebTokenType.DOCTYPE)) {
      highlight(highlighting, token, TypeOfText.STRUCTURED_COMMENT);
    } else if (tokenType.equals(WebTokenType.EXPRESSION)) {
      highlight(highlighting, token, TypeOfText.ANNOTATION);
    } else if (tokenType.equals(WebTokenType.TAG)) {
      highlight(highlighting, token, TypeOfText.KEYWORD);
    } else if (tokenType.equals(WebTokenType.ATTRIBUTE)) {
      TokenLocation tokenLocation = new TokenLocation(token);
      highlighting.highlight(tokenLocation.startLine(), tokenLocation.startCharacter() + /* = */ 1, tokenLocation.endLine(), tokenLocation.endCharacter(), TypeOfText.STRING);
    }
    for (Trivia trivia : token.getTrivia()) {
      highlight(highlighting, trivia.getToken(), TypeOfText.COMMENT);
    }
  }
  highlighting.save();
  cpdTokens.save();
}
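Stripped of the highlighting concerns, both visitors use the same NewCpdTokens life cycle: obtain the builder from the SensorContext, bind it to an InputFile with onFile(), feed it token ranges with addToken(), and call save() exactly once per file. A minimal sketch of that pattern in a bare sensor (the class name, MyToken, and lexTokens are illustrative stand-ins, and imports are omitted as in the excerpts above):

public class MyCpdSensor implements Sensor {

  @Override
  public void describe(SensorDescriptor descriptor) {
    descriptor.name("Illustrative CPD tokenizer");
  }

  @Override
  public void execute(SensorContext context) {
    FileSystem fs = context.fileSystem();
    for (InputFile inputFile : fs.inputFiles(fs.predicates().all())) {
      // 1. Create the builder and bind it to the file.
      NewCpdTokens cpdTokens = context.newCpdTokens().onFile(inputFile);
      // 2. Register each relevant token with its range and text.
      //    lexTokens(...) is a hypothetical stand-in for a real lexer.
      for (MyToken token : lexTokens(inputFile)) {
        cpdTokens.addToken(token.startLine(), token.startChar(), token.endLine(), token.endChar(), token.value());
      }
      // 3. Save exactly once per file so the platform can run CPD on it.
      cpdTokens.save();
    }
  }
}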
Use of org.sonar.api.batch.sensor.cpd.NewCpdTokens in project sonarqube by SonarSource.
The class CpdTokenizerSensor, method tokenize():
private void tokenize(InputFile inputFile, SensorContext context) {
  int lineIdx = 1;
  NewCpdTokens newCpdTokens = context.newCpdTokens().onFile(inputFile);
  try {
    StringBuilder sb = new StringBuilder();
    for (String line : FileUtils.readLines(inputFile.file(), inputFile.charset())) {
      int startOffset = 0;
      int endOffset = 0;
      for (int i = 0; i < line.length(); i++) {
        char c = line.charAt(i);
        if (Character.isWhitespace(c)) {
          // Whitespace terminates the current token, if any.
          if (sb.length() > 0) {
            newCpdTokens.addToken(inputFile.newRange(lineIdx, startOffset, lineIdx, endOffset), sb.toString());
            sb.setLength(0);
          }
          // The next token can start no earlier than the character after this
          // whitespace; without the '+ 1' the range would include the space.
          startOffset = endOffset + 1;
        } else {
          sb.append(c);
        }
        endOffset++;
      }
      // Flush the trailing token at end of line.
      if (sb.length() > 0) {
        newCpdTokens.addToken(inputFile.newRange(lineIdx, startOffset, lineIdx, endOffset), sb.toString());
        sb.setLength(0);
      }
      lineIdx++;
    }
  } catch (IOException e) {
    throw new IllegalStateException("Unable to tokenize", e);
  }
  newCpdTokens.save();
}
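To make the offset arithmetic concrete, here is a hand-traced example (not taken from the project's tests) of the addToken calls the loop above emits for one line of input, with the start offset advanced past each whitespace character:

// Input line 1: "int x = 1;"
// Whitespace splits it into four tokens; offsets are 0-based columns.
newCpdTokens.addToken(inputFile.newRange(1, 0, 1, 3), "int");
newCpdTokens.addToken(inputFile.newRange(1, 4, 1, 5), "x");
newCpdTokens.addToken(inputFile.newRange(1, 6, 1, 7), "=");
newCpdTokens.addToken(inputFile.newRange(1, 8, 1, 10), "1;"); // flushed after the inner loop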