Use of org.opengrok.indexer.analysis.Scopes.Scope in the project OpenGrok by OpenGrok.
Example from the class ScopesTest, method testSerialize.
@Test
void testSerialize() throws IOException, ClassNotFoundException {
    // Round-trip a Scopes instance through its binary form and verify
    // that the single registered scope survives deserialization.
    Scopes original = new Scopes();
    original.addScope(new Scope(1, 100, "name", "namespace", "signature"));

    byte[] serialized = original.serialize();
    Scopes roundTripped = Scopes.deserialize(serialized);

    assertEquals(1, roundTripped.size());
}
Use of org.opengrok.indexer.analysis.Scopes.Scope in the project OpenGrok by OpenGrok.
Example from the class CAnalyzerFactoryTest, method testScopeAnalyzer.
/**
 * Verifies that analyzing a C source file with scopes enabled attaches a
 * SCOPES field to the Lucene document, and that the deserialized scope map
 * assigns each line of {@code sample.c} to the expected function
 * (foo, bar, main) or to the global scope.
 *
 * @throws java.lang.Exception exception
 */
@Test
void testScopeAnalyzer() throws Exception {
    String path = repository.getSourceRoot() + "/c/sample.c";
    File sample = new File(path);
    assertTrue(sample.canRead() && sample.isFile(), "c testfile " + sample + " not found");

    Document doc = new Document();
    doc.add(new Field(QueryBuilder.FULLPATH, path, string_ft_nstored_nanalyzed_norms));
    StringWriter xrefOut = new StringWriter();
    analyzer.setCtags(ctags);
    analyzer.setScopesEnabled(true);
    analyzer.analyze(doc, getStreamSource(path), xrefOut);

    IndexableField scopesField = doc.getField(QueryBuilder.SCOPES);
    assertNotNull(scopesField);

    Scopes scopes = Scopes.deserialize(scopesField.binaryValue().bytes);
    Scope globalScope = scopes.getScope(-1);
    // Exactly three named scopes are expected: foo, bar, main.
    assertEquals(3, scopes.size());

    // Line ranges below mirror the function bodies in sample.c;
    // anything outside them must resolve to the global scope.
    for (int line = 0; line < 50; ++line) {
        Scope actual = scopes.getScope(line);
        if (line >= 8 && line <= 22) {
            assertEquals("foo", actual.getName());
        } else if (line >= 24 && line <= 38) {
            assertEquals("bar", actual.getName());
        } else if (line >= 41 && line <= 48) {
            assertEquals("main", actual.getName());
        } else {
            assertEquals(actual, globalScope);
        }
        // sample.c is plain C: no scope should carry a namespace.
        assertNull(actual.getNamespace());
    }
}
Use of org.opengrok.indexer.analysis.Scopes.Scope in the project OpenGrok by OpenGrok.
Example from the class Context, method getContext.
/**
 * Writes the context for one file — the lines matching the current query,
 * plus any matching definition tags — either as HTML to {@code out} or as
 * {@link Hit} objects appended to {@code hits}.
 * Closes the given <var>in</var> reader on return.
 *
 * @param in File to be matched
 * @param out to write the context
 * @param urlPrefix URL prefix
 * @param morePrefix to link to more... page
 * @param path path of the file
 * @param tags format to highlight defs.
 * @param limit should the number of matching lines be limited?
 * @param isDefSearch is definition search
 * @param hits list of hits
 * @param scopes scopes object
 * @return Did it get any matching context?
 */
public boolean getContext(Reader in, Writer out, String urlPrefix, String morePrefix, String path, Definitions tags, boolean limit, boolean isDefSearch, List<Hit> hits, Scopes scopes) {
    if (m == null) {
        // No line matchers were built for this query; nothing can match.
        IOUtils.close(in);
        return false;
    }
    boolean anything = false;
    TreeMap<Integer, String[]> matchingTags = null;
    String urlPrefixE = (urlPrefix == null) ? "" : Util.uriEncodePath(urlPrefix);
    String pathE = Util.uriEncodePath(path);
    if (tags != null) {
        matchingTags = new TreeMap<>();
        try {
            for (Definitions.Tag tag : tags.getTags()) {
                for (LineMatcher lineMatcher : m) {
                    if (lineMatcher.match(tag.symbol) == LineMatcher.MATCHED) {
                        String scope = null;
                        String scopeUrl = null;
                        if (scopes != null) {
                            // NOTE(review): assumes getScope() never returns null
                            // (falls back to the global scope) — confirm in Scopes.
                            Scope scp = scopes.getScope(tag.line);
                            scope = scp.getName() + "()";
                            scopeUrl = "<a href=\"" + urlPrefixE + pathE + "#" + scp.getLineFrom() + "\">" + scope + "</a>";
                        }
                        /* desc[0] is matched symbol
                         * desc[1] is line number
                         * desc[2] is type
                         * desc[3] is matching line;
                         * desc[4] is scope
                         */
                        String[] desc = { tag.symbol, Integer.toString(tag.line), tag.type, tag.text, scope };
                        if (in == null) {
                            // Tag-only mode (no file content): emit the match directly.
                            if (out == null) {
                                Hit hit = new Hit(path, Util.htmlize(desc[3]).replace(desc[0], "<b>" + desc[0] + "</b>"), desc[1], false, alt);
                                hits.add(hit);
                            } else {
                                out.write("<a class=\"s\" href=\"");
                                out.write(urlPrefixE);
                                out.write(pathE);
                                out.write("#");
                                out.write(desc[1]);
                                out.write("\"><span class=\"l\">");
                                out.write(desc[1]);
                                out.write("</span> ");
                                out.write(Util.htmlize(desc[3]).replace(desc[0], "<b>" + desc[0] + "</b>"));
                                out.write("</a> ");
                                if (desc[4] != null) {
                                    // BUGFIX: the original wrote '<a href\"' (missing '=')
                                    // and then nested scopeUrl — itself a complete anchor
                                    // element — inside the href attribute, producing
                                    // invalid HTML. scopeUrl is already the finished
                                    // scope link, so write it directly.
                                    out.write("<span class=\"scope\">in ");
                                    out.write(scopeUrl);
                                    out.write("</span> ");
                                }
                                out.write("<i>");
                                out.write(desc[2]);
                                out.write("</i><br/>");
                            }
                            anything = true;
                        } else {
                            // File content follows below: remember the tag so the
                            // tokenizer can merge it into the line context.
                            matchingTags.put(tag.line, desc);
                        }
                        break;
                    }
                }
            }
        } catch (Exception e) {
            if (hits != null) {
                // @todo verify why we ignore all exceptions?
                LOGGER.log(Level.WARNING, "Could not get context for " + path, e);
            }
        }
    }
    // Callers pass a null reader when they only want the matching tags.
    if (in == null) {
        return anything;
    }
    PlainLineTokenizer tokens = new PlainLineTokenizer(null);
    boolean truncated = false;
    boolean lim = limit;
    RuntimeEnvironment env = RuntimeEnvironment.getInstance();
    if (!env.isQuickContextScan()) {
        lim = false;
    }
    if (lim) {
        char[] buffer = new char[MAXFILEREAD];
        int charsRead;
        try {
            charsRead = in.read(buffer);
            if (charsRead == MAXFILEREAD) {
                // we probably only read parts of the file, so set the
                // truncated flag to enable the [all...] link that
                // requests all matches
                truncated = true;
                // Back up to the last newline so we do not process a partial
                // trailing line (scan at most 100 characters back).
                for (int i = charsRead - 1; i > charsRead - 100; i--) {
                    if (buffer[i] == '\n') {
                        charsRead = i;
                        break;
                    }
                }
            }
        } catch (IOException e) {
            LOGGER.log(Level.WARNING, "An error occurred while reading data", e);
            // BUGFIX: honor the documented contract — close the reader
            // before this early return (it was leaked before).
            IOUtils.close(in);
            return anything;
        }
        if (charsRead <= 0) {
            // BUGFIX: read() returns -1 at EOF (empty file); the original
            // '== 0' check let -1 flow into reInit() as a length.
            // Also close the reader before returning (it was leaked before).
            IOUtils.close(in);
            return anything;
        }
        tokens.reInit(buffer, charsRead, out, urlPrefixE + pathE + "#", matchingTags, scopes);
    } else {
        tokens.reInit(in, out, urlPrefixE + pathE + "#", matchingTags, scopes);
    }
    if (hits != null) {
        tokens.setAlt(alt);
        tokens.setHitList(hits);
        tokens.setFilename(path);
    }
    int limitMaxLines = env.getContextLimit();
    try {
        String token;
        int matchState;
        int matchedLines = 0;
        while ((token = tokens.yylex()) != null && (!lim || matchedLines < limitMaxLines)) {
            for (LineMatcher lineMatcher : m) {
                matchState = lineMatcher.match(token);
                if (matchState == LineMatcher.MATCHED) {
                    if (!isDefSearch) {
                        tokens.printContext();
                    } else if (tokens.tags.containsKey(tokens.markedLine)) {
                        // Definition search: only print lines carrying a tag.
                        tokens.printContext();
                    }
                    matchedLines++;
                    break;
                } else if (matchState == LineMatcher.WAIT) {
                    tokens.holdOn();
                } else {
                    tokens.neverMind();
                }
            }
        }
        anything = matchedLines > 0;
        tokens.dumpRest();
        if (lim && (truncated || matchedLines == limitMaxLines) && out != null) {
            out.write("<a href=\"" + Util.uriEncodePath(morePrefix) + pathE + "?" + queryAsURI + "\">[all...]</a>");
        }
    } catch (IOException e) {
        LOGGER.log(Level.WARNING, "Could not get context for " + path, e);
    } finally {
        IOUtils.close(in);
        if (out != null) {
            try {
                out.flush();
            } catch (IOException e) {
                LOGGER.log(Level.WARNING, "Failed to flush stream: ", e);
            }
        }
    }
    return anything;
}
Use of org.opengrok.indexer.analysis.Scopes.Scope in the project OpenGrok by OpenGrok.
Example from the class JFlexNonXref, method startScope.
protected void startScope() {
    // Ask the shared helper whether a new scope begins here; when it
    // declines (returns null), the current scope stays in effect.
    final Scope candidate = JFlexXrefUtils.maybeNewScope(scopesEnabled, scope, this, defs);
    if (candidate == null) {
        return;
    }
    scope = candidate;
    scopeLevel = 0;
}
Use of org.opengrok.indexer.analysis.Scopes.Scope in the project OpenGrok by OpenGrok.
Example from the class JFlexXref, method startScope.
protected void startScope() {
    // Ask the shared helper whether a new scope begins at the matcher's
    // position; a null reply means the current scope stays in effect.
    final Scope candidate = JFlexXrefUtils.maybeNewScope(scopesEnabled, scope, matcher, defs);
    if (candidate == null) {
        return;
    }
    scope = candidate;
    scopeLevel = 0;
}
Aggregations