Use of org.opensolaris.opengrok.analysis.Definitions in project OpenGrok by OpenGrok.
The class Context, method getContext.
/**
* Writes the lines that match the current query, together with any matching
* definitions from <var>tags</var>, either as HTML to <var>out</var> or as
* {@link Hit} objects added to <var>hits</var>.
* Closes the given <var>in</var> reader on return.
*
* @param in reader over the file to be matched; pass null to report matching tags only
* @param out where to write the context; may be null when hits are collected instead
* @param urlPrefix URL prefix for links into the cross-referenced source
* @param morePrefix prefix used to link to the more... page
* @param path path of the file
* @param tags definitions to highlight
* @param limit should the number of matching lines be limited?
* @param isDefSearch whether this is a definition search, in which case only lines carrying a definition are printed
* @param hits list that collects {@link Hit} objects when no writer is used; may be null
* @param scopes scope information used to link a match to its enclosing scope; may be null
* @return Did it get any matching context?
*/
public boolean getContext(Reader in, Writer out, String urlPrefix, String morePrefix, String path, Definitions tags, boolean limit, boolean isDefSearch, List<Hit> hits, Scopes scopes) {
alt = !alt;
if (m == null) {
IOUtils.close(in);
return false;
}
boolean anything = false;
TreeMap<Integer, String[]> matchingTags = null;
String urlPrefixE = (urlPrefix == null) ? "" : Util.URIEncodePath(urlPrefix);
String pathE = Util.URIEncodePath(path);
if (tags != null) {
matchingTags = new TreeMap<Integer, String[]>();
try {
for (Definitions.Tag tag : tags.getTags()) {
for (int i = 0; i < m.length; i++) {
if (m[i].match(tag.symbol) == LineMatcher.MATCHED) {
String scope = null;
String scopeUrl = null;
if (scopes != null) {
Scope scp = scopes.getScope(tag.line);
scope = scp.getName() + "()";
scopeUrl = "<a href=\"" + urlPrefixE + pathE + "#" + Integer.toString(scp.getLineFrom()) + "\">" + scope + "</a>";
}
/* desc[0] is matched symbol
* desc[1] is line number
* desc[2] is type
* desc[3] is matching line;
* desc[4] is scope
*/
String[] desc = { tag.symbol, Integer.toString(tag.line), tag.type, tag.text, scope };
if (in == null) {
if (out == null) {
Hit hit = new Hit(path, Util.htmlize(desc[3]).replace(desc[0], "<b>" + desc[0] + "</b>"), desc[1], false, alt);
hits.add(hit);
anything = true;
} else {
out.write("<a class=\"s\" href=\"");
out.write(urlPrefixE);
out.write(pathE);
out.write("#");
out.write(desc[1]);
out.write("\"><span class=\"l\">");
out.write(desc[1]);
out.write("</span> ");
out.write(Util.htmlize(desc[3]).replace(desc[0], "<b>" + desc[0] + "</b>"));
out.write("</a> ");
if (desc[4] != null) {
out.write("<span class=\"scope\"><a href\"");
out.write(scopeUrl);
out.write("\">in ");
out.write(desc[4]);
out.write("</a></span> ");
}
out.write("<i>");
out.write(desc[2]);
out.write("</i><br/>");
anything = true;
}
} else {
matchingTags.put(tag.line, desc);
}
break;
}
}
}
} catch (Exception e) {
if (hits != null) {
// @todo verify why we ignore all exceptions?
LOGGER.log(Level.WARNING, "Could not get context for " + path, e);
}
}
}
/*
* If the caller passed a null reader, only the matching tags were wanted,
* so we are done.
*/
if (in == null) {
return anything;
}
int charsRead = 0;
boolean truncated = false;
boolean lim = limit;
if (!RuntimeEnvironment.getInstance().isQuickContextScan()) {
lim = false;
}
if (lim) {
try {
charsRead = in.read(buffer);
if (charsRead == MAXFILEREAD) {
// we probably only read parts of the file, so set the
// truncated flag to enable the [all...] link that
// requests all matches
truncated = true;
// back up to the last newline so we do not cut a line in
// half (looking at most 100 characters back)
for (int i = charsRead - 1; i > charsRead - 100; i--) {
if (buffer[i] == '\n') {
charsRead = i;
break;
}
}
}
} catch (IOException e) {
LOGGER.log(Level.WARNING, "An error occurred while reading data", e);
return anything;
}
if (charsRead == 0) {
return anything;
}
tokens.reInit(buffer, charsRead, out, urlPrefixE + pathE + "#", matchingTags, scopes);
} else {
tokens.reInit(in, out, urlPrefixE + pathE + "#", matchingTags, scopes);
}
if (hits != null) {
tokens.setAlt(alt);
tokens.setHitList(hits);
tokens.setFilename(path);
}
try {
String token;
int matchState = LineMatcher.NOT_MATCHED;
int matchedLines = 0;
while ((token = tokens.yylex()) != null && (!lim || matchedLines < 10)) {
for (int i = 0; i < m.length; i++) {
matchState = m[i].match(token);
if (matchState == LineMatcher.MATCHED) {
if (!isDefSearch) {
tokens.printContext();
} else if (tokens.tags.containsKey(tokens.markedLine)) {
tokens.printContext();
}
matchedLines++;
//out.write("<br> <i>Matched " + token + " maxlines = " + matchedLines + "</i><br>");
break;
} else if (matchState == LineMatcher.WAIT) {
tokens.holdOn();
} else {
tokens.neverMind();
}
}
}
anything = matchedLines > 0;
tokens.dumpRest();
if (lim && (truncated || matchedLines == 10) && out != null) {
out.write("<a href=\"" + Util.URIEncodePath(morePrefix) + pathE + "?" + queryAsURI + "\">[all...]</a>");
}
} catch (IOException e) {
LOGGER.log(Level.WARNING, "Could not get context for " + path, e);
} finally {
IOUtils.close(in);
if (out != null) {
try {
out.flush();
} catch (IOException e) {
LOGGER.log(Level.WARNING, "Failed to flush stream: ", e);
}
}
}
return anything;
}
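As a quick orientation, the sketch below shows how a caller might drive getContext in its two modes: passing a null reader to collect matching definitions as Hit objects, and passing a real reader to emit highlighted HTML. This is only an illustration; the helper names and prefix strings are made up, the Context instance is assumed to have been built elsewhere for the current query, and the package paths are taken as assumptions from the OpenGrok source tree.
import java.io.Reader;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.List;

import org.opensolaris.opengrok.analysis.Definitions;
import org.opensolaris.opengrok.analysis.Scopes;
import org.opensolaris.opengrok.search.Hit;
import org.opensolaris.opengrok.search.context.Context;

public class GetContextSketch {

    // Tags-only mode: with a null reader and a null writer, the matching
    // definitions come back as Hit objects in the supplied list.
    static List<Hit> collectDefinitionHits(Context sourceContext, String path,
            Definitions tags, Scopes scopes) {
        List<Hit> hits = new ArrayList<>();
        sourceContext.getContext(null, null, "/source/xref", "/source/more",
                path, tags, /* limit */ false, /* isDefSearch */ true, hits, scopes);
        return hits;
    }

    // HTML mode: the reader is consumed (and closed by getContext) and the
    // matching lines are written to the writer as highlighted HTML.
    static String renderContextHtml(Context sourceContext, Reader in, String path,
            Definitions tags, Scopes scopes) {
        StringWriter out = new StringWriter();
        sourceContext.getContext(in, out, "/source/xref", "/source/more",
                path, tags, /* limit */ true, /* isDefSearch */ false, null, scopes);
        return out.toString();
    }
}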
Use of org.opensolaris.opengrok.analysis.Definitions in project OpenGrok by OpenGrok.
The class Results, method prettyPrint.
/**
* Prints out results in HTML form. The following search helper fields are
* required to be properly initialized: <ul>
* <li>{@link SearchHelper#dataRoot}</li>
* <li>{@link SearchHelper#contextPath}</li>
* <li>{@link SearchHelper#searcher}</li>
* <li>{@link SearchHelper#hits}</li>
* <li>{@link SearchHelper#historyContext} (ignored if {@code null})</li>
* <li>{@link SearchHelper#sourceContext} (ignored if {@code null})</li>
* <li>{@link SearchHelper#summarizer} (if sourceContext is not {@code null})</li>
* <li>{@link SearchHelper#compressed} (if sourceContext is not {@code null})</li>
* <li>{@link SearchHelper#sourceRoot} (if sourceContext or historyContext is not {@code null})</li>
* </ul>
*
* @param out write destination
* @param sh search helper which has all required fields set
* @param start index of the first hit to print
* @param end index of the last hit to print
* @throws HistoryException
* @throws IOException
* @throws ClassNotFoundException
*/
public static void prettyPrint(Writer out, SearchHelper sh, int start, int end) throws HistoryException, IOException, ClassNotFoundException {
Project p;
String ctxE = Util.URIEncodePath(sh.contextPath);
String xrefPrefix = sh.contextPath + Prefix.XREF_P;
String morePrefix = sh.contextPath + Prefix.MORE_P;
String xrefPrefixE = ctxE + Prefix.XREF_P;
File xrefDataDir = new File(sh.dataRoot, Prefix.XREF_P.toString());
for (Map.Entry<String, ArrayList<Document>> entry : createMap(sh.searcher, sh.hits, start, end).entrySet()) {
String parent = entry.getKey();
out.write("<tr class=\"dir\"><td colspan=\"3\"><a href=\"");
out.write(xrefPrefixE);
out.write(Util.URIEncodePath(parent));
out.write("/\">");
// htmlize ???
out.write(parent);
out.write("/</a>");
if (sh.desc != null) {
out.write(" - <i>");
// htmlize ???
out.write(sh.desc.get(parent));
out.write("</i>");
}
JSONArray messages;
if ((p = Project.getProject(parent)) != null && (messages = Util.messagesToJson(p, RuntimeEnvironment.MESSAGES_MAIN_PAGE_TAG)).size() > 0) {
out.write(" <a ");
out.write("href=\"" + xrefPrefix + "/" + p.getName() + "\">");
out.write("<span class=\"important-note important-note-rounded\" data-messages='" + messages + "'>!</span>");
out.write("</a>");
}
out.write("</td></tr>");
for (Document doc : entry.getValue()) {
String rpath = doc.get(QueryBuilder.PATH);
String rpathE = Util.URIEncodePath(rpath);
DateFormat df;
out.write("<tr>");
Util.writeHAD(out, sh.contextPath, rpathE, false);
out.write("<td class=\"f\"><a href=\"");
out.write(xrefPrefixE);
out.write(rpathE);
out.write("\"");
if (RuntimeEnvironment.getInstance().isLastEditedDisplayMode()) {
try {
// insert last edited date if possible
df = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.SHORT);
String dd = df.format(DateTools.stringToDate(doc.get("date")));
out.write(" class=\"result-annotate\" title=\"");
out.write("Last modified: ");
out.write(dd);
out.write("\"");
} catch (ParseException ex) {
LOGGER.log(Level.WARNING, "An error occurred while parsing date information", ex);
}
}
out.write(">");
// htmlize ???
out.write(rpath.substring(rpath.lastIndexOf('/') + 1));
out.write("</a>");
out.write("</td><td><tt class=\"con\">");
if (sh.sourceContext != null) {
Genre genre = Genre.get(doc.get("t"));
Definitions tags = null;
IndexableField tagsField = doc.getField(QueryBuilder.TAGS);
if (tagsField != null) {
tags = Definitions.deserialize(tagsField.binaryValue().bytes);
}
Scopes scopes;
IndexableField scopesField = doc.getField(QueryBuilder.SCOPES);
if (scopesField != null) {
scopes = Scopes.deserialize(scopesField.binaryValue().bytes);
} else {
scopes = new Scopes();
}
if (Genre.XREFABLE == genre && sh.summarizer != null) {
String xtags = getTags(xrefDataDir, rpath, sh.compressed);
// FIXME use Highlighter from lucene contrib here,
// instead of summarizer, we'd also get rid of
// apache lucene in whole source ...
out.write(sh.summarizer.getSummary(xtags).toString());
} else if (Genre.HTML == genre && sh.summarizer != null) {
String htags = getTags(sh.sourceRoot, rpath, false);
out.write(sh.summarizer.getSummary(htags).toString());
} else {
FileReader r = genre == Genre.PLAIN ? new FileReader(new File(sh.sourceRoot, rpath)) : null;
sh.sourceContext.getContext(r, out, xrefPrefix, morePrefix, rpath, tags, true, sh.builder.isDefSearch(), null, scopes);
}
}
if (sh.historyContext != null) {
sh.historyContext.getContext(new File(sh.sourceRoot, rpath), rpath, out, sh.contextPath);
}
out.write("</tt></td></tr>\n");
}
}
}
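For context, here is a minimal, hypothetical sketch of how a search page might call prettyPrint once the SearchHelper has been prepared with the fields listed in the javadoc above. Only the Results.prettyPrint call itself comes from the snippet; the wrapper method, the table markup, and the package paths for SearchHelper, Results and HistoryException are assumptions.
import java.io.IOException;
import java.io.Writer;

import org.opensolaris.opengrok.history.HistoryException; // package path assumed
import org.opensolaris.opengrok.web.SearchHelper;         // package path assumed
import org.opensolaris.opengrok.web.Results;              // package path assumed

public class ResultsUsageSketch {

    // Writes one page of hits. prettyPrint emits <tr> rows only, so the
    // surrounding table element is supplied by the caller.
    static void writeResultsPage(Writer out, SearchHelper sh, int start, int end)
            throws HistoryException, IOException, ClassNotFoundException {
        // sh must already have dataRoot, contextPath, searcher and hits set;
        // sourceContext/historyContext/summarizer/compressed/sourceRoot are
        // only needed when source or history context is to be shown.
        out.write("<table>");
        Results.prettyPrint(out, sh, start, end);
        out.write("</table>");
    }
}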
Use of org.opensolaris.opengrok.analysis.Definitions in project OpenGrok by OpenGrok.
The class HaskellXrefTest, method sampleTest.
@Test
public void sampleTest() throws IOException {
// load sample source
InputStream sampleInputStream = getClass().getClassLoader().getResourceAsStream("org/opensolaris/opengrok/analysis/haskell/sample.hs");
ByteArrayOutputStream sampleOutputStream = new ByteArrayOutputStream();
Definitions defs = new Definitions();
defs.addTag(6, "x'y'", "functions", "x'y' = let f' = 1; g'h = 2 in f' + g'h");
try {
writeHaskellXref(sampleInputStream, new PrintStream(sampleOutputStream), defs);
} finally {
sampleInputStream.close();
sampleOutputStream.close();
}
// load expected xref
InputStream expectedInputStream = getClass().getClassLoader().getResourceAsStream("org/opensolaris/opengrok/analysis/haskell/sampleXrefExpected.html");
ByteArrayOutputStream expectedOutputStream = new ByteArrayOutputStream();
try {
byte[] buffer = new byte[8192];
int numBytesRead;
do {
numBytesRead = expectedInputStream.read(buffer, 0, buffer.length);
if (numBytesRead > 0) {
expectedOutputStream.write(buffer, 0, numBytesRead);
}
} while (numBytesRead >= 0);
} finally {
expectedInputStream.close();
expectedOutputStream.close();
}
String[] actual = new String(sampleOutputStream.toByteArray(), "UTF-8").split("\n");
String[] expected = new String(expectedOutputStream.toByteArray(), "UTF-8").split("\n");
assertArrayEquals(expected, actual);
}
Use of org.opensolaris.opengrok.analysis.Definitions in project OpenGrok by OpenGrok.
The class PascalAnalyzerFactoryTest, method testAnalyzer.
/**
* Test of writeXref method, of class PascalAnalyzerFactory.
*
* @throws java.lang.Exception
*/
@Test
public void testAnalyzer() throws Exception {
String path = repository.getSourceRoot() + "/pascal/Sample.pas";
File f = new File(path);
if (!(f.canRead() && f.isFile())) {
fail("pascal testfile " + f + " not found");
}
Document doc = new Document();
doc.add(new Field(QueryBuilder.FULLPATH, path, string_ft_nstored_nanalyzed_norms));
StringWriter xrefOut = new StringWriter();
analyzer.setCtags(ctags);
analyzer.setScopesEnabled(true);
analyzer.analyze(doc, getStreamSource(path), xrefOut);
Definitions definitions = Definitions.deserialize(doc.getField(QueryBuilder.TAGS).binaryValue().bytes);
assertNotNull(definitions);
String[] type = new String[1];
assertTrue(definitions.hasDefinitionAt("Sample", 22, type));
assertThat(type[0], is("unit"));
assertTrue(definitions.hasDefinitionAt("TSample", 28, type));
assertThat(type[0], is("Class"));
assertTrue(definitions.hasDefinitionAt("Id", 40, type));
assertThat(type[0], is("property"));
assertTrue(definitions.hasDefinitionAt("Description", 41, type));
assertThat(type[0], is("property"));
assertTrue(definitions.hasDefinitionAt("TSample.GetId", 48, type));
assertThat(type[0], is("function"));
assertTrue(definitions.hasDefinitionAt("TSample.SetId", 53, type));
assertThat(type[0], is("procedure"));
assertTrue(definitions.hasDefinitionAt("TSample.GetClassName", 58, type));
assertThat(type[0], is("function"));
assertTrue(definitions.hasDefinitionAt("TSample.GetUser", 63, type));
assertThat(type[0], is("function"));
}
Use of org.opensolaris.opengrok.analysis.Definitions in project OpenGrok by OpenGrok.
The class IndexDatabaseTest, method testGetDefinitions.
@Test
public void testGetDefinitions() throws Exception {
// Test that we can get definitions for one of the files in the
// repository.
File f1 = new File(repository.getSourceRoot() + "/c/foobar.c");
Definitions defs1 = IndexDatabase.getDefinitions(f1);
assertNotNull(defs1);
assertTrue(defs1.hasSymbol("foobar"));
assertTrue(defs1.hasSymbol("a"));
assertFalse(defs1.hasSymbol("b"));
assertTrue(defs1.hasDefinitionAt("foobar", 1, new String[1]));
// same for Windows path delimiters
f1 = new File(repository.getSourceRoot() + "\\c\\foobar.c");
defs1 = IndexDatabase.getDefinitions(f1);
assertNotNull(defs1);
assertTrue(defs1.hasSymbol("foobar"));
assertTrue(defs1.hasSymbol("a"));
assertFalse(defs1.hasSymbol("b"));
assertTrue(defs1.hasDefinitionAt("foobar", 1, new String[1]));
// Test that we get null back if we request definitions for a file
// that's not in the repository.
File f2 = new File(repository.getSourceRoot() + "/c/foobar.d");
Definitions defs2 = IndexDatabase.getDefinitions(f2);
assertNull(defs2);
}
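Finally, a small self-contained sketch of the Definitions calls exercised throughout this page: building a Definitions by hand with addTag, querying it with hasSymbol and hasDefinitionAt (the one-element String array receives the tag type), and iterating the tags the way Context.getContext does. The symbol, type and line number are made up for illustration.
import org.opensolaris.opengrok.analysis.Definitions;

public class DefinitionsSketch {

    public static void main(String[] args) {
        Definitions defs = new Definitions();
        // addTag(line, symbol, type, matching source text)
        defs.addTag(12, "foobar", "function", "int foobar(int a)");

        System.out.println(defs.hasSymbol("foobar"));   // true
        System.out.println(defs.hasSymbol("missing"));  // false

        String[] type = new String[1];                   // receives the tag type
        if (defs.hasDefinitionAt("foobar", 12, type)) {
            System.out.println("foobar is defined as a " + type[0]);
        }

        // walk the tags the same way Context.getContext does above
        for (Definitions.Tag tag : defs.getTags()) {
            System.out.println(tag.line + ": " + tag.symbol + " (" + tag.type + ")");
        }
    }
}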