Use of org.apache.wiki.WikiPage in project jspwiki by apache.
The class BasicSearchProvider, method findPages.
    private Collection findPages(QueryItem[] query, WikiContext wikiContext) {
        TreeSet<SearchResult> res = new TreeSet<SearchResult>(new SearchResultComparator());
        SearchMatcher matcher = new SearchMatcher(m_engine, query);
        Collection allPages = null;
        try {
            allPages = m_engine.getPageManager().getAllPages();
        } catch (ProviderException pe) {
            log.error("Unable to retrieve page list", pe);
            return null;
        }
        AuthorizationManager mgr = m_engine.getAuthorizationManager();
        Iterator it = allPages.iterator();
        while (it.hasNext()) {
            try {
                WikiPage page = (WikiPage) it.next();
                if (page != null) {
                    PagePermission pp = new PagePermission(page, PagePermission.VIEW_ACTION);
                    if (wikiContext == null || mgr.checkPermission(wikiContext.getWikiSession(), pp)) {
                        String pageName = page.getName();
                        String pageContent = m_engine.getPageManager().getPageText(pageName, WikiPageProvider.LATEST_VERSION)
                                + attachmentNames(page, " ");
                        SearchResult comparison = matcher.matchPageContent(pageName, pageContent);
                        if (comparison != null) {
                            res.add(comparison);
                        }
                    }
                }
            } catch (ProviderException pe) {
                log.error("Unable to retrieve page from cache", pe);
            } catch (IOException ioe) {
                log.error("Failed to search page", ioe);
            }
        }
        return res;
    }
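findPages() walks every page, gates each one on a VIEW PagePermission check, scores the survivors with SearchMatcher, and collects the hits into a TreeSet ordered by relevance. The same filter-then-score pattern can be shown self-contained; in the sketch below, Page, Result, and the occurrence-count scoring are hypothetical stand-ins, not JSPWiki API:

import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;
import java.util.function.Predicate;

// Hypothetical stand-ins for WikiPage and SearchResult (not JSPWiki API).
record Page(String name, String content) {}
record Result(Page page, long score) {}

public class PermissionFilteredSearchSketch {

    // Same shape as findPages(): iterate all pages, skip those the caller
    // may not view, score the rest, and keep results sorted by score.
    static Collection<Result> findPages(String word, Collection<Page> allPages, Predicate<Page> mayView) {
        TreeSet<Result> res = new TreeSet<>(
                Comparator.comparingLong(Result::score).reversed()
                          .thenComparing(r -> r.page().name()));
        for (Page page : allPages) {
            if (!mayView.test(page))
                continue;                                // the permission gate
            long score = countOccurrences(page.content(), word);
            if (score > 0)
                res.add(new Result(page, score));        // keep actual matches only
        }
        return res;
    }

    // Naive relevance: how often the word appears in the page text.
    static long countOccurrences(String text, String word) {
        long n = 0;
        for (int i = text.indexOf(word); i >= 0; i = text.indexOf(word, i + 1))
            n++;
        return n;
    }

    public static void main(String[] args) {
        List<Page> pages = List.of(
                new Page("Main", "wiki wiki home"),
                new Page("Secret", "wiki admin notes"));
        // Pretend the current session is not allowed to view "Secret".
        Collection<Result> hits = findPages("wiki", pages, p -> !p.name().equals("Secret"));
        hits.forEach(r -> System.out.println(r.page().name() + " score=" + r.score()));
        // Prints only: Main score=2
    }
}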
Use of org.apache.wiki.WikiPage in project jspwiki by apache.
The class LuceneSearchProvider, method doFullLuceneReindex.
    /**
     * Performs a full Lucene reindex, if necessary.
     *
     * @throws IOException If there's a problem during indexing
     */
    protected void doFullLuceneReindex() throws IOException {
        File dir = new File(m_luceneDirectory);
        String[] filelist = dir.list();
        if (filelist == null) {
            throw new IOException("Invalid Lucene directory: cannot produce listing: " + dir.getAbsolutePath());
        }
        try {
            if (filelist.length == 0) {
                //
                //  No files? Reindex!
                //
                Date start = new Date();
                IndexWriter writer = null;
                log.info("Starting Lucene reindexing, this can take a couple of minutes...");
                Directory luceneDir = new SimpleFSDirectory(dir, null);
                try {
                    writer = getIndexWriter(luceneDir);
                    Collection allPages = m_engine.getPageManager().getAllPages();
                    for (Iterator iterator = allPages.iterator(); iterator.hasNext(); ) {
                        WikiPage page = (WikiPage) iterator.next();
                        try {
                            String text = m_engine.getPageManager().getPageText(page.getName(), WikiProvider.LATEST_VERSION);
                            luceneIndexPage(page, text, writer);
                        } catch (IOException e) {
                            log.warn("Unable to index page " + page.getName() + ", continuing to next", e);
                        }
                    }
                    Collection allAttachments = m_engine.getAttachmentManager().getAllAttachments();
                    for (Iterator iterator = allAttachments.iterator(); iterator.hasNext(); ) {
                        Attachment att = (Attachment) iterator.next();
                        try {
                            String text = getAttachmentContent(att.getName(), WikiProvider.LATEST_VERSION);
                            luceneIndexPage(att, text, writer);
                        } catch (IOException e) {
                            log.warn("Unable to index attachment " + att.getName() + ", continuing to next", e);
                        }
                    }
                } finally {
                    close(writer);
                }
                Date end = new Date();
                log.info("Full Lucene index finished in " + (end.getTime() - start.getTime()) + " milliseconds.");
            } else {
                log.info("Files found in Lucene directory, not reindexing.");
            }
        } catch (NoClassDefFoundError e) {
            log.info("Lucene libraries do not exist - not using Lucene.");
        } catch (IOException e) {
            log.error("Problem while creating Lucene index - not using Lucene.", e);
        } catch (ProviderException e) {
            log.error("Problem reading pages while creating Lucene index (JSPWiki won't start.)", e);
            throw new IllegalArgumentException("unable to create Lucene index");
        } catch (Exception e) {
            log.error("Unable to start lucene", e);
        }
    }
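Note that SimpleFSDirectory(dir, null) belongs to an older Lucene line. On a recent Lucene (8.x or 9.x) the same guard-then-bulk-index step would look roughly like the sketch below; the index path and the "name"/"contents" field names are illustrative assumptions, not JSPWiki's actual index schema:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.FSDirectory;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class LuceneReindexSketch {
    public static void main(String[] args) throws IOException {
        Path indexDir = Paths.get("lucene-index");
        // Same guard as doFullLuceneReindex(): rebuild only when the
        // index directory holds no files yet.
        if (Files.isDirectory(indexDir)) {
            try (var entries = Files.list(indexDir)) {
                if (entries.findAny().isPresent()) {
                    System.out.println("Files found in Lucene directory, not reindexing.");
                    return;
                }
            }
        }
        try (FSDirectory dir = FSDirectory.open(indexDir);
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            // One Document per page; in JSPWiki this step is luceneIndexPage().
            Document doc = new Document();
            doc.add(new StringField("name", "Main", Field.Store.YES));
            doc.add(new TextField("contents", "wiki page text goes here", Field.Store.NO));
            writer.addDocument(doc);
        }
    }
}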
Use of org.apache.wiki.WikiPage in project jspwiki by apache.
The class SearchManager, method postSave.
    /**
     * Reindexes the page.
     *
     * @param wikiContext {@inheritDoc}
     * @param content {@inheritDoc}
     */
    @Override
    public void postSave(WikiContext wikiContext, String content) {
        //
        //  Makes sure that we're indexing the latest version of this page.
        //
        WikiPage p = m_engine.getPage(wikiContext.getPage().getName());
        reindexPage(p);
    }
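The re-fetch through m_engine.getPage() matters: the hook indexes what was actually persisted, not the caller's in-memory copy. A minimal sketch of that design choice, with a hypothetical in-memory store standing in for the page provider:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

public class PostSaveHookSketch {
    // Stands in for the page store behind m_engine.getPage() (hypothetical).
    static final Map<String, String> store = new HashMap<>();

    static void save(String name, String text, Consumer<String> postSaveHook) {
        store.put(name, text);      // persist first...
        postSaveHook.accept(name);  // ...then fire the hook, as postSave() runs after the save
    }

    public static void main(String[] args) {
        // The hook re-reads from the store, the analogue of calling
        // m_engine.getPage(...), so it always indexes the stored version.
        Consumer<String> reindex =
                name -> System.out.println("reindexing " + name + ": " + store.get(name));
        save("Main", "v2 content", reindex);
    }
}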
Use of org.apache.wiki.WikiPage in project jspwiki by apache.
The class AtomFeed, method getItems.
    private Collection getItems() {
        ArrayList<Element> list = new ArrayList<Element>();
        WikiEngine engine = m_wikiContext.getEngine();
        ServletContext servletContext = null;
        if (m_wikiContext.getHttpRequest() != null)
            servletContext = m_wikiContext.getHttpRequest().getSession().getServletContext();
        for (Iterator i = m_entries.iterator(); i.hasNext(); ) {
            Entry e = (Entry) i.next();
            WikiPage p = e.getPage();
            Element entryEl = getElement("entry");
            //
            //  Mandatory elements
            //
            entryEl.addContent(getElement("id").setText(getEntryID(e)));
            entryEl.addContent(getElement("title").setAttribute("type", "html").setText(e.getTitle()));
            entryEl.addContent(getElement("updated").setText(DateFormatUtils.formatUTC(p.getLastModified(), RFC3339FORMAT)));
            //
            //  Optional elements
            //
            entryEl.addContent(getElement("author").addContent(getElement("name").setText(e.getAuthor())));
            entryEl.addContent(getElement("link").setAttribute("rel", "alternate").setAttribute("href", e.getURL()));
            entryEl.addContent(getElement("content").setAttribute("type", "html").setText(e.getContent()));
            if (engine.getAttachmentManager().hasAttachments(p) && servletContext != null) {
                try {
                    Collection c = engine.getAttachmentManager().listAttachments(p);
                    for (Iterator a = c.iterator(); a.hasNext(); ) {
                        Attachment att = (Attachment) a.next();
                        Element attEl = getElement("link");
                        attEl.setAttribute("rel", "enclosure");
                        attEl.setAttribute("href", engine.getURL(WikiContext.ATTACH, att.getName(), null, true));
                        attEl.setAttribute("length", Long.toString(att.getSize()));
                        attEl.setAttribute("type", getMimeType(servletContext, att.getFileName()));
                        entryEl.addContent(attEl);
                    }
                } catch (ProviderException ex) {
                    // FIXME: log.info("Can't get attachment data", ex);
                }
            }
            list.add(entryEl);
        }
        return list;
    }
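The getElement() helper wraps JDOM element construction (it also attaches the Atom namespace, which this sketch omits). A self-contained sketch of the same mandatory id/title/updated trio plus one enclosure link, assuming JDOM 2 on the classpath; all URLs and attribute values are hypothetical:

import org.jdom2.Element;
import org.jdom2.output.Format;
import org.jdom2.output.XMLOutputter;

public class AtomEntrySketch {
    public static void main(String[] args) {
        // The mandatory id/title/updated trio, as built in getItems().
        Element entry = new Element("entry");
        entry.addContent(new Element("id").setText("https://example.org/wiki/Main"));
        entry.addContent(new Element("title").setAttribute("type", "html").setText("Main"));
        entry.addContent(new Element("updated").setText("2024-01-01T00:00:00Z"));
        // One enclosure link, as emitted per attachment.
        Element enclosure = new Element("link");
        enclosure.setAttribute("rel", "enclosure");
        enclosure.setAttribute("href", "https://example.org/attach/Main/doc.pdf");
        enclosure.setAttribute("length", "1024");
        enclosure.setAttribute("type", "application/pdf");
        entry.addContent(enclosure);
        System.out.println(new XMLOutputter(Format.getPrettyFormat()).outputString(entry));
    }
}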
Use of org.apache.wiki.WikiPage in project jspwiki by apache.
The class RSSGenerator, method generateBlogRSS.
    /**
     * Creates RSS from modifications as if this page were a blog (using the WeblogPlugin).
     *
     * @param wikiContext The WikiContext, as usual.
     * @param changed A list of the changed pages.
     * @param feed A valid Feed object. The feed will be used to create the RSS/Atom, depending
     *             on which kind of object you want to put in it.
     * @return A String of valid RSS or Atom.
     * @throws ProviderException If reading of pages was not possible.
     */
    @SuppressWarnings("unchecked")
    protected String generateBlogRSS(WikiContext wikiContext, List changed, Feed feed) throws ProviderException {
        if (log.isDebugEnabled())
            log.debug("Generating RSS for blog, size=" + changed.size());
        String ctitle = m_engine.getVariable(wikiContext, PROP_CHANNEL_TITLE);
        if (ctitle != null)
            feed.setChannelTitle(ctitle);
        else
            feed.setChannelTitle(m_engine.getApplicationName() + ":" + wikiContext.getPage().getName());
        feed.setFeedURL(wikiContext.getViewURL(wikiContext.getPage().getName()));
        String language = m_engine.getVariable(wikiContext, PROP_CHANNEL_LANGUAGE);
        if (language != null)
            feed.setChannelLanguage(language);
        else
            feed.setChannelLanguage(m_channelLanguage);
        String channelDescription = m_engine.getVariable(wikiContext, PROP_CHANNEL_DESCRIPTION);
        if (channelDescription != null) {
            feed.setChannelDescription(channelDescription);
        }
        Collections.sort(changed, new PageTimeComparator());
        int items = 0;
        for (Iterator i = changed.iterator(); i.hasNext() && items < 15; items++) {
            WikiPage page = (WikiPage) i.next();
            Entry e = new Entry();
            e.setPage(page);
            String url;
            if (page instanceof Attachment) {
                url = m_engine.getURL(WikiContext.ATTACH, page.getName(), null, true);
            } else {
                url = m_engine.getURL(WikiContext.VIEW, page.getName(), null, true);
            }
            e.setURL(url);
            //
            //  Title
            //
            String pageText = m_engine.getPureText(page.getName(), WikiProvider.LATEST_VERSION);
            String title = "";
            int firstLine = pageText.indexOf('\n');
            if (firstLine > 0) {
                title = pageText.substring(0, firstLine).trim();
            }
            if (title.length() == 0)
                title = page.getName();
            // Remove wiki formatting
            while (title.startsWith("!"))
                title = title.substring(1);
            e.setTitle(title);
            if (firstLine > 0) {
                int maxlen = pageText.length();
                if (maxlen > MAX_CHARACTERS)
                    maxlen = MAX_CHARACTERS;
                if (maxlen > 0) {
                    pageText = m_engine.textToHTML(wikiContext, pageText.substring(firstLine + 1, maxlen).trim());
                    if (maxlen == MAX_CHARACTERS)
                        pageText += "...";
                    e.setContent(pageText);
                } else {
                    e.setContent(title);
                }
            } else {
                e.setContent(title);
            }
            e.setAuthor(getAuthor(page));
            feed.addEntry(e);
        }
        return feed.getString();
    }
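The title and content handling above follows three rules: the first line, stripped of leading '!' heading markers, becomes the title; the page name is the fallback; and the body is cut at MAX_CHARACTERS with a trailing "...". A self-contained sketch of just that extraction logic, with a hypothetical MAX_CHARACTERS value (the real constant is defined in RSSGenerator):

public class BlogEntryExcerptSketch {
    // Hypothetical limit; the real MAX_CHARACTERS lives in RSSGenerator.
    static final int MAX_CHARACTERS = 1000;

    // Mirrors the loop body above: first line (minus '!' markers) becomes
    // the title, the remainder is truncated to MAX_CHARACTERS and suffixed
    // with "..." when it was cut short.
    static String[] titleAndExcerpt(String pageText, String fallbackTitle) {
        int firstLine = pageText.indexOf('\n');
        String title = firstLine > 0 ? pageText.substring(0, firstLine).trim() : "";
        if (title.isEmpty())
            title = fallbackTitle;
        while (title.startsWith("!"))
            title = title.substring(1);
        String body = title;    // single-line pages fall back to the title
        if (firstLine > 0) {
            int maxlen = Math.min(pageText.length(), MAX_CHARACTERS);
            if (maxlen > firstLine + 1) {   // guard against a first line longer than the limit
                body = pageText.substring(firstLine + 1, maxlen).trim();
                if (maxlen == MAX_CHARACTERS)
                    body += "...";
            }
        }
        return new String[] { title, body };
    }

    public static void main(String[] args) {
        String[] te = titleAndExcerpt("!!My Blog Post\nSome body text here.", "Main");
        System.out.println("title=" + te[0] + ", body=" + te[1]);
        // Prints: title=My Blog Post, body=Some body text here.
    }
}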