Use of org.apache.nutch.parse.Outlink in project nutch by apache.
The class SmallStack, method getParse.
@Override
public ParseResult getParse(Content content) {
  String text = null;
  Vector<Outlink> outlinks = new Vector<>();

  try {
    byte[] raw = content.getContent();
    String contentLength = content.getMetadata().get(Response.CONTENT_LENGTH);
    if (contentLength != null && raw.length != Integer.parseInt(contentLength)) {
      return new ParseStatus(ParseStatus.FAILED, ParseStatus.FAILED_TRUNCATED,
          "Content truncated at " + raw.length
              + " bytes. Parser can't handle incomplete files.")
          .getEmptyParseResult(content.getUrl(), getConf());
    }

    ExtractText extractor = new ExtractText();

    // TagParser implements SWFTags and drives a SWFTagTypes interface
    TagParser parser = new TagParser(extractor);
    // use this instead to debug the file
    // TagParser parser = new TagParser( new SWFTagDumper(true, true) );

    // SWFReader reads an input file and drives a SWFTags interface
    SWFReader reader = new SWFReader(parser, new InStream(raw));

    // read the input SWF file and pass it through the interface pipeline
    reader.readFile();

    text = extractor.getText();
    String atext = extractor.getActionText();
    if (atext != null && atext.length() > 0)
      text += "\n--------\n" + atext;

    // harvest potential outlinks
    String[] links = extractor.getUrls();
    for (int i = 0; i < links.length; i++) {
      Outlink out = new Outlink(links[i], "");
      outlinks.add(out);
    }
    Outlink[] olinks = OutlinkExtractor.getOutlinks(text, conf);
    if (olinks != null)
      for (int i = 0; i < olinks.length; i++) {
        outlinks.add(olinks[i]);
      }
  } catch (Exception e) {
    // run time exception
    LOG.error("Error, runtime exception: ", e);
    return new ParseStatus(ParseStatus.FAILED,
        "Can't be handled as SWF document. " + e)
        .getEmptyParseResult(content.getUrl(), getConf());
  }

  if (text == null)
    text = "";

  Outlink[] links = outlinks.toArray(new Outlink[outlinks.size()]);
  ParseData parseData = new ParseData(ParseStatus.STATUS_SUCCESS, "", links,
      content.getMetadata());
  return ParseResult.createParseResult(content.getUrl(),
      new ParseImpl(text, parseData));
}
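The harvesting pattern above — wrap each explicit URL string in an Outlink with an empty anchor, then merge in whatever OutlinkExtractor finds in the free text — can be exercised on its own. A minimal sketch (the class name, helper name, and sample URLs are illustrative, not part of the Nutch source):

import java.net.MalformedURLException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.nutch.parse.Outlink;
import org.apache.nutch.parse.OutlinkExtractor;
import org.apache.nutch.util.NutchConfiguration;

public class OutlinkHarvestSketch {

  /** Wrap explicit URLs and any URLs found in free text into one Outlink list. */
  static List<Outlink> harvest(String[] explicitUrls, String text,
      Configuration conf) throws MalformedURLException {
    List<Outlink> outlinks = new ArrayList<>();
    for (String url : explicitUrls) {
      // empty anchor text, exactly as in the SWF parser above
      outlinks.add(new Outlink(url, ""));
    }
    Outlink[] fromText = OutlinkExtractor.getOutlinks(text, conf);
    if (fromText != null) {
      for (Outlink o : fromText) {
        outlinks.add(o);
      }
    }
    return outlinks;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = NutchConfiguration.create();
    List<Outlink> links = harvest(
        new String[] { "http://example.com/a" },
        "See http://example.com/b for details.", conf);
    for (Outlink l : links) {
      System.out.println(l.getToUrl() + " -> '" + l.getAnchor() + "'");
    }
  }
}

The plugin accumulates links in a Vector for historical reasons; a plain ArrayList behaves the same way here.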
Use of org.apache.nutch.parse.Outlink in project nutch by apache.
The class DOMContentUtils, method getOutlinks.
/**
* This method finds all anchors below the supplied DOM <code>node</code>, and
* creates appropriate {@link Outlink} records for each (relative to the
* supplied <code>base</code> URL), and adds them to the <code>outlinks</code>
* {@link ArrayList}.
*
* <p>
*
* Links without inner structure (tags, text, etc) are discarded, as are links
* which contain only single nested links and empty text nodes (this is a
* common DOM-fixup artifact, at least with nekohtml).
*/
public void getOutlinks(URL base, ArrayList<Outlink> outlinks, Node node) {
  NodeWalker walker = new NodeWalker(node);

  while (walker.hasNext()) {
    Node currentNode = walker.nextNode();
    String nodeName = currentNode.getNodeName();
    short nodeType = currentNode.getNodeType();
    NodeList children = currentNode.getChildNodes();
    int childLen = (children != null) ? children.getLength() : 0;

    if (nodeType == Node.ELEMENT_NODE) {
      nodeName = nodeName.toLowerCase();
      LinkParams params = (LinkParams) linkParams.get(nodeName);
      if (params != null) {
        if (!shouldThrowAwayLink(currentNode, children, childLen, params)) {
          StringBuffer linkText = new StringBuffer();
          getText(linkText, currentNode, true);

          NamedNodeMap attrs = currentNode.getAttributes();
          String target = null;
          boolean noFollow = false;
          boolean post = false;
          for (int i = 0; i < attrs.getLength(); i++) {
            Node attr = attrs.item(i);
            String attrName = attr.getNodeName();
            if (params.attrName.equalsIgnoreCase(attrName)) {
              target = attr.getNodeValue();
            } else if ("rel".equalsIgnoreCase(attrName)
                && "nofollow".equalsIgnoreCase(attr.getNodeValue())) {
              noFollow = true;
            } else if ("method".equalsIgnoreCase(attrName)
                && "post".equalsIgnoreCase(attr.getNodeValue())) {
              post = true;
            }
          }
          if (target != null && !noFollow && !post)
            try {
              URL url = URLUtil.resolveURL(base, target);
              Outlink outlink = new Outlink(url.toString(),
                  linkText.toString().trim());
              outlinks.add(outlink);
              // the outlink metadata
              if (keepNodenames) {
                MapWritable metadata = new MapWritable();
                metadata.put(new Text(srcTagMetaName), new Text(nodeName));
                outlink.setMetadata(metadata);
              }
            } catch (MalformedURLException e) {
              // don't care
            }
        }
        // this should not have any children, skip them
        if (params.childLen == 0)
          continue;
      }
    }
  }
}
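When keepNodenames is enabled, each harvested Outlink carries the name of the tag it came from in a MapWritable keyed by srcTagMetaName. A small sketch of writing and reading that metadata, assuming Outlink exposes the map through a getMetadata() accessor matching the setMetadata() call above; the literal key "src.tag.name" is a placeholder for whatever srcTagMetaName holds:

import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.nutch.parse.Outlink;

public class OutlinkMetadataSketch {

  public static void main(String[] args) throws Exception {
    // Build an outlink the same way getOutlinks() does when keepNodenames is on.
    Outlink outlink = new Outlink("http://example.com/page.html", "example anchor");
    MapWritable metadata = new MapWritable();
    // "src.tag.name" is a placeholder key; the real key comes from the
    // srcTagMetaName field in the parser configuration.
    metadata.put(new Text("src.tag.name"), new Text("a"));
    outlink.setMetadata(metadata);

    // Reading the tag name back from a harvested outlink.
    MapWritable md = outlink.getMetadata();
    if (md != null) {
      Text srcTag = (Text) md.get(new Text("src.tag.name"));
      System.out.println(outlink.getToUrl() + " extracted from <" + srcTag + ">");
    }
  }
}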
Use of org.apache.nutch.parse.Outlink in project nutch by apache.
The class TikaParser, method getParse.
@SuppressWarnings("deprecation")
public ParseResult getParse(Content content) {
String mimeType = content.getContentType();
boolean useBoilerpipe = getConf().get("tika.extractor", "none").equals("boilerpipe");
String boilerpipeExtractorName = getConf().get("tika.extractor.boilerpipe.algorithm", "ArticleExtractor");
URL base;
try {
base = new URL(content.getBaseUrl());
} catch (MalformedURLException e) {
return new ParseStatus(e).getEmptyParseResult(content.getUrl(), getConf());
}
// get the right parser using the mime type as a clue
Parser parser = tikaConfig.getParser(MediaType.parse(mimeType));
byte[] raw = content.getContent();
if (parser == null) {
String message = "Can't retrieve Tika parser for mime-type " + mimeType;
LOG.error(message);
return new ParseStatus(ParseStatus.FAILED, message).getEmptyParseResult(content.getUrl(), getConf());
}
LOG.debug("Using Tika parser " + parser.getClass().getName() + " for mime-type " + mimeType);
Metadata tikamd = new Metadata();
HTMLDocumentImpl doc = new HTMLDocumentImpl();
doc.setErrorChecking(false);
DocumentFragment root = doc.createDocumentFragment();
ContentHandler domHandler;
// Check whether to use Tika's BoilerplateContentHandler
if (useBoilerpipe) {
BoilerpipeContentHandler bpHandler = new BoilerpipeContentHandler((ContentHandler) new DOMBuilder(doc, root), BoilerpipeExtractorRepository.getExtractor(boilerpipeExtractorName));
bpHandler.setIncludeMarkup(true);
domHandler = (ContentHandler) bpHandler;
} else {
DOMBuilder domBuilder = new DOMBuilder(doc, root);
domBuilder.setUpperCaseElementNames(upperCaseElementNames);
domBuilder.setDefaultNamespaceURI(XHTMLContentHandler.XHTML);
domHandler = (ContentHandler) domBuilder;
}
LinkContentHandler linkContentHandler = new LinkContentHandler();
ParseContext context = new ParseContext();
TeeContentHandler teeContentHandler = new TeeContentHandler(domHandler, linkContentHandler);
if (HTMLMapper != null)
context.set(HtmlMapper.class, HTMLMapper);
tikamd.set(Metadata.CONTENT_TYPE, mimeType);
try {
parser.parse(new ByteArrayInputStream(raw), (ContentHandler) teeContentHandler, tikamd, context);
} catch (Exception e) {
LOG.error("Error parsing " + content.getUrl(), e);
return new ParseStatus(ParseStatus.FAILED, e.getMessage()).getEmptyParseResult(content.getUrl(), getConf());
}
HTMLMetaTags metaTags = new HTMLMetaTags();
String text = "";
String title = "";
Outlink[] outlinks = new Outlink[0];
org.apache.nutch.metadata.Metadata nutchMetadata = new org.apache.nutch.metadata.Metadata();
// we have converted the sax events generated by Tika into a DOM object
// so we can now use the usual HTML resources from Nutch
// get meta directives
HTMLMetaProcessor.getMetaTags(metaTags, root, base);
if (LOG.isTraceEnabled()) {
LOG.trace("Meta tags for " + base + ": " + metaTags.toString());
}
// check meta directives
if (!metaTags.getNoIndex()) {
// okay to index
StringBuffer sb = new StringBuffer();
if (LOG.isTraceEnabled()) {
LOG.trace("Getting text...");
}
// extract text
utils.getText(sb, root);
text = sb.toString();
sb.setLength(0);
if (LOG.isTraceEnabled()) {
LOG.trace("Getting title...");
}
// extract title
utils.getTitle(sb, root);
title = sb.toString().trim();
}
if (!metaTags.getNoFollow()) {
// okay to follow links
// extract outlinks
ArrayList<Outlink> l = new ArrayList<Outlink>();
URL baseTag = base;
String baseTagHref = tikamd.get("Content-Location");
if (baseTagHref != null) {
try {
baseTag = new URL(base, baseTagHref);
} catch (MalformedURLException e) {
LOG.trace("Invalid <base href=\"{}\">", baseTagHref);
}
}
if (LOG.isTraceEnabled()) {
LOG.trace("Getting links (base URL = {}) ...", baseTag);
}
// pre-1233 outlink extraction
// utils.getOutlinks(baseTag != null ? baseTag : base, l, root);
// Get outlinks from Tika
List<Link> tikaExtractedOutlinks = linkContentHandler.getLinks();
utils.getOutlinks(baseTag, l, tikaExtractedOutlinks);
outlinks = l.toArray(new Outlink[l.size()]);
if (LOG.isTraceEnabled()) {
LOG.trace("found " + outlinks.length + " outlinks in " + content.getUrl());
}
}
// populate Nutch metadata with Tika metadata
String[] TikaMDNames = tikamd.names();
for (String tikaMDName : TikaMDNames) {
if (tikaMDName.equalsIgnoreCase(Metadata.TITLE))
continue;
String[] values = tikamd.getValues(tikaMDName);
for (String v : values) nutchMetadata.add(tikaMDName, v);
}
if (outlinks.length == 0) {
outlinks = OutlinkExtractor.getOutlinks(text, getConf());
}
ParseStatus status = new ParseStatus(ParseStatus.SUCCESS);
if (metaTags.getRefresh()) {
status.setMinorCode(ParseStatus.SUCCESS_REDIRECT);
status.setArgs(new String[] { metaTags.getRefreshHref().toString(), Integer.toString(metaTags.getRefreshTime()) });
}
ParseData parseData = new ParseData(status, title, outlinks, content.getMetadata(), nutchMetadata);
ParseResult parseResult = ParseResult.createParseResult(content.getUrl(), new ParseImpl(text, parseData));
// run filters on parse
ParseResult filteredParse = this.htmlParseFilters.filter(content, parseResult, metaTags, root);
if (metaTags.getNoCache()) {
// not okay to cache
for (Map.Entry<org.apache.hadoop.io.Text, Parse> entry : filteredParse) entry.getValue().getData().getParseMeta().set(Nutch.CACHING_FORBIDDEN_KEY, cachingPolicy);
}
return filteredParse;
}
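The ParseResult returned here is iterable over (URL, Parse) pairs, as the noCache loop above already relies on. A brief sketch of walking a ParseResult and listing the Outlink records each sub-document carries (the class and helper names are illustrative):

import java.util.Map;

import org.apache.hadoop.io.Text;
import org.apache.nutch.parse.Outlink;
import org.apache.nutch.parse.Parse;
import org.apache.nutch.parse.ParseResult;

public class ParseResultSketch {

  /** Print the title and outlinks of every sub-document in a ParseResult. */
  static void dump(ParseResult parseResult) {
    for (Map.Entry<Text, Parse> entry : parseResult) {
      Parse parse = entry.getValue();
      System.out.println("URL:   " + entry.getKey());
      System.out.println("Title: " + parse.getData().getTitle());
      for (Outlink outlink : parse.getData().getOutlinks()) {
        System.out.println("  -> " + outlink.getToUrl());
      }
    }
  }
}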
Use of org.apache.nutch.parse.Outlink in project nutch by apache.
The class TestFeedParser, method testIt.
/**
 * <p>
 * The test method: tests out the following 2 asserts:
 * </p>
 *
 * <ul>
 * <li>There are 2 outlinks read from the sample rss file (unlike the original
 * parse-rss, Tika ignores the URL and description of the channel)</li>
 * <li>The 2 outlinks read are in fact the correct outlinks from the sample
 * file</li>
 * </ul>
 */
@Test
public void testIt() throws ProtocolException, ParseException {
  String urlString;
  Protocol protocol;
  Content content;
  Parse parse;

  Configuration conf = NutchConfiguration.create();
  for (int i = 0; i < sampleFiles.length; i++) {
    urlString = "file:" + sampleDir + fileSeparator + sampleFiles[i];

    protocol = new ProtocolFactory(conf).getProtocol(urlString);
    content = protocol.getProtocolOutput(new Text(urlString),
        new CrawlDatum()).getContent();
    parse = new ParseUtil(conf).parseByExtensionId("parse-tika", content)
        .get(content.getUrl());

    // check that there are 2 outlinks:
    // unlike the original parse-rss
    // tika ignores the URL and description of the channel
    // http://test.channel.com
    // http://www-scf.usc.edu/~mattmann/
    // http://www.nutch.org
    ParseData theParseData = parse.getData();

    Outlink[] theOutlinks = theParseData.getOutlinks();

    Assert.assertTrue("There aren't 2 outlinks read!",
        theOutlinks.length == 2);

    // now check to make sure that those are the two outlinks
    boolean hasLink1 = false, hasLink2 = false;

    for (int j = 0; j < theOutlinks.length; j++) {
      if (theOutlinks[j].getToUrl().equals(
          "http://www-scf.usc.edu/~mattmann/")) {
        hasLink1 = true;
      }
      if (theOutlinks[j].getToUrl().equals("http://www.nutch.org/")) {
        hasLink2 = true;
      }
    }

    if (!hasLink1 || !hasLink2) {
      Assert.fail("Outlinks read from sample rss file are not correct!");
    }
  }
}
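The two boolean flags can also be collapsed into a single set comparison; a hedged sketch of an equivalent JUnit helper (the class and method names are illustrative and not part of the Nutch test suite):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.nutch.parse.Outlink;
import org.junit.Assert;

public class OutlinkAssertSketch {

  /** Assert that the outlinks point at exactly the expected target URLs. */
  static void assertOutlinkTargets(Outlink[] outlinks, String... expectedUrls) {
    Set<String> actual = Arrays.stream(outlinks)
        .map(Outlink::getToUrl)
        .collect(Collectors.toSet());
    Set<String> expected = new HashSet<>(Arrays.asList(expectedUrls));
    Assert.assertEquals("Outlinks read from sample file are not correct!",
        expected, actual);
  }
}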
Use of org.apache.nutch.parse.Outlink in project nutch by apache.
The class TestHtmlParser, method testResolveBaseUrl.
@Test
public void testResolveBaseUrl() {
  byte[] contentBytes = resolveBaseUrlTestContent
      .getBytes(StandardCharsets.UTF_8);

  // parse using http://example.com/ as "fetch" URL
  Parse parse = parse(contentBytes);
  LOG.info(parse.getData().toString());

  Outlink[] outlinks = parse.getData().getOutlinks();
  Assert.assertEquals(1, outlinks.length);
  Assert.assertEquals("http://www.example.com/index.html",
      outlinks[0].getToUrl());
}
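resolveBaseUrlTestContent itself is defined elsewhere in the test class; the assertions only require a document that, when fetched from http://example.com/, declares a <base href> of http://www.example.com/ and contains one relative link to index.html. An illustrative reconstruction of such content (not the literal field from the Nutch source):

public class BaseHrefContentSketch {

  // Illustrative only: fetched from http://example.com/, the <base href> points
  // at http://www.example.com/, so the relative link "index.html" resolves to
  // http://www.example.com/index.html, matching the assertions above.
  static final String HTML_WITH_BASE_HREF =
      "<!DOCTYPE html>\n"
      + "<html>\n"
      + "<head>\n"
      + "  <base href=\"http://www.example.com/\">\n"
      + "</head>\n"
      + "<body>\n"
      + "  <a href=\"index.html\">resolved against the base URL</a>\n"
      + "</body>\n"
      + "</html>";
}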