Use of org.apache.nutch.parse.ParseResult in project nutch by apache.
The class TestRegexParseFilter, method testPositiveFilter:
public void testPositiveFilter() throws Exception {
  Configuration conf = NutchConfiguration.create();
  String file = SAMPLES + SEPARATOR + "regex-parsefilter.txt";
  conf.set("parsefilter.regex.file", file);
  RegexParseFilter filter = new RegexParseFilter();
  filter.setConf(conf);
  String url = "http://nutch.apache.org/";
  String html = "<body><html><h1>nutch</h1><p>this is the extracted text blablabla</p></body></html>";
  Content content = new Content(url, url, html.getBytes("UTF-8"), "text/html", new Metadata(), conf);
  Parse parse = new ParseImpl("nutch this is the extracted text blablabla", new ParseData());
  ParseResult result = ParseResult.createParseResult(url, parse);
  result = filter.filter(content, result, null, null);
  Metadata meta = parse.getData().getParseMeta();
  assertEquals("true", meta.get("first"));
  assertEquals("true", meta.get("second"));
}
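The test reads its rules from regex-parsefilter.txt. A minimal sketch of what such a rules file could contain, assuming a line format of <metadata key> <html|text> <regex>; the two rules below are illustrative, chosen only to line up with the assertions in these tests:

# each rule: <metadata key> <source: html|text> <regex>
first html h1
second text blablabla

Under that assumption, "first" records whether the raw HTML matches h1 and "second" whether the extracted text matches blablabla, which explains the true/true assertions here and the false/false assertions in the negative test below.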
Use of org.apache.nutch.parse.ParseResult in project nutch by apache.
The class TestRegexParseFilter, method testNegativeFilter:
public void testNegativeFilter() throws Exception {
  Configuration conf = NutchConfiguration.create();
  String file = SAMPLES + SEPARATOR + "regex-parsefilter.txt";
  conf.set("parsefilter.regex.file", file);
  RegexParseFilter filter = new RegexParseFilter();
  filter.setConf(conf);
  String url = "http://nutch.apache.org/";
  String html = "<body><html><h2>nutch</h2><p>this is the extracted text no bla</p></body></html>";
  Content content = new Content(url, url, html.getBytes("UTF-8"), "text/html", new Metadata(), conf);
  Parse parse = new ParseImpl("nutch this is the extracted text bla", new ParseData());
  ParseResult result = ParseResult.createParseResult(url, parse);
  result = filter.filter(content, result, null, null);
  Metadata meta = parse.getData().getParseMeta();
  assertEquals("false", meta.get("first"));
  assertEquals("false", meta.get("second"));
}
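RegexParseFilter plugs into Nutch's HtmlParseFilter extension point, which both tests drive through filter(content, result, null, null). A minimal sketch of a custom filter written against that interface (the class name and the "marker" metadata key are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.nutch.parse.HTMLMetaTags;
import org.apache.nutch.parse.HtmlParseFilter;
import org.apache.nutch.parse.Parse;
import org.apache.nutch.parse.ParseResult;
import org.apache.nutch.protocol.Content;
import org.w3c.dom.DocumentFragment;

public class MarkerParseFilter implements HtmlParseFilter {

  private Configuration conf;

  @Override
  public ParseResult filter(Content content, ParseResult parseResult,
      HTMLMetaTags metaTags, DocumentFragment doc) {
    // Annotate the parse for the fetched URL with a custom metadata flag,
    // the same pattern RegexParseFilter uses for "first" and "second".
    Parse parse = parseResult.get(content.getUrl());
    boolean match = parse.getText().contains("nutch");
    parse.getData().getParseMeta().set("marker", Boolean.toString(match));
    return parseResult;
  }

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }
}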
Use of org.apache.nutch.parse.ParseResult in project nutch by apache.
The class TestFeedParser, method testParseFetchChannel:
/**
* Calls the {@link FeedParser} on a sample RSS file and checks that there are
* 3 {@link ParseResult} entries including the below 2 links:
* <ul>
* <li>http://www-scf.usc.edu/~mattmann/</li>
* <li>http://www.nutch.org</li>
* </ul>
*
* @throws ProtocolNotFound
* If the {@link Protocol}Layer cannot be loaded (required to fetch
* the {@link Content} for the RSS file).
* @throws ParseException
* If the {@link Parser}Layer cannot be loaded.
*/
@Test
public void testParseFetchChannel() throws ProtocolNotFound, ParseException {
  String urlString;
  Protocol protocol;
  Content content;
  ParseResult parseResult;
  Configuration conf = NutchConfiguration.create();
  for (int i = 0; i < sampleFiles.length; i++) {
    urlString = "file:" + sampleDir + fileSeparator + sampleFiles[i];
    urlString = urlString.replace('\\', '/');
    protocol = new ProtocolFactory(conf).getProtocol(urlString);
    content = protocol.getProtocolOutput(new Text(urlString), new CrawlDatum()).getContent();
    parseResult = new ParseUtil(conf).parseByExtensionId("feed", content);
    Assert.assertEquals(3, parseResult.size());
    boolean hasLink1 = false, hasLink2 = false, hasLink3 = false;
    for (Iterator<Map.Entry<Text, Parse>> j = parseResult.iterator(); j.hasNext();) {
      Map.Entry<Text, Parse> entry = j.next();
      if (entry.getKey().toString().equals("http://www-scf.usc.edu/~mattmann/")) {
        hasLink1 = true;
      } else if (entry.getKey().toString().equals("http://www.nutch.org/")) {
        hasLink2 = true;
      } else if (entry.getKey().toString().equals(urlString)) {
        hasLink3 = true;
      }
      Assert.assertNotNull(entry.getValue());
      Assert.assertNotNull(entry.getValue().getData());
    }
    if (!hasLink1 || !hasLink2 || !hasLink3) {
      Assert.fail("Outlinks read from sample rss file are not correct!");
    }
  }
}
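Feeds are the case where one fetched document yields several parses: ParseResult is iterable over (URL, Parse) entries, which is what the explicit Iterator above relies on. The same loop reads more compactly with an enhanced for loop (the IndexingFiltersChecker example further down uses this form); a minimal sketch, assuming conf and content are prepared as in the test:

for (Map.Entry<Text, Parse> entry : new ParseUtil(conf).parseByExtensionId("feed", content)) {
  // one Parse per feed item, keyed by the item's URL
  System.out.println(entry.getKey() + " -> " + entry.getValue().getData().getTitle());
}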
Use of org.apache.nutch.parse.ParseResult in project nutch by apache.
The class IndexingFiltersChecker, method process:
protected int process(String url, StringBuilder output) throws Exception {
  if (normalizers != null) {
    url = normalizers.normalize(url, URLNormalizers.SCOPE_DEFAULT);
  }
  LOG.info("fetching: " + url);
  CrawlDatum datum = new CrawlDatum();
  Iterator<String> iter = metadata.keySet().iterator();
  while (iter.hasNext()) {
    String key = iter.next();
    String value = metadata.get(key);
    if (value == null)
      value = "";
    datum.getMetaData().put(new Text(key), new Text(value));
  }
  int maxRedirects = getConf().getInt("http.redirect.max", 3);
  if (followRedirects) {
    if (maxRedirects == 0) {
      LOG.info("Following max. 3 redirects (ignored http.redirect.max == 0)");
      maxRedirects = 3;
    } else {
      LOG.info("Following max. {} redirects", maxRedirects);
    }
  }
  ProtocolOutput protocolOutput = getProtocolOutput(url, datum, checkRobotsTxt);
  Text turl = new Text(url);
  // Following redirects and not reached maxRedirects?
  int numRedirects = 0;
  while (protocolOutput != null && !protocolOutput.getStatus().isSuccess()
      && followRedirects && protocolOutput.getStatus().isRedirect()
      && maxRedirects >= numRedirects) {
    String[] stuff = protocolOutput.getStatus().getArgs();
    url = stuff[0];
    LOG.info("Follow redirect to {}", url);
    if (normalizers != null) {
      url = normalizers.normalize(url, URLNormalizers.SCOPE_DEFAULT);
    }
    turl.set(url);
    // try again
    protocolOutput = getProtocolOutput(url, datum, checkRobotsTxt);
    numRedirects++;
  }
  if (checkRobotsTxt && protocolOutput == null) {
    System.err.println("Fetch disallowed by robots.txt");
    return -1;
  }
  if (!protocolOutput.getStatus().isSuccess()) {
    System.err.println("Fetch failed with protocol status: " + protocolOutput.getStatus());
    if (protocolOutput.getStatus().isRedirect()) {
      System.err.println("Redirect(s) not handled due to configuration.");
      System.err.println("Max Redirects to handle per config: " + maxRedirects);
      System.err.println("Number of Redirects handled: " + numRedirects);
    }
    return -1;
  }
  Content content = protocolOutput.getContent();
  if (content == null) {
    output.append("No content for " + url + "\n");
    return 0;
  }
  String contentType = content.getContentType();
  if (contentType == null) {
    LOG.error("Failed to determine content type!");
    return -1;
  }
  // store the guessed content type in the crawldatum
  datum.getMetaData().put(new Text(Metadata.CONTENT_TYPE), new Text(contentType));
  if (ParseSegment.isTruncated(content)) {
    LOG.warn("Content is truncated, parse may fail!");
  }
  ScoringFilters scfilters = new ScoringFilters(getConf());
  // call the scoring filters
  try {
    scfilters.passScoreBeforeParsing(turl, datum, content);
  } catch (Exception e) {
    LOG.warn("Couldn't pass score, url {} ({})", url, e);
  }
  LOG.info("parsing: {}", url);
  LOG.info("contentType: {}", contentType);
  ParseResult parseResult = new ParseUtil(getConf()).parse(content);
  NutchDocument doc = new NutchDocument();
  doc.add("id", url);
  Text urlText = new Text(url);
  Inlinks inlinks = null;
  Parse parse = parseResult.get(urlText);
  if (parse == null) {
    LOG.error("Failed to get parse from parse result");
    LOG.error("Available parses in parse result (by URL key):");
    for (Map.Entry<Text, Parse> entry : parseResult) {
      LOG.error("  " + entry.getKey());
    }
    LOG.error("Parse result does not contain a parse for URL to be checked:");
    LOG.error("  " + urlText);
    return -1;
  }
  byte[] signature = SignatureFactory.getSignature(getConf()).calculate(content, parse);
  parse.getData().getContentMeta().set(Nutch.SIGNATURE_KEY, StringUtil.toHexString(signature));
  String digest = parse.getData().getContentMeta().get(Nutch.SIGNATURE_KEY);
  doc.add("digest", digest);
  datum.setSignature(signature);
  // call the scoring filters
  try {
    scfilters.passScoreAfterParsing(turl, content, parseResult.get(turl));
  } catch (Exception e) {
    LOG.warn("Couldn't pass score, url {} ({})", turl, e);
  }
  IndexingFilters indexers = new IndexingFilters(getConf());
  try {
    doc = indexers.filter(doc, parse, urlText, datum, inlinks);
  } catch (IndexingException e) {
    e.printStackTrace();
  }
  if (doc == null) {
    output.append("Document discarded by indexing filter\n");
    return 0;
  }
  for (String fname : doc.getFieldNames()) {
    List<Object> values = doc.getField(fname).getValues();
    if (values != null) {
      for (Object value : values) {
        String str = value.toString();
        int minText = dumpText ? str.length() : Math.min(100, str.length());
        output.append(fname + " :\t" + str.substring(0, minText) + "\n");
      }
    }
  }
  // For readability if keepClientCnxOpen
  output.append("\n");
  if (doIndex) {
    IndexWriters writers = IndexWriters.get(getConf());
    writers.open(getConf(), "IndexingFilterChecker");
    writers.write(doc);
    writers.close();
  }
  return 0;
}
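indexers.filter(...) chains the document through every configured IndexingFilter, and a null return discards the document, which is why process() checks for it afterwards. A minimal sketch of one such filter (class name and the "type" field are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.nutch.crawl.CrawlDatum;
import org.apache.nutch.crawl.Inlinks;
import org.apache.nutch.indexer.IndexingException;
import org.apache.nutch.indexer.IndexingFilter;
import org.apache.nutch.indexer.NutchDocument;
import org.apache.nutch.metadata.Metadata;
import org.apache.nutch.parse.Parse;

public class ContentTypeIndexingFilter implements IndexingFilter {

  private Configuration conf;

  @Override
  public NutchDocument filter(NutchDocument doc, Parse parse, Text url,
      CrawlDatum datum, Inlinks inlinks) throws IndexingException {
    // Copy the content type recorded at parse time into an index field;
    // returning null here would drop the document entirely.
    String contentType = parse.getData().getContentMeta().get(Metadata.CONTENT_TYPE);
    if (contentType != null) {
      doc.add("type", contentType);
    }
    return doc;
  }

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }
}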
Use of org.apache.nutch.parse.ParseResult in project nutch by apache.
The class TikaParser, method getParse:
ParseResult getParse(Content content, HTMLDocumentImpl doc, DocumentFragment root) {
  String mimeType = content.getContentType();
  URL base;
  try {
    base = new URL(content.getBaseUrl());
  } catch (MalformedURLException e) {
    return new ParseStatus(e).getEmptyParseResult(content.getUrl(), getConf());
  }
  // get the right parser using the mime type as a clue
  CompositeParser compositeParser = (CompositeParser) tikaConfig.getParser();
  Parser parser = compositeParser.getParsers().get(MediaType.parse(mimeType));
  if (parser == null) {
    String message = "Can't retrieve Tika parser for mime-type " + mimeType;
    LOG.error(message);
    return new ParseStatus(ParseStatus.FAILED, message).getEmptyParseResult(content.getUrl(), getConf());
  }
  LOG.debug("Using Tika parser {} for mime-type {}.", parser.getClass().getName(), mimeType);
  byte[] raw = content.getContent();
  Metadata tikamd = new Metadata();
  ContentHandler domHandler;
  // Check whether to use Tika's BoilerpipeContentHandler
  if (useBoilerpipe && boilerpipeMimeTypes.contains(mimeType)) {
    BoilerpipeContentHandler bpHandler = new BoilerpipeContentHandler(
        (ContentHandler) new DOMBuilder(doc, root),
        BoilerpipeExtractorRepository.getExtractor(boilerpipeExtractorName));
    bpHandler.setIncludeMarkup(true);
    domHandler = (ContentHandler) bpHandler;
  } else {
    DOMBuilder domBuilder = new DOMBuilder(doc, root);
    domBuilder.setUpperCaseElementNames(upperCaseElementNames);
    domBuilder.setDefaultNamespaceURI(XHTMLContentHandler.XHTML);
    domHandler = (ContentHandler) domBuilder;
  }
  LinkContentHandler linkContentHandler = new LinkContentHandler();
  ParseContext context = new ParseContext();
  if (parseEmbedded) {
    context.set(Parser.class, new AutoDetectParser(tikaConfig));
  }
  TeeContentHandler teeContentHandler = new TeeContentHandler(domHandler, linkContentHandler);
  if (HTMLMapper != null)
    context.set(HtmlMapper.class, HTMLMapper);
  tikamd.set(Metadata.CONTENT_TYPE, mimeType);
  try {
    parser.parse(new ByteArrayInputStream(raw), (ContentHandler) teeContentHandler, tikamd, context);
  } catch (Exception e) {
    LOG.error("Error parsing " + content.getUrl(), e);
    return new ParseStatus(ParseStatus.FAILED, e.getMessage()).getEmptyParseResult(content.getUrl(), getConf());
  }
  HTMLMetaTags metaTags = new HTMLMetaTags();
  String text = "";
  String title = "";
  Outlink[] outlinks = new Outlink[0];
  org.apache.nutch.metadata.Metadata nutchMetadata = new org.apache.nutch.metadata.Metadata();
  // we have converted the sax events generated by Tika into a DOM object
  // so we can now use the usual HTML resources from Nutch
  // get meta directives
  HTMLMetaProcessor.getMetaTags(metaTags, root, base);
  if (LOG.isTraceEnabled()) {
    LOG.trace("Meta tags for " + base + ": " + metaTags.toString());
  }
  // check meta directives
  if (!metaTags.getNoIndex()) {
    // okay to index
    StringBuffer sb = new StringBuffer();
    if (LOG.isTraceEnabled()) {
      LOG.trace("Getting text...");
    }
    // extract text
    utils.getText(sb, root);
    text = sb.toString();
    sb.setLength(0);
    if (LOG.isTraceEnabled()) {
      LOG.trace("Getting title...");
    }
    // extract title
    utils.getTitle(sb, root);
    title = sb.toString().trim();
  }
  if (!metaTags.getNoFollow()) {
    // okay to follow links: extract outlinks
    ArrayList<Outlink> l = new ArrayList<Outlink>();
    URL baseTag = base;
    String baseTagHref = tikamd.get("Content-Location");
    if (baseTagHref != null) {
      try {
        baseTag = new URL(base, baseTagHref);
      } catch (MalformedURLException e) {
        LOG.trace("Invalid <base href=\"{}\">", baseTagHref);
      }
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace("Getting links (base URL = {}) ...", baseTag);
    }
    // pre-1233 outlink extraction
    // utils.getOutlinks(baseTag != null ? baseTag : base, l, root);
    // Get outlinks from Tika
    List<Link> tikaExtractedOutlinks = linkContentHandler.getLinks();
    utils.getOutlinks(baseTag, l, tikaExtractedOutlinks);
    outlinks = l.toArray(new Outlink[l.size()]);
    if (LOG.isTraceEnabled()) {
      LOG.trace("found " + outlinks.length + " outlinks in " + content.getUrl());
    }
  }
  // populate Nutch metadata with Tika metadata
  String[] TikaMDNames = tikamd.names();
  for (String tikaMDName : TikaMDNames) {
    if (tikaMDName.equalsIgnoreCase(TikaCoreProperties.TITLE.toString()))
      continue;
    String[] values = tikamd.getValues(tikaMDName);
    for (String v : values) {
      nutchMetadata.add(tikaMDName, v);
      if (tikaMDName.equalsIgnoreCase(Nutch.ROBOTS_METATAG)
          && nutchMetadata.get(Nutch.ROBOTS_METATAG) == null) {
        // NUTCH-2720 force lowercase robots directive
        nutchMetadata.add(Nutch.ROBOTS_METATAG, v);
      }
    }
  }
  if (outlinks.length == 0) {
    outlinks = OutlinkExtractor.getOutlinks(text, getConf());
  }
  ParseStatus status = new ParseStatus(ParseStatus.SUCCESS);
  if (metaTags.getRefresh()) {
    status.setMinorCode(ParseStatus.SUCCESS_REDIRECT);
    status.setArgs(new String[] { metaTags.getRefreshHref().toString(),
        Integer.toString(metaTags.getRefreshTime()) });
  }
  ParseData parseData = new ParseData(status, title, outlinks, content.getMetadata(), nutchMetadata);
  ParseResult parseResult = ParseResult.createParseResult(content.getUrl(), new ParseImpl(text, parseData));
  // run filters on parse
  ParseResult filteredParse = this.htmlParseFilters.filter(content, parseResult, metaTags, root);
  if (metaTags.getNoCache()) {
    // not okay to cache
    for (Map.Entry<org.apache.hadoop.io.Text, Parse> entry : filteredParse) {
      entry.getValue().getData().getParseMeta().set(Nutch.CACHING_FORBIDDEN_KEY, cachingPolicy);
    }
  }
  return filteredParse;
}
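A short sketch of how a caller could inspect the ParseResult this method returns, in particular the SUCCESS_REDIRECT minor code set above for meta-refresh pages (parseResult and content as in the method; the printing is illustrative):

Parse parse = parseResult.get(content.getUrl());
ParseStatus status = parse.getData().getStatus();
if (status.getMinorCode() == ParseStatus.SUCCESS_REDIRECT) {
  // args were set above to { refresh URL, refresh time }
  String refreshUrl = status.getArgs()[0];
  System.out.println("meta refresh redirect to " + refreshUrl);
} else if (status.isSuccess()) {
  System.out.println(parse.getData().getOutlinks().length + " outlinks extracted");
}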