Use of org.apache.nutch.crawl.CrawlDatum in project nutch by apache.
The class TestLinksIndexingFilter, method testFilterOutlinks.

@Test
public void testFilterOutlinks() throws Exception {
  conf.set(LinksIndexingFilter.LINKS_OUTLINKS_HOST, "true");
  filter.setConf(conf);

  Outlink[] outlinks = generateOutlinks();

  NutchDocument doc = filter.filter(new NutchDocument(),
      new ParseImpl("text",
          new ParseData(new ParseStatus(), "title", outlinks, metadata)),
      new Text("http://www.example.com/"), new CrawlDatum(), new Inlinks());

  Assert.assertEquals(1, doc.getField("outlinks").getValues().size());
  Assert.assertEquals("Filter outlinks, allow only those from a different host",
      outlinks[0].getToUrl(), doc.getFieldValue("outlinks"));
}
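The generateOutlinks() helper is defined elsewhere in TestLinksIndexingFilter and is not shown on this page. A minimal sketch of what it could look like, consistent with the assertions above; the URLs, anchors, and the boolean overload are assumptions, not the actual implementation:

// Hypothetical sketch: one outlink to a different host than the test page
// (http://www.example.com/) and one to the same host, so that host
// filtering keeps exactly one value.
private Outlink[] generateOutlinks() throws MalformedURLException {
  return generateOutlinks(false);
}

private Outlink[] generateOutlinks(boolean appendPath) throws MalformedURLException {
  String path = appendPath ? "/some-awesome-page" : "";
  return new Outlink[] {
      new Outlink("http://www.test.com" + path, "test"),       // different host
      new Outlink("http://www.example.com" + path, "example")  // same host
  };
}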
Use of org.apache.nutch.crawl.CrawlDatum in project nutch by apache.
The class TestLinksIndexingFilter, method testNoFilterInlinks.

@Test
public void testNoFilterInlinks() throws Exception {
  conf.set(LinksIndexingFilter.LINKS_INLINKS_HOST, "false");
  filter.setConf(conf);

  Inlinks inlinks = new Inlinks();
  inlinks.add(new Inlink("http://www.test.com", "test"));
  inlinks.add(new Inlink("http://www.example.com", "example"));

  NutchDocument doc = filter.filter(new NutchDocument(),
      new ParseImpl("text",
          new ParseData(new ParseStatus(), "title", new Outlink[0], metadata)),
      new Text("http://www.example.com/"), new CrawlDatum(), inlinks);

  Assert.assertEquals("All inlinks must be indexed even those from the same host",
      inlinks.size(), doc.getField("inlinks").getValues().size());
}
Use of org.apache.nutch.crawl.CrawlDatum in project nutch by apache.
The class TestLinksIndexingFilter, method testNoFilterOutlinks.

@Test
public void testNoFilterOutlinks() throws Exception {
  filter.setConf(conf);

  Outlink[] outlinks = generateOutlinks();

  NutchDocument doc = filter.filter(new NutchDocument(),
      new ParseImpl("text",
          new ParseData(new ParseStatus(), "title", outlinks, metadata)),
      new Text("http://www.example.com/"), new CrawlDatum(), new Inlinks());

  Assert.assertEquals("All outlinks must be indexed even those from the same host",
      outlinks.length, doc.getField("outlinks").getValues().size());
}
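These tests also reference fixture fields (conf, filter, metadata) initialized outside what this page shows. A plausible setup sketch, assuming Nutch's stock configuration factory; the actual @Before body may differ:

// Hypothetical fixture sketch -- the real setup is part of
// TestLinksIndexingFilter and is not reproduced on this page.
private Configuration conf;
private LinksIndexingFilter filter;
private Metadata metadata;

@Before
public void setUp() {
  // Assumption: a default Nutch configuration and an empty metadata
  // container are enough for these tests.
  conf = NutchConfiguration.create();
  filter = new LinksIndexingFilter();
  metadata = new Metadata();
}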
Use of org.apache.nutch.crawl.CrawlDatum in project nutch by apache.
The class TestLinksIndexingFilter, method testIndexOnlyHostPart.

@Test
public void testIndexOnlyHostPart() throws Exception {
  conf.set(LinksIndexingFilter.LINKS_INLINKS_HOST, "true");
  conf.set(LinksIndexingFilter.LINKS_OUTLINKS_HOST, "true");
  conf.set(LinksIndexingFilter.LINKS_ONLY_HOSTS, "true");
  filter.setConf(conf);

  Outlink[] outlinks = generateOutlinks(true);

  Inlinks inlinks = new Inlinks();
  inlinks.add(new Inlink("http://www.test.com/one-awesome-page", "test"));
  inlinks.add(new Inlink("http://www.test.com/other-awesome-page", "test"));
  inlinks.add(new Inlink("http://www.example.com/my-first-awesome-example", "example"));

  NutchDocument doc = filter.filter(new NutchDocument(),
      new ParseImpl("text",
          new ParseData(new ParseStatus(), "title", outlinks, metadata)),
      new Text("http://www.example.com/"), new CrawlDatum(), inlinks);

  NutchField docOutlinks = doc.getField("outlinks");

  Assert.assertEquals("Only the host portion of the outlink URL must be indexed",
      new URL("http://www.test.com").getHost(), docOutlinks.getValues().get(0));

  Assert.assertEquals("The inlinks coming from the same host must count only once",
      1, doc.getField("inlinks").getValues().size());

  Assert.assertEquals("Only the host portion of the inlinks URL must be indexed",
      new URL("http://www.test.com").getHost(), doc.getFieldValue("inlinks"));
}
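The host-only assertions rely on standard java.net.URL behavior: getHost() returns just the host part of the authority, with scheme, port, and path stripped. For example:

// Plain JDK behavior, independent of Nutch:
URL url = new URL("http://www.test.com/one-awesome-page");
System.out.println(url.getHost()); // prints "www.test.com"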
Use of org.apache.nutch.crawl.CrawlDatum in project nutch by apache.
The class TestIndexReplace, method parseAndFilterFile.
/**
 * Run a test file through the Nutch parser and index filters.
 *
 * @param fileName name of the sample file under the test sample directory
 * @param conf the configuration to apply to the protocol, parser, and filters
 * @return the Nutch document with the replace indexer applied
 */
public NutchDocument parseAndFilterFile(String fileName, Configuration conf) {
  NutchDocument doc = new NutchDocument();

  BasicIndexingFilter basicIndexer = new BasicIndexingFilter();
  basicIndexer.setConf(conf);
  Assert.assertNotNull(basicIndexer);

  MetadataIndexer metaIndexer = new MetadataIndexer();
  metaIndexer.setConf(conf);
  Assert.assertNotNull(metaIndexer);

  ReplaceIndexer replaceIndexer = new ReplaceIndexer();
  replaceIndexer.setConf(conf);
  Assert.assertNotNull(replaceIndexer);

  try {
    String urlString = "file:" + sampleDir + fileSeparator + fileName;
    Text text = new Text(urlString);
    CrawlDatum crawlDatum = new CrawlDatum();
    Protocol protocol = new ProtocolFactory(conf).getProtocol(urlString);
    Content content = protocol.getProtocolOutput(text, crawlDatum).getContent();
    Parse parse = new ParseUtil(conf).parse(content).get(content.getUrl());
    crawlDatum.setFetchTime(100L);

    Inlinks inlinks = new Inlinks();
    doc = basicIndexer.filter(doc, parse, text, crawlDatum, inlinks);
    doc = metaIndexer.filter(doc, parse, text, crawlDatum, inlinks);
    doc = replaceIndexer.filter(doc, parse, text, crawlDatum, inlinks);
  } catch (Exception e) {
    e.printStackTrace();
    Assert.fail(e.toString());
  }
  return doc;
}
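A hypothetical caller for this helper; the file name and the asserted field are invented for illustration and do not come from the real test resources:

// Hypothetical usage sketch -- "sample.html" is a placeholder file name
// assumed to exist under sampleDir.
@Test
public void testReplaceIndexerRuns() {
  Configuration conf = NutchConfiguration.create();
  NutchDocument doc = parseAndFilterFile("sample.html", conf);
  Assert.assertNotNull(doc);
  // BasicIndexingFilter normally populates the "title" field.
  Assert.assertNotNull(doc.getField("title"));
}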