Use of org.apache.nutch.parse.ParseStatus in project nutch by apache.
The class TestLinksIndexingFilter, method testNoFilterInlinks.
@Test
public void testNoFilterInlinks() throws Exception {
  conf.set(LinksIndexingFilter.LINKS_INLINKS_HOST, "false");
  filter.setConf(conf);

  Inlinks inlinks = new Inlinks();
  inlinks.add(new Inlink("http://www.test.com", "test"));
  inlinks.add(new Inlink("http://www.example.com", "example"));

  NutchDocument doc = filter.filter(new NutchDocument(),
      new ParseImpl("text",
          new ParseData(new ParseStatus(), "title", new Outlink[0], metadata)),
      new Text("http://www.example.com/"), new CrawlDatum(), inlinks);

  Assert.assertEquals("All inlinks must be indexed even those from the same host",
      inlinks.size(), doc.getField("inlinks").getValues().size());
}
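The test relies on the shared conf, filter, and metadata fields of the test class, which are not shown on this page. A minimal fixture sketch, assuming the usual Nutch test conventions (the field names, types, and the @Before method are assumptions, not copied from the original class):

private Configuration conf;
private LinksIndexingFilter filter;
private Metadata metadata;

@Before
public void setUp() {
  // Assumed fixture: a default Nutch configuration, the filter under test,
  // and an empty parse metadata object reused by each test method.
  conf = NutchConfiguration.create();
  filter = new LinksIndexingFilter();
  metadata = new Metadata();
}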
Use of org.apache.nutch.parse.ParseStatus in project nutch by apache.
The class TestLinksIndexingFilter, method testNoFilterOutlinks.
@Test
public void testNoFilterOutlinks() throws Exception {
  filter.setConf(conf);

  Outlink[] outlinks = generateOutlinks();

  NutchDocument doc = filter.filter(new NutchDocument(),
      new ParseImpl("text",
          new ParseData(new ParseStatus(), "title", outlinks, metadata)),
      new Text("http://www.example.com/"), new CrawlDatum(), new Inlinks());

  Assert.assertEquals("All outlinks must be indexed even those from the same host",
      outlinks.length, doc.getField("outlinks").getValues().size());
}
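generateOutlinks() is a private helper of the test class that this page does not reproduce. A hypothetical stand-in that matches how the tests use it (the URLs, anchors, and the meaning of the sameHost flag are assumptions for illustration only):

private Outlink[] generateOutlinks() throws MalformedURLException {
  return generateOutlinks(false);
}

private Outlink[] generateOutlinks(boolean sameHost) throws MalformedURLException {
  // When sameHost is true, every outlink points at the same host so the
  // host-deduplication behaviour of the filter can be exercised.
  String host = sameHost ? "http://www.test.com" : "http://www.example.com";
  Outlink[] outlinks = new Outlink[2];
  outlinks[0] = new Outlink(host + "/first-page", "first");
  outlinks[1] = new Outlink(host + "/second-page", "second");
  return outlinks;
}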
Use of org.apache.nutch.parse.ParseStatus in project nutch by apache.
The class TestLinksIndexingFilter, method testIndexOnlyHostPart.
@Test
public void testIndexOnlyHostPart() throws Exception {
  conf.set(LinksIndexingFilter.LINKS_INLINKS_HOST, "true");
  conf.set(LinksIndexingFilter.LINKS_OUTLINKS_HOST, "true");
  conf.set(LinksIndexingFilter.LINKS_ONLY_HOSTS, "true");
  filter.setConf(conf);

  Outlink[] outlinks = generateOutlinks(true);

  Inlinks inlinks = new Inlinks();
  inlinks.add(new Inlink("http://www.test.com/one-awesome-page", "test"));
  inlinks.add(new Inlink("http://www.test.com/other-awesome-page", "test"));
  inlinks.add(new Inlink("http://www.example.com/my-first-awesome-example", "example"));

  NutchDocument doc = filter.filter(new NutchDocument(),
      new ParseImpl("text",
          new ParseData(new ParseStatus(), "title", outlinks, metadata)),
      new Text("http://www.example.com/"), new CrawlDatum(), inlinks);

  NutchField docOutlinks = doc.getField("outlinks");
  Assert.assertEquals("Only the host portion of the outlink URL must be indexed",
      new URL("http://www.test.com").getHost(), docOutlinks.getValues().get(0));
  Assert.assertEquals("The inlinks coming from the same host must count only once",
      1, doc.getField("inlinks").getValues().size());
  Assert.assertEquals("Only the host portion of the inlinks URL must be indexed",
      new URL("http://www.test.com").getHost(), doc.getFieldValue("inlinks"));
}
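With LinksIndexingFilter.LINKS_ONLY_HOSTS enabled, only the host part of each link URL is indexed, which is why the assertions compare against new URL(...).getHost(). A minimal illustration of that reduction using plain java.net.URL (the helper name is ours, not the filter's internal code):

static String hostOf(String link) throws MalformedURLException {
  // Reduce a full link URL to the value that ends up in the index when only
  // hosts are kept: both test.com inlinks above collapse to "www.test.com",
  // so they are counted once.
  return new URL(link).getHost();
}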
Use of org.apache.nutch.parse.ParseStatus in project nutch by apache.
The class MimeTypeIndexingFilter, method main.
/**
 * Main method for invoking this tool from the command line.
 *
 * @param args command-line arguments; a -rules <file> option is required
 * @throws IOException if reading MIME types from standard input fails
 * @throws IndexingException if the filter fails while processing a document
 */
public static void main(String[] args) throws IOException, IndexingException {
  Option helpOpt = new Option("h", "help", false, "show this help message");
  Option rulesOpt = OptionBuilder.withArgName("file").hasArg()
      .withDescription("Rules file to be used in the tests relative to the conf directory")
      .isRequired().create("rules");

  Options options = new Options();
  options.addOption(helpOpt).addOption(rulesOpt);

  CommandLineParser parser = new GnuParser();
  HelpFormatter formatter = new HelpFormatter();
  String rulesFile;

  try {
    CommandLine line = parser.parse(options, args);
    if (line.hasOption("help") || !line.hasOption("rules")) {
      formatter.printHelp("org.apache.nutch.indexer.filter.MimeTypeIndexingFilter",
          options, true);
      return;
    }
    rulesFile = line.getOptionValue("rules");
  } catch (UnrecognizedOptionException e) {
    formatter.printHelp("org.apache.nutch.indexer.filter.MimeTypeIndexingFilter",
        options, true);
    return;
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    e.printStackTrace();
    return;
  }

  MimeTypeIndexingFilter filter = new MimeTypeIndexingFilter();
  Configuration conf = NutchConfiguration.create();
  conf.set(MimeTypeIndexingFilter.MIMEFILTER_REGEX_FILE, rulesFile);
  filter.setConf(conf);

  BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
  String line;

  while ((line = in.readLine()) != null && !line.isEmpty()) {
    Metadata metadata = new Metadata();
    metadata.set(Response.CONTENT_TYPE, line);
    ParseImpl parse = new ParseImpl("text",
        new ParseData(new ParseStatus(), "title", new Outlink[0], metadata));
    NutchDocument doc = filter.filter(new NutchDocument(), parse,
        new Text("http://www.example.com/"), new CrawlDatum(), new Inlinks());
    if (doc != null) {
      System.out.print("+ ");
      System.out.println(line);
    } else {
      System.out.print("- ");
      System.out.println(line);
    }
  }
}
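The loop reads one MIME type per line from standard input and echoes it back prefixed with "+" (document would be indexed) or "-" (document filtered out). A hypothetical in-process driver; the rules file name and the piped MIME types are placeholders, not values taken from the Nutch distribution:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class MimeTypeFilterDemo {
  public static void main(String[] args) throws Exception {
    // Replace stdin with two sample MIME types, then run the tool against an
    // assumed rules file resolved relative to the Nutch conf directory.
    System.setIn(new ByteArrayInputStream(
        "text/html\napplication/pdf\n".getBytes(StandardCharsets.UTF_8)));
    MimeTypeIndexingFilter.main(new String[] { "-rules", "mimetype-filter.txt" });
  }
}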
Use of org.apache.nutch.parse.ParseStatus in project nutch by apache.
The class SmallStack, method getParse.
@Override
public ParseResult getParse(Content content) {
  String text = null;
  Vector<Outlink> outlinks = new Vector<>();

  try {
    byte[] raw = content.getContent();
    String contentLength = content.getMetadata().get(Response.CONTENT_LENGTH);
    if (contentLength != null && raw.length != Integer.parseInt(contentLength)) {
      return new ParseStatus(ParseStatus.FAILED, ParseStatus.FAILED_TRUNCATED,
          "Content truncated at " + raw.length
              + " bytes. Parser can't handle incomplete files.")
          .getEmptyParseResult(content.getUrl(), getConf());
    }

    ExtractText extractor = new ExtractText();

    // TagParser implements SWFTags and drives a SWFTagTypes interface
    TagParser parser = new TagParser(extractor);
    // use this instead to debug the file
    // TagParser parser = new TagParser( new SWFTagDumper(true, true) );

    // SWFReader reads an input file and drives a SWFTags interface
    SWFReader reader = new SWFReader(parser, new InStream(raw));

    // read the input SWF file and pass it through the interface pipeline
    reader.readFile();
    text = extractor.getText();

    String atext = extractor.getActionText();
    if (atext != null && atext.length() > 0)
      text += "\n--------\n" + atext;

    // harvest potential outlinks
    String[] links = extractor.getUrls();
    for (int i = 0; i < links.length; i++) {
      Outlink out = new Outlink(links[i], "");
      outlinks.add(out);
    }

    Outlink[] olinks = OutlinkExtractor.getOutlinks(text, conf);
    if (olinks != null)
      for (int i = 0; i < olinks.length; i++) {
        outlinks.add(olinks[i]);
      }
  } catch (Exception e) {
    // run time exception
    LOG.error("Error, runtime exception: ", e);
    return new ParseStatus(ParseStatus.FAILED,
        "Can't be handled as SWF document. " + e)
        .getEmptyParseResult(content.getUrl(), getConf());
  }

  if (text == null)
    text = "";

  Outlink[] links = outlinks.toArray(new Outlink[outlinks.size()]);
  ParseData parseData = new ParseData(ParseStatus.STATUS_SUCCESS, "", links,
      content.getMetadata());
  return ParseResult.createParseResult(content.getUrl(), new ParseImpl(text, parseData));
}
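A ParseResult maps the content URL to a Parse object. A short consumption sketch, assuming a parser instance and a fetched Content are already in scope (both names are assumptions for illustration):

// Obtain the ParseResult produced by getParse() and pull out the pieces
// built above: the extracted text and the harvested outlinks.
ParseResult result = parser.getParse(content);
Parse parse = result.get(content.getUrl());
String swfText = parse.getText();
Outlink[] swfOutlinks = parse.getData().getOutlinks();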