Use of org.apache.nutch.scoring.ScoringFilters in project nutch by apache.
The class ParserChecker, method run().
public int run(String[] args) throws Exception {
  boolean dumpText = false;
  boolean force = false;
  String contentType = null;
  String url = null;
  String usage = "Usage: ParserChecker [-dumpText] [-forceAs mimeType] [-md key=value] url";
  if (args.length == 0) {
    LOG.error(usage);
    return (-1);
  }
  // used to simulate the metadata propagated from injection
  HashMap<String, String> metadata = new HashMap<>();
  for (int i = 0; i < args.length; i++) {
    if (args[i].equals("-forceAs")) {
      force = true;
      contentType = args[++i];
    } else if (args[i].equals("-dumpText")) {
      dumpText = true;
    } else if (args[i].equals("-md")) {
      String k = null, v = null;
      String nextOne = args[++i];
      int firstEquals = nextOne.indexOf("=");
      if (firstEquals != -1) {
        k = nextOne.substring(0, firstEquals);
        v = nextOne.substring(firstEquals + 1);
      } else
        k = nextOne;
      metadata.put(k, v);
    } else if (i != args.length - 1) {
      LOG.error(usage);
      System.exit(-1);
    } else {
      url = URLUtil.toASCII(args[i]);
    }
  }
  if (LOG.isInfoEnabled()) {
    LOG.info("fetching: " + url);
  }
  CrawlDatum cd = new CrawlDatum();
  Iterator<String> iter = metadata.keySet().iterator();
  while (iter.hasNext()) {
    String key = iter.next();
    String value = metadata.get(key);
    if (value == null)
      value = "";
    cd.getMetaData().put(new Text(key), new Text(value));
  }
  ProtocolFactory factory = new ProtocolFactory(conf);
  Protocol protocol = factory.getProtocol(url);
  Text turl = new Text(url);
  ProtocolOutput output = protocol.getProtocolOutput(turl, cd);
  // if the configuration permits, handle redirects until we either run
  // out of allowed redirects or we stop getting redirect statuses.
  int maxRedirects = conf.getInt("http.redirect.max", 0);
  int numRedirects = 0;
  while (output.getStatus().isRedirect() && numRedirects < maxRedirects) {
    String newURL = URLUtil.toASCII(output.getStatus().getArgs()[0]);
    LOG.info("Handling redirect to " + newURL);
    protocol = factory.getProtocol(newURL);
    turl = new Text(newURL);
    output = protocol.getProtocolOutput(turl, cd);
    numRedirects++;
  }
  if (!output.getStatus().isSuccess()) {
    System.err.println("Fetch failed with protocol status: " + output.getStatus());
    if (output.getStatus().isRedirect()) {
      System.err.println("Redirect(s) not handled due to configuration.");
      System.err.println("Max Redirects to handle per config: " + maxRedirects);
      System.err.println("Number of Redirects handled: " + numRedirects);
    }
    return (-1);
  }
  Content content = output.getContent();
  if (content == null) {
    LOG.error("No content for " + url);
    return (-1);
  }
  if (force) {
    content.setContentType(contentType);
  } else {
    contentType = content.getContentType();
  }
  if (contentType == null) {
    LOG.error("Failed to determine content type!");
    return (-1);
  }
  if (ParseSegment.isTruncated(content)) {
    LOG.warn("Content is truncated, parse may fail!");
  }
  ScoringFilters scfilters = new ScoringFilters(conf);
  // call the scoring filters
  try {
    scfilters.passScoreBeforeParsing(turl, cd, content);
  } catch (Exception e) {
    if (LOG.isWarnEnabled()) {
      LOG.warn("Couldn't pass score before parsing, url " + turl + " (" + e + ")");
      LOG.warn(StringUtils.stringifyException(e));
    }
  }
  ParseResult parseResult = new ParseUtil(conf).parse(content);
  if (parseResult == null) {
    LOG.error("Parsing content failed!");
    return (-1);
  }
  // calculate the signature
  byte[] signature = SignatureFactory.getSignature(getConf())
      .calculate(content, parseResult.get(new Text(url)));
  if (LOG.isInfoEnabled()) {
    LOG.info("parsing: " + url);
    LOG.info("contentType: " + contentType);
    LOG.info("signature: " + StringUtil.toHexString(signature));
  }
  Parse parse = parseResult.get(turl);
  if (parse == null) {
    LOG.error("Failed to get parse from parse result");
    LOG.error("Available parses in parse result (by URL key):");
    for (Map.Entry<Text, Parse> entry : parseResult) {
      LOG.error(" " + entry.getKey());
    }
    LOG.error("Parse result does not contain a parse for URL to be checked:");
    LOG.error(" " + turl);
    return -1;
  }
  // call the scoring filters
  try {
    scfilters.passScoreAfterParsing(turl, content, parse);
  } catch (Exception e) {
    if (LOG.isWarnEnabled()) {
      LOG.warn("Couldn't pass score after parsing, url " + turl + " (" + e + ")");
      LOG.warn(StringUtils.stringifyException(e));
    }
  }
  for (Map.Entry<Text, Parse> entry : parseResult) {
    parse = entry.getValue();
    LOG.info("---------\nUrl\n---------------\n");
    System.out.print(entry.getKey());
    LOG.info("\n---------\nParseData\n---------\n");
    System.out.print(parse.getData().toString());
    if (dumpText) {
      LOG.info("---------\nParseText\n---------\n");
      System.out.print(parse.getText());
    }
  }
  return 0;
}
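Taken on its own, the ScoringFilters usage in ParserChecker boils down to two hook calls wrapped around the parse. Below is a minimal sketch of that pattern, assuming a Content object obtained from the protocol layer and a Parse obtained from ParseUtil; the URL is a placeholder and NutchConfiguration.create() stands in for whatever Configuration the tool already holds.

// Sketch only: 'content' and 'parse' are assumed to exist, as in the tool above.
Configuration conf = NutchConfiguration.create();
ScoringFilters scfilters = new ScoringFilters(conf);
Text turl = new Text("http://example.com/");  // placeholder URL
CrawlDatum cd = new CrawlDatum();
try {
  // let scoring plugins annotate the CrawlDatum/Content before parsing
  scfilters.passScoreBeforeParsing(turl, cd, content);
} catch (Exception e) {
  LOG.warn("Couldn't pass score before parsing: " + e);
}
// ... parse the content here ...
try {
  // give scoring plugins access to the parse result (e.g. to copy the score
  // into the parse metadata for later distribution to outlinks)
  scfilters.passScoreAfterParsing(turl, content, parse);
} catch (Exception e) {
  LOG.warn("Couldn't pass score after parsing: " + e);
}

As in ParserChecker itself, scoring failures are only logged so that a misbehaving scoring plugin does not abort the check.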
Use of org.apache.nutch.scoring.ScoringFilters in project nutch by apache.
The class TestCrawlDbStates, method testCrawlDbStatTransitionInject().
/**
 * Test states after inject: inject must not modify the status of CrawlDatums
 * already in CrawlDb. Newly injected elements have status "db_unfetched".
 * Inject is simulated by calling {@link Injector.InjectReducer#reduce()}.
 */
@Test
public void testCrawlDbStatTransitionInject() {
  LOG.info("Test CrawlDatum states in Injector after inject");
  Configuration conf = CrawlDBTestUtil.createContext().getConfiguration();
  Injector.InjectReducer injector = new Injector.InjectReducer();
  CrawlDbUpdateTestDriver<Injector.InjectReducer> injectDriver =
      new CrawlDbUpdateTestDriver<Injector.InjectReducer>(injector, conf);
  ScoringFilters scfilters = new ScoringFilters(conf);
  for (String sched : schedules) {
    LOG.info("Testing inject with " + sched);
    conf.set("db.fetch.schedule.class", "org.apache.nutch.crawl." + sched);
    FetchSchedule schedule = FetchScheduleFactory.getFetchSchedule(conf);
    List<CrawlDatum> values = new ArrayList<CrawlDatum>();
    for (int i = 0; i < fetchDbStatusPairs.length; i++) {
      byte fromDbStatus = fetchDbStatusPairs[i][1];
      byte toDbStatus = fromDbStatus;
      if (fromDbStatus == -1) {
        toDbStatus = STATUS_DB_UNFETCHED;
      } else {
        CrawlDatum fromDb = new CrawlDatum();
        fromDb.setStatus(fromDbStatus);
        schedule.initializeSchedule(CrawlDbUpdateUtil.dummyURL, fromDb);
        values.add(fromDb);
      }
      LOG.info("inject "
          + (fromDbStatus == -1 ? "<not in CrawlDb>" : CrawlDatum.getStatusName(fromDbStatus))
          + " + " + getStatusName(STATUS_INJECTED) + " => " + getStatusName(toDbStatus));
      CrawlDatum injected = new CrawlDatum(STATUS_INJECTED,
          conf.getInt("db.fetch.interval.default", 2592000), 0.1f);
      schedule.initializeSchedule(CrawlDbUpdateUtil.dummyURL, injected);
      try {
        scfilters.injectedScore(CrawlDbUpdateUtil.dummyURL, injected);
      } catch (ScoringFilterException e) {
        LOG.error(StringUtils.stringifyException(e));
      }
      values.add(injected);
      List<CrawlDatum> res = injectDriver.update(values);
      if (res.size() != 1) {
        fail("Inject didn't result in one single CrawlDatum per URL");
        continue;
      }
      byte status = res.get(0).getStatus();
      if (status != toDbStatus) {
        fail("Inject for "
            + (fromDbStatus == -1 ? "" : getStatusName(fromDbStatus) + " and ")
            + getStatusName(STATUS_INJECTED) + " results in " + getStatusName(status)
            + " (expected: " + getStatusName(toDbStatus) + ")");
      }
      values.clear();
    }
  }
}
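The only ScoringFilters hook exercised here is injectedScore, which lets scoring plugins assign an initial score to a newly injected URL. A condensed sketch of that call, using a placeholder URL instead of the test's CrawlDbUpdateUtil.dummyURL:

// Sketch: initial scoring of an injected CrawlDatum (cf. the test loop above).
ScoringFilters scfilters = new ScoringFilters(conf);
CrawlDatum injected = new CrawlDatum(CrawlDatum.STATUS_INJECTED,
    conf.getInt("db.fetch.interval.default", 2592000), 1.0f);
try {
  scfilters.injectedScore(new Text("http://example.com/"), injected);
} catch (ScoringFilterException e) {
  // a failing scoring plugin should not abort injection
  LOG.error(StringUtils.stringifyException(e));
}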
Use of org.apache.nutch.scoring.ScoringFilters in project nutch by apache.
The class IndexingFiltersChecker, method process().
protected int process(String url, StringBuilder output) throws Exception {
  if (normalizers != null) {
    url = normalizers.normalize(url, URLNormalizers.SCOPE_DEFAULT);
  }
  LOG.info("fetching: " + url);
  CrawlDatum datum = new CrawlDatum();
  Iterator<String> iter = metadata.keySet().iterator();
  while (iter.hasNext()) {
    String key = iter.next();
    String value = metadata.get(key);
    if (value == null)
      value = "";
    datum.getMetaData().put(new Text(key), new Text(value));
  }
  IndexingFilters indexers = new IndexingFilters(getConf());
  int maxRedirects = 3;
  ProtocolOutput protocolOutput = getProtocolOutput(url, datum);
  Text turl = new Text(url);
  // Following redirects and not reached maxRedirects?
  while (!protocolOutput.getStatus().isSuccess() && followRedirects
      && protocolOutput.getStatus().isRedirect() && maxRedirects != 0) {
    String[] stuff = protocolOutput.getStatus().getArgs();
    url = stuff[0];
    if (normalizers != null) {
      url = normalizers.normalize(url, URLNormalizers.SCOPE_DEFAULT);
    }
    turl.set(url);
    // try again
    protocolOutput = getProtocolOutput(url, datum);
    maxRedirects--;
  }
  if (!protocolOutput.getStatus().isSuccess()) {
    output.append("Fetch failed with protocol status: " + protocolOutput.getStatus() + "\n");
    return 0;
  }
  Content content = protocolOutput.getContent();
  if (content == null) {
    output.append("No content for " + url + "\n");
    return 0;
  }
  String contentType = content.getContentType();
  if (contentType == null) {
    return -1;
  }
  // store the guessed content type in the crawldatum
  datum.getMetaData().put(new Text(Metadata.CONTENT_TYPE), new Text(contentType));
  if (ParseSegment.isTruncated(content)) {
    LOG.warn("Content is truncated, parse may fail!");
  }
  ScoringFilters scfilters = new ScoringFilters(getConf());
  // call the scoring filters
  try {
    scfilters.passScoreBeforeParsing(turl, datum, content);
  } catch (Exception e) {
    LOG.warn("Couldn't pass score, url {} ({})", url, e);
  }
  LOG.info("parsing: {}", url);
  LOG.info("contentType: {}", contentType);
  ParseResult parseResult = new ParseUtil(getConf()).parse(content);
  NutchDocument doc = new NutchDocument();
  doc.add("id", url);
  Text urlText = new Text(url);
  Inlinks inlinks = null;
  Parse parse = parseResult.get(urlText);
  if (parse == null) {
    LOG.error("Failed to get parse from parse result");
    LOG.error("Available parses in parse result (by URL key):");
    for (Map.Entry<Text, Parse> entry : parseResult) {
      LOG.error(" " + entry.getKey());
    }
    LOG.error("Parse result does not contain a parse for URL to be checked:");
    LOG.error(" " + urlText);
    return -1;
  }
  byte[] signature = SignatureFactory.getSignature(getConf()).calculate(content, parse);
  parse.getData().getContentMeta().set(Nutch.SIGNATURE_KEY, StringUtil.toHexString(signature));
  String digest = parse.getData().getContentMeta().get(Nutch.SIGNATURE_KEY);
  doc.add("digest", digest);
  datum.setSignature(signature);
  // call the scoring filters
  try {
    scfilters.passScoreAfterParsing(turl, content, parseResult.get(turl));
  } catch (Exception e) {
    LOG.warn("Couldn't pass score, url {} ({})", turl, e);
  }
  try {
    doc = indexers.filter(doc, parse, urlText, datum, inlinks);
  } catch (IndexingException e) {
    e.printStackTrace();
  }
  if (doc == null) {
    output.append("Document discarded by indexing filter\n");
    return 0;
  }
  for (String fname : doc.getFieldNames()) {
    List<Object> values = doc.getField(fname).getValues();
    if (values != null) {
      for (Object value : values) {
        String str = value.toString();
        int minText = dumpText ? str.length() : Math.min(100, str.length());
        output.append(fname + " :\t" + str.substring(0, minText) + "\n");
      }
    }
  }
  // For readability if keepClientCnxOpen
  output.append("\n");
  if (getConf().getBoolean("doIndex", false) && doc != null) {
    IndexWriters writers = new IndexWriters(getConf());
    writers.open(getConf(), "IndexingFilterChecker");
    writers.write(doc);
    writers.close();
  }
  return 0;
}
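Besides the two scoring hooks (the same pattern as in ParserChecker), the checker computes a page signature and exposes it both on the CrawlDatum and on the NutchDocument as the "digest" field. A hedged sketch of just that signature handling, assuming content, parse, datum, doc and a Configuration conf already exist as in the method above:

// Sketch: compute the signature (MD5-based by default, see db.signature.class),
// store it in the parse content metadata, the CrawlDatum and the index document.
byte[] signature = SignatureFactory.getSignature(conf).calculate(content, parse);
parse.getData().getContentMeta().set(Nutch.SIGNATURE_KEY, StringUtil.toHexString(signature));
datum.setSignature(signature);
doc.add("digest", parse.getData().getContentMeta().get(Nutch.SIGNATURE_KEY));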
Use of org.apache.nutch.scoring.ScoringFilters in project nutch by apache.
The class ParseOutputFormat, method getRecordWriter().
public RecordWriter<Text, Parse> getRecordWriter(TaskAttemptContext context) throws IOException {
  Configuration conf = context.getConfiguration();
  String name = getUniqueFile(context, "part");
  Path dir = FileOutputFormat.getOutputPath(context);
  FileSystem fs = dir.getFileSystem(context.getConfiguration());
  if (conf.getBoolean("parse.filter.urls", true)) {
    filters = new URLFilters(conf);
    exemptionFilters = new URLExemptionFilters(conf);
  }
  if (conf.getBoolean("parse.normalize.urls", true)) {
    normalizers = new URLNormalizers(conf, URLNormalizers.SCOPE_OUTLINK);
  }
  this.scfilters = new ScoringFilters(conf);
  final int interval = conf.getInt("db.fetch.interval.default", 2592000);
  final boolean ignoreInternalLinks = conf.getBoolean("db.ignore.internal.links", false);
  final boolean ignoreExternalLinks = conf.getBoolean("db.ignore.external.links", false);
  final String ignoreExternalLinksMode = conf.get("db.ignore.external.links.mode", "byHost");
  // NUTCH-2435 - parameter "parser.store.text" allowing to choose whether to
  // store 'parse_text' directory or not:
  final boolean storeText = conf.getBoolean("parser.store.text", true);
  int maxOutlinksPerPage = conf.getInt("db.max.outlinks.per.page", 100);
  final boolean isParsing = conf.getBoolean("fetcher.parse", true);
  final int maxOutlinks = (maxOutlinksPerPage < 0) ? Integer.MAX_VALUE : maxOutlinksPerPage;
  final CompressionType compType = SequenceFileOutputFormat.getOutputCompressionType(context);
  Path out = FileOutputFormat.getOutputPath(context);
  Path text = new Path(new Path(out, ParseText.DIR_NAME), name);
  Path data = new Path(new Path(out, ParseData.DIR_NAME), name);
  Path crawl = new Path(new Path(out, CrawlDatum.PARSE_DIR_NAME), name);
  final String[] parseMDtoCrawlDB = conf.get("db.parsemeta.to.crawldb", "").split(" *, *");
  // textOut Options
  final MapFile.Writer textOut;
  if (storeText) {
    Option tKeyClassOpt = (Option) MapFile.Writer.keyClass(Text.class);
    org.apache.hadoop.io.SequenceFile.Writer.Option tValClassOpt =
        SequenceFile.Writer.valueClass(ParseText.class);
    org.apache.hadoop.io.SequenceFile.Writer.Option tProgressOpt =
        SequenceFile.Writer.progressable((Progressable) context);
    org.apache.hadoop.io.SequenceFile.Writer.Option tCompOpt =
        SequenceFile.Writer.compression(CompressionType.RECORD);
    textOut = new MapFile.Writer(conf, text, tKeyClassOpt, tValClassOpt, tCompOpt, tProgressOpt);
  } else {
    textOut = null;
  }
  // dataOut Options
  Option dKeyClassOpt = (Option) MapFile.Writer.keyClass(Text.class);
  org.apache.hadoop.io.SequenceFile.Writer.Option dValClassOpt =
      SequenceFile.Writer.valueClass(ParseData.class);
  org.apache.hadoop.io.SequenceFile.Writer.Option dProgressOpt =
      SequenceFile.Writer.progressable((Progressable) context);
  org.apache.hadoop.io.SequenceFile.Writer.Option dCompOpt =
      SequenceFile.Writer.compression(compType);
  final MapFile.Writer dataOut = new MapFile.Writer(conf, data, dKeyClassOpt, dValClassOpt,
      dCompOpt, dProgressOpt);
  final SequenceFile.Writer crawlOut = SequenceFile.createWriter(conf,
      SequenceFile.Writer.file(crawl),
      SequenceFile.Writer.keyClass(Text.class),
      SequenceFile.Writer.valueClass(CrawlDatum.class),
      SequenceFile.Writer.bufferSize(fs.getConf().getInt("io.file.buffer.size", 4096)),
      SequenceFile.Writer.replication(fs.getDefaultReplication(crawl)),
      SequenceFile.Writer.blockSize(1073741824),
      SequenceFile.Writer.compression(compType, new DefaultCodec()),
      SequenceFile.Writer.progressable((Progressable) context),
      SequenceFile.Writer.metadata(new Metadata()));
  return new RecordWriter<Text, Parse>() {

    public void write(Text key, Parse parse) throws IOException {
      String fromUrl = key.toString();
      // host or domain name of the source URL
      String origin = null;
      if (textOut != null) {
        textOut.append(key, new ParseText(parse.getText()));
      }
      ParseData parseData = parse.getData();
      // recover the signature prepared by Fetcher or ParseSegment
      String sig = parseData.getContentMeta().get(Nutch.SIGNATURE_KEY);
      if (sig != null) {
        byte[] signature = StringUtil.fromHexString(sig);
        if (signature != null) {
          // append a CrawlDatum with a signature
          CrawlDatum d = new CrawlDatum(CrawlDatum.STATUS_SIGNATURE, 0);
          d.setSignature(signature);
          crawlOut.append(key, d);
        }
      }
      // see if the parse metadata contain things that we'd like
      // to pass to the metadata of the crawlDB entry
      CrawlDatum parseMDCrawlDatum = null;
      for (String mdname : parseMDtoCrawlDB) {
        String mdvalue = parse.getData().getParseMeta().get(mdname);
        if (mdvalue != null) {
          if (parseMDCrawlDatum == null)
            parseMDCrawlDatum = new CrawlDatum(CrawlDatum.STATUS_PARSE_META, 0);
          parseMDCrawlDatum.getMetaData().put(new Text(mdname), new Text(mdvalue));
        }
      }
      if (parseMDCrawlDatum != null)
        crawlOut.append(key, parseMDCrawlDatum);
      // need to determine origin (once for all outlinks)
      if (ignoreExternalLinks || ignoreInternalLinks) {
        URL originURL = new URL(fromUrl.toString());
        // based on domain?
        if ("bydomain".equalsIgnoreCase(ignoreExternalLinksMode)) {
          origin = URLUtil.getDomainName(originURL).toLowerCase();
        } else {
          // use host
          origin = originURL.getHost().toLowerCase();
        }
      }
      ParseStatus pstatus = parseData.getStatus();
      if (pstatus != null && pstatus.isSuccess()
          && pstatus.getMinorCode() == ParseStatus.SUCCESS_REDIRECT) {
        String newUrl = pstatus.getMessage();
        int refreshTime = Integer.valueOf(pstatus.getArgs()[1]);
        newUrl = filterNormalize(fromUrl, newUrl, origin, ignoreInternalLinks,
            ignoreExternalLinks, ignoreExternalLinksMode, filters, exemptionFilters,
            normalizers, URLNormalizers.SCOPE_FETCHER);
        if (newUrl != null) {
          String reprUrl = URLUtil.chooseRepr(fromUrl, newUrl,
              refreshTime < Fetcher.PERM_REFRESH_TIME);
          CrawlDatum newDatum = new CrawlDatum();
          newDatum.setStatus(CrawlDatum.STATUS_LINKED);
          if (reprUrl != null && !reprUrl.equals(newUrl)) {
            newDatum.getMetaData().put(Nutch.WRITABLE_REPR_URL_KEY, new Text(reprUrl));
          }
          crawlOut.append(new Text(newUrl), newDatum);
        }
      }
      // collect outlinks for subsequent db update
      Outlink[] links = parseData.getOutlinks();
      int outlinksToStore = Math.min(maxOutlinks, links.length);
      int validCount = 0;
      CrawlDatum adjust = null;
      List<Entry<Text, CrawlDatum>> targets = new ArrayList<>(outlinksToStore);
      List<Outlink> outlinkList = new ArrayList<>(outlinksToStore);
      for (int i = 0; i < links.length && validCount < outlinksToStore; i++) {
        String toUrl = links[i].getToUrl();
        // only normalize and filter if fetcher.parse = false
        if (!isParsing) {
          toUrl = ParseOutputFormat.filterNormalize(fromUrl, toUrl, origin,
              ignoreInternalLinks, ignoreExternalLinks, ignoreExternalLinksMode,
              filters, exemptionFilters, normalizers);
          if (toUrl == null) {
            continue;
          }
        }
        CrawlDatum target = new CrawlDatum(CrawlDatum.STATUS_LINKED, interval);
        Text targetUrl = new Text(toUrl);
        // see if the outlink has any metadata attached
        // and if so pass that to the crawldatum so that
        // the initial score or distribution can use that
        MapWritable outlinkMD = links[i].getMetadata();
        if (outlinkMD != null) {
          target.getMetaData().putAll(outlinkMD);
        }
        try {
          scfilters.initialScore(targetUrl, target);
        } catch (ScoringFilterException e) {
          LOG.warn("Cannot filter init score for url " + key + ", using default: "
              + e.getMessage());
          target.setScore(0.0f);
        }
        targets.add(new SimpleEntry(targetUrl, target));
        // overwrite URL in Outlink object with normalized URL (NUTCH-1174)
        links[i].setUrl(toUrl);
        outlinkList.add(links[i]);
        validCount++;
      }
      try {
        // compute score contributions and adjustment to the original score
        adjust = scfilters.distributeScoreToOutlinks(key, parseData, targets, null, links.length);
      } catch (ScoringFilterException e) {
        LOG.warn("Cannot distribute score from " + key + ": " + e.getMessage());
      }
      for (Entry<Text, CrawlDatum> target : targets) {
        crawlOut.append(target.getKey(), target.getValue());
      }
      if (adjust != null)
        crawlOut.append(key, adjust);
      Outlink[] filteredLinks = outlinkList.toArray(new Outlink[outlinkList.size()]);
      parseData = new ParseData(parseData.getStatus(), parseData.getTitle(), filteredLinks,
          parseData.getContentMeta(), parseData.getParseMeta());
      dataOut.append(key, parseData);
      if (!parse.isCanonical()) {
        CrawlDatum datum = new CrawlDatum();
        datum.setStatus(CrawlDatum.STATUS_FETCH_SUCCESS);
        String timeString = parse.getData().getContentMeta().get(Nutch.FETCH_TIME_KEY);
        try {
          datum.setFetchTime(Long.parseLong(timeString));
        } catch (Exception e) {
          LOG.warn("Can't read fetch time for: " + key);
          datum.setFetchTime(System.currentTimeMillis());
        }
        crawlOut.append(key, datum);
      }
    }

    public void close(TaskAttemptContext context) throws IOException {
      if (textOut != null)
        textOut.close();
      dataOut.close();
      crawlOut.close();
    }
  };
}
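The ScoringFilters calls in this record writer are the per-outlink initialScore and the one-shot distributeScoreToOutlinks, which may return an adjustment CrawlDatum for the source URL. The following sketch isolates that pattern; key (the source URL as a Text), parseData, scfilters and interval are assumed to be available as in the writer above, and java.util.AbstractMap.SimpleEntry is used in place of the writer's SimpleEntry helper.

// Sketch: score outlink targets, then let plugins distribute score from the source.
List<Entry<Text, CrawlDatum>> targets = new ArrayList<>();
for (Outlink link : parseData.getOutlinks()) {
  Text toUrl = new Text(link.getToUrl());
  CrawlDatum target = new CrawlDatum(CrawlDatum.STATUS_LINKED, interval);
  try {
    scfilters.initialScore(toUrl, target);
  } catch (ScoringFilterException e) {
    target.setScore(0.0f);  // fall back to a neutral score, as above
  }
  targets.add(new AbstractMap.SimpleEntry<>(toUrl, target));
}
CrawlDatum adjust = null;
try {
  adjust = scfilters.distributeScoreToOutlinks(key, parseData, targets, null,
      parseData.getOutlinks().length);
} catch (ScoringFilterException e) {
  LOG.warn("Cannot distribute score from " + key + ": " + e.getMessage());
}
// 'adjust', if non-null, is written back against the source URL (see crawlOut above).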
Use of org.apache.nutch.scoring.ScoringFilters in project nutch by apache.
The class InlinkPriorityQueue, method setup().
public void setup(Reducer<Text, CrawlDatum, Text, CrawlDatum>.Context context) {
  Configuration conf = context.getConfiguration();
  retryMax = conf.getInt("db.fetch.retry.max", 3);
  scfilters = new ScoringFilters(conf);
  additionsAllowed = conf.getBoolean(CrawlDb.CRAWLDB_ADDITIONS_ALLOWED, true);
  maxInterval = conf.getInt("db.fetch.interval.max", 0);
  schedule = FetchScheduleFactory.getFetchSchedule(conf);
  int maxLinks = conf.getInt("db.update.max.inlinks", 10000);
  linked = new InlinkPriorityQueue(maxLinks);
}
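This setup (from the source file that also defines InlinkPriorityQueue, which appears to be the CrawlDb update reducer) only constructs the ScoringFilters instance; the scoring itself happens later in the reduce step. As a rough, hedged sketch of that follow-up, stock Nutch lets the filters fold inlink score contributions into the stored CrawlDatum roughly like this, where key, old, result and linkList are placeholders for the reducer's local state:

// Hedged sketch: update the stored score from the collected inlink CrawlDatums.
try {
  scfilters.updateDbScore(key, old, result, linkList);
} catch (Exception e) {
  // scoring failures are logged and the datum is kept with its previous score
  LOG.warn("Couldn't update score for " + key + ": " + e);
}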