
Example 1 with Option

Use of org.apache.hadoop.io.MapFile.Writer.Option in project nutch by apache.

From class CrawlDBTestUtil, method createCrawlDb:

/**
 * Creates a synthetic crawldb.
 *
 * @param conf
 *          configuration used to create the MapFile writer
 * @param fs
 *          filesystem where the db will be created
 * @param crawldb
 *          path where the db will be created
 * @param init
 *          URLs to be inserted, objects are of type URLCrawlDatum
 * @throws Exception
 */
public static void createCrawlDb(Configuration conf, FileSystem fs, Path crawldb, List<URLCrawlDatum> init) throws Exception {
    LOG.trace("* creating crawldb: " + crawldb);
    Path dir = new Path(crawldb, CrawlDb.CURRENT_NAME);
    Option wKeyOpt = MapFile.Writer.keyClass(Text.class);
    org.apache.hadoop.io.SequenceFile.Writer.Option wValueOpt = SequenceFile.Writer.valueClass(CrawlDatum.class);
    MapFile.Writer writer = new MapFile.Writer(conf, new Path(dir, "part-r-00000"), wKeyOpt, wValueOpt);
    Iterator<URLCrawlDatum> it = init.iterator();
    while (it.hasNext()) {
        URLCrawlDatum row = it.next();
        LOG.info("adding:" + row.url.toString());
        writer.append(new Text(row.url), row.datum);
    }
    writer.close();
}
Also used : Path(org.apache.hadoop.fs.Path) MapFile(org.apache.hadoop.io.MapFile) Option(org.apache.hadoop.io.MapFile.Writer.Option) Text(org.apache.hadoop.io.Text)
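
A test that wants to check what createCrawlDb produced can read the MapFile back with MapFile.Reader, whose Hadoop 2 constructor likewise takes a Path plus options. The following is a minimal, untested sketch against the same current/part-r-00000 layout; the CrawlDbReadBack class and readUrls helper are illustrative and not part of the Nutch sources.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;
import org.apache.nutch.crawl.CrawlDatum;
import org.apache.nutch.crawl.CrawlDb;

public class CrawlDbReadBack {

    /** Hypothetical helper: collect every URL stored in the crawldb part written above. */
    public static List<String> readUrls(Configuration conf, Path crawldb) throws IOException {
        Path part = new Path(new Path(crawldb, CrawlDb.CURRENT_NAME), "part-r-00000");
        List<String> urls = new ArrayList<>();
        // The reader recovers key/value classes from the file metadata, so no
        // keyClass/valueClass options are needed on this side.
        MapFile.Reader reader = new MapFile.Reader(part, conf);
        try {
            Text key = new Text();
            CrawlDatum value = new CrawlDatum();
            while (reader.next(key, value)) {
                urls.add(key.toString());
            }
        } finally {
            reader.close();
        }
        return urls;
    }
}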

Example 2 with Option

Use of org.apache.hadoop.io.MapFile.Writer.Option in project nutch by apache.

From class TestCrawlDbMerger, method createCrawlDb:

private void createCrawlDb(Configuration config, FileSystem fs, Path crawldb, TreeSet<String> init, CrawlDatum cd) throws Exception {
    LOG.debug("* creating crawldb: " + crawldb);
    Path dir = new Path(crawldb, CrawlDb.CURRENT_NAME);
    Option wKeyOpt = MapFile.Writer.keyClass(Text.class);
    org.apache.hadoop.io.SequenceFile.Writer.Option wValueOpt = SequenceFile.Writer.valueClass(CrawlDatum.class);
    MapFile.Writer writer = new MapFile.Writer(config, new Path(dir, "part-r-00000"), wKeyOpt, wValueOpt);
    Iterator<String> it = init.iterator();
    while (it.hasNext()) {
        String key = it.next();
        writer.append(new Text(key), cd);
    }
    writer.close();
}
Also used : Path(org.apache.hadoop.fs.Path) MapFile(org.apache.hadoop.io.MapFile) Option(org.apache.hadoop.io.MapFile.Writer.Option) Text(org.apache.hadoop.io.Text)
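
Both test helpers build the writer from Option factories because the Hadoop 2 MapFile.Writer constructor takes a Path plus varargs options; the older constructors that accepted a FileSystem and the raw key/value classes are deprecated. A minimal sketch of the Option-based form in isolation, assuming the same Text/CrawlDatum types; the OptionBasedWriter class and openWriter helper are illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.nutch.crawl.CrawlDatum;

public class OptionBasedWriter {

    /** Hypothetical helper: open a Text/CrawlDatum MapFile writer the Hadoop 2 way. */
    public static MapFile.Writer openWriter(Configuration conf, Path part) throws IOException {
        // Before Hadoop 2 the equivalent was a (now deprecated) constructor that
        // took a FileSystem and the key/value classes positionally. The Option
        // factories below replace those positional arguments and keep the
        // constructor signature stable as further options (compression,
        // progressable, ...) are added.
        return new MapFile.Writer(conf, part,
            MapFile.Writer.keyClass(Text.class),
            SequenceFile.Writer.valueClass(CrawlDatum.class));
    }
}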

Example 3 with Option

Use of org.apache.hadoop.io.MapFile.Writer.Option in project nutch by apache.

From class TestSegmentMerger, method setUp:

@Before
public void setUp() throws Exception {
    conf = NutchConfiguration.create();
    fs = FileSystem.get(conf);
    testDir = new Path(conf.get("hadoop.tmp.dir"), "merge-" + System.currentTimeMillis());
    seg1 = new Path(testDir, "seg1");
    seg2 = new Path(testDir, "seg2");
    out = new Path(testDir, "out");
    // create large parse-text segments
    System.err.println("Creating large segment 1...");
    DecimalFormat df = new DecimalFormat("0000000");
    Text k = new Text();
    Path ptPath = new Path(new Path(seg1, ParseText.DIR_NAME), "part-00000");
    Option kOpt = MapFile.Writer.keyClass(Text.class);
    org.apache.hadoop.io.SequenceFile.Writer.Option vOpt = SequenceFile.Writer.valueClass(ParseText.class);
    MapFile.Writer w = new MapFile.Writer(conf, ptPath, kOpt, vOpt);
    long curSize = 0;
    countSeg1 = 0;
    FileStatus fileStatus = fs.getFileStatus(ptPath);
    long blkSize = fileStatus.getBlockSize();
    while (curSize < blkSize * 2) {
        k.set("seg1-" + df.format(countSeg1));
        w.append(k, new ParseText("seg1 text " + countSeg1));
        countSeg1++;
        // roughly ...
        curSize += 40;
    }
    w.close();
    System.err.println(" - done: " + countSeg1 + " records.");
    System.err.println("Creating large segment 2...");
    ptPath = new Path(new Path(seg2, ParseText.DIR_NAME), "part-00000");
    Option wKeyOpt = MapFile.Writer.keyClass(Text.class);
    org.apache.hadoop.io.SequenceFile.Writer.Option wValueOpt = SequenceFile.Writer.valueClass(ParseText.class);
    w = new MapFile.Writer(conf, ptPath, wKeyOpt, wValueOpt);
    curSize = 0;
    countSeg2 = 0;
    while (curSize < blkSize * 2) {
        k.set("seg2-" + df.format(countSeg2));
        w.append(k, new ParseText("seg2 text " + countSeg2));
        countSeg2++;
        // roughly ...
        curSize += 40;
    }
    w.close();
    System.err.println(" - done: " + countSeg2 + " records.");
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) DecimalFormat(java.text.DecimalFormat) MapFile(org.apache.hadoop.io.MapFile) Text(org.apache.hadoop.io.Text) ParseText(org.apache.nutch.parse.ParseText) Option(org.apache.hadoop.io.MapFile.Writer.Option) Before(org.junit.Before)
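
The loops above only estimate the amount of data written (roughly 40 bytes per record). If a test needs the actual on-disk size of a freshly written MapFile directory, for example to compare it against the block size, the filesystem can report it via a content summary. A minimal sketch; the SegmentSizeCheck class and onDiskSize helper are illustrative.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SegmentSizeCheck {

    /** Hypothetical helper: actual number of bytes occupied by a MapFile directory. */
    public static long onDiskSize(FileSystem fs, Path mapFileDir) throws IOException {
        // A MapFile is a directory holding "data" and "index" files;
        // getContentSummary sums the length of everything underneath it.
        return fs.getContentSummary(mapFileDir).getLength();
    }
}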

Example 4 with Option

Use of org.apache.hadoop.io.MapFile.Writer.Option in project nutch by apache.

From class TestSegmentMergerCrawlDatums, method createSegment:

protected void createSegment(Path segment, byte status, boolean fetch, boolean redirect) throws Exception {
    LOG.info("\nSegment: " + segment.toString());
    // The URL of our main record
    String url = "http://nutch.apache.org/";
    // The URL of our redirecting URL
    String redirectUrl = "http://nutch.apache.org/i_redirect_to_the_root/";
    // Our value
    CrawlDatum value = new CrawlDatum();
    // Path of the segment's crawl_fetch directory
    Path crawlFetchPath = new Path(new Path(segment, CrawlDatum.FETCH_DIR_NAME), "part-00000");
    // Get a writer for map files containing <Text,CrawlDatum> pairs
    Option wKeyOpt = MapFile.Writer.keyClass(Text.class);
    org.apache.hadoop.io.SequenceFile.Writer.Option wValueOpt = SequenceFile.Writer.valueClass(CrawlDatum.class);
    MapFile.Writer writer = new MapFile.Writer(conf, crawlFetchPath, wKeyOpt, wValueOpt);
    // If redirecting, first write the main record URL as a linked datum
    if (redirect) {
        // We're writing our main record URL with status linked
        LOG.info(url + " > " + CrawlDatum.getStatusName(CrawlDatum.STATUS_LINKED));
        value = new CrawlDatum();
        value.setStatus(CrawlDatum.STATUS_LINKED);
        writer.append(new Text(url), value);
    }
    // Whether we're fetching now
    if (fetch) {
        LOG.info(url + " > " + CrawlDatum.getStatusName(status));
        // Set the status
        value.setStatus(status);
        // Write the pair
        writer.append(new Text(url), value);
    }
    // Whether we're handling a redirect now
    if (redirect) {
        // And the redirect URL with redirect status, pointing to our main URL
        LOG.info(redirectUrl + " > " + CrawlDatum.getStatusName(CrawlDatum.STATUS_FETCH_REDIR_TEMP));
        value.setStatus(CrawlDatum.STATUS_FETCH_REDIR_TEMP);
        writer.append(new Text(redirectUrl), value);
    }
    // Close the stuff
    writer.close();
}
Also used : Path(org.apache.hadoop.fs.Path) CrawlDatum(org.apache.nutch.crawl.CrawlDatum) MapFile(org.apache.hadoop.io.MapFile) Option(org.apache.hadoop.io.MapFile.Writer.Option) Text(org.apache.hadoop.io.Text)
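
Since crawl_fetch is a MapFile (sorted and indexed), a test can also look up a single URL directly rather than scanning all entries: MapFile.Reader.get seeks via the index and fills the supplied value. A minimal, untested sketch against the crawlFetchPath written above; the CrawlFetchLookup class and lookup helper are illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;
import org.apache.nutch.crawl.CrawlDatum;

public class CrawlFetchLookup {

    /** Hypothetical helper: return the CrawlDatum stored for one URL, or null if absent. */
    public static CrawlDatum lookup(Configuration conf, Path crawlFetchPath, String url)
            throws IOException {
        MapFile.Reader reader = new MapFile.Reader(crawlFetchPath, conf);
        try {
            CrawlDatum value = new CrawlDatum();
            // get() seeks via the MapFile index and fills 'value' for the first
            // entry matching the key, returning null if the key is not present.
            return (CrawlDatum) reader.get(new Text(url), value);
        } finally {
            reader.close();
        }
    }
}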

Example 5 with Option

Use of org.apache.hadoop.io.MapFile.Writer.Option in project nutch by apache.

From class ParseOutputFormat, method getRecordWriter:

@Override
public RecordWriter<Text, Parse> getRecordWriter(TaskAttemptContext context) throws IOException {
    Configuration conf = context.getConfiguration();
    String name = getUniqueFile(context, "part");
    Path dir = FileOutputFormat.getOutputPath(context);
    FileSystem fs = dir.getFileSystem(context.getConfiguration());
    if (conf.getBoolean("parse.filter.urls", true)) {
        filters = new URLFilters(conf);
        exemptionFilters = new URLExemptionFilters(conf);
    }
    if (conf.getBoolean("parse.normalize.urls", true)) {
        normalizers = new URLNormalizers(conf, URLNormalizers.SCOPE_OUTLINK);
    }
    this.scfilters = new ScoringFilters(conf);
    final int interval = conf.getInt("db.fetch.interval.default", 2592000);
    final boolean ignoreInternalLinks = conf.getBoolean("db.ignore.internal.links", false);
    final boolean ignoreExternalLinks = conf.getBoolean("db.ignore.external.links", false);
    final String ignoreExternalLinksMode = conf.get("db.ignore.external.links.mode", "byHost");
    // NUTCH-2435 - parameter "parser.store.text" allows choosing whether to
    // store the 'parse_text' directory or not:
    final boolean storeText = conf.getBoolean("parser.store.text", true);
    int maxOutlinksPerPage = conf.getInt("db.max.outlinks.per.page", 100);
    final int maxOutlinks = (maxOutlinksPerPage < 0) ? Integer.MAX_VALUE : maxOutlinksPerPage;
    int maxOutlinkL = conf.getInt("db.max.outlink.length", 4096);
    final int maxOutlinkLength = (maxOutlinkL < 0) ? Integer.MAX_VALUE : maxOutlinkL;
    final boolean isParsing = conf.getBoolean("fetcher.parse", true);
    final CompressionType compType = SequenceFileOutputFormat.getOutputCompressionType(context);
    Path out = FileOutputFormat.getOutputPath(context);
    Path text = new Path(new Path(out, ParseText.DIR_NAME), name);
    Path data = new Path(new Path(out, ParseData.DIR_NAME), name);
    Path crawl = new Path(new Path(out, CrawlDatum.PARSE_DIR_NAME), name);
    final String[] parseMDtoCrawlDB = conf.get("db.parsemeta.to.crawldb", "").split(" *, *");
    // textOut Options
    final MapFile.Writer textOut;
    if (storeText) {
        Option tKeyClassOpt = (Option) MapFile.Writer.keyClass(Text.class);
        org.apache.hadoop.io.SequenceFile.Writer.Option tValClassOpt = SequenceFile.Writer.valueClass(ParseText.class);
        org.apache.hadoop.io.SequenceFile.Writer.Option tProgressOpt = SequenceFile.Writer.progressable((Progressable) context);
        org.apache.hadoop.io.SequenceFile.Writer.Option tCompOpt = SequenceFile.Writer.compression(CompressionType.RECORD);
        textOut = new MapFile.Writer(conf, text, tKeyClassOpt, tValClassOpt, tCompOpt, tProgressOpt);
    } else {
        textOut = null;
    }
    // dataOut Options
    Option dKeyClassOpt = (Option) MapFile.Writer.keyClass(Text.class);
    org.apache.hadoop.io.SequenceFile.Writer.Option dValClassOpt = SequenceFile.Writer.valueClass(ParseData.class);
    org.apache.hadoop.io.SequenceFile.Writer.Option dProgressOpt = SequenceFile.Writer.progressable((Progressable) context);
    org.apache.hadoop.io.SequenceFile.Writer.Option dCompOpt = SequenceFile.Writer.compression(compType);
    final MapFile.Writer dataOut = new MapFile.Writer(conf, data, dKeyClassOpt, dValClassOpt, dCompOpt, dProgressOpt);
    final SequenceFile.Writer crawlOut = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(crawl),
        SequenceFile.Writer.keyClass(Text.class),
        SequenceFile.Writer.valueClass(CrawlDatum.class),
        SequenceFile.Writer.bufferSize(fs.getConf().getInt("io.file.buffer.size", 4096)),
        SequenceFile.Writer.replication(fs.getDefaultReplication(crawl)),
        SequenceFile.Writer.blockSize(1073741824),
        SequenceFile.Writer.compression(compType, new DefaultCodec()),
        SequenceFile.Writer.progressable((Progressable) context),
        SequenceFile.Writer.metadata(new Metadata()));
    return new RecordWriter<Text, Parse>() {

        @Override
        public void write(Text key, Parse parse) throws IOException {
            String fromUrl = key.toString();
            // host or domain name of the source URL
            String origin = null;
            if (textOut != null) {
                textOut.append(key, new ParseText(parse.getText()));
            }
            ParseData parseData = parse.getData();
            // recover the signature prepared by Fetcher or ParseSegment
            String sig = parseData.getContentMeta().get(Nutch.SIGNATURE_KEY);
            if (sig != null) {
                byte[] signature = StringUtil.fromHexString(sig);
                if (signature != null) {
                    // append a CrawlDatum with a signature
                    CrawlDatum d = new CrawlDatum(CrawlDatum.STATUS_SIGNATURE, 0);
                    d.setSignature(signature);
                    crawlOut.append(key, d);
                }
            }
            // see if the parse metadata contain things that we'd like
            // to pass to the metadata of the crawlDB entry
            CrawlDatum parseMDCrawlDatum = null;
            for (String mdname : parseMDtoCrawlDB) {
                String mdvalue = parse.getData().getParseMeta().get(mdname);
                if (mdvalue != null) {
                    if (parseMDCrawlDatum == null)
                        parseMDCrawlDatum = new CrawlDatum(CrawlDatum.STATUS_PARSE_META, 0);
                    parseMDCrawlDatum.getMetaData().put(new Text(mdname), new Text(mdvalue));
                }
            }
            if (parseMDCrawlDatum != null)
                crawlOut.append(key, parseMDCrawlDatum);
            // need to determine origin (once for all outlinks)
            if (ignoreExternalLinks || ignoreInternalLinks) {
                URL originURL = new URL(fromUrl.toString());
                // based on domain?
                if ("bydomain".equalsIgnoreCase(ignoreExternalLinksMode)) {
                    origin = URLUtil.getDomainName(originURL).toLowerCase();
                } else {
                    // use host
                    origin = originURL.getHost().toLowerCase();
                }
            }
            ParseStatus pstatus = parseData.getStatus();
            if (pstatus != null && pstatus.isSuccess() && pstatus.getMinorCode() == ParseStatus.SUCCESS_REDIRECT) {
                String newUrl = pstatus.getMessage();
                int refreshTime = Integer.parseInt(pstatus.getArgs()[1]);
                newUrl = filterNormalize(fromUrl, newUrl, origin, ignoreInternalLinks, ignoreExternalLinks, ignoreExternalLinksMode, filters, exemptionFilters, normalizers, URLNormalizers.SCOPE_FETCHER);
                if (newUrl != null) {
                    String reprUrl = URLUtil.chooseRepr(fromUrl, newUrl, refreshTime < Fetcher.PERM_REFRESH_TIME);
                    CrawlDatum newDatum = new CrawlDatum();
                    newDatum.setStatus(CrawlDatum.STATUS_LINKED);
                    if (reprUrl != null && !reprUrl.equals(newUrl)) {
                        newDatum.getMetaData().put(Nutch.WRITABLE_REPR_URL_KEY, new Text(reprUrl));
                    }
                    crawlOut.append(new Text(newUrl), newDatum);
                }
            }
            // collect outlinks for subsequent db update
            Outlink[] links = parseData.getOutlinks();
            int outlinksToStore = Math.min(maxOutlinks, links.length);
            int validCount = 0;
            CrawlDatum adjust = null;
            List<Entry<Text, CrawlDatum>> targets = new ArrayList<>(outlinksToStore);
            List<Outlink> outlinkList = new ArrayList<>(outlinksToStore);
            for (int i = 0; i < links.length && validCount < outlinksToStore; i++) {
                String toUrl = links[i].getToUrl();
                // only normalize and filter if fetcher.parse = false
                if (!isParsing) {
                    if (toUrl.length() > maxOutlinkLength) {
                        continue;
                    }
                    toUrl = ParseOutputFormat.filterNormalize(fromUrl, toUrl, origin, ignoreInternalLinks, ignoreExternalLinks, ignoreExternalLinksMode, filters, exemptionFilters, normalizers);
                    if (toUrl == null) {
                        continue;
                    }
                }
                CrawlDatum target = new CrawlDatum(CrawlDatum.STATUS_LINKED, interval);
                Text targetUrl = new Text(toUrl);
                // see if the outlink has any metadata attached
                // and if so pass that to the crawldatum so that
                // the initial score or distribution can use that
                MapWritable outlinkMD = links[i].getMetadata();
                if (outlinkMD != null) {
                    target.getMetaData().putAll(outlinkMD);
                }
                try {
                    scfilters.initialScore(targetUrl, target);
                } catch (ScoringFilterException e) {
                    LOG.warn("Cannot filter init score for url " + key + ", using default: " + e.getMessage());
                    target.setScore(0.0f);
                }
                targets.add(new SimpleEntry(targetUrl, target));
                // overwrite URL in Outlink object with normalized URL (NUTCH-1174)
                links[i].setUrl(toUrl);
                outlinkList.add(links[i]);
                validCount++;
            }
            try {
                // compute score contributions and adjustment to the original score
                adjust = scfilters.distributeScoreToOutlinks(key, parseData, targets, null, links.length);
            } catch (ScoringFilterException e) {
                LOG.warn("Cannot distribute score from " + key + ": " + e.getMessage());
            }
            for (Entry<Text, CrawlDatum> target : targets) {
                crawlOut.append(target.getKey(), target.getValue());
            }
            if (adjust != null)
                crawlOut.append(key, adjust);
            Outlink[] filteredLinks = outlinkList.toArray(new Outlink[outlinkList.size()]);
            parseData = new ParseData(parseData.getStatus(), parseData.getTitle(), filteredLinks, parseData.getContentMeta(), parseData.getParseMeta());
            dataOut.append(key, parseData);
            if (!parse.isCanonical()) {
                CrawlDatum datum = new CrawlDatum();
                datum.setStatus(CrawlDatum.STATUS_FETCH_SUCCESS);
                String timeString = parse.getData().getContentMeta().get(Nutch.FETCH_TIME_KEY);
                try {
                    datum.setFetchTime(Long.parseLong(timeString));
                } catch (Exception e) {
                    LOG.warn("Can't read fetch time for: " + key);
                    datum.setFetchTime(System.currentTimeMillis());
                }
                crawlOut.append(key, datum);
            }
        }

        @Override
        public void close(TaskAttemptContext context) throws IOException {
            if (textOut != null)
                textOut.close();
            dataOut.close();
            crawlOut.close();
        }
    };
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) Metadata(org.apache.hadoop.io.SequenceFile.Metadata) ArrayList(java.util.ArrayList) MapFile(org.apache.hadoop.io.MapFile) DefaultCodec(org.apache.hadoop.io.compress.DefaultCodec) URL(java.net.URL) Entry(java.util.Map.Entry) RecordWriter(org.apache.hadoop.mapreduce.RecordWriter) SequenceFile(org.apache.hadoop.io.SequenceFile) FileSystem(org.apache.hadoop.fs.FileSystem) ScoringFilters(org.apache.nutch.scoring.ScoringFilters) URLFilters(org.apache.nutch.net.URLFilters) Path(org.apache.hadoop.fs.Path) CrawlDatum(org.apache.nutch.crawl.CrawlDatum) Text(org.apache.hadoop.io.Text) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) MapWritable(org.apache.hadoop.io.MapWritable) MalformedURLException(java.net.MalformedURLException) ScoringFilterException(org.apache.nutch.scoring.ScoringFilterException) IOException(java.io.IOException) Progressable(org.apache.hadoop.util.Progressable) Option(org.apache.hadoop.io.MapFile.Writer.Option) CompressionType(org.apache.hadoop.io.SequenceFile.CompressionType) URLExemptionFilters(org.apache.nutch.net.URLExemptionFilters) URLNormalizers(org.apache.nutch.net.URLNormalizers)
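
Reading the segment back mirrors the write side: each reducer produced its own MapFile under parse_text and parse_data, so a consumer opens one reader per part and routes a key with the same hash partitioning that chose the part at write time. A minimal sketch using MapFileOutputFormat.getReaders and getEntry from the Hadoop mapreduce library; the ParseTextLookup class and getParseText helper are illustrative rather than taken from Nutch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;
import org.apache.nutch.parse.ParseText;

public class ParseTextLookup {

    /** Hypothetical helper: read the extracted text stored for one URL of a segment. */
    public static String getParseText(Configuration conf, Path segment, String url)
            throws IOException {
        // One reader per part file; getEntry picks the right part with the same
        // hash partitioning used when the segment was written.
        MapFile.Reader[] readers =
            MapFileOutputFormat.getReaders(new Path(segment, ParseText.DIR_NAME), conf);
        try {
            ParseText value = new ParseText();
            Writable found = MapFileOutputFormat.getEntry(readers,
                new HashPartitioner<Text, ParseText>(), new Text(url), value);
            return found == null ? null : value.getText();
        } finally {
            for (MapFile.Reader reader : readers) {
                reader.close();
            }
        }
    }
}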

Aggregations

Path (org.apache.hadoop.fs.Path): 7 usages
MapFile (org.apache.hadoop.io.MapFile): 7 usages
Option (org.apache.hadoop.io.MapFile.Writer.Option): 7 usages
Text (org.apache.hadoop.io.Text): 7 usages
CrawlDatum (org.apache.nutch.crawl.CrawlDatum): 3 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages
CompressionType (org.apache.hadoop.io.SequenceFile.CompressionType): 2 usages
RecordWriter (org.apache.hadoop.mapreduce.RecordWriter): 2 usages
TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext): 2 usages
IOException (java.io.IOException): 1 usage
MalformedURLException (java.net.MalformedURLException): 1 usage
URL (java.net.URL): 1 usage
DecimalFormat (java.text.DecimalFormat): 1 usage
ArrayList (java.util.ArrayList): 1 usage
Entry (java.util.Map.Entry): 1 usage
FileStatus (org.apache.hadoop.fs.FileStatus): 1 usage
FileSystem (org.apache.hadoop.fs.FileSystem): 1 usage
MapWritable (org.apache.hadoop.io.MapWritable): 1 usage
SequenceFile (org.apache.hadoop.io.SequenceFile): 1 usage
Metadata (org.apache.hadoop.io.SequenceFile.Metadata): 1 usage