
Example 1 with Metadata

Use of org.apache.hadoop.io.SequenceFile.Metadata in project hadoop by apache.

From class TestSequenceFile, method testRecursiveSeqFileCreate.

@SuppressWarnings("deprecation")
@Test
public void testRecursiveSeqFileCreate() throws IOException {
    FileSystem fs = FileSystem.getLocal(conf);
    Path name = new Path(new Path(GenericTestUtils.getTempPath("recursiveCreateDir")), "file");
    boolean createParent = false;
    try {
        SequenceFile.createWriter(fs, conf, name, RandomDatum.class, RandomDatum.class, 512, (short) 1, 4096, createParent, CompressionType.NONE, null, new Metadata());
        fail("Expected an IOException due to missing parent");
    } catch (IOException ioe) {
        // Expected
    }
    createParent = true;
    SequenceFile.createWriter(fs, conf, name, RandomDatum.class, RandomDatum.class, 512, (short) 1, 4096, createParent, CompressionType.NONE, null, new Metadata());
    // should succeed, fails if exception thrown
}
Also used: Metadata (org.apache.hadoop.io.SequenceFile.Metadata), Test (org.junit.Test)
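
The fs/conf createWriter overload used above is deprecated, which is why the test carries @SuppressWarnings("deprecation"). As a minimal sketch, attaching the same kind of Metadata through the current Writer.Option API could look like the following (the path and key/value classes are illustrative, not taken from the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.SequenceFile.Metadata;
import org.apache.hadoop.io.Text;

public class MetadataWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Build the metadata to embed in the file header (illustrative key/value).
        Metadata metadata = new Metadata();
        metadata.set(new Text("schema.version"), new Text("1"));
        // The option-based factory attaches metadata via Writer.metadata(...).
        try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
                SequenceFile.Writer.file(new Path("/tmp/example.seq")),
                SequenceFile.Writer.keyClass(Text.class),
                SequenceFile.Writer.valueClass(Text.class),
                SequenceFile.Writer.compression(CompressionType.NONE),
                SequenceFile.Writer.metadata(metadata))) {
            writer.append(new Text("key"), new Text("value"));
        }
    }
}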

Example 2 with Metadata

Use of org.apache.hadoop.io.SequenceFile.Metadata in project hadoop by apache.

From class TestSequenceFile, method testCreateWriterOnExistingFile.

/**
   * Test that makes sure createWriter succeeds on a file that was 
   * already created
   * @throws IOException
   */
@SuppressWarnings("deprecation")
@Test
public void testCreateWriterOnExistingFile() throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path name = new Path(new Path(GenericTestUtils.getTempPath("createWriterOnExistingFile")), "file");
    fs.create(name);
    SequenceFile.createWriter(fs, conf, name, RandomDatum.class, RandomDatum.class, 512, (short) 1, 4096, false, CompressionType.NONE, null, new Metadata());
}
Also used: Metadata (org.apache.hadoop.io.SequenceFile.Metadata), Test (org.junit.Test)
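
To check that metadata actually travels with a file written this way, here is a small sketch of reading it back; SequenceFile.Reader exposes the stored key/value pairs through getMetadata() (the path is illustrative and matches the writer sketch above):

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Metadata;
import org.apache.hadoop.io.Text;

public class MetadataReaderSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (SequenceFile.Reader reader = new SequenceFile.Reader(conf,
                SequenceFile.Reader.file(new Path("/tmp/example.seq")))) {
            // getMetadata() returns the Metadata block written into the file header.
            Metadata metadata = reader.getMetadata();
            for (Map.Entry<Text, Text> entry : metadata.getMetadata().entrySet()) {
                System.out.println(entry.getKey() + " = " + entry.getValue());
            }
        }
    }
}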

Example 3 with Metadata

Use of org.apache.hadoop.io.SequenceFile.Metadata in project elephant-bird by twitter.

From class RCFileOutputFormat, method createRCFileWriter.

protected RCFile.Writer createRCFileWriter(TaskAttemptContext job, Text columnMetadata) throws IOException {
    Configuration conf = HadoopCompat.getConfiguration(job);
    // override compression codec if set.
    String codecOverride = conf.get(COMPRESSION_CODEC_CONF);
    if (codecOverride != null) {
        conf.setBoolean("mapred.output.compress", true);
        conf.set("mapred.output.compression.codec", codecOverride);
    }
    CompressionCodec codec = null;
    if (getCompressOutput(job)) {
        Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job, GzipCodec.class);
        codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
    }
    Metadata metadata = null;
    if (columnMetadata != null) {
        metadata = new Metadata();
        metadata.set(new Text(RCFileUtil.COLUMN_METADATA_PROTOBUF_KEY), columnMetadata);
    }
    String ext = conf.get(EXTENSION_OVERRIDE_CONF, DEFAULT_EXTENSION);
    Path file = getDefaultWorkFile(job, ext.equalsIgnoreCase("none") ? null : ext);
    LOG.info("writing to rcfile " + file.toString());
    return new RCFile.Writer(file.getFileSystem(conf), conf, file, job, metadata, codec);
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), Metadata (org.apache.hadoop.io.SequenceFile.Metadata), Text (org.apache.hadoop.io.Text), CompressionCodec (org.apache.hadoop.io.compress.CompressionCodec), RecordWriter (org.apache.hadoop.mapreduce.RecordWriter)

Example 4 with Metadata

Use of org.apache.hadoop.io.SequenceFile.Metadata in project hadoop by apache.

From class TestSequenceFile, method testSequenceFileMetadata.

/** Unit tests for SequenceFile metadata. */
@Test
public void testSequenceFileMetadata() throws Exception {
    LOG.info("Testing SequenceFile with metadata");
    int count = 1024 * 10;
    CompressionCodec codec = new DefaultCodec();
    Path file = new Path(GenericTestUtils.getTempPath("test.seq.metadata"));
    Path sortedFile = new Path(GenericTestUtils.getTempPath("test.sorted.seq.metadata"));
    Path recordCompressedFile = new Path(GenericTestUtils.getTempPath("test.rc.seq.metadata"));
    Path blockCompressedFile = new Path(GenericTestUtils.getTempPath("test.bc.seq.metadata"));
    FileSystem fs = FileSystem.getLocal(conf);
    SequenceFile.Metadata theMetadata = new SequenceFile.Metadata();
    theMetadata.set(new Text("name_1"), new Text("value_1"));
    theMetadata.set(new Text("name_2"), new Text("value_2"));
    theMetadata.set(new Text("name_3"), new Text("value_3"));
    theMetadata.set(new Text("name_4"), new Text("value_4"));
    int seed = new Random().nextInt();
    try {
        // SequenceFile.Writer
        writeMetadataTest(fs, count, seed, file, CompressionType.NONE, null, theMetadata);
        SequenceFile.Metadata aMetadata = readMetadata(fs, file);
        if (!theMetadata.equals(aMetadata)) {
            LOG.info("The original metadata:\n" + theMetadata.toString());
            LOG.info("The retrieved metadata:\n" + aMetadata.toString());
            throw new RuntimeException("metadata not match:  " + 1);
        }
        // SequenceFile.RecordCompressWriter
        writeMetadataTest(fs, count, seed, recordCompressedFile, CompressionType.RECORD, codec, theMetadata);
        aMetadata = readMetadata(fs, recordCompressedFile);
        if (!theMetadata.equals(aMetadata)) {
            LOG.info("The original metadata:\n" + theMetadata.toString());
            LOG.info("The retrieved metadata:\n" + aMetadata.toString());
            throw new RuntimeException("metadata not match:  " + 2);
        }
        // SequenceFile.BlockCompressWriter
        writeMetadataTest(fs, count, seed, blockCompressedFile, CompressionType.BLOCK, codec, theMetadata);
        aMetadata = readMetadata(fs, blockCompressedFile);
        if (!theMetadata.equals(aMetadata)) {
            LOG.info("The original metadata:\n" + theMetadata.toString());
            LOG.info("The retrieved metadata:\n" + aMetadata.toString());
            throw new RuntimeException("metadata not match:  " + 3);
        }
        // SequenceFile.Sorter
        sortMetadataTest(fs, file, sortedFile, theMetadata);
        aMetadata = readMetadata(fs, recordCompressedFile);
        if (!theMetadata.equals(aMetadata)) {
            LOG.info("The original metadata:\n" + theMetadata.toString());
            LOG.info("The retrieved metadata:\n" + aMetadata.toString());
            throw new RuntimeException("metadata not match:  " + 4);
        }
    } finally {
        fs.close();
    }
    LOG.info("Successfully tested SequenceFile with metadata");
}
Also used: Metadata (org.apache.hadoop.io.SequenceFile.Metadata), DefaultCodec (org.apache.hadoop.io.compress.DefaultCodec), CompressionCodec (org.apache.hadoop.io.compress.CompressionCodec), Test (org.junit.Test)
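
The writeMetadataTest and readMetadata helpers called above are not part of this excerpt. As a sketch of what such a readMetadata helper could do, assuming it only needs the header metadata (the project's actual helper may differ):

// Hypothetical helper, not the one from TestSequenceFile.
private static SequenceFile.Metadata readMetadata(FileSystem fs, Path file) throws IOException {
    // Open the file through the option-based Reader and return the header metadata.
    try (SequenceFile.Reader reader =
            new SequenceFile.Reader(fs.getConf(), SequenceFile.Reader.file(file))) {
        return reader.getMetadata();
    }
}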

Example 5 with Metadata

Use of org.apache.hadoop.io.SequenceFile.Metadata in project nutch by apache.

From class ParseOutputFormat, method getRecordWriter.

public RecordWriter<Text, Parse> getRecordWriter(TaskAttemptContext context) throws IOException {
    Configuration conf = context.getConfiguration();
    String name = getUniqueFile(context, "part");
    Path dir = FileOutputFormat.getOutputPath(context);
    FileSystem fs = dir.getFileSystem(context.getConfiguration());
    if (conf.getBoolean("parse.filter.urls", true)) {
        filters = new URLFilters(conf);
        exemptionFilters = new URLExemptionFilters(conf);
    }
    if (conf.getBoolean("parse.normalize.urls", true)) {
        normalizers = new URLNormalizers(conf, URLNormalizers.SCOPE_OUTLINK);
    }
    this.scfilters = new ScoringFilters(conf);
    final int interval = conf.getInt("db.fetch.interval.default", 2592000);
    final boolean ignoreInternalLinks = conf.getBoolean("db.ignore.internal.links", false);
    final boolean ignoreExternalLinks = conf.getBoolean("db.ignore.external.links", false);
    final String ignoreExternalLinksMode = conf.get("db.ignore.external.links.mode", "byHost");
    // NUTCH-2435 - parameter "parser.store.text" allowing to choose whether to
    // store 'parse_text' directory or not:
    final boolean storeText = conf.getBoolean("parser.store.text", true);
    int maxOutlinksPerPage = conf.getInt("db.max.outlinks.per.page", 100);
    final boolean isParsing = conf.getBoolean("fetcher.parse", true);
    final int maxOutlinks = (maxOutlinksPerPage < 0) ? Integer.MAX_VALUE : maxOutlinksPerPage;
    final CompressionType compType = SequenceFileOutputFormat.getOutputCompressionType(context);
    Path out = FileOutputFormat.getOutputPath(context);
    Path text = new Path(new Path(out, ParseText.DIR_NAME), name);
    Path data = new Path(new Path(out, ParseData.DIR_NAME), name);
    Path crawl = new Path(new Path(out, CrawlDatum.PARSE_DIR_NAME), name);
    final String[] parseMDtoCrawlDB = conf.get("db.parsemeta.to.crawldb", "").split(" *, *");
    // textOut Options
    final MapFile.Writer textOut;
    if (storeText) {
        Option tKeyClassOpt = (Option) MapFile.Writer.keyClass(Text.class);
        org.apache.hadoop.io.SequenceFile.Writer.Option tValClassOpt = SequenceFile.Writer.valueClass(ParseText.class);
        org.apache.hadoop.io.SequenceFile.Writer.Option tProgressOpt = SequenceFile.Writer.progressable((Progressable) context);
        org.apache.hadoop.io.SequenceFile.Writer.Option tCompOpt = SequenceFile.Writer.compression(CompressionType.RECORD);
        textOut = new MapFile.Writer(conf, text, tKeyClassOpt, tValClassOpt, tCompOpt, tProgressOpt);
    } else {
        textOut = null;
    }
    // dataOut Options
    Option dKeyClassOpt = (Option) MapFile.Writer.keyClass(Text.class);
    org.apache.hadoop.io.SequenceFile.Writer.Option dValClassOpt = SequenceFile.Writer.valueClass(ParseData.class);
    org.apache.hadoop.io.SequenceFile.Writer.Option dProgressOpt = SequenceFile.Writer.progressable((Progressable) context);
    org.apache.hadoop.io.SequenceFile.Writer.Option dCompOpt = SequenceFile.Writer.compression(compType);
    final MapFile.Writer dataOut = new MapFile.Writer(conf, data, dKeyClassOpt, dValClassOpt, dCompOpt, dProgressOpt);
    final SequenceFile.Writer crawlOut = SequenceFile.createWriter(conf, SequenceFile.Writer.file(crawl), SequenceFile.Writer.keyClass(Text.class), SequenceFile.Writer.valueClass(CrawlDatum.class), SequenceFile.Writer.bufferSize(fs.getConf().getInt("io.file.buffer.size", 4096)), SequenceFile.Writer.replication(fs.getDefaultReplication(crawl)), SequenceFile.Writer.blockSize(1073741824), SequenceFile.Writer.compression(compType, new DefaultCodec()), SequenceFile.Writer.progressable((Progressable) context), SequenceFile.Writer.metadata(new Metadata()));
    return new RecordWriter<Text, Parse>() {

        public void write(Text key, Parse parse) throws IOException {
            String fromUrl = key.toString();
            // host or domain name of the source URL
            String origin = null;
            if (textOut != null) {
                textOut.append(key, new ParseText(parse.getText()));
            }
            ParseData parseData = parse.getData();
            // recover the signature prepared by Fetcher or ParseSegment
            String sig = parseData.getContentMeta().get(Nutch.SIGNATURE_KEY);
            if (sig != null) {
                byte[] signature = StringUtil.fromHexString(sig);
                if (signature != null) {
                    // append a CrawlDatum with a signature
                    CrawlDatum d = new CrawlDatum(CrawlDatum.STATUS_SIGNATURE, 0);
                    d.setSignature(signature);
                    crawlOut.append(key, d);
                }
            }
            // see if the parse metadata contain things that we'd like
            // to pass to the metadata of the crawlDB entry
            CrawlDatum parseMDCrawlDatum = null;
            for (String mdname : parseMDtoCrawlDB) {
                String mdvalue = parse.getData().getParseMeta().get(mdname);
                if (mdvalue != null) {
                    if (parseMDCrawlDatum == null)
                        parseMDCrawlDatum = new CrawlDatum(CrawlDatum.STATUS_PARSE_META, 0);
                    parseMDCrawlDatum.getMetaData().put(new Text(mdname), new Text(mdvalue));
                }
            }
            if (parseMDCrawlDatum != null)
                crawlOut.append(key, parseMDCrawlDatum);
            // need to determine origin (once for all outlinks)
            if (ignoreExternalLinks || ignoreInternalLinks) {
                URL originURL = new URL(fromUrl.toString());
                // based on domain?
                if ("bydomain".equalsIgnoreCase(ignoreExternalLinksMode)) {
                    origin = URLUtil.getDomainName(originURL).toLowerCase();
                } else // use host
                {
                    origin = originURL.getHost().toLowerCase();
                }
            }
            ParseStatus pstatus = parseData.getStatus();
            if (pstatus != null && pstatus.isSuccess() && pstatus.getMinorCode() == ParseStatus.SUCCESS_REDIRECT) {
                String newUrl = pstatus.getMessage();
                int refreshTime = Integer.valueOf(pstatus.getArgs()[1]);
                newUrl = filterNormalize(fromUrl, newUrl, origin, ignoreInternalLinks, ignoreExternalLinks, ignoreExternalLinksMode, filters, exemptionFilters, normalizers, URLNormalizers.SCOPE_FETCHER);
                if (newUrl != null) {
                    String reprUrl = URLUtil.chooseRepr(fromUrl, newUrl, refreshTime < Fetcher.PERM_REFRESH_TIME);
                    CrawlDatum newDatum = new CrawlDatum();
                    newDatum.setStatus(CrawlDatum.STATUS_LINKED);
                    if (reprUrl != null && !reprUrl.equals(newUrl)) {
                        newDatum.getMetaData().put(Nutch.WRITABLE_REPR_URL_KEY, new Text(reprUrl));
                    }
                    crawlOut.append(new Text(newUrl), newDatum);
                }
            }
            // collect outlinks for subsequent db update
            Outlink[] links = parseData.getOutlinks();
            int outlinksToStore = Math.min(maxOutlinks, links.length);
            int validCount = 0;
            CrawlDatum adjust = null;
            List<Entry<Text, CrawlDatum>> targets = new ArrayList<>(outlinksToStore);
            List<Outlink> outlinkList = new ArrayList<>(outlinksToStore);
            for (int i = 0; i < links.length && validCount < outlinksToStore; i++) {
                String toUrl = links[i].getToUrl();
                // only normalize and filter if fetcher.parse = false
                if (!isParsing) {
                    toUrl = ParseOutputFormat.filterNormalize(fromUrl, toUrl, origin, ignoreInternalLinks, ignoreExternalLinks, ignoreExternalLinksMode, filters, exemptionFilters, normalizers);
                    if (toUrl == null) {
                        continue;
                    }
                }
                CrawlDatum target = new CrawlDatum(CrawlDatum.STATUS_LINKED, interval);
                Text targetUrl = new Text(toUrl);
                // see if the outlink has any metadata attached
                // and if so pass that to the crawldatum so that
                // the initial score or distribution can use that
                MapWritable outlinkMD = links[i].getMetadata();
                if (outlinkMD != null) {
                    target.getMetaData().putAll(outlinkMD);
                }
                try {
                    scfilters.initialScore(targetUrl, target);
                } catch (ScoringFilterException e) {
                    LOG.warn("Cannot filter init score for url " + key + ", using default: " + e.getMessage());
                    target.setScore(0.0f);
                }
                targets.add(new SimpleEntry(targetUrl, target));
                // overwrite URL in Outlink object with normalized URL (NUTCH-1174)
                links[i].setUrl(toUrl);
                outlinkList.add(links[i]);
                validCount++;
            }
            try {
                // compute score contributions and adjustment to the original score
                adjust = scfilters.distributeScoreToOutlinks(key, parseData, targets, null, links.length);
            } catch (ScoringFilterException e) {
                LOG.warn("Cannot distribute score from " + key + ": " + e.getMessage());
            }
            for (Entry<Text, CrawlDatum> target : targets) {
                crawlOut.append(target.getKey(), target.getValue());
            }
            if (adjust != null)
                crawlOut.append(key, adjust);
            Outlink[] filteredLinks = outlinkList.toArray(new Outlink[outlinkList.size()]);
            parseData = new ParseData(parseData.getStatus(), parseData.getTitle(), filteredLinks, parseData.getContentMeta(), parseData.getParseMeta());
            dataOut.append(key, parseData);
            if (!parse.isCanonical()) {
                CrawlDatum datum = new CrawlDatum();
                datum.setStatus(CrawlDatum.STATUS_FETCH_SUCCESS);
                String timeString = parse.getData().getContentMeta().get(Nutch.FETCH_TIME_KEY);
                try {
                    datum.setFetchTime(Long.parseLong(timeString));
                } catch (Exception e) {
                    LOG.warn("Can't read fetch time for: " + key);
                    datum.setFetchTime(System.currentTimeMillis());
                }
                crawlOut.append(key, datum);
            }
        }

        public void close(TaskAttemptContext context) throws IOException {
            if (textOut != null)
                textOut.close();
            dataOut.close();
            crawlOut.close();
        }
    };
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), Metadata (org.apache.hadoop.io.SequenceFile.Metadata), ArrayList (java.util.ArrayList), MapFile (org.apache.hadoop.io.MapFile), DefaultCodec (org.apache.hadoop.io.compress.DefaultCodec), URL (java.net.URL), Entry (java.util.Map.Entry), RecordWriter (org.apache.hadoop.mapreduce.RecordWriter), SequenceFile (org.apache.hadoop.io.SequenceFile), FileSystem (org.apache.hadoop.fs.FileSystem), ScoringFilters (org.apache.nutch.scoring.ScoringFilters), URLFilters (org.apache.nutch.net.URLFilters), Path (org.apache.hadoop.fs.Path), CrawlDatum (org.apache.nutch.crawl.CrawlDatum), Text (org.apache.hadoop.io.Text), TaskAttemptContext (org.apache.hadoop.mapreduce.TaskAttemptContext), MapWritable (org.apache.hadoop.io.MapWritable), MalformedURLException (java.net.MalformedURLException), ScoringFilterException (org.apache.nutch.scoring.ScoringFilterException), IOException (java.io.IOException), Progressable (org.apache.hadoop.util.Progressable), Option (org.apache.hadoop.io.MapFile.Writer.Option), CompressionType (org.apache.hadoop.io.SequenceFile.CompressionType), URLExemptionFilters (org.apache.nutch.net.URLExemptionFilters), URLNormalizers (org.apache.nutch.net.URLNormalizers)

Aggregations

Metadata (org.apache.hadoop.io.SequenceFile.Metadata): 6
Configuration (org.apache.hadoop.conf.Configuration): 3
Text (org.apache.hadoop.io.Text): 3
Test (org.junit.Test): 3
IOException (java.io.IOException): 2
Path (org.apache.hadoop.fs.Path): 2
CompressionCodec (org.apache.hadoop.io.compress.CompressionCodec): 2
DefaultCodec (org.apache.hadoop.io.compress.DefaultCodec): 2
RecordWriter (org.apache.hadoop.mapreduce.RecordWriter): 2
Field (java.lang.reflect.Field): 1
MalformedURLException (java.net.MalformedURLException): 1
URL (java.net.URL): 1
ArrayList (java.util.ArrayList): 1
Entry (java.util.Map.Entry): 1
FileSystem (org.apache.hadoop.fs.FileSystem): 1
RCFile (org.apache.hadoop.hive.ql.io.RCFile): 1
MapFile (org.apache.hadoop.io.MapFile): 1
Option (org.apache.hadoop.io.MapFile.Writer.Option): 1
MapWritable (org.apache.hadoop.io.MapWritable): 1
SequenceFile (org.apache.hadoop.io.SequenceFile): 1