Example 1 with WriterException

Use of org.apache.hadoop.chukwa.datacollection.writer.WriterException in project Honu by jboulon.

In class LockFreeWriter, method add:

/**
 * Best effort; there is no guarantee that chunks
 * have really been written to disk.
 */
public CommitStatus add(List<Chunk> chunks) throws WriterException {
    Tracer t = Tracer.startNewTracer("honu.server." + group + ".addToList");
    long now = System.currentTimeMillis();
    if (chunks != null) {
        try {
            chunksWrittenThisRotate = true;
            ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
            for (Chunk chunk : chunks) {
                // Skip null entries before dereferencing the chunk
                if (chunk == null) {
                    continue;
                }
                archiveKey.setTimePartition(timePeriod);
                archiveKey.setDataType(chunk.getDataType());
                archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource() + "/" + chunk.getStreamName());
                archiveKey.setSeqId(chunk.getSeqID());
                seqFileWriter.append(archiveKey, chunk);
                // compute size for stats
                dataSize += chunk.getData().length;
            }
            long end = System.currentTimeMillis();
            if (log.isDebugEnabled()) {
                log.debug(group + "- duration=" + (end - now) + " size=" + chunks.size());
            }
        } catch (IOException e) {
            if (t != null) {
                t.stopAndLogTracer();
            }
            writeChunkRetries--;
            log.error(group + "- Could not save the chunk. ", e);
            if (writeChunkRetries < 0) {
                log.fatal(group + "- Too many IOExceptions when trying to write a chunk; Collector is going to exit!");
                DaemonWatcher.bailout(-1);
            }
            throw new WriterException(e);
        }
    }
    if (t != null) {
        t.stopAndLogTracer();
    }
    return COMMIT_OK;
}
Also used: ChukwaArchiveKey(org.apache.hadoop.chukwa.ChukwaArchiveKey) Tracer(org.honu.util.Tracer) IOException(java.io.IOException) Chunk(org.apache.hadoop.chukwa.Chunk) WriterException(org.apache.hadoop.chukwa.datacollection.writer.WriterException)
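
As a usage sketch (not from the Honu sources): the snippet below shows how a caller might hand a batch of chunks to add() and react to a WriterException. The nextBatch() helper and the no-argument LockFreeWriter constructor are assumptions for illustration; only add(List<Chunk>), COMMIT_OK, and the WriterException contract come from the example above.

import java.util.List;

import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.datacollection.writer.WriterException;

public class AddUsageSketch {

    // Hypothetical source of chunks; where they come from is outside this example.
    static List<Chunk> nextBatch() {
        throw new UnsupportedOperationException("stub - supply real chunks here");
    }

    public static void main(String[] args) {
        // Assumed no-argument constructor; the real Honu class may require a
        // group name and an init(conf) call before use.
        LockFreeWriter writer = new LockFreeWriter();
        try {
            // add() is best effort: a COMMIT_OK return does not guarantee
            // the chunks are durably on disk yet.
            writer.add(nextBatch());
        } catch (WriterException e) {
            // After exhausting writeChunkRetries the writer bails out the
            // whole process itself; otherwise the caller decides whether
            // to retry or drop the batch.
            System.err.println("batch not committed: " + e.getMessage());
        }
    }
}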

Example 2 with WriterException

Use of org.apache.hadoop.chukwa.datacollection.writer.WriterException in project Honu by jboulon.

In class LockFreeWriter, method init:

@SuppressWarnings("unchecked")
public void init(Configuration conf) throws WriterException {
    this.conf = conf;
    // Force GMT
    day.setTimeZone(TimeZone.getTimeZone("GMT"));
    try {
        fs = FileSystem.getLocal(conf);
        localOutputDir = conf.get("honu.collector." + group + ".localOutputDir", "/honu/datasink/");
        if (!localOutputDir.endsWith("/")) {
            localOutputDir += "/";
        }
        Path pLocalOutputDir = new Path(localOutputDir);
        if (!fs.exists(pLocalOutputDir)) {
            boolean exist = fs.mkdirs(pLocalOutputDir);
            if (!exist) {
                throw new WriterException("Cannot create local dataSink dir: " + localOutputDir);
            }
        } else {
            FileStatus fsLocalOutputDir = fs.getFileStatus(pLocalOutputDir);
            if (!fsLocalOutputDir.isDir()) {
                throw new WriterException("local dataSink dir is not a directory: " + localOutputDir);
            }
        }
    } catch (Throwable e) {
        log.fatal("Cannot initialize LocalWriter", e);
        DaemonWatcher.bailout(-1);
    }
    String codecClass = null;
    try {
        codecClass = conf.get("honu.collector." + group + ".datasink.codec");
        if (codecClass != null) {
            Class<?> classDefinition = Class.forName(codecClass);
            codec = (CompressionCodec) ReflectionUtils.newInstance(classDefinition, conf);
            log.info(group + "- Codec:" + codec.getDefaultExtension());
        }
    } catch (Exception e) {
        log.fatal(group + "- Compression codec for " + codecClass + " was not found.", e);
        DaemonWatcher.bailout(-1);
    }
    minPercentFreeDisk = conf.getInt("honu.collector." + group + ".minPercentFreeDisk", 20);
    // defaults to 5 minutes
    rotateInterval = conf.getInt("honu.collector." + group + ".rotateInterval", 1000 * 60 * 5);
    initWriteChunkRetries = conf.getInt("honu.collector." + group + ".writeChunkRetries", 10);
    writeChunkRetries = initWriteChunkRetries;
    log.info(group + "- rotateInterval is " + rotateInterval);
    log.info(group + "- outputDir is " + localOutputDir);
    log.info(group + "- localFileSystem is " + fs.getUri().toString());
    log.info(group + "- minPercentFreeDisk is " + minPercentFreeDisk);
    statTimer = new Timer();
    statTimer.schedule(new StatReportingTask(), 1000, STAT_INTERVAL_SECONDS * 1000);
    fileQueue = new LinkedBlockingQueue<String>();
    localToRemoteHdfsMover = new LocalToRemoteHdfsMover(group, fileQueue, conf);
    this.start();
}
Also used: Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Timer(java.util.Timer) WriterException(org.apache.hadoop.chukwa.datacollection.writer.WriterException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException)
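
To make the configuration surface explicit, here is a hedged sketch of wiring up init(). The "demo" group and the no-argument constructor are assumptions, as above; the honu.collector.* keys and their defaults are taken directly from the method body.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.GzipCodec;

public class InitUsageSketch {

    public static void main(String[] args) throws Exception {
        // Placeholder group name; the real value is whatever this writer
        // instance was registered under.
        String group = "demo";

        Configuration conf = new Configuration();
        // Keys and defaults mirror the conf.get()/conf.getInt() calls in init().
        conf.set("honu.collector." + group + ".localOutputDir", "/honu/datasink/");
        conf.set("honu.collector." + group + ".datasink.codec", GzipCodec.class.getName());
        conf.setInt("honu.collector." + group + ".minPercentFreeDisk", 20);
        conf.setInt("honu.collector." + group + ".rotateInterval", 1000 * 60 * 5); // 5 minutes
        conf.setInt("honu.collector." + group + ".writeChunkRetries", 10);

        LockFreeWriter writer = new LockFreeWriter(); // assumed constructor
        // init() is declared to throw WriterException, though this
        // implementation logs fatally and bails out the process on
        // directory-setup failures instead of propagating.
        writer.init(conf);
    }
}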

Aggregations

IOException (java.io.IOException): 2
WriterException (org.apache.hadoop.chukwa.datacollection.writer.WriterException): 2
UnknownHostException (java.net.UnknownHostException): 1
Timer (java.util.Timer): 1
ChukwaArchiveKey (org.apache.hadoop.chukwa.ChukwaArchiveKey): 1
Chunk (org.apache.hadoop.chukwa.Chunk): 1
FileStatus (org.apache.hadoop.fs.FileStatus): 1
Path (org.apache.hadoop.fs.Path): 1
Tracer (org.honu.util.Tracer): 1