Search in sources :

Example 1 with Tracer

use of org.honu.util.Tracer in project Honu by jboulon.

The class LockFreeWriter, method rotate.

/**
 * Rotates the local data sink: closes the active sequence file, renames it to
 * "&lt;name&gt;.done" when chunks were written during this window (queuing it on
 * fileQueue) or deletes it when empty, then creates a fresh ".chukwa" file and
 * sequence writer. Finally verifies local free-disk space and schedules the
 * next rotation. Any failure while rotating is treated as fatal: the collector
 * bails out and the watchdog is expected to restart it.
 */
protected void rotate() {
    Tracer t = Tracer.startNewTracer("honu.server." + group + ".rotateDataSink");
    isRunning = true;
    calendar.setTimeInMillis(System.currentTimeMillis());
    log.info(group + "- start Date [" + calendar.getTime() + "]");
    log.info(group + "- Rotate from " + Thread.currentThread().getName());
    // Build a unique file name from the formatted date, the local host address
    // and an RMI UID, stripping characters awkward in file names ("-", ":").
    String newName = day.format(calendar.getTime());
    newName += localHostAddr + new java.rmi.server.UID().toString();
    newName = newName.replace("-", "");
    newName = newName.replace(":", "");
    // newName = newName.replace(".", "");
    newName = localOutputDir + "/" + newName.trim();
    try {
        FSDataOutputStream previousOutputStr = currentOutputStr;
        Path previousPath = currentPath;
        String previousFileName = currentFileName;
        // null means this is the first rotation: there is no previous file to retire.
        if (previousOutputStr != null) {
            // Close the sequence writer before the underlying stream it wraps.
            seqFileWriter.close();
            previousOutputStr.close();
            if (chunksWrittenThisRotate) {
                // Mark the finished file as complete and queue it for pickup.
                fs.rename(previousPath, new Path(previousFileName + ".done"));
                fileQueue.add(previousFileName + ".done");
            } else {
                // Nothing was written during this window; drop the empty file.
                log.info(group + "- no chunks written to " + previousPath + ", deleting");
                fs.delete(previousPath, false);
            }
        }
        Path newOutputPath = new Path(newName + ".chukwa");
        FSDataOutputStream newOutputStr = fs.create(newOutputPath);
        currentOutputStr = newOutputStr;
        currentPath = newOutputPath;
        currentFileName = newName;
        chunksWrittenThisRotate = false;
        if (codec != null) {
            seqFileWriter = SequenceFile.createWriter(conf, newOutputStr, ChukwaArchiveKey.class, ChunkImpl.class, SequenceFile.CompressionType.BLOCK, codec);
        } else {
            // NOTE(review): codec is null on this branch; assumed harmless since
            // CompressionType.NONE should ignore it — confirm against the
            // SequenceFile.createWriter overload in use.
            seqFileWriter = SequenceFile.createWriter(conf, newOutputStr, ChukwaArchiveKey.class, ChunkImpl.class, SequenceFile.CompressionType.NONE, codec);
        }
    } catch (Throwable e) {
        // Stop the tracer before exiting so the rotation attempt is still logged.
        if (t != null) {
            t.stopAndLogTracer();
        }
        log.fatal(group + "- Throwable Exception in rotate. Exiting!", e);
        // Shutting down the collector
        // Watchdog will re-start it automatically
        DaemonWatcher.bailout(-1);
    }
    // Check for disk space
    File directory4Space = new File(localOutputDir);
    long totalSpace = directory4Space.getTotalSpace();
    long freeSpace = directory4Space.getFreeSpace();
    long minFreeAvailable = (totalSpace * minPercentFreeDisk) / 100;
    if (log.isDebugEnabled()) {
        log.debug(group + "- Directory: " + localOutputDir + ", totalSpace: " + totalSpace + ", freeSpace: " + freeSpace + ", minFreeAvailable: " + minFreeAvailable + ", percentFreeDisk: " + minPercentFreeDisk);
    }
    // Bail out when free space drops below the configured percentage threshold.
    if (freeSpace < minFreeAvailable) {
        log.fatal(group + "- No space left on device, Bail out!");
        DaemonWatcher.bailout(-1);
    }
    nextRotate = System.currentTimeMillis() + rotateInterval;
    if (t != null) {
        t.stopAndLogTracer();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ChukwaArchiveKey(org.apache.hadoop.chukwa.ChukwaArchiveKey) ChunkImpl(org.apache.hadoop.chukwa.ChunkImpl) Tracer(org.honu.util.Tracer) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) SequenceFile(org.apache.hadoop.io.SequenceFile) File(java.io.File)

Example 2 with Tracer

use of org.honu.util.Tracer in project Honu by jboulon.

The class LockFreeWriter, method add.

/**
 * Appends the given chunks to the current sequence file. Best effort: there is
 * no guarantee that chunks have really been written (flushed) to disk.
 *
 * @param chunks chunks to append; a null list is a no-op and null elements
 *               are skipped
 * @return COMMIT_OK once the chunks have been handed to the writer
 * @throws WriterException if the underlying append fails; after too many
 *                         consecutive IOExceptions the collector bails out
 */
public CommitStatus add(List<Chunk> chunks) throws WriterException {
    Tracer t = Tracer.startNewTracer("honu.server." + group + ".addToList");
    long now = System.currentTimeMillis();
    if (chunks != null) {
        try {
            chunksWrittenThisRotate = true;
            ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
            for (Chunk chunk : chunks) {
                // BUGFIX: the original null check sat AFTER chunk.getDataType(),
                // chunk.getTags() etc. had already been invoked, so a null
                // element would NPE before the guard. Check first and skip.
                if (chunk == null) {
                    continue;
                }
                archiveKey.setTimePartition(timePeriod);
                archiveKey.setDataType(chunk.getDataType());
                archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource() + "/" + chunk.getStreamName());
                archiveKey.setSeqId(chunk.getSeqID());
                seqFileWriter.append(archiveKey, chunk);
                // compute size for stats
                dataSize += chunk.getData().length;
            }
            long end = System.currentTimeMillis();
            if (log.isDebugEnabled()) {
                log.debug(group + "- duration=" + (end - now) + " size=" + chunks.size());
            }
        } catch (IOException e) {
            // Log the failed attempt's trace before accounting the retry.
            if (t != null) {
                t.stopAndLogTracer();
            }
            writeChunkRetries--;
            log.error(group + "- Could not save the chunk. ", e);
            if (writeChunkRetries < 0) {
                log.fatal(group + "- Too many IOException when trying to write a chunk, Collector is going to exit!");
                DaemonWatcher.bailout(-1);
            }
            throw new WriterException(e);
        }
    }
    if (t != null) {
        t.stopAndLogTracer();
    }
    return COMMIT_OK;
}
Also used : ChukwaArchiveKey(org.apache.hadoop.chukwa.ChukwaArchiveKey) Tracer(org.honu.util.Tracer) IOException(java.io.IOException) Chunk(org.apache.hadoop.chukwa.Chunk) WriterException(org.apache.hadoop.chukwa.datacollection.writer.WriterException)

Example 3 with Tracer

use of org.honu.util.Tracer in project Honu by jboulon.

The class MessageSender, method sendChunk.

/**
 * Sends one chunk to a collector, retrying until it has been accepted (or
 * until shutdown is requested). Handles three failure modes: the collector
 * answering TRY_LATER (back off, eventually switch collectors), any Throwable
 * from the transport (drop the connection and back off), and exhausting all
 * known collectors (longer sleep, then start over).
 */
protected void sendChunk(TChunk chunk) {
    boolean firstTryFailed = false;
    boolean hasBeenSent = false;
    int errorCount = 0;
    int tryLaterErrorCount = 0;
    Result result = null;
    do {
        try {
            Tracer t = Tracer.startNewTracer("honu.client.processChunk");
            try {
                // (Re)connect when no collector is bound yet, or after three
                // consecutive TRY_LATER responses from the current one.
                if ((collector == null) || (tryLaterErrorCount >= 3)) {
                    tryLaterErrorCount = 0;
                    initConnection();
                }
                result = collector.process(chunk);
                if (t != null) {
                    t.stopAndLogTracer();
                }
                if (result.getResultCode() == ResultCode.TRY_LATER) {
                    Counter.increment("honu.client.tryLater");
                    Counter.increment("honu.client." + chunk.getApplication() + ".tryLater");
                    tryLaterErrorCount++;
                    // After more than 3 TRY_LATER responses in a row, abandon
                    // this collector so the next iteration switches to another.
                    if (tryLaterErrorCount > 3) {
                        collector = null;
                        errorCount++;
                    } else {
                        // Back off proportionally to the number of TRY_LATERs.
                        randomSleep(5 * tryLaterErrorCount, true);
                    }
                } else {
                    Counter.increment("honu.client.chunkCount");
                    Counter.increment("honu.client.logCount", chunk.getLogEventsSize());
                    Counter.increment("honu.client." + chunk.getApplication() + ".chunkCount");
                    Counter.increment("honu.client." + chunk.getApplication() + ".logCount", chunk.getLogEventsSize());
                }
            } catch (Throwable e) {
                // Transport/processing failure: drop the connection, count the
                // error and back off. The inner try guards against failures in
                // the error handling itself (e.g. logging).
                try {
                    log.warn("exception in sendChunk", e);
                    if (t != null) {
                        t.stopAndLogTracer();
                    }
                    Counter.increment("honu.client.exceptionCount");
                    exceptionCount++;
                    collector = null;
                    errorCount++;
                    randomSleep(300, false);
                } catch (Throwable eIgnored) {
                /* Ignore */
                }
            }
            // All known collectors have failed: sleep (longer on the second
            // full sweep) and restart the error accounting from scratch.
            if (errorCount >= CollectorRegistry.getInstance().getCollectorCount()) {
                if (firstTryFailed == true) {
                    randomSleep(30, true);
                    collector = null;
                    errorCount = 0;
                } else {
                    firstTryFailed = true;
                    randomSleep(15, true);
                    collector = null;
                    errorCount = 0;
                }
            }
            // Success requires an OK result that carries a message (the seqId echo).
            if (result != null && result.getResultCode() == ResultCode.OK && result.isSetMessage()) {
                hasBeenSent = true;
            }
            // On shutdown, give up on this chunk and record the loss on stderr.
            if (hasBeenSent == false && shutDownNow()) {
                System.err.println("Need to shutdown now --" + Thread.currentThread().getName() + " -- Data has not been sent over:" + chunk.toString());
                hasBeenSent = true;
            }
        } catch (Throwable e2) {
            // Last-resort guard so the retry loop itself can never die.
            logApp.warn("Error in sendChunk", e2);
        }
    } while (!hasBeenSent);
}
Also used : Tracer(org.honu.util.Tracer) Result(org.honu.thrift.Result)

Example 4 with Tracer

use of org.honu.util.Tracer in project Honu by jboulon.

The class ThriftCollectorLockFreeImpl, method process.

/**
 * Thrift entry point: converts an incoming TChunk into a Chukwa Chunk and
 * offers it to the internal queue. Returns OK (echoing the seqId in the
 * message) on success, or TRY_LATER when the collector is shutting down or
 * the queue is full, so the client knows to retry elsewhere.
 */
public Result process(TChunk tChunk) throws TException {
    // Stop accepting chunks when the collector is not running (shutting down).
    if (!isRunning) {
        Log.warn("Rejecting some incoming trafic!");
        Result result = new Result();
        result.setMessage("Shutting down");
        result.setResultCode(ResultCode.TRY_LATER);
        return result;
    }
    // If there's no log Events then return OK
    if (tChunk.getLogEventsSize() == 0) {
        Result result = new Result();
        result.setMessage("" + tChunk.getSeqId());
        result.setResultCode(ResultCode.OK);
        return result;
    }
    Tracer t = Tracer.startNewTracer("honu.server.processChunk");
    //this.counters.get(chunkCountField).incrementAndGet();
    ChunkBuilder cb = new ChunkBuilder();
    List<String> logEvents = tChunk.getLogEvents();
    for (String logEvent : logEvents) {
        // NOTE(review): getBytes() uses the platform default charset here;
        // presumably UTF-8 is expected end-to-end — confirm before changing.
        cb.addRecord(logEvent.getBytes());
    }
    // Copy the Thrift chunk's metadata onto the Chukwa chunk.
    Chunk c = cb.getChunk();
    c.setApplication(tChunk.getApplication());
    c.setDataType(tChunk.getDataType());
    c.setSeqID(tChunk.getSeqId());
    c.setSource(tChunk.getSource());
    c.setTags(tChunk.getTags());
    if (isDebug) {
        System.out.println("\n\t ===============");
        System.out.println("tChunk.getApplication() :" + tChunk.getApplication());
        System.out.println("tChunk.getDataType() :" + tChunk.getDataType());
        System.out.println("tChunk.getSeqId() :" + tChunk.getSeqId());
        System.out.println("tChunk.getSource() :" + tChunk.getSource());
        System.out.println("tChunk.getStreamName() :" + tChunk.getStreamName());
        System.out.println("tChunk.getTags() :" + tChunk.getTags());
        System.out.println("c.getApplication() :" + c.getApplication());
        System.out.println("c.getDataType() :" + c.getDataType());
        System.out.println("c.getSeqID() :" + c.getSeqID());
        System.out.println("c.getSource() :" + c.getSource());
        System.out.println("c.getTags() :" + c.getTags());
        System.out.println("c.getData()" + new String(c.getData()));
    }
    boolean addResult = false;
    try {
        // Bounded wait so a full queue turns into TRY_LATER instead of blocking
        // the Thrift worker indefinitely.
        addResult = chunkQueue.offer(c, 2000, TimeUnit.MILLISECONDS);
    } catch (OutOfMemoryError ex) {
        // OOM is unrecoverable: exit and let the watchdog restart the process.
        ex.printStackTrace();
        DaemonWatcher.bailout(-1);
    } catch (Throwable e) {
        e.printStackTrace();
        addResult = false;
    }
    Result result = new Result();
    if (addResult) {
        // Stats are best-effort; never fail the request over accounting.
        try {
            Counter.increment("honu.server.chunkCount");
            Counter.increment("honu.server.logCount", logEvents.size());
            Counter.increment("honu.server." + tChunk.getApplication() + ".chunkCount");
            Counter.increment("honu.server." + tChunk.getApplication() + ".logCount", logEvents.size());
            (new Tracer("honu.server.chunkSize [messages, not msec]", logEvents.size())).logTracer();
            (new Tracer("honu.server." + tChunk.getApplication() + ".chunkSize [messages, not msec]", logEvents.size())).logTracer();
        } catch (Exception ignored) {
        }
        result.setMessage("" + tChunk.getSeqId());
        result.setResultCode(ResultCode.OK);
    } else {
        try {
            Counter.increment("honu.server.tryLater");
            Counter.increment("honu.server." + tChunk.getApplication() + ".tryLater");
        } catch (Exception ignored) {
        }
        result.setMessage("" + tChunk.getSeqId());
        result.setResultCode(ResultCode.TRY_LATER);
    }
    if (t != null) {
        t.stopAndLogTracer();
    }
    return result;
}
Also used : Tracer(org.honu.util.Tracer) ChunkBuilder(org.apache.hadoop.chukwa.ChunkBuilder) TChunk(org.honu.thrift.TChunk) Chunk(org.apache.hadoop.chukwa.Chunk) TException(org.apache.thrift.TException) Result(org.honu.thrift.Result)

Example 5 with Tracer

use of org.honu.util.Tracer in project Honu by jboulon.

The class MessageConsumer, method produceChunk.

/**
 * Drains pending messages into a new TChunk and enqueues it for sending.
 * If the outgoing chunk queue is already full, the chunk is counted as lost
 * and reported instead of being enqueued.
 *
 * @throws InterruptedException if interrupted while putting on chunkQueue
 */
private void produceChunk() throws InterruptedException {
    TChunk chunk = new TChunk();
    // seqId is just the creation timestamp; stream metadata comes from fields.
    chunk.setSeqId(System.currentTimeMillis());
    chunk.setSource(source);
    chunk.setStreamName(streamname);
    chunk.setApplication(application);
    chunk.setDataType(dataType);
    chunk.setTags(tags);
    List<String> logEvents = new LinkedList<String>();
    chunk.setLogEvents(logEvents);
    // Track the high-water mark of the pending message queue for stats.
    int qSize = messages.size();
    if (qSize > maxQueueSize) {
        maxQueueSize = qSize;
    }
    int count = 0;
    // Drain up to maxMessageCountPerChunk entries from the head of
    // the messages list
    // NOTE(review): messages is also read outside this synchronized block
    // (size() above/below) — assumed safe by the class's threading model;
    // confirm. remove(0) is O(n) on an ArrayList-backed list.
    synchronized (wakeUpLink) {
        do {
            logEvents.add(messages.remove(0));
            count++;
        } while (!messages.isEmpty() && count < maxMessageCountPerChunk);
    }
    // Queue-size tracers are best-effort; never fail chunk production on them.
    try {
        (new Tracer("honu.client.messageQueueSize [messages, not msec]", messages.size())).logTracer();
        (new Tracer("honu.client." + chunk.getApplication() + ".messageQueueSize [messages, not msec]", messages.size())).logTracer();
    } catch (Exception ignored) {
    }
    // Outgoing queue full: account the chunk as lost and report it, rather
    // than blocking. (Original truncated comment mentions a backup file; only
    // loss accounting is visible here.)
    if (chunkQueue.size() >= maxChunkQueueSize) {
        try {
            Counter.increment("honu.client.lostChunks");
            Counter.increment("honu.client.lostMessages", chunk.getLogEventsSize());
            Counter.increment("honu.client." + chunk.getApplication() + ".lostChunks");
            Counter.increment("honu.client." + chunk.getApplication() + ".lostMessages", chunk.getLogEventsSize());
        } catch (Exception ignored) {
        }
        kv.startMessage("HonuLostStats");
        kv.addKeyValue("lostChunk", 1);
        kv.addKeyValue("lostLines", chunk.getLogEventsSize());
        kv.addKeyValue("RecordType", chunk.getDataType());
        kv.addKeyValue("SeqId", chunk.getSeqId());
        kv.addKeyValue("period", statFrequency);
        log.error(kv.generateMessage());
        MessageManager.getInstance().updateLostDataStats(chunk);
    } else {
        chunkQueue.put(chunk);
    }
}
Also used : TChunk(org.honu.thrift.TChunk) Tracer(org.honu.util.Tracer) LinkedList(java.util.LinkedList)

Aggregations

Tracer (org.honu.util.Tracer)6 TChunk (org.honu.thrift.TChunk)3 ChukwaArchiveKey (org.apache.hadoop.chukwa.ChukwaArchiveKey)2 Chunk (org.apache.hadoop.chukwa.Chunk)2 Result (org.honu.thrift.Result)2 File (java.io.File)1 IOException (java.io.IOException)1 LinkedList (java.util.LinkedList)1 ChunkBuilder (org.apache.hadoop.chukwa.ChunkBuilder)1 ChunkImpl (org.apache.hadoop.chukwa.ChunkImpl)1 WriterException (org.apache.hadoop.chukwa.datacollection.writer.WriterException)1 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)1 Path (org.apache.hadoop.fs.Path)1 SequenceFile (org.apache.hadoop.io.SequenceFile)1 TException (org.apache.thrift.TException)1