Use of org.apache.hadoop.chukwa.Chunk in project Honu by jboulon.
In the class LockFreeWriter, the method run:
public void run() {
  List<Chunk> chunks = new LinkedList<Chunk>();
  Chunk chunk = null;
  long now = 0;
  while ((isRunning || !chunks.isEmpty()) && (System.currentTimeMillis() < timeOut)) {
    try {
      now = System.currentTimeMillis();
      // Don't rotate if we are not running
      if (isRunning && (now >= nextRotate)) {
        rotate();
      }
      if (System.currentTimeMillis() >= nextTimePeriodComputation) {
        computeTimePeriod();
      }
      if (chunks.isEmpty()) {
        // Block up to one second for the first chunk ...
        chunk = this.ChunkQueue.poll(1000, TimeUnit.MILLISECONDS);
        if (chunk == null) {
          continue;
        }
        chunks.add(chunk);
        // ... then grab up to 10 more chunks without blocking, so they are written as one batch
        this.ChunkQueue.drainTo(chunks, 10);
      }
      add(chunks);
      chunks.clear();
    } catch (InterruptedException e) {
      isRunning = false;
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
  log.info(group + "- Shutdown request exit loop ..., ChunkQueue.size at exit time: " + this.ChunkQueue.size());
  try {
    this.internalClose();
    log.info(group + "- Shutdown request internalClose done ...");
  } catch (Exception e) {
    e.printStackTrace();
  } finally {
    isStopped = true;
  }
}
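For reference, the poll-then-drain batching used in run() above can be reproduced with plain java.util.concurrent. The sketch below is a minimal, self-contained illustration (the class and variable names are invented for this example, not taken from Honu) of a consumer that blocks briefly for the first element and then drains a small batch without blocking, mirroring ChunkQueue.poll(1000, MILLISECONDS) followed by drainTo(chunks, 10).

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class BatchingConsumerSketch {
  public static void main(String[] args) throws InterruptedException {
    BlockingQueue<String> queue = new LinkedBlockingQueue<String>();
    for (int i = 0; i < 25; i++) {
      queue.offer("event-" + i);
    }
    List<String> batch = new ArrayList<String>();
    while (!queue.isEmpty() || !batch.isEmpty()) {
      if (batch.isEmpty()) {
        // Block up to one second for the first element, like ChunkQueue.poll(1000, MILLISECONDS)
        String first = queue.poll(1000, TimeUnit.MILLISECONDS);
        if (first == null) {
          continue; // nothing arrived, re-check the loop condition
        }
        batch.add(first);
        // Grab up to 10 more elements without blocking, like ChunkQueue.drainTo(chunks, 10)
        queue.drainTo(batch, 10);
      }
      System.out.println("writing batch of " + batch.size());
      batch.clear();
    }
  }
}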
Use of org.apache.hadoop.chukwa.Chunk in project Honu by jboulon.
In the class LockFreeWriter, the method add:
/**
* Best effort, there's no guarantee that chunks
* have really been written to disk
*/
public CommitStatus add(List<Chunk> chunks) throws WriterException {
  Tracer t = Tracer.startNewTracer("honu.server." + group + ".addToList");
  long now = System.currentTimeMillis();
  if (chunks != null) {
    try {
      chunksWrittenThisRotate = true;
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
      for (Chunk chunk : chunks) {
        // Skip null entries before touching the chunk
        if (chunk == null) {
          continue;
        }
        archiveKey.setTimePartition(timePeriod);
        archiveKey.setDataType(chunk.getDataType());
        archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource() + "/" + chunk.getStreamName());
        archiveKey.setSeqId(chunk.getSeqID());
        seqFileWriter.append(archiveKey, chunk);
        // compute size for stats
        dataSize += chunk.getData().length;
      }
      long end = System.currentTimeMillis();
      if (log.isDebugEnabled()) {
        log.debug(group + "- duration=" + (end - now) + " size=" + chunks.size());
      }
    } catch (IOException e) {
      if (t != null) {
        t.stopAndLogTracer();
      }
      writeChunkRetries--;
      log.error(group + "- Could not save the chunk. ", e);
      if (writeChunkRetries < 0) {
        log.fatal(group + "- Too many IOExceptions while trying to write a chunk, Collector is going to exit!");
        DaemonWatcher.bailout(-1);
      }
      throw new WriterException(e);
    }
  }
  if (t != null) {
    t.stopAndLogTracer();
  }
  return COMMIT_OK;
}
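For context, seqFileWriter.append(archiveKey, chunk) above is a Hadoop SequenceFile append of a Writable key/value pair. The stand-alone sketch below shows the same append pattern with simple Writable types; the output path and key/value classes are placeholders, and it assumes the older SequenceFile.createWriter(fs, conf, path, keyClass, valueClass) factory that Honu-era Hadoop versions provide.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SequenceFileAppendSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    Path path = new Path("/tmp/chunks.seq"); // placeholder output path

    // Older-style factory method; newer Hadoop versions prefer the Writer.Option-based createWriter
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path, Text.class, BytesWritable.class);
    try {
      byte[] data = "one record".getBytes();
      // Each append writes one key/value pair, analogous to (archiveKey, chunk) above
      writer.append(new Text("stream-name/source/seq-1"), new BytesWritable(data));
    } finally {
      writer.close();
    }
  }
}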
Use of org.apache.hadoop.chukwa.Chunk in project Honu by jboulon.
In the class CmdLineConverter, the method getChunk:
public static Chunk getChunk(ChunkBuilder cb, String dataType) {
  Chunk c = cb.getChunk();
  c.setApplication("CmdLineConverter");
  c.setDataType(dataType);
  c.setSeqID(System.currentTimeMillis());
  c.setSource(localHostAddr);
  return c;
}
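A possible usage sketch, assuming the Honu and Chukwa classes are on the classpath and CmdLineConverter is visible from the calling code: records are first accumulated in a ChunkBuilder, then getChunk(...) stamps the metadata. The data type string and record contents below are made up for illustration; only ChunkBuilder.addRecord and the Chunk getters shown elsewhere on this page are relied on.

import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkBuilder;

public class GetChunkUsageSketch {
  public static void main(String[] args) {
    // Accumulate a couple of records, then let getChunk(...) stamp application, data type, seqId and source
    ChunkBuilder cb = new ChunkBuilder();
    cb.addRecord("first log line".getBytes());
    cb.addRecord("second log line".getBytes());
    Chunk chunk = CmdLineConverter.getChunk(cb, "CmdLineDataType"); // "CmdLineDataType" is a placeholder data type
    System.out.println("seqId=" + chunk.getSeqID() + ", payload=" + chunk.getData().length + " bytes");
  }
}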
Use of org.apache.hadoop.chukwa.Chunk in project Honu by jboulon.
In the class ThriftCollectorLockFreeImpl, the method process:
public Result process(TChunk tChunk) throws TException {
  // Stop accepting chunks if the collector is not running
  if (!isRunning) {
    Log.warn("Rejecting some incoming traffic!");
    Result result = new Result();
    result.setMessage("Shutting down");
    result.setResultCode(ResultCode.TRY_LATER);
    return result;
  }
  // If there are no log events, return OK
  if (tChunk.getLogEventsSize() == 0) {
    Result result = new Result();
    result.setMessage("" + tChunk.getSeqId());
    result.setResultCode(ResultCode.OK);
    return result;
  }
  Tracer t = Tracer.startNewTracer("honu.server.processChunk");
  //this.counters.get(chunkCountField).incrementAndGet();
  // Repackage the Thrift log events into a Chukwa Chunk
  ChunkBuilder cb = new ChunkBuilder();
  List<String> logEvents = tChunk.getLogEvents();
  for (String logEvent : logEvents) {
    cb.addRecord(logEvent.getBytes());
  }
  Chunk c = cb.getChunk();
  c.setApplication(tChunk.getApplication());
  c.setDataType(tChunk.getDataType());
  c.setSeqID(tChunk.getSeqId());
  c.setSource(tChunk.getSource());
  c.setTags(tChunk.getTags());
  if (isDebug) {
    System.out.println("\n\t ===============");
    System.out.println("tChunk.getApplication() :" + tChunk.getApplication());
    System.out.println("tChunk.getDataType() :" + tChunk.getDataType());
    System.out.println("tChunk.getSeqId() :" + tChunk.getSeqId());
    System.out.println("tChunk.getSource() :" + tChunk.getSource());
    System.out.println("tChunk.getStreamName() :" + tChunk.getStreamName());
    System.out.println("tChunk.getTags() :" + tChunk.getTags());
    System.out.println("c.getApplication() :" + c.getApplication());
    System.out.println("c.getDataType() :" + c.getDataType());
    System.out.println("c.getSeqID() :" + c.getSeqID());
    System.out.println("c.getSource() :" + c.getSource());
    System.out.println("c.getTags() :" + c.getTags());
    System.out.println("c.getData() :" + new String(c.getData()));
  }
  boolean addResult = false;
  try {
    // Bounded offer: if the queue stays full for 2 seconds, the chunk is rejected
    addResult = chunkQueue.offer(c, 2000, TimeUnit.MILLISECONDS);
  } catch (OutOfMemoryError ex) {
    ex.printStackTrace();
    DaemonWatcher.bailout(-1);
  } catch (Throwable e) {
    e.printStackTrace();
    addResult = false;
  }
  Result result = new Result();
  if (addResult) {
    try {
      Counter.increment("honu.server.chunkCount");
      Counter.increment("honu.server.logCount", logEvents.size());
      Counter.increment("honu.server." + tChunk.getApplication() + ".chunkCount");
      Counter.increment("honu.server." + tChunk.getApplication() + ".logCount", logEvents.size());
      (new Tracer("honu.server.chunkSize [messages, not msec]", logEvents.size())).logTracer();
      (new Tracer("honu.server." + tChunk.getApplication() + ".chunkSize [messages, not msec]", logEvents.size())).logTracer();
    } catch (Exception ignored) {
    }
    result.setMessage("" + tChunk.getSeqId());
    result.setResultCode(ResultCode.OK);
  } else {
    try {
      Counter.increment("honu.server.tryLater");
      Counter.increment("honu.server." + tChunk.getApplication() + ".tryLater");
    } catch (Exception ignored) {
    }
    result.setMessage("" + tChunk.getSeqId());
    result.setResultCode(ResultCode.TRY_LATER);
  }
  if (t != null) {
    t.stopAndLogTracer();
  }
  return result;
}
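The addResult flag above comes from a bounded offer with a timeout, which is what turns a full queue into a ResultCode.TRY_LATER backpressure response. The self-contained sketch below (the queue capacity and element names are made up) shows the same offer semantics with plain java.util.concurrent: when the queue stays full for the whole timeout, offer returns false instead of throwing.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class OfferBackpressureSketch {
  public static void main(String[] args) throws InterruptedException {
    // A bounded queue of capacity 2 stands in for chunkQueue
    BlockingQueue<String> queue = new LinkedBlockingQueue<String>(2);
    queue.offer("chunk-1");
    queue.offer("chunk-2");
    // The queue is full and nothing drains it, so this waits 2 seconds and returns false,
    // which is exactly the condition mapped to ResultCode.TRY_LATER above
    boolean accepted = queue.offer("chunk-3", 2000, TimeUnit.MILLISECONDS);
    System.out.println("accepted=" + accepted); // prints accepted=false
  }
}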