Use of org.apache.hadoop.chukwa.datacollection.writer.WriterException in project Honu by jboulon.
The class LockFreeWriter, method add:
/**
 * Best effort: there is no guarantee that the chunks
 * have actually been written to disk.
 */
public CommitStatus add(List<Chunk> chunks) throws WriterException {
  Tracer t = Tracer.startNewTracer("honu.server." + group + ".addToList");
  long now = System.currentTimeMillis();
  if (chunks != null) {
    try {
      chunksWrittenThisRotate = true;
      ChukwaArchiveKey archiveKey = new ChukwaArchiveKey();
      for (Chunk chunk : chunks) {
        // Skip null entries before dereferencing the chunk.
        if (chunk == null) {
          continue;
        }
        archiveKey.setTimePartition(timePeriod);
        archiveKey.setDataType(chunk.getDataType());
        archiveKey.setStreamName(chunk.getTags() + "/" + chunk.getSource() + "/" + chunk.getStreamName());
        archiveKey.setSeqId(chunk.getSeqID());
        seqFileWriter.append(archiveKey, chunk);
        // Track bytes written for the periodic stats report.
        dataSize += chunk.getData().length;
      }
      long end = System.currentTimeMillis();
      if (log.isDebugEnabled()) {
        log.debug(group + "- duration=" + (end - now) + " size=" + chunks.size());
      }
    } catch (IOException e) {
      if (t != null) {
        t.stopAndLogTracer();
      }
      writeChunkRetries--;
      log.error(group + "- Could not save the chunk.", e);
      if (writeChunkRetries < 0) {
        log.fatal(group + "- Too many IOExceptions while trying to write a chunk; the collector is going to exit!");
        DaemonWatcher.bailout(-1);
      }
      throw new WriterException(e);
    }
  }
  if (t != null) {
    t.stopAndLogTracer();
  }
  return COMMIT_OK;
}
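For context, a minimal sketch of how a caller might hand a batch to this method is shown below. It assumes an already-initialized LockFreeWriter named writer and uses Chukwa's ChunkBuilder helper to assemble a chunk; the record contents and data type are made up for illustration, and the Honu-side import for LockFreeWriter is omitted since it depends on the project's package layout.

import java.util.Collections;

import org.apache.hadoop.chukwa.Chunk;
import org.apache.hadoop.chukwa.ChunkBuilder;
import org.apache.hadoop.chukwa.datacollection.writer.WriterException;

public class AddExample {
  // "writer" is a hypothetical, already-configured LockFreeWriter.
  static void send(LockFreeWriter writer) throws WriterException {
    // Assemble a single chunk from one record.
    ChunkBuilder builder = new ChunkBuilder();
    builder.addRecord("sample log line".getBytes());
    Chunk chunk = builder.getChunk();
    chunk.setDataType("AppLogs"); // illustrative data type name

    // Best effort: COMMIT_OK means the chunk was appended to the current
    // sink file, not that it has been flushed to disk.
    writer.add(Collections.singletonList(chunk));
  }
}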
Use of org.apache.hadoop.chukwa.datacollection.writer.WriterException in project Honu by jboulon.
The class LockFreeWriter, method init:
@SuppressWarnings("unchecked")
public void init(Configuration conf) throws WriterException {
  this.conf = conf;
  // Force GMT so time partitions are independent of the local timezone.
  day.setTimeZone(TimeZone.getTimeZone("GMT"));
  try {
    fs = FileSystem.getLocal(conf);
    localOutputDir = conf.get("honu.collector." + group + ".localOutputDir", "/honu/datasink/");
    if (!localOutputDir.endsWith("/")) {
      localOutputDir += "/";
    }
    Path pLocalOutputDir = new Path(localOutputDir);
    if (!fs.exists(pLocalOutputDir)) {
      boolean created = fs.mkdirs(pLocalOutputDir);
      if (!created) {
        throw new WriterException("Cannot create local dataSink dir: " + localOutputDir);
      }
    } else {
      FileStatus fsLocalOutputDir = fs.getFileStatus(pLocalOutputDir);
      if (!fsLocalOutputDir.isDir()) {
        throw new WriterException("Local dataSink dir is not a directory: " + localOutputDir);
      }
    }
  } catch (Throwable e) {
    log.fatal("Cannot initialize LockFreeWriter", e);
    DaemonWatcher.bailout(-1);
  }
  String codecClass = null;
  try {
    // Optionally compress the sink files with a configurable Hadoop codec.
    codecClass = conf.get("honu.collector." + group + ".datasink.codec");
    if (codecClass != null) {
      Class classDefinition = Class.forName(codecClass);
      codec = (CompressionCodec) ReflectionUtils.newInstance(classDefinition, conf);
      log.info(group + "- Codec:" + codec.getDefaultExtension());
    }
  } catch (Exception e) {
    log.fatal(group + "- Compression codec " + codecClass + " was not found.", e);
    DaemonWatcher.bailout(-1);
  }
  minPercentFreeDisk = conf.getInt("honu.collector." + group + ".minPercentFreeDisk", 20);
  rotateInterval = conf.getInt("honu.collector." + group + ".rotateInterval",
      1000 * 60 * 5); // defaults to 5 minutes
  initWriteChunkRetries = conf.getInt("honu.collector." + group + ".writeChunkRetries", 10);
  writeChunkRetries = initWriteChunkRetries;
  log.info(group + "- rotateInterval is " + rotateInterval);
  log.info(group + "- outputDir is " + localOutputDir);
  log.info(group + "- localFileSystem is " + fs.getUri().toString());
  log.info(group + "- minPercentFreeDisk is " + minPercentFreeDisk);
  // Periodically report writer statistics.
  statTimer = new Timer();
  statTimer.schedule(new StatReportingTask(), 1000, STAT_INTERVAL_SECONDS * 1000);
  // Completed sink files are queued for a background mover that ships them to HDFS.
  fileQueue = new LinkedBlockingQueue<String>();
  localToRemoteHdfsMover = new LocalToRemoteHdfsMover(group, fileQueue, conf);
  this.start();
}
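All of the settings read by init live under the honu.collector.<group> namespace. As a reference, here is a sketch of populating them programmatically on a Hadoop Configuration; the group name "group1" is illustrative, and the values simply mirror the defaults visible in the method above.

import org.apache.hadoop.conf.Configuration;

public class WriterConfExample {
  static Configuration buildConf() {
    Configuration conf = new Configuration();
    // Keys are scoped by the writer's group name ("group1" here).
    conf.set("honu.collector.group1.localOutputDir", "/honu/datasink/");
    // Optional: compress sink files with any Hadoop CompressionCodec.
    conf.set("honu.collector.group1.datasink.codec",
        "org.apache.hadoop.io.compress.GzipCodec");
    conf.setInt("honu.collector.group1.minPercentFreeDisk", 20);
    conf.setInt("honu.collector.group1.rotateInterval", 1000 * 60 * 5);
    conf.setInt("honu.collector.group1.writeChunkRetries", 10);
    return conf;
  }
}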