Use of co.cask.cdap.filetailer.state.FileTailerState in the project cdap-ingest by caskdata: class FileTailerSinkTest, method basicTestWithCustomPackSizeMultipleWriters.
@Test
public void basicTestWithCustomPackSizeMultipleWriters() throws Exception {
  // Collaborators are mocked; only the concurrent writer's invocation count matters.
  FileTailerStateProcessor stateProcessor = Mockito.mock(FileTailerStateProcessor.class);
  FileTailerMetricsProcessor metricsProcessor = Mockito.mock(FileTailerMetricsProcessor.class);
  FileTailerQueue queue = new FileTailerQueue(DEFAULT_QUEUE_SIZE);
  final AtomicInteger writeCount = new AtomicInteger(0);
  StreamWriter concurrentWriter = getDummyConcurrentWriter(writeCount);
  FileTailerSink sink =
      new FileTailerSink(queue, concurrentWriter, SinkStrategy.LOADBALANCE,
                         stateProcessor, metricsProcessor, null, CUSTOM_PACK_SIZE);
  boolean drained = false;
  try {
    sink.startAsync();
    for (int i = 0; i < TEST_EVENTS_SIZE; i++) {
      queue.put(new FileTailerEvent(
          new FileTailerState("file", 0L, 42, 0L), "test", Charset.defaultCharset()));
    }
    // Poll for up to ~10 seconds until the sink has flushed every queued event.
    for (int attempt = 0; attempt < 10 && !drained; attempt++) {
      if (writeCount.get() == TEST_EVENTS_SIZE) {
        drained = true;
      } else {
        Thread.sleep(1000);
      }
    }
  } finally {
    // Stop the sink regardless of outcome so the test leaves no running service behind.
    sink.stopAsync();
  }
  org.junit.Assert.assertTrue(drained);
}
Use of co.cask.cdap.filetailer.state.FileTailerState in the project cdap-ingest by caskdata: class FileTailerSinkTest, method basicTestWithDefaultPackSize.
@Test
public void basicTestWithDefaultPackSize() throws Exception {
  // Collaborators are mocked; the writer mock records every write for verification below.
  FileTailerStateProcessor stateProcessor = Mockito.mock(FileTailerStateProcessor.class);
  FileTailerMetricsProcessor metricsProcessor = Mockito.mock(FileTailerMetricsProcessor.class);
  FileTailerQueue queue = new FileTailerQueue(DEFAULT_QUEUE_SIZE);
  StreamWriter writerMock = getDummyStreamWriter();
  FileTailerSink sink = new FileTailerSink(queue, writerMock, SinkStrategy.LOADBALANCE,
                                           stateProcessor, metricsProcessor, null);
  try {
    sink.startAsync();
    for (int i = 0; i < TEST_EVENTS_SIZE; i++) {
      queue.put(new FileTailerEvent(new FileTailerState("file", 0L, 42, 0L), "test",
                                    Charset.defaultCharset()));
    }
    // Mockito.timeout waits up to 10s for the expected number of writes to occur.
    Mockito.verify(writerMock, Mockito.timeout(10000).times(TEST_EVENTS_SIZE))
        .write("test", Charset.defaultCharset());
  } finally {
    // Fix: previously stopAsync() was skipped when verify() threw, leaking the running
    // sink; now the sink is always stopped, matching the sibling tests in this class.
    sink.stopAsync();
  }
}
Use of co.cask.cdap.filetailer.state.FileTailerState in the project cdap-ingest by caskdata: class FileTailerSinkTest, method basicTestWithCustomPackSize.
@Test
public void basicTestWithCustomPackSize() throws Exception {
  // Same scenario as the default-pack-size test, but with an explicit CUSTOM_PACK_SIZE.
  FileTailerStateProcessor stateMock = Mockito.mock(FileTailerStateProcessor.class);
  FileTailerMetricsProcessor metricsMock = Mockito.mock(FileTailerMetricsProcessor.class);
  FileTailerQueue eventQueue = new FileTailerQueue(DEFAULT_QUEUE_SIZE);
  StreamWriter writerMock = getDummyStreamWriter();
  FileTailerSink sink =
      new FileTailerSink(eventQueue, writerMock, SinkStrategy.LOADBALANCE,
                         stateMock, metricsMock, null, CUSTOM_PACK_SIZE);
  try {
    sink.startAsync();
    Charset cs = Charset.defaultCharset();
    for (int event = 0; event < TEST_EVENTS_SIZE; event++) {
      eventQueue.put(new FileTailerEvent(new FileTailerState("file", 0L, 42, 0L), "test", cs));
    }
    // Wait (up to 10s) until every enqueued event has been written out.
    Mockito.verify(writerMock, Mockito.timeout(10000).times(TEST_EVENTS_SIZE))
        .write("test", Charset.defaultCharset());
  } finally {
    // Always shut the sink down, even when verification fails.
    sink.stopAsync();
  }
}
Use of co.cask.cdap.filetailer.state.FileTailerState in the project cdap-ingest by caskdata: class LogTailer, method tryReadFromFile.
/**
 * Tries to read log entries from the given channel, enqueueing one event per entry and
 * retrying transient read failures up to {@code failureRetryLimit} times.
 *
 * @param channel open FileChannel positioned at the next byte to read
 * @param separator character that terminates a log entry
 * @param currentLogFile current log file, used for state bookkeeping and logging
 * @param modifyTime last known modification time of the current log file
 * @return last modified time of current log file
 * @throws IOException in case could not read entry after failureRetryLimit attempts
 * @throws InterruptedException in case thread was interrupted
 */
private long tryReadFromFile(FileChannel channel, char separator, File currentLogFile, long modifyTime) throws IOException, InterruptedException {
  int retryNumber = 0;
  // Remember the most recent read failure so the final IOException carries a cause.
  IOException lastFailure = null;
  long position = channel.position();
  StringBuilder sb = new StringBuilder();
  while (isRunning()) {
    // A non-positive failureRetryLimit means "retry indefinitely".
    if (retryNumber > failureRetryLimit && failureRetryLimit > 0) {
      LOG.error("fail to read line after {} attempts", retryNumber);
      // Fix: was `throw new IOException()` — no message, no cause. Same exception type,
      // but now it explains what failed and chains the last underlying error.
      throw new IOException("Failed to read from " + currentLogFile + " after "
                            + retryNumber + " attempts", lastFailure);
    }
    try {
      readBuffer.clear();
      decoded.clear();
      int len = channel.read(readBuffer);
      if (len >= 0) {
        readBuffer.flip();
        // NOTE(review): clear() above discards any bytes of a partially-decoded
        // multi-byte character left in readBuffer by a previous decode() call —
        // confirm the charset is single-byte or reads align on char boundaries.
        decoder.decode(readBuffer, decoded, false);
        decoded.flip();
        for (int i = 0; i < decoded.length(); i++) {
          char ch = decoded.charAt(i);
          if (ch != separator) {
            sb.append(ch);
          } else {
            // Separator reached: emit the accumulated line as one tailer event.
            String line = sb.toString();
            int lineHash = line.hashCode();
            LOG.debug("From log file {} read entry: {}", currentLogFile, line);
            modifyTime = currentLogFile.lastModified();
            queue.put(new FileTailerEvent(
                new FileTailerState(currentLogFile.toString(), position, lineHash, modifyTime),
                line, charset));
            // Fix: encode the line once; it was previously encoded twice
            // (once for metrics, once for the position advance).
            int lineByteLength = line.getBytes(charset).length;
            metricsProcessor.onReadEventMetric(lineByteLength);
            position += lineByteLength + separatorByteLength;
            sb.setLength(0);
          }
        }
      } else {
        break; // end of file reached
      }
    } catch (IOException e) {
      lastFailure = e;
      retryNumber++;
      Thread.sleep(failureSleepInterval);
    }
  }
  return modifyTime;
}
Use of co.cask.cdap.filetailer.state.FileTailerState in the project cdap-ingest by caskdata: class LogTailer, method run.
/**
 * Runs the log tailer thread: verifies the log directory exists, then either resumes
 * from a persisted File Tailer state or reads all logs from the beginning.
 */
public void run() {
  try {
    checkLogDirExists(logDirectory);
  } catch (LogDirNotFoundException e) {
    // Nothing to tail without a log directory; bail out of the thread.
    LOG.error("Incorrect path to log directory; directory {} does not exist", logDirectory.getAbsolutePath());
    return;
  }
  // Null state means no prior run was persisted.
  FileTailerState fileTailerState = getSaveStateFromFile();
  try {
    if (fileTailerState == null) {
      LOG.info("File Tailer state was not found; start reading all logs from the directory from the beginning");
      runWithOutRestore();
    } else {
      LOG.info("Start recover from state file");
      runFromSaveState(fileTailerState);
    }
  } catch (InterruptedException e) {
    // Fix: restore the interrupt flag so the owner of this thread can observe the
    // interruption (it was previously swallowed).
    Thread.currentThread().interrupt();
    LOG.info("Tailer daemon was interrupted");
  }
  LOG.info("Tailer daemon stopped");
}
Aggregations