Use of org.apache.ranger.audit.model.AuditEventBase in project ranger by apache.
The runLogAudit method of the class AuditAsyncQueue:
public void runLogAudit() {
    while (true) {
        try {
            AuditEventBase event = null;
            if (!isDrain()) {
                // For a transfer queue, take() is blocking
                event = queue.take();
            } else {
                // For a transfer queue, poll() is non-blocking
                event = queue.poll();
            }
            if (event != null) {
                Collection<AuditEventBase> eventList = new ArrayList<AuditEventBase>();
                eventList.add(event);
                queue.drainTo(eventList, MAX_DRAIN - 1);
                consumer.log(eventList);
            }
        } catch (InterruptedException e) {
            logger.info("Caught exception in consumer thread. Shutdown might be in progress");
        } catch (Throwable t) {
            logger.error("Caught error during processing request.", t);
        }
        if (isDrain()) {
            if (queue.isEmpty()) {
                break;
            }
            if (isDrainMaxTimeElapsed()) {
                logger.warn("Exiting polling loop because max time allowed reached. name=" + getName() + ", waited for " + (stopTime - System.currentTimeMillis()) + " ms");
            }
        }
    }
    logger.info("Exiting polling loop. name=" + getName());
    try {
        // Call stop on the consumer
        logger.info("Calling to stop consumer. name=" + getName() + ", consumer.name=" + consumer.getName());
        consumer.stop();
    } catch (Throwable t) {
        logger.error("Error while calling stop on consumer.", t);
    }
    logger.info("Exiting consumerThread.run() method. name=" + getName());
}
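The take()/poll()/drainTo() combination above is the usual way to batch events off a BlockingQueue: block for the first event while running, poll without blocking once draining, then opportunistically drain whatever else is already queued, capped at MAX_DRAIN. A minimal self-contained sketch of the same pattern, assuming an unbounded LinkedBlockingQueue and a String payload (the class and method names here are illustrative, not Ranger APIs):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Illustrative stand-alone sketch of the take()/drainTo() batching pattern above.
public class BatchingConsumerSketch {
    private static final int MAX_DRAIN = 1000;
    private final BlockingQueue<String> queue = new LinkedBlockingQueue<>();
    private volatile boolean drain = false;

    public void offer(String event) { queue.offer(event); }
    public void shutdown() { drain = true; }

    public void runLoop() throws InterruptedException {
        while (true) {
            // Block for the first event while running; poll without blocking once draining.
            String first = drain ? queue.poll() : queue.take();
            if (first != null) {
                List<String> batch = new ArrayList<>();
                batch.add(first);
                // Grab whatever else is immediately available, up to the cap.
                queue.drainTo(batch, MAX_DRAIN - 1);
                System.out.println("Dispatching batch of " + batch.size());
            }
            if (drain && queue.isEmpty()) {
                break; // everything flushed
            }
        }
    }
}

A real shutdown path would also interrupt the thread blocked in take(); that is what the InterruptedException handler in the Ranger code above accounts for.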
Use of org.apache.ranger.audit.model.AuditEventBase in project ranger by apache.
The runLogAudit method of the class AuditBatchQueue:
public void runLogAudit() {
    long lastDispatchTime = System.currentTimeMillis();
    boolean isDestActive = true;
    while (true) {
        logStatusIfRequired();
        // Time remaining until the next scheduled dispatch
        long nextDispatchDuration = lastDispatchTime - System.currentTimeMillis() + getMaxBatchInterval();
        boolean isToSpool = false;
        boolean fileSpoolDrain = false;
        try {
            if (fileSpoolerEnabled && fileSpooler.isPending()) {
                int percentUsed = queue.size() * 100 / getMaxQueueSize();
                long lastAttemptDelta = fileSpooler.getLastAttemptTimeDelta();
                fileSpoolDrain = lastAttemptDelta > fileSpoolMaxWaitTime;
                // Should we even read from the queue?
                if (!isDrain() && !fileSpoolDrain && percentUsed < fileSpoolDrainThresholdPercent) {
                    // Not in drain mode, so let's wait and retry
                    if (nextDispatchDuration > 0) {
                        Thread.sleep(nextDispatchDuration);
                        lastDispatchTime = System.currentTimeMillis();
                    }
                    continue;
                }
                isToSpool = true;
            }
            AuditEventBase event = null;
            if (!isToSpool && !isDrain() && !fileSpoolDrain && nextDispatchDuration > 0) {
                event = queue.poll(nextDispatchDuration, TimeUnit.MILLISECONDS);
            } else {
                // Here poll() is non-blocking
                event = queue.poll();
            }
            if (event != null) {
                localBatchBuffer.add(event);
                if (getMaxBatchSize() >= localBatchBuffer.size()) {
                    queue.drainTo(localBatchBuffer, getMaxBatchSize() - localBatchBuffer.size());
                }
            } else {
                // poll() returned due to timeout, so reset the clock
                nextDispatchDuration = lastDispatchTime - System.currentTimeMillis() + getMaxBatchInterval();
                lastDispatchTime = System.currentTimeMillis();
            }
        } catch (InterruptedException e) {
            logger.info("Caught exception in consumer thread. Shutdown might be in progress");
            setDrain(true);
        } catch (Throwable t) {
            logger.error("Caught error during processing request.", t);
        }
        addTotalCount(localBatchBuffer.size());
        if (localBatchBuffer.size() > 0 && isToSpool) {
            // Spool to the file directly
            if (isDestActive) {
                logger.info("Switching to file spool. Queue=" + getName() + ", dest=" + consumer.getName());
            }
            isDestActive = false;
            // Just before stashing
            lastDispatchTime = System.currentTimeMillis();
            fileSpooler.stashLogs(localBatchBuffer);
            addStashedCount(localBatchBuffer.size());
            localBatchBuffer.clear();
        } else if (localBatchBuffer.size() > 0 && (isDrain() || localBatchBuffer.size() >= getMaxBatchSize() || nextDispatchDuration <= 0)) {
            if (fileSpoolerEnabled && !isDestActive) {
                logger.info("Switching to writing to destination. Queue=" + getName() + ", dest=" + consumer.getName());
            }
            // Reset the time just before sending the logs
            lastDispatchTime = System.currentTimeMillis();
            boolean ret = consumer.log(localBatchBuffer);
            if (!ret) {
                if (fileSpoolerEnabled) {
                    logger.info("Switching to file spool. Queue=" + getName() + ", dest=" + consumer.getName());
                    // Transient error. Stash and move on
                    fileSpooler.stashLogs(localBatchBuffer);
                    isDestActive = false;
                    addStashedCount(localBatchBuffer.size());
                } else {
                    // We need to drop these events
                    addFailedCount(localBatchBuffer.size());
                    logFailedEvent(localBatchBuffer);
                }
            } else {
                isDestActive = true;
                addSuccessCount(localBatchBuffer.size());
            }
            localBatchBuffer.clear();
        }
        if (isDrain()) {
            if (!queue.isEmpty() || localBatchBuffer.size() > 0) {
                logger.info("Queue is not empty. Will retry. queue.size()=" + queue.size() + ", localBatchBuffer.size()=" + localBatchBuffer.size());
            } else {
                break;
            }
            if (isDrainMaxTimeElapsed()) {
                logger.warn("Exiting polling loop because max time allowed reached. name=" + getName() + ", waited for " + (stopTime - System.currentTimeMillis()) + " ms");
            }
        }
    }
    logger.info("Exiting consumerThread. Queue=" + getName() + ", dest=" + consumer.getName());
    try {
        // Call stop on the consumer
        logger.info("Calling to stop consumer. name=" + getName() + ", consumer.name=" + consumer.getName());
        consumer.stop();
        if (fileSpoolerEnabled) {
            fileSpooler.stop();
        }
    } catch (Throwable t) {
        logger.error("Error while calling stop on consumer.", t);
    }
    logStatus();
    logger.info("Exiting consumerThread.run() method. name=" + getName());
}
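The batching deadline is driven by the expression lastDispatchTime - now + getMaxBatchInterval(), used both as the poll() timeout and, once it drops to zero or below, as the flush trigger. A small illustrative helper (not a Ranger API) makes the arithmetic explicit:

// Illustrative helper, not part of Ranger: milliseconds remaining until the
// next scheduled dispatch. A positive result means the loop may keep waiting
// for events; zero or negative means the batch interval has elapsed and the
// local buffer should be flushed to the destination.
static long nextDispatchDuration(long lastDispatchTimeMs, long maxBatchIntervalMs) {
    return lastDispatchTimeMs - System.currentTimeMillis() + maxBatchIntervalMs;
}

For example, with a 1000 ms batch interval and a last dispatch 700 ms ago, this yields roughly 300 ms, so poll() waits at most 300 ms before the loop flushes on timeout.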
Use of org.apache.ranger.audit.model.AuditEventBase in project ranger by apache.
The runLogAudit method of the class AuditFileCacheProviderSpool:
public void runLogAudit() {
    while (true) {
        try {
            if (isDestDown) {
                logger.info("Destination is down. Sleeping for " + retryDestinationMS + " milliseconds. indexQueue=" + indexQueue.size() + ", queueName=" + FILE_CACHE_PROVIDER_NAME + ", consumer=" + consumerProvider.getName());
                Thread.sleep(retryDestinationMS);
            }
            // Let's pause between each iteration
            if (currentConsumerIndexRecord == null) {
                currentConsumerIndexRecord = indexQueue.poll(retryDestinationMS, TimeUnit.MILLISECONDS);
            } else {
                Thread.sleep(retryDestinationMS);
            }
            if (isDrain) {
                // Need to exit
                break;
            }
            if (currentConsumerIndexRecord == null) {
                closeFileIfNeeded();
                continue;
            }
            boolean isRemoveIndex = false;
            File consumerFile = new File(currentConsumerIndexRecord.filePath);
            if (!consumerFile.exists()) {
                logger.error("Consumer file=" + consumerFile.getPath() + " not found.");
                printIndex();
                isRemoveIndex = true;
            } else {
                // Let's open the file to read
                BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(currentConsumerIndexRecord.filePath), "UTF-8"));
                try {
                    int startLine = currentConsumerIndexRecord.linePosition;
                    String line;
                    int currLine = 0;
                    List<AuditEventBase> events = new ArrayList<>();
                    while ((line = br.readLine()) != null) {
                        currLine++;
                        if (currLine < startLine) {
                            continue;
                        }
                        AuditEventBase event = MiscUtil.fromJson(line, AuthzAuditEvent.class);
                        events.add(event);
                        if (events.size() == AUDIT_BATCH_SIZE_DEFAULT) {
                            boolean ret = sendEvent(events, currentConsumerIndexRecord, currLine);
                            if (!ret) {
                                throw new Exception("Destination down");
                            }
                            events.clear();
                        }
                    }
                    if (events.size() > 0) {
                        boolean ret = sendEvent(events, currentConsumerIndexRecord, currLine);
                        if (!ret) {
                            throw new Exception("Destination down");
                        }
                        events.clear();
                    }
                    logger.info("Done reading file. file=" + currentConsumerIndexRecord.filePath + ", queueName=" + FILE_CACHE_PROVIDER_NAME + ", consumer=" + consumerProvider.getName());
                    // The entire file has been read
                    currentConsumerIndexRecord.status = SPOOL_FILE_STATUS.done;
                    currentConsumerIndexRecord.doneCompleteTime = new Date();
                    currentConsumerIndexRecord.lastAttempt = true;
                    isRemoveIndex = true;
                } catch (Exception ex) {
                    isDestDown = true;
                    logError("Destination down. queueName=" + FILE_CACHE_PROVIDER_NAME + ", consumer=" + consumerProvider.getName());
                    lastAttemptTime = System.currentTimeMillis();
                    // Update the index file
                    currentConsumerIndexRecord.lastFailedTime = new Date();
                    currentConsumerIndexRecord.failedAttemptCount++;
                    currentConsumerIndexRecord.lastAttempt = false;
                    saveIndexFile();
                } finally {
                    br.close();
                }
            }
            if (isRemoveIndex) {
                // Remove this entry from the index
                removeIndexRecord(currentConsumerIndexRecord);
                currentConsumerIndexRecord = null;
                closeFileIfNeeded();
            }
        } catch (InterruptedException e) {
            logger.info("Caught exception in consumer thread. Shutdown might be in progress");
        } catch (Throwable t) {
            logger.error("Exception in destination writing thread.", t);
        }
    }
    logger.info("Exiting file spooler. provider=" + FILE_CACHE_PROVIDER_NAME + ", consumer=" + consumerProvider.getName());
}
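The heart of the spool reader is a resume-and-replay loop: skip lines already delivered (tracked as linePosition in the index record), parse each remaining line, send in fixed-size batches, and fail fast when the destination is down so the index can record where to resume. A self-contained sketch of that loop, with the parser and sender injected so it stands alone (the names here are illustrative, not Ranger APIs):

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
import java.util.function.Predicate;

// Illustrative sketch of the resume-and-replay pattern above.
public final class SpoolReplaySketch {
    public static <T> void replay(Path file, int startLine, int batchSize,
                                  Function<String, T> parse,
                                  Predicate<List<T>> send) throws IOException {
        try (BufferedReader br = Files.newBufferedReader(file, StandardCharsets.UTF_8)) {
            List<T> batch = new ArrayList<>();
            String line;
            int currLine = 0;
            while ((line = br.readLine()) != null) {
                currLine++;
                if (currLine < startLine) {
                    continue; // already delivered in an earlier attempt
                }
                batch.add(parse.apply(line));
                if (batch.size() == batchSize) {
                    if (!send.test(batch)) {
                        throw new IOException("Destination down at line " + currLine);
                    }
                    batch.clear();
                }
            }
            // Flush any partial final batch
            if (!batch.isEmpty() && !send.test(batch)) {
                throw new IOException("Destination down at line " + currLine);
            }
        }
    }
}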
Use of org.apache.ranger.audit.model.AuditEventBase in project ranger by apache.
The runLogAudit method of the class AuditSummaryQueue:
public void runLogAudit() {
    long lastDispatchTime = System.currentTimeMillis();
    while (true) {
        // Time remaining until the next scheduled dispatch
        long nextDispatchDuration = lastDispatchTime - System.currentTimeMillis() + maxSummaryIntervalMs;
        Collection<AuditEventBase> eventList = new ArrayList<AuditEventBase>();
        try {
            AuditEventBase event = null;
            if (!isDrain() && nextDispatchDuration > 0) {
                event = queue.poll(nextDispatchDuration, TimeUnit.MILLISECONDS);
            } else {
                // Here poll() is non-blocking
                event = queue.poll();
            }
            if (event != null) {
                eventList.add(event);
                queue.drainTo(eventList, MAX_DRAIN - 1);
            } else {
                // poll() returned due to timeout, so reset the clock
                nextDispatchDuration = lastDispatchTime - System.currentTimeMillis() + maxSummaryIntervalMs;
                lastDispatchTime = System.currentTimeMillis();
            }
        } catch (InterruptedException e) {
            logger.info("Caught exception in consumer thread. Shutdown might be in progress");
        } catch (Throwable t) {
            logger.error("Caught error during processing request.", t);
        }
        for (AuditEventBase event : eventList) {
            // Add to the hash map
            String key = event.getEventKey();
            AuditSummary auditSummary = summaryMap.get(key);
            if (auditSummary == null) {
                auditSummary = new AuditSummary();
                auditSummary.event = event;
                auditSummary.startTime = event.getEventTime();
                auditSummary.endTime = event.getEventTime();
                auditSummary.count = 1;
                summaryMap.put(key, auditSummary);
            } else {
                auditSummary.endTime = event.getEventTime();
                auditSummary.count++;
            }
        }
        if (isDrain() || nextDispatchDuration <= 0) {
            // Reset the time just before sending the logs
            lastDispatchTime = System.currentTimeMillis();
            for (Map.Entry<String, AuditSummary> entry : summaryMap.entrySet()) {
                AuditSummary auditSummary = entry.getValue();
                auditSummary.event.setEventCount(auditSummary.count);
                long timeDiff = auditSummary.endTime.getTime() - auditSummary.startTime.getTime();
                timeDiff = timeDiff > 0 ? timeDiff : 1;
                auditSummary.event.setEventDurationMS(timeDiff);
                boolean ret = consumer.log(auditSummary.event);
                if (!ret) {
                    // We need to drop this event
                    logFailedEvent(auditSummary.event);
                }
            }
            summaryMap.clear();
        }
        if (isDrain()) {
            if (summaryMap.isEmpty() && queue.isEmpty()) {
                break;
            }
            if (isDrainMaxTimeElapsed()) {
                logger.warn("Exiting polling loop because max time allowed reached. name=" + getName() + ", waited for " + (stopTime - System.currentTimeMillis()) + " ms");
            }
        }
    }
    logger.info("Exiting polling loop. name=" + getName());
    try {
        // Call stop on the consumer
        logger.info("Calling to stop consumer. name=" + getName() + ", consumer.name=" + consumer.getName());
        consumer.stop();
    } catch (Throwable t) {
        logger.error("Error while calling stop on consumer.", t);
    }
    logger.info("Exiting consumerThread.run() method. name=" + getName());
}
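The summarization step collapses events that share the same event key into a single record carrying a count and a start/end time window, later reported as the event duration (floored at 1 ms). A simplified stand-alone sketch of that bookkeeping, using plain longs and Strings in place of the Ranger event types:

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch of the keyed summarization above; simplified stand-ins,
// not Ranger classes.
public class SummarySketch {
    static final class Summary {
        long startTime;
        long endTime;
        int count;
    }

    private final Map<String, Summary> summaryMap = new HashMap<>();

    public void add(String eventKey, long eventTimeMs) {
        Summary s = summaryMap.get(eventKey);
        if (s == null) {
            s = new Summary();
            s.startTime = eventTimeMs;
            s.endTime = eventTimeMs;
            s.count = 1;
            summaryMap.put(eventKey, s);
        } else {
            s.endTime = eventTimeMs; // extend the time window
            s.count++;
        }
    }

    // Duration reported per summary, floored at 1 ms as in the code above.
    public long durationMs(String eventKey) {
        Summary s = summaryMap.get(eventKey);
        if (s == null) {
            return 0; // unknown key
        }
        long diff = s.endTime - s.startTime;
        return diff > 0 ? diff : 1;
    }
}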
Use of org.apache.ranger.audit.model.AuditEventBase in project ranger by apache.
The main method of the class TestEvents:
public static void main(String[] args) {
    DOMConfigurator.configure("log4j.xml");
    LOG.info("==> TestEvents.main()");
    try {
        Properties auditProperties = new Properties();
        String AUDIT_PROPERTIES_FILE = "xasecure-audit.properties";
        File propFile = new File(AUDIT_PROPERTIES_FILE);
        if (propFile.exists()) {
            LOG.info("Loading Audit properties file: " + AUDIT_PROPERTIES_FILE);
            auditProperties.load(new FileInputStream(propFile));
        } else {
            LOG.info("Audit properties file missing: " + AUDIT_PROPERTIES_FILE);
            auditProperties.setProperty("xasecure.audit.jpa.javax.persistence.jdbc.url", "jdbc:mysql://localhost:3306/xa_db");
            auditProperties.setProperty("xasecure.audit.jpa.javax.persistence.jdbc.user", "xaaudit");
            auditProperties.setProperty("xasecure.audit.jpa.javax.persistence.jdbc.password", "xaaudit");
            auditProperties.setProperty("xasecure.audit.jpa.javax.persistence.jdbc.driver", "com.mysql.jdbc.Driver");
            auditProperties.setProperty("xasecure.audit.is.enabled", "true");
            auditProperties.setProperty("xasecure.audit.log4j.is.enabled", "false");
            auditProperties.setProperty("xasecure.audit.log4j.is.async", "false");
            auditProperties.setProperty("xasecure.audit.log4j.async.max.queue.size", "100000");
            auditProperties.setProperty("xasecure.audit.log4j.async.max.flush.interval.ms", "30000");
            auditProperties.setProperty("xasecure.audit.db.is.enabled", "false");
            auditProperties.setProperty("xasecure.audit.db.is.async", "true");
            auditProperties.setProperty("xasecure.audit.db.async.max.queue.size", "100000");
            auditProperties.setProperty("xasecure.audit.db.async.max.flush.interval.ms", "30000");
            auditProperties.setProperty("xasecure.audit.db.batch.size", "100");
        }
        AuditProviderFactory.getInstance().init(auditProperties, "hdfs");
        AuditHandler provider = AuditProviderFactory.getAuditProvider();
        LOG.info("provider=" + provider.toString());
        String strEventCount = args.length > 0 ? args[0] : auditProperties.getProperty("xasecure.audit.test.event.count");
        String strEventPauseTimeInMs = args.length > 1 ? args[1] : auditProperties.getProperty("xasecure.audit.test.event.pause.time.ms");
        String strSleepTimeBeforeExit = args.length > 2 ? args[2] : auditProperties.getProperty("xasecure.audit.test.sleep.time.before.exit.seconds");
        int eventCount = (strEventCount == null) ? 1024 : Integer.parseInt(strEventCount);
        int eventPauseTime = (strEventPauseTimeInMs == null) ? 0 : Integer.parseInt(strEventPauseTimeInMs);
        int sleepTimeBeforeExit = ((strSleepTimeBeforeExit == null) ? 0 : Integer.parseInt(strSleepTimeBeforeExit)) * 1000;
        for (int i = 0; i < eventCount; i++) {
            AuditEventBase event = getTestEvent(i);
            LOG.info("==> TestEvents.main(" + (i + 1) + "): adding " + event.getClass().getName());
            provider.log(event);
            if (eventPauseTime > 0) {
                Thread.sleep(eventPauseTime);
            }
        }
        provider.waitToComplete();
        // At this point it is still possible that a few local log files haven't made it to HDFS.
        if (sleepTimeBeforeExit > 0) {
            LOG.info("waiting for " + sleepTimeBeforeExit + " ms before exiting..");
            try {
                Thread.sleep(sleepTimeBeforeExit);
            } catch (Exception excp) {
                LOG.info("error while waiting before exiting..");
            }
        }
        provider.stop();
    } catch (Exception excp) {
        LOG.info(excp.getLocalizedMessage());
        excp.printStackTrace();
    }
    LOG.info("<== TestEvents.main()");
}
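The getTestEvent helper is not included in this snippet. A hypothetical version might build an AuthzAuditEvent along these lines; the specific setters populated here are assumptions for illustration, not copied from the project, and as a fragment of the TestEvents class it would also need java.util.Date and org.apache.ranger.audit.model.AuthzAuditEvent imports:

// Hypothetical sketch only: the real getTestEvent is not shown above, and the
// fields populated here are assumptions.
private static AuditEventBase getTestEvent(int idx) {
    AuthzAuditEvent event = new AuthzAuditEvent();
    event.setEventTime(new Date());
    event.setUser("test-user");
    event.setAccessType("read");
    event.setAccessResult((short) (idx % 2)); // alternate denied/allowed
    event.setResourcePath("/test/resource/" + idx);
    return event;
}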