Use of org.apache.atlas.model.audit.EntityAuditEventV2 in project atlas by apache.
The class EntityAuditListenerV2, method onClassificationsUpdated.
@Override
public void onClassificationsUpdated(AtlasEntity entity, List<AtlasClassification> classifications) throws AtlasBaseException {
    if (CollectionUtils.isNotEmpty(classifications)) {
        List<EntityAuditEventV2> events = new ArrayList<>();

        for (AtlasClassification classification : classifications) {
            events.add(createEvent(entity, CLASSIFICATION_UPDATE, "Updated classification: " + AtlasType.toJson(classification)));
        }

        auditRepository.putEventsV2(events);
    }
}
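The createEvent helper called above is a private method of EntityAuditListenerV2 and is not part of this snippet. A minimal sketch of such a helper is shown below for illustration only; the setter names, the timestamp choice, and the EntityAuditAction parameter type are assumptions inferred from the accessors used in the other snippets, not the actual Atlas implementation.

// Hypothetical sketch only; the real private createEvent(...) in EntityAuditListenerV2 may differ.
private EntityAuditEventV2 createEvent(AtlasEntity entity, EntityAuditAction action, String details) {
    EntityAuditEventV2 event = new EntityAuditEventV2();

    event.setEntityId(entity.getGuid());            // assumed: the event is keyed by the entity GUID
    event.setTimestamp(System.currentTimeMillis()); // assumed: the actual code may use the request time instead
    event.setAction(action);
    event.setDetails(details);

    return event;
}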
Use of org.apache.atlas.model.audit.EntityAuditEventV2 in project atlas by apache.
The class HBaseBasedAuditRepository, method listEventsV2.
@Override
public List<EntityAuditEventV2> listEventsV2(String entityId, String startKey, short n) throws AtlasBaseException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Listing events for entity id {}, starting timestamp {}, #records {}", entityId, startKey, n);
    }

    Table table = null;
    ResultScanner scanner = null;

    try {
        table = connection.getTable(tableName);

        /**
         * Scan details:
         * In HBase, the events are stored in increasing order of timestamp, so a reverse scan is used to return the latest events first.
         * A page filter is set to limit the number of results returned.
         * The stop row is set to the entity id to avoid scanning past the current entity.
         * small is set to true to optimise RPC calls, since the scanner is created per request.
         */
        Scan scan = new Scan().setReversed(true).setFilter(new PageFilter(n)).setStopRow(Bytes.toBytes(entityId)).setCaching(n).setSmall(true);

        if (StringUtils.isEmpty(startKey)) {
            // Set the start row to entity id + max long value
            byte[] entityBytes = getKey(entityId, Long.MAX_VALUE);

            scan = scan.setStartRow(entityBytes);
        } else {
            scan = scan.setStartRow(Bytes.toBytes(startKey));
        }

        scanner = table.getScanner(scan);

        List<EntityAuditEventV2> events = new ArrayList<>();
        Result result;

        // PageFilter is applied per region server and does not guarantee exactly n results, so the limit on n is also enforced here
        while ((result = scanner.next()) != null && events.size() < n) {
            EntityAuditEventV2 event = fromKeyV2(result.getRow());

            // In case the caller passes an arbitrary start key, skip events that belong to a different entity
            if (!event.getEntityId().equals(entityId)) {
                continue;
            }

            event.setUser(getResultString(result, COLUMN_USER));
            event.setAction(EntityAuditAction.fromString(getResultString(result, COLUMN_ACTION)));
            event.setDetails(getResultString(result, COLUMN_DETAIL));

            if (persistEntityDefinition) {
                String colDef = getResultString(result, COLUMN_DEFINITION);

                if (colDef != null) {
                    event.setEntityDefinition(colDef);
                }
            }

            events.add(event);
        }

        if (LOG.isDebugEnabled()) {
            LOG.debug("Got events for entity id {}, starting timestamp {}, #records {}", entityId, startKey, events.size());
        }

        return events;
    } catch (IOException e) {
        throw new AtlasBaseException(e);
    } finally {
        try {
            close(scanner);
            close(table);
        } catch (AtlasException e) {
            throw new AtlasBaseException(e);
        }
    }
}
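For context, a caller might page through an entity's audit trail by feeding the key of the last returned event back in as the next startKey. The sketch below is illustrative only: the auditRepository and entityGuid variables, the page size, and the getEventKey() accessor are assumptions rather than the actual Atlas REST layer, and because the start row is inclusive a real caller would skip the duplicated first event of each subsequent page.

// Illustrative paging sketch; not the actual Atlas caller.
short pageSize = 25;
String startKey = null; // null/empty => start from the latest event (entity id + Long.MAX_VALUE)
List<EntityAuditEventV2> page;

do {
    page = auditRepository.listEventsV2(entityGuid, startKey, pageSize);

    for (EntityAuditEventV2 event : page) {
        LOG.debug("audit event: action={}, user={}", event.getAction(), event.getUser());
    }

    // assumed: getEventKey() exposes the row key populated when the event was read;
    // since the start row is inclusive, the next page repeats this event and it should be de-duplicated
    startKey = page.isEmpty() ? null : page.get(page.size() - 1).getEventKey();
} while (page.size() == pageSize);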
Use of org.apache.atlas.model.audit.EntityAuditEventV2 in project atlas by apache.
The class InMemoryEntityAuditRepository, method listEventsV2.
@Override
public List<EntityAuditEventV2> listEventsV2(String entityId, String startKey, short maxResults) {
    List<EntityAuditEventV2> events = new ArrayList<>();
    String myStartKey = startKey;

    if (myStartKey == null) {
        myStartKey = entityId;
    }

    SortedMap<String, EntityAuditEventV2> subMap = auditEventsV2.tailMap(myStartKey);

    for (EntityAuditEventV2 event : subMap.values()) {
        if (events.size() < maxResults && event.getEntityId().equals(entityId)) {
            events.add(event);
        }
    }

    return events;
}
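To make the tailMap behaviour concrete, the following test-style sketch stores a single event and reads it back. It assumes the no-arg EntityAuditEventV2 constructor, a setEntityId setter mirroring the getEntityId() accessor used above, the List overload of putEventsV2 seen in the listener snippet, and that the in-memory store keys events by a string prefixed with the entity id; a given Atlas version may differ.

// Test-style sketch only; requires java.util.Collections and java.util.List.
void inMemoryAuditSketch() throws AtlasBaseException {
    InMemoryEntityAuditRepository repository = new InMemoryEntityAuditRepository();

    EntityAuditEventV2 event = new EntityAuditEventV2();
    event.setEntityId("entity-guid-1"); // assumed setter, mirrors getEntityId() used above
    event.setDetails("Created entity"); // setter also used in the HBase snippet above

    repository.putEventsV2(Collections.singletonList(event));

    // With startKey == null the method starts the tailMap at the entity id itself, so every
    // stored event whose key sorts at or after the id is scanned and then filtered by entity id.
    List<EntityAuditEventV2> events = repository.listEventsV2("entity-guid-1", null, (short) 10);

    assert events.size() == 1;
}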