Use of org.apache.metron.integration.ProcessorResult in project metron by apache.
From class ElasticsearchIndexingIntegrationTest, method getProcessor:
@Override
public Processor<List<Map<String, Object>>> getProcessor(final List<byte[]> inputMessages) {
  return new Processor<List<Map<String, Object>>>() {

    List<Map<String, Object>> docs = null;
    List<byte[]> errors = null;
    final AtomicInteger missCount = new AtomicInteger(0);

    @Override
    public ReadinessState process(ComponentRunner runner) {
      ElasticSearchComponent elasticSearchComponent = runner.getComponent("search", ElasticSearchComponent.class);
      KafkaComponent kafkaComponent = runner.getComponent("kafka", KafkaComponent.class);
      if (elasticSearchComponent.hasIndex(index)) {
        try {
          docs = elasticSearchComponent.getAllIndexedDocs(index, testSensorType + "_doc");
        } catch (IOException e) {
          throw new IllegalStateException("Unable to retrieve indexed documents.", e);
        }
        if (docs.size() < inputMessages.size()) {
          errors = kafkaComponent.readMessages(ERROR_TOPIC);
          if (errors.size() > 0 && errors.size() + docs.size() == inputMessages.size()) {
            return ReadinessState.READY;
          }
          return ReadinessState.NOT_READY;
        } else {
          return ReadinessState.READY;
        }
      } else {
        return ReadinessState.NOT_READY;
      }
    }

    @Override
    public ProcessorResult<List<Map<String, Object>>> getResult() {
      ProcessorResult.Builder<List<Map<String, Object>>> builder = new ProcessorResult.Builder<>();
      return builder.withResult(docs).withProcessErrors(errors).build();
    }
  };
}
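The snippet above is the core ProcessorResult contract: ComponentRunner polls process() until it returns READY, then collects the outcome via getResult(). Below is a minimal sketch of that contract stripped of the ElasticSearch specifics. It assumes Processor, ReadinessState, and ComponentRunner live alongside ProcessorResult in org.apache.metron.integration (only ProcessorResult's package is confirmed by this page), and fetchDocs is a hypothetical stand-in for component-specific retrieval.

import java.util.Collections;
import java.util.List;
import java.util.Map;

// Import paths for Processor, ReadinessState and ComponentRunner are assumed
// to match ProcessorResult's package, org.apache.metron.integration.
import org.apache.metron.integration.ComponentRunner;
import org.apache.metron.integration.Processor;
import org.apache.metron.integration.ProcessorResult;
import org.apache.metron.integration.ReadinessState;

public class ReadinessSketch {

  // Returns a processor that is READY once at least `expected` documents
  // have been observed; mirrors the shape of the anonymous class above.
  public static Processor<List<Map<String, Object>>> countingProcessor(final int expected) {
    return new Processor<List<Map<String, Object>>>() {

      List<Map<String, Object>> docs = null;

      @Override
      public ReadinessState process(ComponentRunner runner) {
        // ComponentRunner.process(...) calls this repeatedly until READY.
        docs = fetchDocs(runner);
        return docs.size() >= expected ? ReadinessState.READY : ReadinessState.NOT_READY;
      }

      @Override
      public ProcessorResult<List<Map<String, Object>>> getResult() {
        // Invoked once process(...) has reported READY.
        return new ProcessorResult.Builder<List<Map<String, Object>>>()
            .withResult(docs)
            .withProcessErrors(null)
            .build();
      }
    };
  }

  // Hypothetical stand-in for component-specific retrieval, e.g. reading
  // indexed documents back from a "search" component.
  private static List<Map<String, Object>> fetchDocs(ComponentRunner runner) {
    return Collections.emptyList();
  }
}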
Use of org.apache.metron.integration.ProcessorResult in project metron by apache.
From class PcapTopologyIntegrationTest, method testTopology:
public void testTopology(Function<Properties, Void> updatePropertiesCallback, SendEntries sendPcapEntriesCallback, boolean withHeaders) throws Exception {
  if (!new File(topologiesDir).exists()) {
    topologiesDir = UnitTestHelper.findDir("topologies");
  }
  targetDir = UnitTestHelper.findDir("target");
  final File outDir = getOutDir(targetDir);
  final File queryDir = getQueryDir(targetDir);
  clearOutDir(outDir);
  clearOutDir(queryDir);
  File baseDir = new File(new File(targetDir), BASE_DIR);
  // Assert.assertEquals(0, numFiles(outDir));
  Assert.assertNotNull(topologiesDir);
  Assert.assertNotNull(targetDir);
  Path pcapFile = new Path("../metron-integration-test/src/main/sample/data/SampleInput/PCAPExampleOutput");
  final List<Map.Entry<byte[], byte[]>> pcapEntries = Lists.newArrayList(readPcaps(pcapFile, withHeaders));
  Assert.assertTrue(Iterables.size(pcapEntries) > 0);
  final Properties topologyProperties = new Properties() {
    {
      setProperty("topology.workers", "1");
      setProperty("topology.worker.childopts", "");
      setProperty("spout.kafka.topic.pcap", KAFKA_TOPIC);
      setProperty("kafka.pcap.start", "EARLIEST");
      setProperty("kafka.pcap.out", outDir.getAbsolutePath());
      setProperty("kafka.pcap.numPackets", "2");
      setProperty("kafka.pcap.maxTimeMS", "200000000");
      setProperty("kafka.pcap.ts_granularity", "NANOSECONDS");
      setProperty("kafka.spout.parallelism", "1");
      setProperty("topology.auto-credentials", "[]");
      setProperty("kafka.security.protocol", "PLAINTEXT");
      setProperty("hdfs.sync.every", "1");
      setProperty("hdfs.replication.factor", "-1");
    }
  };
  updatePropertiesCallback.apply(topologyProperties);
  final ZKServerComponent zkServerComponent = getZKServerComponent(topologyProperties);
  final KafkaComponent kafkaComponent = getKafkaComponent(topologyProperties, Collections.singletonList(new KafkaComponent.Topic(KAFKA_TOPIC, 1)));
  final MRComponent mr = new MRComponent().withBasePath(baseDir.getAbsolutePath());
  FluxTopologyComponent fluxComponent = new FluxTopologyComponent.Builder().withTopologyLocation(new File(topologiesDir + "/pcap/remote.yaml")).withTopologyName("pcap").withTopologyProperties(topologyProperties).build();
  // UnitTestHelper.verboseLogging();
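  // The runner below wires the components together and owns their lifecycle:
  // readiness is polled up to 10 times, 2000 ms apart, withMaxTimeMS(-1)
  // presumably disabling any overall deadline, and shutdown proceeds in the
  // explicit order storm -> kafka -> zk -> mr.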
  ComponentRunner runner = new ComponentRunner.Builder()
      .withComponent("mr", mr)
      .withComponent("zk", zkServerComponent)
      .withComponent("kafka", kafkaComponent)
      .withComponent("storm", fluxComponent)
      .withMaxTimeMS(-1)
      .withMillisecondsBetweenAttempts(2000)
      .withNumRetries(10)
      .withCustomShutdownOrder(new String[] { "storm", "kafka", "zk", "mr" })
      .build();
  try {
    runner.start();
    fluxComponent.submitTopology();
    sendPcapEntriesCallback.send(kafkaComponent, pcapEntries);
    runner.process(new Processor<Void>() {

      @Override
      public ReadinessState process(ComponentRunner runner) {
        int numFiles = numFiles(outDir, mr.getConfiguration());
        int expectedNumFiles = pcapEntries.size() / 2;
        if (numFiles == expectedNumFiles) {
          return ReadinessState.READY;
        } else {
          return ReadinessState.NOT_READY;
        }
      }

      @Override
      public ProcessorResult<Void> getResult() {
        return null;
      }
    });
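    // From here on, the topology's HDFS output is read back in several
    // passes: each block below runs one PcapJob query and checks both the
    // ordering of the returned packets and the expected hit count.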
    PcapJob job = new PcapJob();
    {
      // Ensure that only two pcaps are returned when we look at 4 and 5
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(4, pcapEntries), getTimestamp(5, pcapEntries), 10, new HashMap<>(), new Configuration(), FileSystem.get(new Configuration()), new FixedPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertEquals(Iterables.size(results), 2);
    }
    {
      // Ensure that only two pcaps are returned when we look at 4 and 5,
      // this time with an empty query filter
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(4, pcapEntries), getTimestamp(5, pcapEntries), 10, "", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertEquals(Iterables.size(results), 2);
    }
    {
      // Ensure that none are returned, since that destination IP address isn't in the dataset
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(1, pcapEntries), 10, new HashMap<String, String>() {
        {
          put(Constants.Fields.DST_ADDR.getName(), "207.28.210.1");
        }
      }, new Configuration(), FileSystem.get(new Configuration()), new FixedPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertEquals(Iterables.size(results), 0);
    }
    {
      // Ensure that none are returned, since that destination IP address isn't in the dataset;
      // this time with a query filter
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(1, pcapEntries), 10, "ip_dst_addr == '207.28.210.1'", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertEquals(Iterables.size(results), 0);
    }
    {
      // Same as the destination-address case above, but keyed on protocol
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(1, pcapEntries), 10, new HashMap<String, String>() {
        {
          put(Constants.Fields.PROTOCOL.getName(), "foo");
        }
      }, new Configuration(), FileSystem.get(new Configuration()), new FixedPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertEquals(Iterables.size(results), 0);
    }
    {
      // Same as the destination-address case above, but keyed on protocol;
      // this time with a query filter
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(1, pcapEntries), 10, "protocol == 'foo'", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertEquals(Iterables.size(results), 0);
    }
    {
      // Make sure we get them all.
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, new HashMap<>(), new Configuration(), FileSystem.get(new Configuration()), new FixedPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertEquals(Iterables.size(results), pcapEntries.size());
    }
    {
      // Make sure we get them all, this time with a query filter.
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, "", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertEquals(Iterables.size(results), pcapEntries.size());
    }
    {
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, new HashMap<String, String>() {
        {
          put(Constants.Fields.DST_PORT.getName(), "22");
        }
      }, new Configuration(), FileSystem.get(new Configuration()), new FixedPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertTrue(Iterables.size(results) > 0);
      Assert.assertEquals(Iterables.size(results), Iterables.size(filterPcaps(pcapEntries, new Predicate<JSONObject>() {
        @Override
        public boolean apply(@Nullable JSONObject input) {
          Object prt = input.get(Constants.Fields.DST_PORT.getName());
          return prt != null && prt.toString().equals("22");
        }
      }, withHeaders)));
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      PcapMerger.merge(baos, Iterables.partition(results, 1).iterator().next());
      Assert.assertTrue(baos.toByteArray().length > 0);
    }
    {
      // test with query filter and byte array matching
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, "BYTEARRAY_MATCHER('2f56abd814bc56420489ca38e7faf8cec3d4', packet)", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertEquals(1, Iterables.size(results));
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      PcapMerger.merge(baos, Iterables.partition(results, 1).iterator().next());
      Assert.assertTrue(baos.toByteArray().length > 0);
    }
    {
      // test with query filter
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, "ip_dst_port == 22", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertTrue(Iterables.size(results) > 0);
      Assert.assertEquals(Iterables.size(results), Iterables.size(filterPcaps(pcapEntries, new Predicate<JSONObject>() {
        @Override
        public boolean apply(@Nullable JSONObject input) {
          Object prt = input.get(Constants.Fields.DST_PORT.getName());
          return prt != null && (Long) prt == 22;
        }
      }, withHeaders)));
      assertInOrder(results);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      PcapMerger.merge(baos, Iterables.partition(results, 1).iterator().next());
      Assert.assertTrue(baos.toByteArray().length > 0);
    }
    {
      // test with query filter
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, "ip_dst_port > 20 and ip_dst_port < 55792", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertTrue(Iterables.size(results) > 0);
      Assert.assertEquals(Iterables.size(results), Iterables.size(filterPcaps(pcapEntries, new Predicate<JSONObject>() {
        @Override
        public boolean apply(@Nullable JSONObject input) {
          Object prt = input.get(Constants.Fields.DST_PORT.getName());
          return prt != null && ((Long) prt > 20 && (Long) prt < 55792);
        }
      }, withHeaders)));
      assertInOrder(results);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      PcapMerger.merge(baos, Iterables.partition(results, 1).iterator().next());
      Assert.assertTrue(baos.toByteArray().length > 0);
    }
    {
      // test with query filter
      Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, "ip_dst_port > 55790", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
      assertInOrder(results);
      Assert.assertTrue(Iterables.size(results) > 0);
      Assert.assertEquals(Iterables.size(results), Iterables.size(filterPcaps(pcapEntries, new Predicate<JSONObject>() {
        @Override
        public boolean apply(@Nullable JSONObject input) {
          Object prt = input.get(Constants.Fields.DST_PORT.getName());
          return prt != null && (Long) prt > 55790;
        }
      }, withHeaders)));
      assertInOrder(results);
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      PcapMerger.merge(baos, Iterables.partition(results, 1).iterator().next());
      Assert.assertTrue(baos.toByteArray().length > 0);
    }
    System.out.println("Ended");
  } finally {
    runner.stop();
    clearOutDir(outDir);
    clearOutDir(queryDir);
  }
}
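Every query block above repeats the same plumbing and varies only the time window and the filter. Below is a hedged refactoring sketch of that plumbing: the helper itself and its parameter names are hypothetical, the PcapJob.query argument list is inferred from the calls above (including the constant 10, whose meaning is not shown in this snippet), and the imports are the same ones the test already uses.

// Hypothetical helper wrapping the PcapJob.query boilerplate repeated above;
// it mirrors the fixed-filter calls in the test.
private static Iterable<byte[]> queryFixed(PcapJob job,
                                           File outDir,
                                           File queryDir,
                                           long beginTs,
                                           long endTs,
                                           Map<String, String> fields) throws Exception {
  Configuration conf = new Configuration();
  return job.query(new Path(outDir.getAbsolutePath()),
                   new Path(queryDir.getAbsolutePath()),
                   beginTs,
                   endTs,
                   10, // same constant the test passes on every call
                   fields,
                   conf,
                   FileSystem.get(conf),
                   new FixedPcapFilter.Configurator());
}

With a helper like this, each fixed-filter block reduces to one call plus its assertions, which makes the differences between the probes (time window and field map) easier to see.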
Use of org.apache.metron.integration.ProcessorResult in project metron by apache.
From class SolrIndexingIntegrationTest, method getProcessor:
@Override
public Processor<List<Map<String, Object>>> getProcessor(final List<byte[]> inputMessages) {
  return new Processor<List<Map<String, Object>>>() {

    List<Map<String, Object>> docs = null;
    List<byte[]> errors = null;

    @Override
    public ReadinessState process(ComponentRunner runner) {
      SolrComponent solrComponent = runner.getComponent("search", SolrComponent.class);
      KafkaComponent kafkaComponent = runner.getComponent("kafka", KafkaComponent.class);
      if (solrComponent.hasCollection(collection)) {
        docs = solrComponent.getAllIndexedDocs(collection);
        if (docs.size() < inputMessages.size()) {
          errors = kafkaComponent.readMessages(ERROR_TOPIC);
          if (errors.size() > 0 && errors.size() + docs.size() == inputMessages.size()) {
            return ReadinessState.READY;
          }
          return ReadinessState.NOT_READY;
        } else {
          return ReadinessState.READY;
        }
      } else {
        return ReadinessState.NOT_READY;
      }
    }

    @Override
    public ProcessorResult<List<Map<String, Object>>> getResult() {
      ProcessorResult.Builder<List<Map<String, Object>>> builder = new ProcessorResult.Builder<>();
      return builder.withResult(docs).withProcessErrors(errors).build();
    }
  };
}
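The ElasticSearch and Solr processors implement the same readiness rule: the run is complete once every input message is accounted for, either as an indexed document or as a message on the error topic. Below is a sketch of that rule factored into a standalone helper; the helper and its name are hypothetical, and ReadinessState is the enum used throughout these snippets.

import java.util.List;
import java.util.Map;

// Hypothetical helper capturing the readiness rule shared by the two
// indexing processors above.
final class IndexingReadiness {

  static ReadinessState check(List<Map<String, Object>> docs,
                              List<byte[]> errors,
                              int expectedInputs) {
    if (docs.size() >= expectedInputs) {
      return ReadinessState.READY; // everything was indexed
    }
    if (errors != null && !errors.isEmpty()
        && errors.size() + docs.size() == expectedInputs) {
      return ReadinessState.READY; // the shortfall is fully explained by errors
    }
    return ReadinessState.NOT_READY; // keep polling
  }
}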