
Example 1 with FluxTopologyComponent

Use of org.apache.metron.integration.components.FluxTopologyComponent in project metron by apache.

From class PcapTopologyIntegrationTest, method testTopology:

public void testTopology(Function<Properties, Void> updatePropertiesCallback, SendEntries sendPcapEntriesCallback, boolean withHeaders) throws Exception {
    if (!new File(topologiesDir).exists()) {
        topologiesDir = UnitTestHelper.findDir("topologies");
    }
    targetDir = UnitTestHelper.findDir("target");
    final File outDir = getOutDir(targetDir);
    final File queryDir = getQueryDir(targetDir);
    clearOutDir(outDir);
    clearOutDir(queryDir);
    File baseDir = new File(new File(targetDir), BASE_DIR);
    // Assert.assertEquals(0, numFiles(outDir));
    Assert.assertNotNull(topologiesDir);
    Assert.assertNotNull(targetDir);
    Path pcapFile = new Path("../metron-integration-test/src/main/sample/data/SampleInput/PCAPExampleOutput");
    final List<Map.Entry<byte[], byte[]>> pcapEntries = Lists.newArrayList(readPcaps(pcapFile, withHeaders));
    Assert.assertTrue(Iterables.size(pcapEntries) > 0);
    final Properties topologyProperties = new Properties() {

        {
            setProperty("topology.workers", "1");
            setProperty("topology.worker.childopts", "");
            setProperty("spout.kafka.topic.pcap", KAFKA_TOPIC);
            setProperty("kafka.pcap.start", "EARLIEST");
            setProperty("kafka.pcap.out", outDir.getAbsolutePath());
            setProperty("kafka.pcap.numPackets", "2");
            setProperty("kafka.pcap.maxTimeMS", "200000000");
            setProperty("kafka.pcap.ts_granularity", "NANOSECONDS");
            setProperty("kafka.spout.parallelism", "1");
            setProperty("topology.auto-credentials", "[]");
            setProperty("kafka.security.protocol", "PLAINTEXT");
            setProperty("hdfs.sync.every", "1");
            setProperty("hdfs.replication.factor", "-1");
        }
    };
    updatePropertiesCallback.apply(topologyProperties);
    final ZKServerComponent zkServerComponent = getZKServerComponent(topologyProperties);
    final KafkaComponent kafkaComponent = getKafkaComponent(topologyProperties, Collections.singletonList(new KafkaComponent.Topic(KAFKA_TOPIC, 1)));
    final MRComponent mr = new MRComponent().withBasePath(baseDir.getAbsolutePath());
    FluxTopologyComponent fluxComponent = new FluxTopologyComponent.Builder().withTopologyLocation(new File(topologiesDir + "/pcap/remote.yaml")).withTopologyName("pcap").withTopologyProperties(topologyProperties).build();
    // UnitTestHelper.verboseLogging();
    ComponentRunner runner = new ComponentRunner.Builder().withComponent("mr", mr).withComponent("zk", zkServerComponent).withComponent("kafka", kafkaComponent).withComponent("storm", fluxComponent).withMaxTimeMS(-1).withMillisecondsBetweenAttempts(2000).withNumRetries(10).withCustomShutdownOrder(new String[] { "storm", "kafka", "zk", "mr" }).build();
    try {
        runner.start();
        fluxComponent.submitTopology();
        sendPcapEntriesCallback.send(kafkaComponent, pcapEntries);
        runner.process(new Processor<Void>() {

            @Override
            public ReadinessState process(ComponentRunner runner) {
                int numFiles = numFiles(outDir, mr.getConfiguration());
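                // kafka.pcap.numPackets is 2 (set above), so each rolled output file should hold two packets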
                int expectedNumFiles = pcapEntries.size() / 2;
                if (numFiles == expectedNumFiles) {
                    return ReadinessState.READY;
                } else {
                    return ReadinessState.NOT_READY;
                }
            }

            @Override
            public ProcessorResult<Void> getResult() {
                return null;
            }
        });
        PcapJob job = new PcapJob();
        {
            // Ensure that only two pcaps are returned when we look at 4 and 5
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(4, pcapEntries), getTimestamp(5, pcapEntries), 10, new HashMap<>(), new Configuration(), FileSystem.get(new Configuration()), new FixedPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertEquals(Iterables.size(results), 2);
        }
        {
            // Ensure that only two pcaps are returned when we look at 4 and 5
            // test with empty query filter
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(4, pcapEntries), getTimestamp(5, pcapEntries), 10, "", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertEquals(Iterables.size(results), 2);
        }
        {
            // ensure that none get returned since that destination IP address isn't in the dataset
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(1, pcapEntries), 10, new HashMap<String, String>() {

                {
                    put(Constants.Fields.DST_ADDR.getName(), "207.28.210.1");
                }
            }, new Configuration(), FileSystem.get(new Configuration()), new FixedPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertEquals(Iterables.size(results), 0);
        }
        {
            // ensure that none get returned since that destination IP address isn't in the dataset
            // test with query filter
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(1, pcapEntries), 10, "ip_dst_addr == '207.28.210.1'", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertEquals(Iterables.size(results), 0);
        }
        {
            // same check as the destination-address case above, but filtering on the protocol field
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(1, pcapEntries), 10, new HashMap<String, String>() {

                {
                    put(Constants.Fields.PROTOCOL.getName(), "foo");
                }
            }, new Configuration(), FileSystem.get(new Configuration()), new FixedPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertEquals(Iterables.size(results), 0);
        }
        {
            // same check as the destination-address case above, but filtering on the protocol field
            // test with query filter
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(1, pcapEntries), 10, "protocol == 'foo'", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertEquals(Iterables.size(results), 0);
        }
        {
            // make sure I get them all.
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, new HashMap<>(), new Configuration(), FileSystem.get(new Configuration()), new FixedPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertEquals(Iterables.size(results), pcapEntries.size());
        }
        {
            // make sure I get them all.
            // with query filter
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, "", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertEquals(Iterables.size(results), pcapEntries.size());
        }
        {
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, new HashMap<String, String>() {

                {
                    put(Constants.Fields.DST_PORT.getName(), "22");
                }
            }, new Configuration(), FileSystem.get(new Configuration()), new FixedPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertTrue(Iterables.size(results) > 0);
            Assert.assertEquals(Iterables.size(results), Iterables.size(filterPcaps(pcapEntries, new Predicate<JSONObject>() {

                @Override
                public boolean apply(@Nullable JSONObject input) {
                    Object prt = input.get(Constants.Fields.DST_PORT.getName());
                    return prt != null && prt.toString().equals("22");
                }
            }, withHeaders)));
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            PcapMerger.merge(baos, Iterables.partition(results, 1).iterator().next());
            Assert.assertTrue(baos.toByteArray().length > 0);
        }
        {
            // test with query filter and byte array matching
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, "BYTEARRAY_MATCHER('2f56abd814bc56420489ca38e7faf8cec3d4', packet)", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertEquals(1, Iterables.size(results));
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            PcapMerger.merge(baos, Iterables.partition(results, 1).iterator().next());
            Assert.assertTrue(baos.toByteArray().length > 0);
        }
        {
            // test with query filter
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, "ip_dst_port == 22", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertTrue(Iterables.size(results) > 0);
            Assert.assertEquals(Iterables.size(results), Iterables.size(filterPcaps(pcapEntries, new Predicate<JSONObject>() {

                @Override
                public boolean apply(@Nullable JSONObject input) {
                    Object prt = input.get(Constants.Fields.DST_PORT.getName());
                    return prt != null && (Long) prt == 22;
                }
            }, withHeaders)));
            assertInOrder(results);
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            PcapMerger.merge(baos, Iterables.partition(results, 1).iterator().next());
            Assert.assertTrue(baos.toByteArray().length > 0);
        }
        {
            // test with query filter
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, "ip_dst_port > 20 and ip_dst_port < 55792", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertTrue(Iterables.size(results) > 0);
            Assert.assertEquals(Iterables.size(results), Iterables.size(filterPcaps(pcapEntries, new Predicate<JSONObject>() {

                @Override
                public boolean apply(@Nullable JSONObject input) {
                    Object prt = input.get(Constants.Fields.DST_PORT.getName());
                    return prt != null && ((Long) prt > 20 && (Long) prt < 55792);
                }
            }, withHeaders)));
            assertInOrder(results);
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            PcapMerger.merge(baos, Iterables.partition(results, 1).iterator().next());
            Assert.assertTrue(baos.toByteArray().length > 0);
        }
        {
            // test with query filter
            Iterable<byte[]> results = job.query(new Path(outDir.getAbsolutePath()), new Path(queryDir.getAbsolutePath()), getTimestamp(0, pcapEntries), getTimestamp(pcapEntries.size() - 1, pcapEntries) + 1, 10, "ip_dst_port > 55790", new Configuration(), FileSystem.get(new Configuration()), new QueryPcapFilter.Configurator());
            assertInOrder(results);
            Assert.assertTrue(Iterables.size(results) > 0);
            Assert.assertEquals(Iterables.size(results), Iterables.size(filterPcaps(pcapEntries, new Predicate<JSONObject>() {

                @Override
                public boolean apply(@Nullable JSONObject input) {
                    Object prt = input.get(Constants.Fields.DST_PORT.getName());
                    return prt != null && (Long) prt > 55790;
                }
            }, withHeaders)));
            assertInOrder(results);
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            PcapMerger.merge(baos, Iterables.partition(results, 1).iterator().next());
            Assert.assertTrue(baos.toByteArray().length > 0);
        }
        System.out.println("Ended");
    } finally {
        runner.stop();
        clearOutDir(outDir);
        clearOutDir(queryDir);
    }
}
Also used: KafkaComponent (org.apache.metron.integration.components.KafkaComponent), Configuration (org.apache.hadoop.conf.Configuration), HashMap (java.util.HashMap), ProcessorResult (org.apache.metron.integration.ProcessorResult), MRComponent (org.apache.metron.integration.components.MRComponent), ZKServerComponent (org.apache.metron.integration.components.ZKServerComponent), Properties (java.util.Properties), FluxTopologyComponent (org.apache.metron.integration.components.FluxTopologyComponent), Predicate (com.google.common.base.Predicate), ReadinessState (org.apache.metron.integration.ReadinessState), ComponentRunner (org.apache.metron.integration.ComponentRunner), Path (org.apache.hadoop.fs.Path), ByteArrayOutputStream (java.io.ByteArrayOutputStream), JSONObject (org.json.simple.JSONObject), PcapJob (org.apache.metron.pcap.mr.PcapJob), SequenceFile (org.apache.hadoop.io.SequenceFile), File (java.io.File), Nullable (javax.annotation.Nullable)
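
The readiness check passed to runner.process(...) in Example 1 is what keeps the test deterministic: the runner keeps re-evaluating it, subject to the retry and backoff settings on its builder, until it reports READY. The sketch below isolates that pattern. It is a minimal sketch, not project code: it assumes the Processor interface lives in org.apache.metron.integration alongside ReadinessState and ProcessorResult, and the observedFiles counter is a hypothetical stand-in for re-counting HDFS output files the way Example 1 does.

import java.util.concurrent.atomic.AtomicInteger;

import org.apache.metron.integration.ComponentRunner;
import org.apache.metron.integration.Processor;        // assumed package, matching ReadinessState/ProcessorResult
import org.apache.metron.integration.ProcessorResult;
import org.apache.metron.integration.ReadinessState;

public class ReadinessSketch {

    // Returns a Processor that reports READY once a hypothetical counter reaches the expected value.
    public static Processor<Void> awaitCount(AtomicInteger observedFiles, int expectedFiles) {
        return new Processor<Void>() {

            @Override
            public ReadinessState process(ComponentRunner runner) {
                // ComponentRunner.process(...) keeps calling this until it returns READY,
                // subject to the retry/backoff settings configured on the builder.
                return observedFiles.get() >= expectedFiles ? ReadinessState.READY : ReadinessState.NOT_READY;
            }

            @Override
            public ProcessorResult<Void> getResult() {
                // Example 1 returns null here and performs its assertions after process() returns.
                return null;
            }
        };
    }
}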

Example 2 with FluxTopologyComponent

Use of org.apache.metron.integration.components.FluxTopologyComponent in project metron by apache.

From class EnrichmentIntegrationTest, method test:

@Test
public void test() throws Exception {
    final String cf = "cf";
    final String trackerHBaseTableName = "tracker";
    final String threatIntelTableName = "threat_intel";
    final String enrichmentsTableName = "enrichments";
    final Properties topologyProperties = new Properties() {

        {
            setProperty("enrichment_workers", "1");
            setProperty("enrichment_acker_executors", "0");
            setProperty("enrichment_topology_worker_childopts", "");
            setProperty("topology_auto_credentials", "[]");
            setProperty("enrichment_topology_max_spout_pending", "");
            setProperty("enrichment_kafka_start", "UNCOMMITTED_EARLIEST");
            setProperty("kafka_security_protocol", "PLAINTEXT");
            setProperty("enrichment_input_topic", Constants.ENRICHMENT_TOPIC);
            setProperty("enrichment_output_topic", Constants.INDEXING_TOPIC);
            setProperty("enrichment_error_topic", ERROR_TOPIC);
            setProperty("threatintel_error_topic", ERROR_TOPIC);
            setProperty("enrichment_join_cache_size", "1000");
            setProperty("threatintel_join_cache_size", "1000");
            setProperty("enrichment_hbase_provider_impl", "" + MockHBaseTableProvider.class.getName());
            setProperty("enrichment_hbase_table", enrichmentsTableName);
            setProperty("enrichment_hbase_cf", cf);
            setProperty("enrichment_host_known_hosts", "[{\"ip\":\"10.1.128.236\", \"local\":\"YES\", \"type\":\"webserver\", \"asset_value\" : \"important\"}," + "{\"ip\":\"10.1.128.237\", \"local\":\"UNKNOWN\", \"type\":\"unknown\", \"asset_value\" : \"important\"}," + "{\"ip\":\"10.60.10.254\", \"local\":\"YES\", \"type\":\"printer\", \"asset_value\" : \"important\"}," + "{\"ip\":\"10.0.2.15\", \"local\":\"YES\", \"type\":\"printer\", \"asset_value\" : \"important\"}]");
            setProperty("threatintel_hbase_table", threatIntelTableName);
            setProperty("threatintel_hbase_cf", cf);
            setProperty("enrichment_kafka_spout_parallelism", "1");
            setProperty("enrichment_split_parallelism", "1");
            setProperty("enrichment_stellar_parallelism", "1");
            setProperty("enrichment_join_parallelism", "1");
            setProperty("threat_intel_split_parallelism", "1");
            setProperty("threat_intel_stellar_parallelism", "1");
            setProperty("threat_intel_join_parallelism", "1");
            setProperty("kafka_writer_parallelism", "1");
        }
    };
    final ZKServerComponent zkServerComponent = getZKServerComponent(topologyProperties);
    final KafkaComponent kafkaComponent = getKafkaComponent(topologyProperties, new ArrayList<KafkaComponent.Topic>() {

        {
            add(new KafkaComponent.Topic(Constants.ENRICHMENT_TOPIC, 1));
            add(new KafkaComponent.Topic(Constants.INDEXING_TOPIC, 1));
            add(new KafkaComponent.Topic(ERROR_TOPIC, 1));
        }
    });
    String globalConfigStr = null;
    {
        File globalConfig = new File(new File(TestConstants.SAMPLE_CONFIG_PATH), "global.json");
        Map<String, Object> config = JSONUtils.INSTANCE.load(globalConfig, JSONUtils.MAP_SUPPLIER);
        config.put(SimpleHBaseEnrichmentFunctions.TABLE_PROVIDER_TYPE_CONF, MockHBaseTableProvider.class.getName());
        config.put(SimpleHBaseEnrichmentFunctions.ACCESS_TRACKER_TYPE_CONF, "PERSISTENT_BLOOM");
        config.put(PersistentBloomTrackerCreator.Config.PERSISTENT_BLOOM_TABLE, trackerHBaseTableName);
        config.put(PersistentBloomTrackerCreator.Config.PERSISTENT_BLOOM_CF, cf);
        config.put(GeoLiteDatabase.GEO_HDFS_FILE, geoHdfsFile.getAbsolutePath());
        globalConfigStr = JSONUtils.INSTANCE.toJSON(config, true);
    }
    ConfigUploadComponent configUploadComponent = new ConfigUploadComponent().withTopologyProperties(topologyProperties).withGlobalConfig(globalConfigStr).withEnrichmentConfigsPath(TestConstants.SAMPLE_CONFIG_PATH);
    // create MockHBaseTables
    final MockHTable trackerTable = (MockHTable) MockHBaseTableProvider.addToCache(trackerHBaseTableName, cf);
    final MockHTable threatIntelTable = (MockHTable) MockHBaseTableProvider.addToCache(threatIntelTableName, cf);
    EnrichmentHelper.INSTANCE.load(threatIntelTable, cf, new ArrayList<LookupKV<EnrichmentKey, EnrichmentValue>>() {

        {
            add(new LookupKV<>(new EnrichmentKey(MALICIOUS_IP_TYPE, "10.0.2.3"), new EnrichmentValue(new HashMap<>())));
        }
    });
    final MockHTable enrichmentTable = (MockHTable) MockHBaseTableProvider.addToCache(enrichmentsTableName, cf);
    EnrichmentHelper.INSTANCE.load(enrichmentTable, cf, new ArrayList<LookupKV<EnrichmentKey, EnrichmentValue>>() {

        {
            add(new LookupKV<>(new EnrichmentKey(PLAYFUL_CLASSIFICATION_TYPE, "10.0.2.3"), new EnrichmentValue(PLAYFUL_ENRICHMENT)));
        }
    });
    FluxTopologyComponent fluxComponent = new FluxTopologyComponent.Builder().withTopologyLocation(new File(fluxPath())).withTopologyName("test").withTemplateLocation(new File(templatePath)).withTopologyProperties(topologyProperties).build();
    // UnitTestHelper.verboseLogging();
    ComponentRunner runner = new ComponentRunner.Builder().withComponent("zk", zkServerComponent).withComponent("kafka", kafkaComponent).withComponent("config", configUploadComponent).withComponent("storm", fluxComponent).withMillisecondsBetweenAttempts(15000).withCustomShutdownOrder(new String[] { "storm", "config", "kafka", "zk" }).withNumRetries(10).build();
    try {
        runner.start();
        fluxComponent.submitTopology();
        kafkaComponent.writeMessages(Constants.ENRICHMENT_TOPIC, inputMessages);
        ProcessorResult<Map<String, List<Map<String, Object>>>> result = runner.process(getProcessor());
        Map<String, List<Map<String, Object>>> outputMessages = result.getResult();
        List<Map<String, Object>> docs = outputMessages.get(Constants.INDEXING_TOPIC);
        Assert.assertEquals(inputMessages.size(), docs.size());
        validateAll(docs);
        List<Map<String, Object>> errors = outputMessages.get(ERROR_TOPIC);
        Assert.assertEquals(inputMessages.size(), errors.size());
        validateErrors(errors);
    } finally {
        runner.stop();
    }
}
Also used: KafkaComponent (org.apache.metron.integration.components.KafkaComponent), HashMap (java.util.HashMap), ZKServerComponent (org.apache.metron.integration.components.ZKServerComponent), Properties (java.util.Properties), MockHTable (org.apache.metron.hbase.mock.MockHTable), FluxTopologyComponent (org.apache.metron.integration.components.FluxTopologyComponent), EnrichmentKey (org.apache.metron.enrichment.converter.EnrichmentKey), LookupKV (org.apache.metron.enrichment.lookup.LookupKV), ConfigUploadComponent (org.apache.metron.enrichment.integration.components.ConfigUploadComponent), ComponentRunner (org.apache.metron.integration.ComponentRunner), ArrayList (java.util.ArrayList), List (java.util.List), File (java.io.File), Map (java.util.Map), EnrichmentValue (org.apache.metron.enrichment.converter.EnrichmentValue), BaseIntegrationTest (org.apache.metron.integration.BaseIntegrationTest), Test (org.junit.Test)
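
Examples 1 and 2 build and run the topology component the same way, so the wiring reduces to two small helpers. This is a minimal sketch under stated assumptions, not the project's own utility code: the topology name and YAML path are placeholders, and the ZKServerComponent/KafkaComponent instances are assumed to come from the BaseIntegrationTest helpers (getZKServerComponent, getKafkaComponent) used in the examples above.

import java.io.File;
import java.util.Properties;

import org.apache.metron.integration.ComponentRunner;
import org.apache.metron.integration.components.FluxTopologyComponent;
import org.apache.metron.integration.components.KafkaComponent;
import org.apache.metron.integration.components.ZKServerComponent;

public class FluxRunnerSketch {

    // Builds the Storm component from a Flux YAML file; name and path are placeholders.
    public static FluxTopologyComponent fluxFor(File fluxYaml, Properties topologyProperties) throws Exception {
        return new FluxTopologyComponent.Builder()
                .withTopologyLocation(fluxYaml)          // e.g. topologies/pcap/remote.yaml in Example 1
                .withTopologyName("test")
                .withTopologyProperties(topologyProperties)
                .build();
    }

    // Assembles the in-memory infrastructure around an already-built Flux component.
    public static ComponentRunner runnerFor(FluxTopologyComponent storm,
                                            ZKServerComponent zk,
                                            KafkaComponent kafka) throws Exception {
        return new ComponentRunner.Builder()
                .withComponent("zk", zk)
                .withComponent("kafka", kafka)
                .withComponent("storm", storm)
                // Tear Storm down first so the topology is not left running against
                // a ZooKeeper/Kafka that has already stopped.
                .withCustomShutdownOrder(new String[] { "storm", "kafka", "zk" })
                .withMillisecondsBetweenAttempts(2000)
                .withNumRetries(10)
                .build();
    }
}

As in the try blocks above, the caller still keeps a reference to the FluxTopologyComponent so it can call submitTopology() after runner.start().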

Example 3 with FluxTopologyComponent

Use of org.apache.metron.integration.components.FluxTopologyComponent in project metron by apache.

From class IndexingIntegrationTest, method test:

@Test
public void test() throws Exception {
    preTest();
    final List<byte[]> inputMessages = TestUtils.readSampleData(sampleParsedPath);
    final Properties topologyProperties = new Properties() {

        {
            setProperty("indexing_kafka_start", "UNCOMMITTED_EARLIEST");
            setProperty("kafka_security_protocol", "PLAINTEXT");
            setProperty("topology_auto_credentials", "[]");
            setProperty("indexing_workers", "1");
            setProperty("indexing_acker_executors", "0");
            setProperty("indexing_topology_worker_childopts", "");
            setProperty("indexing_topology_max_spout_pending", "");
            setProperty("indexing_input_topic", Constants.INDEXING_TOPIC);
            setProperty("indexing_error_topic", ERROR_TOPIC);
            setProperty("indexing_kafka_spout_parallelism", "1");
            setProperty("indexing_writer_parallelism", "1");
        }
    };
    setAdditionalProperties(topologyProperties);
    final ZKServerComponent zkServerComponent = getZKServerComponent(topologyProperties);
    final KafkaComponent kafkaComponent = getKafkaComponent(topologyProperties, new ArrayList<KafkaComponent.Topic>() {

        {
            add(new KafkaComponent.Topic(Constants.INDEXING_TOPIC, 1));
            add(new KafkaComponent.Topic(ERROR_TOPIC, 1));
        }
    });
    List<Map<String, Object>> inputDocs = new ArrayList<>();
    for (byte[] b : inputMessages) {
        Map<String, Object> m = JSONUtils.INSTANCE.load(new String(b), JSONUtils.MAP_SUPPLIER);
        inputDocs.add(m);
    }
    final AtomicBoolean isLoaded = new AtomicBoolean(false);
    ConfigUploadComponent configUploadComponent = new ConfigUploadComponent().withTopologyProperties(topologyProperties).withGlobalConfigsPath(TestConstants.SAMPLE_CONFIG_PATH).withEnrichmentConfigsPath(TestConstants.SAMPLE_CONFIG_PATH).withIndexingConfigsPath(TestConstants.SAMPLE_CONFIG_PATH).withPostStartCallback(component -> {
        try {
            waitForIndex(component.getTopologyProperties().getProperty(ZKServerComponent.ZOOKEEPER_PROPERTY));
        } catch (Exception e) {
            e.printStackTrace();
        }
        isLoaded.set(true);
    });
    FluxTopologyComponent fluxComponent = new FluxTopologyComponent.Builder().withTopologyLocation(new File(getFluxPath())).withTopologyName("test").withTemplateLocation(new File(getTemplatePath())).withTopologyProperties(topologyProperties).build();
    ComponentRunner runner = null;
    InMemoryComponent searchComponent = getSearchComponent(topologyProperties);
    ComponentRunner.Builder componentBuilder = new ComponentRunner.Builder();
    componentBuilder = componentBuilder.withComponent("zk", zkServerComponent).withComponent("kafka", kafkaComponent).withComponent("config", configUploadComponent).withComponent("storm", fluxComponent).withMillisecondsBetweenAttempts(1500).withNumRetries(NUM_RETRIES).withMaxTimeMS(TOTAL_TIME_MS);
    if (searchComponent != null) {
        componentBuilder = componentBuilder.withComponent("search", getSearchComponent(topologyProperties)).withCustomShutdownOrder(new String[] { "search", "storm", "config", "kafka", "zk" });
    } else {
        componentBuilder = componentBuilder.withCustomShutdownOrder(new String[] { "storm", "config", "kafka", "zk" });
    }
    runner = componentBuilder.build();
    try {
        runner.start();
        while (!isLoaded.get()) {
            Thread.sleep(100);
        }
        fluxComponent.submitTopology();
        kafkaComponent.writeMessages(Constants.INDEXING_TOPIC, inputMessages);
        List<Map<String, Object>> docs = cleanDocs(runner.process(getProcessor(inputMessages)));
        Assert.assertEquals(docs.size(), inputMessages.size());
        // assert that our input docs are equivalent to the output docs, converting the input docs keys based
        // on the field name converter
        assertInputDocsMatchOutputs(inputDocs, docs, getFieldNameConverter());
    } finally {
        if (runner != null) {
            runner.stop();
        }
    }
}
Also used: KafkaComponent (org.apache.metron.integration.components.KafkaComponent), ZKServerComponent (org.apache.metron.integration.components.ZKServerComponent), InMemoryComponent (org.apache.metron.integration.InMemoryComponent), FluxTopologyComponent (org.apache.metron.integration.components.FluxTopologyComponent), KeeperException (org.apache.zookeeper.KeeperException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), ConfigUploadComponent (org.apache.metron.enrichment.integration.components.ConfigUploadComponent), ComponentRunner (org.apache.metron.integration.ComponentRunner), File (java.io.File), Test (org.junit.Test), BaseIntegrationTest (org.apache.metron.integration.BaseIntegrationTest)
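
Example 3 adds one wrinkle the other two do not have: the topology must not be submitted until the ConfigUploadComponent's post-start callback has finished pushing configuration, which the test enforces with an AtomicBoolean gate. Below is a minimal sketch of just that gate; the configuration path is a placeholder parameter (TestConstants.SAMPLE_CONFIG_PATH in the test), and only builder methods shown in the example are used.

import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.metron.enrichment.integration.components.ConfigUploadComponent;
import org.apache.metron.integration.components.FluxTopologyComponent;

public class ConfigGateSketch {

    // Builds a ConfigUploadComponent whose post-start callback flips the flag once configs are uploaded.
    public static ConfigUploadComponent gatedConfigComponent(Properties topologyProperties,
                                                             String configsPath,
                                                             AtomicBoolean isLoaded) {
        return new ConfigUploadComponent()
                .withTopologyProperties(topologyProperties)
                .withGlobalConfigsPath(configsPath)
                .withEnrichmentConfigsPath(configsPath)
                .withPostStartCallback(component -> isLoaded.set(true));
    }

    // Spins until the callback has run, then submits the Flux topology;
    // submitting earlier would race the configuration upload.
    public static void awaitAndSubmit(AtomicBoolean isLoaded, FluxTopologyComponent flux) throws Exception {
        while (!isLoaded.get()) {
            Thread.sleep(100);
        }
        flux.submitTopology();
    }
}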

Aggregations (usage count across the examples above):

File (java.io.File): 3
ComponentRunner (org.apache.metron.integration.ComponentRunner): 3
FluxTopologyComponent (org.apache.metron.integration.components.FluxTopologyComponent): 3
KafkaComponent (org.apache.metron.integration.components.KafkaComponent): 3
ZKServerComponent (org.apache.metron.integration.components.ZKServerComponent): 3
HashMap (java.util.HashMap): 2
Properties (java.util.Properties): 2
ConfigUploadComponent (org.apache.metron.enrichment.integration.components.ConfigUploadComponent): 2
BaseIntegrationTest (org.apache.metron.integration.BaseIntegrationTest): 2
Test (org.junit.Test): 2
Predicate (com.google.common.base.Predicate): 1
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1
ArrayList (java.util.ArrayList): 1
List (java.util.List): 1
Map (java.util.Map): 1
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 1
Nullable (javax.annotation.Nullable): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
Path (org.apache.hadoop.fs.Path): 1
SequenceFile (org.apache.hadoop.io.SequenceFile): 1