
Example 96 with ParameterizedTest

Use of org.junit.jupiter.params.ParameterizedTest in project storm by apache.

The class BlobStoreTest, method testWithAuthentication.

@ParameterizedTest
@EnumSource(value = AuthenticationTestSubject.class)
void testWithAuthentication(AuthenticationTestSubject testSubject) throws Exception {
    try (AutoCloseableBlobStoreContainer container = initHdfs("/storm/blobstore-auth-" + testSubject.name())) {
        BlobStore store = container.blobStore;
        assertStoreHasExactly(store);
        SettableBlobMeta metadata = new SettableBlobMeta(BlobStoreAclHandler.DEFAULT);
        try (AtomicOutputStream out = store.createBlob("test", metadata, testSubject.subject)) {
            assertStoreHasExactly(store, "test");
            out.write(1);
        }
        store.deleteBlob("test", testSubject.subject);
    }
}
Also used : AtomicOutputStream(org.apache.storm.blobstore.AtomicOutputStream) SettableBlobMeta(org.apache.storm.generated.SettableBlobMeta) BlobStore(org.apache.storm.blobstore.BlobStore) EnumSource(org.junit.jupiter.params.provider.EnumSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
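
The AuthenticationTestSubject enum itself is not shown on this page; all the test above implies is that each constant exposes a javax.security.auth.Subject through a subject field. A minimal sketch of that shape, with invented constant names and user names (the real definition lives in BlobStoreTest):

// Hypothetical reconstruction: only the shape (a Subject field per constant) is
// implied by the test above; the constant names and user names are invented.
import java.util.Collections;
import javax.security.auth.Subject;
import org.apache.storm.security.auth.SingleUserPrincipal;

enum AuthenticationTestSubject {
    USER_A("user-a"),
    USER_B("user-b");

    final Subject subject;

    AuthenticationTestSubject(String user) {
        // A read-only Subject carrying a single principal for the given user name.
        this.subject = new Subject(true,
            Collections.singleton(new SingleUserPrincipal(user)),
            Collections.emptySet(), Collections.emptySet());
    }
}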

Example 97 with ParameterizedTest

Use of org.junit.jupiter.params.ParameterizedTest in project storm by apache.

The class BlobStoreTest, method testWithAuthenticationDummy.

@ParameterizedTest
@ValueSource(booleans = { true, false })
void testWithAuthenticationDummy(boolean securityEnabled) throws Exception {
    try (AutoCloseableBlobStoreContainer container = initHdfs("/storm/blobstore-auth-dummy-sec-" + securityEnabled)) {
        BlobStore store = container.blobStore;
        Subject who = getSubject("test_subject");
        assertStoreHasExactly(store);
        // When security is turned on (subject != null) the blob is created with DEFAULT acls;
        // otherwise its acls are explicitly set to WORLD_EVERYTHING
        SettableBlobMeta metadata = new SettableBlobMeta(securityEnabled ? BlobStoreAclHandler.DEFAULT : BlobStoreAclHandler.WORLD_EVERYTHING);
        try (AtomicOutputStream out = store.createBlob("test", metadata, who)) {
            out.write(1);
        }
        assertStoreHasExactly(store, "test");
        if (securityEnabled) {
            // The subject is neither null nor empty, so the ACL must NOT contain
            // WORLD_EVERYTHING; it should instead contain USER_EVERYTHING so the
            // creating user keeps complete access to the blob
            assertFalse(metadata.toString().contains("AccessControl(type:OTHER, access:7)"),
                "ACL should not contain WORLD_EVERYTHING");
        } else {
            // With security off, the acls were set to WORLD_EVERYTHING above
            assertTrue(metadata.toString().contains("AccessControl(type:OTHER, access:7)"),
                "ACL should contain WORLD_EVERYTHING");
        }
        readAssertEqualsWithAuth(store, who, "test", 1);
        LOG.info("Deleting test");
        store.deleteBlob("test", who);
        assertStoreHasExactly(store);
    }
}
Also used : AtomicOutputStream(org.apache.storm.blobstore.AtomicOutputStream) SettableBlobMeta(org.apache.storm.generated.SettableBlobMeta) BlobStore(org.apache.storm.blobstore.BlobStore) Subject(javax.security.auth.Subject) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
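
readAssertEqualsWithAuth is a helper defined elsewhere in BlobStoreTest. A plausible sketch of it, assuming the standard BlobStore.getBlob(String, Subject) read path and that these tests write exactly one byte per blob (both assumptions, not the project's verbatim code):

// Sketch only: reads the blob back as the given subject and checks its single byte.
import java.io.InputStream;
import javax.security.auth.Subject;
import org.apache.storm.blobstore.BlobStore;
import static org.junit.jupiter.api.Assertions.assertEquals;

private void readAssertEqualsWithAuth(BlobStore store, Subject who, String key, int expected)
        throws Exception {
    try (InputStream in = store.getBlob(key, who)) {
        assertEquals(expected, in.read()); // the tests above write a single byte, e.g. out.write(1)
        assertEquals(-1, in.read());       // and nothing should follow it
    }
}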

Example 98 with ParameterizedTest

Use of org.junit.jupiter.params.ParameterizedTest in project storm by apache.

The class TestGenericResourceAwareStrategy, method testGenericResourceAwareStrategyWithSettingAckerExecutors.

/**
 * Test whether the scheduling logic of the GenericResourceAwareStrategy is correct
 * when {@link Config#TOPOLOGY_ACKER_EXECUTORS} is set.
 *
 * For test details, refer to {@link TestDefaultResourceAwareStrategy#testDefaultResourceAwareStrategyWithSettingAckerExecutors(int)}.
 */
@ParameterizedTest
@ValueSource(ints = { -1, 0, 2, 200 })
public void testGenericResourceAwareStrategyWithSettingAckerExecutors(int numOfAckersPerWorker) throws InvalidTopologyException {
    int spoutParallelism = 1;
    int boltParallelism = 2;
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    builder.setBolt("bolt-1", new TestBolt(), boltParallelism).shuffleGrouping("spout");
    builder.setBolt("bolt-2", new TestBolt(), boltParallelism).shuffleGrouping("bolt-1").addResource("gpu.count", 1.0);
    builder.setBolt("bolt-3", new TestBolt(), boltParallelism).shuffleGrouping("bolt-2").addResource("gpu.count", 2.0);
    String topoName = "testTopology";
    StormTopology stormTopology = builder.createTopology();
    INimbus iNimbus = new INimbusTest();
    Config conf = createGrasClusterConfig(50, 500, 0, null, Collections.emptyMap());
    Map<String, Double> genericResourcesMap = new HashMap<>();
    genericResourcesMap.put("gpu.count", 2.0);
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 200, 2000, genericResourcesMap);
    conf.put(Config.TOPOLOGY_PRIORITY, 0);
    conf.put(Config.TOPOLOGY_NAME, topoName);
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
    conf.put(Config.TOPOLOGY_SUBMITTER_USER, "user");
    conf.put(Config.TOPOLOGY_ACKER_EXECUTORS, 4);
    if (numOfAckersPerWorker == -1) {
        // Leave topology.acker.executors.per.worker unset
    } else {
        conf.put(Config.TOPOLOGY_RAS_ACKER_EXECUTORS_PER_WORKER, numOfAckersPerWorker);
    }
    int estimatedNumWorker = ServerUtils.getEstimatedWorkerCountForRasTopo(conf, stormTopology);
    Nimbus.setUpAckerExecutorConfigs(topoName, conf, conf, estimatedNumWorker);
    conf.put(Config.TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB, 250);
    conf.put(Config.TOPOLOGY_ACKER_CPU_PCORE_PERCENT, 50);
    TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(StormCommon.systemTopology(conf, stormTopology)), currentTime, "user");
    Topologies topologies = new Topologies(topo);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(conf, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    // We need to have 3 slots on 3 separate hosts. The topology needs 6 GPUs, 3500 MB of memory, and 350% CPU.
    // The bolt-3 instances must be on separate nodes because they each need 2 GPUs.
    // The bolt-2 instances must be on the same node as they each need 1 GPU
    // (this assumes that we are packing the components to avoid fragmentation).
    // The bolt-1 and spout instances fill in the rest.
    // Ordered execs: [[6, 6], [2, 2], [4, 4], [5, 5], [1, 1], [3, 3], [0, 0]]
    // Ackers: [[8, 8], [7, 7]] (+ [[9, 9], [10, 10]] when numOfAckersPerWorker=2)
    HashSet<HashSet<ExecutorDetails>> expectedScheduling = new HashSet<>();
    expectedScheduling.add(new HashSet<>(Arrays.asList(
        new ExecutorDetails(3, 3))));   // bolt-3 - 500 MB, 50% CPU, 2 GPU
    // Total 500 MB, 50% CPU, 2 GPU -> this node has 1500 MB, 150% CPU, 0 GPU left
    expectedScheduling.add(new HashSet<>(Arrays.asList(
        new ExecutorDetails(7, 7),      // acker - 250 MB, 50% CPU, 0 GPU
        new ExecutorDetails(8, 8),      // acker - 250 MB, 50% CPU, 0 GPU
        new ExecutorDetails(6, 6),      // bolt-2 - 500 MB, 50% CPU, 1 GPU
        new ExecutorDetails(2, 2))));   // bolt-1 - 500 MB, 50% CPU, 0 GPU
    // Total 1500 MB, 200% CPU, 2 GPU -> this node has 500 MB, 0% CPU, 0 GPU left
    expectedScheduling.add(new HashSet<>(Arrays.asList(
        new ExecutorDetails(9, 9),      // acker - 250 MB, 50% CPU, 0 GPU
        new ExecutorDetails(10, 10),    // acker - 250 MB, 50% CPU, 0 GPU
        new ExecutorDetails(1, 1),      // bolt-1 - 500 MB, 50% CPU, 0 GPU
        new ExecutorDetails(4, 4))));   // bolt-3 - 500 MB, 50% CPU, 2 GPU
    // Total 1500 MB, 200% CPU, 2 GPU -> this node has 500 MB, 0% CPU, 0 GPU left
    expectedScheduling.add(new HashSet<>(Arrays.asList(
        new ExecutorDetails(0, 0),      // spout - 500 MB, 50% CPU, 0 GPU
        new ExecutorDetails(5, 5))));   // bolt-2 - 500 MB, 50% CPU, 1 GPU
    // Total 1000 MB, 100% CPU, 2 GPU -> this node has 1000 MB, 100% CPU, 0 GPU left
    HashSet<HashSet<ExecutorDetails>> foundScheduling = new HashSet<>();
    SchedulerAssignment assignment = cluster.getAssignmentById("testTopology-id");
    for (Collection<ExecutorDetails> execs : assignment.getSlotToExecutors().values()) {
        foundScheduling.add(new HashSet<>(execs));
    }
    assertEquals(expectedScheduling, foundScheduling);
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) HashMap(java.util.HashMap) DaemonConfig(org.apache.storm.DaemonConfig) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) HashSet(java.util.HashSet) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
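
The closing pattern above (collapse each slot's executors into a set, then compare a set of sets so that worker identity and ordering don't matter) recurs in all of these scheduler tests. A small helper capturing the idiom, under our own name rather than anything Storm provides:

// Not a Storm API: a local helper for the comparison idiom used above.
// Slot identity is deliberately discarded; only the grouping of executors matters.
private static HashSet<HashSet<ExecutorDetails>> groupExecutorsBySlot(SchedulerAssignment assignment) {
    HashSet<HashSet<ExecutorDetails>> grouped = new HashSet<>();
    for (Collection<ExecutorDetails> execs : assignment.getSlotToExecutors().values()) {
        grouped.add(new HashSet<>(execs));
    }
    return grouped;
}

// Usage: assertEquals(expectedScheduling, groupExecutorsBySlot(cluster.getAssignmentById("testTopology-id")));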

Example 99 with ParameterizedTest

Use of org.junit.jupiter.params.ParameterizedTest in project storm by apache.

The class TestDefaultResourceAwareStrategy, method testDefaultResourceAwareStrategySharedMemory.

/**
 * Test whether shared memory is scheduled correctly with and without the
 * one-executor-per-worker restriction enabled.
 */
@ParameterizedTest
@EnumSource(WorkerRestrictionType.class)
public void testDefaultResourceAwareStrategySharedMemory(WorkerRestrictionType schedulingLimitation) {
    int spoutParallelism = 2;
    int boltParallelism = 2;
    int numBolts = 3;
    double cpuPercent = 10;
    double memoryOnHeap = 10;
    double memoryOffHeap = 10;
    double sharedOnHeapWithinWorker = 400;
    double sharedOffHeapWithinNode = 700;
    double sharedOffHeapWithinWorker = 600;
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    builder.setBolt("bolt-1", new TestBolt(), boltParallelism).addSharedMemory(new SharedOffHeapWithinWorker(sharedOffHeapWithinWorker, "bolt-1 shared off heap within worker")).shuffleGrouping("spout");
    builder.setBolt("bolt-2", new TestBolt(), boltParallelism).addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapWithinNode, "bolt-2 shared off heap within node")).shuffleGrouping("bolt-1");
    builder.setBolt("bolt-3", new TestBolt(), boltParallelism).addSharedMemory(new SharedOnHeap(sharedOnHeapWithinWorker, "bolt-3 shared on heap within worker")).shuffleGrouping("bolt-2");
    StormTopology stormTopology = builder.createTopology();
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 2000);
    Config conf = createClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null);
    conf.put(Config.TOPOLOGY_PRIORITY, 0);
    conf.put(Config.TOPOLOGY_NAME, "testTopology");
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
    switch (schedulingLimitation) {
        case WORKER_RESTRICTION_ONE_EXECUTOR:
            conf.put(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER, true);
            break;
        case WORKER_RESTRICTION_ONE_COMPONENT:
            conf.put(Config.TOPOLOGY_RAS_ONE_COMPONENT_PER_WORKER, true);
            break;
    }
    TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(stormTopology), CURRENT_TIME, "user");
    Topologies topologies = new Topologies(topo);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(conf, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    for (Entry<String, SupervisorResources> entry : cluster.getSupervisorsResourcesMap().entrySet()) {
        String supervisorId = entry.getKey();
        SupervisorResources resources = entry.getValue();
        assertTrue(supervisorId, resources.getTotalCpu() >= resources.getUsedCpu());
        assertTrue(supervisorId, resources.getTotalMem() >= resources.getUsedMem());
    }
    int totalNumberOfTasks = spoutParallelism + boltParallelism * numBolts;
    SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
    TopologyResources topologyResources = cluster.getTopologyResourcesMap().get(topo.getId());
    long numNodes = assignment.getSlotToExecutors().keySet().stream().map(WorkerSlot::getNodeId).distinct().count();
    String assignmentString = "Assignments:\n\t" + assignment.getSlotToExecutors().entrySet().stream()
        .map(x -> String.format("Node=%s, components=%s",
            x.getKey().getNodeId(),
            x.getValue().stream()
                .map(y -> topo.getComponentFromExecutor(y))
                .collect(Collectors.joining(","))))
        .collect(Collectors.joining("\n\t"));
    if (schedulingLimitation == WorkerRestrictionType.WORKER_RESTRICTION_NONE) {
        // Everything should fit in a single slot
        double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
        double totalExpectedOnHeap = (totalNumberOfTasks * memoryOnHeap) + sharedOnHeapWithinWorker;
        double totalExpectedWorkerOffHeap = (totalNumberOfTasks * memoryOffHeap) + sharedOffHeapWithinWorker;
        assertThat(assignment.getSlots().size(), is(1));
        WorkerSlot ws = assignment.getSlots().iterator().next();
        String nodeId = ws.getNodeId();
        assertThat(assignment.getNodeIdToTotalSharedOffHeapNodeMemory().size(), is(1));
        assertThat(assignment.getNodeIdToTotalSharedOffHeapNodeMemory().get(nodeId), closeTo(sharedOffHeapWithinNode, 0.01));
        assertThat(assignment.getScheduledResources().size(), is(1));
        WorkerResources resources = assignment.getScheduledResources().get(ws);
        assertThat(resources.get_cpu(), closeTo(totalExpectedCPU, 0.01));
        assertThat(resources.get_mem_on_heap(), closeTo(totalExpectedOnHeap, 0.01));
        assertThat(resources.get_mem_off_heap(), closeTo(totalExpectedWorkerOffHeap, 0.01));
        assertThat(resources.get_shared_mem_on_heap(), closeTo(sharedOnHeapWithinWorker, 0.01));
        assertThat(resources.get_shared_mem_off_heap(), closeTo(sharedOffHeapWithinWorker, 0.01));
    } else if (schedulingLimitation == WorkerRestrictionType.WORKER_RESTRICTION_ONE_EXECUTOR) {
        double expectedMemOnHeap = (totalNumberOfTasks * memoryOnHeap) + 2 * sharedOnHeapWithinWorker;
        double expectedMemOffHeap = (totalNumberOfTasks * memoryOffHeap) + 2 * sharedOffHeapWithinWorker + 2 * sharedOffHeapWithinNode;
        double expectedMemSharedOnHeap = 2 * sharedOnHeapWithinWorker;
        double expectedMemSharedOffHeap = 2 * sharedOffHeapWithinWorker + 2 * sharedOffHeapWithinNode;
        double expectedMemNonSharedOnHeap = totalNumberOfTasks * memoryOnHeap;
        double expectedMemNonSharedOffHeap = totalNumberOfTasks * memoryOffHeap;
        assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(expectedMemOnHeap, 0.01));
        assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(expectedMemOffHeap, 0.01));
        assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(expectedMemSharedOnHeap, 0.01));
        assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(expectedMemSharedOffHeap, 0.01));
        assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(expectedMemNonSharedOnHeap, 0.01));
        assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(expectedMemNonSharedOffHeap, 0.01));
        double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
        assertThat(topologyResources.getAssignedCpu(), closeTo(totalExpectedCPU, 0.01));
        int numAssignedWorkers = cluster.getAssignedNumWorkers(topo);
        assertThat(numAssignedWorkers, is(8));
        assertThat(assignment.getSlots().size(), is(8));
        assertThat(assignmentString, numNodes, is(2L));
    } else if (schedulingLimitation == WorkerRestrictionType.WORKER_RESTRICTION_ONE_COMPONENT) {
        double expectedMemOnHeap = (totalNumberOfTasks * memoryOnHeap) + sharedOnHeapWithinWorker;
        double expectedMemOffHeap = (totalNumberOfTasks * memoryOffHeap) + sharedOffHeapWithinWorker + sharedOffHeapWithinNode;
        double expectedMemSharedOnHeap = sharedOnHeapWithinWorker;
        double expectedMemSharedOffHeap = sharedOffHeapWithinWorker + sharedOffHeapWithinNode;
        double expectedMemNonSharedOnHeap = totalNumberOfTasks * memoryOnHeap;
        double expectedMemNonSharedOffHeap = totalNumberOfTasks * memoryOffHeap;
        assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(expectedMemOnHeap, 0.01));
        assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(expectedMemOffHeap, 0.01));
        assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(expectedMemSharedOnHeap, 0.01));
        assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(expectedMemSharedOffHeap, 0.01));
        assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(expectedMemNonSharedOnHeap, 0.01));
        assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(expectedMemNonSharedOffHeap, 0.01));
        double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
        assertThat(topologyResources.getAssignedCpu(), closeTo(totalExpectedCPU, 0.01));
        int numAssignedWorkers = cluster.getAssignedNumWorkers(topo);
        assertThat(numAssignedWorkers, is(4));
        assertThat(assignment.getSlots().size(), is(4));
        assertThat(numNodes, is(1L));
    }
}
Also used : Arrays(java.util.Arrays) LoggerFactory(org.slf4j.LoggerFactory) INimbus(org.apache.storm.scheduler.INimbus) SupervisorResources(org.apache.storm.scheduler.SupervisorResources) ExtendWith(org.junit.jupiter.api.extension.ExtendWith) Matchers.closeTo(org.hamcrest.Matchers.closeTo) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) WorkerSlot(org.apache.storm.scheduler.WorkerSlot) Map(java.util.Map) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) NodeSorterHostProximity(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.NodeSorterHostProximity) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) DNSToSwitchMapping(org.apache.storm.networktopography.DNSToSwitchMapping) Collection(java.util.Collection) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) Collectors(java.util.stream.Collectors) SharedOnHeap(org.apache.storm.topology.SharedOnHeap) Test(org.junit.jupiter.api.Test) WorkerResources(org.apache.storm.generated.WorkerResources) List(java.util.List) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) Entry(java.util.Map.Entry) Config(org.apache.storm.Config) Matchers.is(org.hamcrest.Matchers.is) InvalidTopologyException(org.apache.storm.generated.InvalidTopologyException) StormCommon(org.apache.storm.daemon.StormCommon) ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) IScheduler(org.apache.storm.scheduler.IScheduler) RasNode(org.apache.storm.scheduler.resource.RasNode) SharedOffHeapWithinNode(org.apache.storm.topology.SharedOffHeapWithinNode) NodeSorter(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.NodeSorter) EnumSource(org.junit.jupiter.params.provider.EnumSource) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) Topologies(org.apache.storm.scheduler.Topologies) ServerUtils(org.apache.storm.utils.ServerUtils) StormTopology(org.apache.storm.generated.StormTopology) NormalizedResourcesExtension(org.apache.storm.scheduler.resource.normalization.NormalizedResourcesExtension) LinkedList(java.util.LinkedList) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) ValueSource(org.junit.jupiter.params.provider.ValueSource) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) SharedOffHeapWithinWorker(org.apache.storm.topology.SharedOffHeapWithinWorker) INodeSorter(org.apache.storm.scheduler.resource.strategies.scheduling.sorter.INodeSorter) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) TopologyResources(org.apache.storm.daemon.nimbus.TopologyResources) Cluster(org.apache.storm.scheduler.Cluster) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) SchedulingResult(org.apache.storm.scheduler.resource.SchedulingResult) Nimbus(org.apache.storm.daemon.nimbus.Nimbus) AfterEach(org.junit.jupiter.api.AfterEach) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Assert(org.junit.Assert) Collections(java.util.Collections)
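
WorkerRestrictionType is an enum defined inside TestDefaultResourceAwareStrategy; the three constants exercised above pin down its shape, so a minimal reconstruction (comments ours) is just:

// Reconstructed from the constants referenced in the test above.
enum WorkerRestrictionType {
    WORKER_RESTRICTION_NONE,          // no restriction: everything packs into as few workers as fit
    WORKER_RESTRICTION_ONE_EXECUTOR,  // the test sets Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER
    WORKER_RESTRICTION_ONE_COMPONENT  // the test sets Config.TOPOLOGY_RAS_ONE_COMPONENT_PER_WORKER
}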

Example 100 with ParameterizedTest

Use of org.junit.jupiter.params.ParameterizedTest in project storm by apache.

The class TestDefaultResourceAwareStrategy, method testDefaultResourceAwareStrategyWithoutSettingAckerExecutors.

/**
 * Test whether the scheduling logic of the DefaultResourceAwareStrategy is correct
 * when topology.acker.executors.per.worker is set to different values.
 *
 * If {@link Config#TOPOLOGY_ACKER_EXECUTORS} is not set,
 * Nimbus calculates it as (estimated number of workers * topology.acker.executors.per.worker).
 * In this test the estimated worker count, based on the topology's resource usage, is 2.
 *
 * Meaning of the values for {@link Config#TOPOLOGY_RAS_ACKER_EXECUTORS_PER_WORKER}:
 * -1: a sentinel used only by this test; topology.acker.executors.per.worker is left unset
 *     and Nimbus defaults it to 1.
 * 0:  since {@link Config#TOPOLOGY_ACKER_EXECUTORS} is not set either, acking is disabled.
 * 1:  2 ackers in total, distributed 1 acker per worker. With the ackers added, the topology
 *     now needs 3 workers, so the first two workers each get 1 acker and the last gets none.
 * 2:  4 ackers in total; the first two workers each get 2 ackers.
 */
@ParameterizedTest
@ValueSource(ints = { -1, 0, 1, 2 })
public void testDefaultResourceAwareStrategyWithoutSettingAckerExecutors(int numOfAckersPerWorker) throws InvalidTopologyException {
    int spoutParallelism = 1;
    int boltParallelism = 2;
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("spout", new TestSpout(), spoutParallelism);
    builder.setBolt("bolt-1", new TestBolt(), boltParallelism).shuffleGrouping("spout");
    builder.setBolt("bolt-2", new TestBolt(), boltParallelism).shuffleGrouping("bolt-1");
    builder.setBolt("bolt-3", new TestBolt(), boltParallelism).shuffleGrouping("bolt-2");
    String topoName = "testTopology";
    StormTopology stormTopology = builder.createTopology();
    INimbus iNimbus = new INimbusTest();
    Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 200, 2000);
    Config conf = createClusterConfig(50, 450, 0, null);
    conf.put(Config.TOPOLOGY_PRIORITY, 0);
    conf.put(Config.TOPOLOGY_NAME, topoName);
    conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
    conf.put(Config.TOPOLOGY_SUBMITTER_USER, "user");
    // Parameterized test on different values of numOfAckersPerWorker
    if (numOfAckersPerWorker == -1) {
        // Neither Config.TOPOLOGY_ACKER_EXECUTORS nor Config.TOPOLOGY_RAS_ACKER_EXECUTORS_PER_WORKER is set;
        // they default to 2 (the estimated number of workers) and 1, respectively
    } else {
        conf.put(Config.TOPOLOGY_RAS_ACKER_EXECUTORS_PER_WORKER, numOfAckersPerWorker);
    }
    int estimatedNumWorker = ServerUtils.getEstimatedWorkerCountForRasTopo(conf, stormTopology);
    Nimbus.setUpAckerExecutorConfigs(topoName, conf, conf, estimatedNumWorker);
    conf.put(Config.TOPOLOGY_ACKER_RESOURCES_ONHEAP_MEMORY_MB, 250);
    conf.put(Config.TOPOLOGY_ACKER_CPU_PCORE_PERCENT, 50);
    TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(StormCommon.systemTopology(conf, stormTopology)), CURRENT_TIME, "user");
    Topologies topologies = new Topologies(topo);
    Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
    scheduler = new ResourceAwareScheduler();
    scheduler.prepare(conf, new StormMetricsRegistry());
    scheduler.schedule(topologies, cluster);
    // Ordered execs: [[6, 6], [2, 2], [4, 4], [5, 5], [1, 1], [3, 3], [0, 0], [8, 8], [7, 7]]
    // Ackers: [[8, 8], [7, 7]] (+ [[9, 9], [10, 10]] when numOfAckersPerWorker=2)
    HashSet<HashSet<ExecutorDetails>> expectedScheduling = new HashSet<>();
    if (numOfAckersPerWorker == -1 || numOfAckersPerWorker == 1) {
        // Setting topology.acker.executors = null and topology.acker.executors.per.worker = null
        // is equivalent to topology.acker.executors = null and topology.acker.executors.per.worker = 1
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(6, 6),      // bolt-3
            new ExecutorDetails(2, 2),      // bolt-1
            new ExecutorDetails(4, 4),      // bolt-2
            new ExecutorDetails(8, 8))));   // acker
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(5, 5),      // bolt-3
            new ExecutorDetails(1, 1),      // bolt-1
            new ExecutorDetails(3, 3),      // bolt-2
            new ExecutorDetails(7, 7))));   // acker
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(0, 0))));   // spout
    } else if (numOfAckersPerWorker == 0) {
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(6, 6),      // bolt-3
            new ExecutorDetails(2, 2),      // bolt-1
            new ExecutorDetails(4, 4),      // bolt-2
            new ExecutorDetails(5, 5))));   // bolt-3
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(0, 0),      // spout
            new ExecutorDetails(3, 3),      // bolt-2
            new ExecutorDetails(1, 1))));   // bolt-1
    } else if (numOfAckersPerWorker == 2) {
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(6, 6),      // bolt-3
            new ExecutorDetails(2, 2),      // bolt-1
            new ExecutorDetails(7, 7),      // acker
            new ExecutorDetails(8, 8))));   // acker
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(4, 4),      // bolt-2
            new ExecutorDetails(5, 5),      // bolt-3
            new ExecutorDetails(9, 9),      // acker
            new ExecutorDetails(10, 10)))); // acker
        expectedScheduling.add(new HashSet<>(Arrays.asList(
            new ExecutorDetails(1, 1),      // bolt-1
            new ExecutorDetails(3, 3),      // bolt-2
            new ExecutorDetails(0, 0))));   // spout
    }
    HashSet<HashSet<ExecutorDetails>> foundScheduling = new HashSet<>();
    SchedulerAssignment assignment = cluster.getAssignmentById("testTopology-id");
    for (Collection<ExecutorDetails> execs : assignment.getSlotToExecutors().values()) {
        foundScheduling.add(new HashSet<>(execs));
    }
    Assert.assertEquals(expectedScheduling, foundScheduling);
}
Also used : ExecutorDetails(org.apache.storm.scheduler.ExecutorDetails) TopologyBuilder(org.apache.storm.topology.TopologyBuilder) Config(org.apache.storm.Config) StormTopology(org.apache.storm.generated.StormTopology) StormMetricsRegistry(org.apache.storm.metric.StormMetricsRegistry) Cluster(org.apache.storm.scheduler.Cluster) INimbus(org.apache.storm.scheduler.INimbus) TopologyDetails(org.apache.storm.scheduler.TopologyDetails) TestUtilsForResourceAwareScheduler(org.apache.storm.scheduler.resource.TestUtilsForResourceAwareScheduler) ResourceAwareScheduler(org.apache.storm.scheduler.resource.ResourceAwareScheduler) ResourceMetrics(org.apache.storm.scheduler.resource.normalization.ResourceMetrics) SchedulerAssignment(org.apache.storm.scheduler.SchedulerAssignment) Topologies(org.apache.storm.scheduler.Topologies) SupervisorDetails(org.apache.storm.scheduler.SupervisorDetails) HashSet(java.util.HashSet) ValueSource(org.junit.jupiter.params.provider.ValueSource) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
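
As a quick check of the arithmetic described in the javadoc above (our own illustration, not code from Storm): with 2 estimated workers, the acker total Nimbus derives is simply workers * perWorker, with the unset case defaulting to 1 per worker.

// Illustrative only: mirrors the javadoc's description of how Nimbus derives
// TOPOLOGY_ACKER_EXECUTORS when it is unset (ackers = estimatedWorkers * perWorker).
static int derivedAckerCount(int estimatedWorkers, Integer ackersPerWorker) {
    int perWorker = (ackersPerWorker == null) ? 1 : ackersPerWorker; // unset -> 1 by default
    return estimatedWorkers * perWorker;
}

// derivedAckerCount(2, null) == 2   (the -1 "unset" case in the test)
// derivedAckerCount(2, 0)    == 0   (acking disabled)
// derivedAckerCount(2, 1)    == 2
// derivedAckerCount(2, 2)    == 4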

Aggregations

ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 2045 usages
MethodSource (org.junit.jupiter.params.provider.MethodSource): 1116 usages
EnumSource (org.junit.jupiter.params.provider.EnumSource): 325 usages
ValueSource (org.junit.jupiter.params.provider.ValueSource): 302 usages
ArgumentsSource (org.junit.jupiter.params.provider.ArgumentsSource): 181 usages
Transaction (org.neo4j.graphdb.Transaction): 117 usages
CsvSource (org.junit.jupiter.params.provider.CsvSource): 113 usages
ArrayList (java.util.ArrayList): 112 usages
ByteBuffer (java.nio.ByteBuffer): 81 usages
List (java.util.List): 81 usages
Path (java.nio.file.Path): 75 usages
Test (org.junit.jupiter.api.Test): 74 usages
InterruptAfter (io.aeron.test.InterruptAfter): 72 usages
IOException (java.io.IOException): 72 usages
KernelTransaction (org.neo4j.kernel.api.KernelTransaction): 67 usages
TimeUnit (java.util.concurrent.TimeUnit): 65 usages
ExtendWith (org.junit.jupiter.api.extension.ExtendWith): 62 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 61 usages
SelfSignedCertificate (io.netty.handler.ssl.util.SelfSignedCertificate): 60 usages
Stream (java.util.stream.Stream): 59 usages