Use of org.junit.jupiter.params.provider.EnumSource in project storm by apache.
The class TestDefaultResourceAwareStrategy, method testDefaultResourceAwareStrategySharedMemory.
/**
 * Test that shared memory is scheduled correctly, with and without oneExecutorPerWorker enabled.
 */
@ParameterizedTest
@EnumSource(WorkerRestrictionType.class)
public void testDefaultResourceAwareStrategySharedMemory(WorkerRestrictionType schedulingLimitation) {
int spoutParallelism = 2;
int boltParallelism = 2;
int numBolts = 3;
double cpuPercent = 10;
double memoryOnHeap = 10;
double memoryOffHeap = 10;
double sharedOnHeapWithinWorker = 400;
double sharedOffHeapWithinNode = 700;
double sharedOffHeapWithinWorker = 600;
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("spout", new TestSpout(), spoutParallelism);
builder.setBolt("bolt-1", new TestBolt(), boltParallelism).addSharedMemory(new SharedOffHeapWithinWorker(sharedOffHeapWithinWorker, "bolt-1 shared off heap within worker")).shuffleGrouping("spout");
builder.setBolt("bolt-2", new TestBolt(), boltParallelism).addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapWithinNode, "bolt-2 shared off heap within node")).shuffleGrouping("bolt-1");
builder.setBolt("bolt-3", new TestBolt(), boltParallelism).addSharedMemory(new SharedOnHeap(sharedOnHeapWithinWorker, "bolt-3 shared on heap within worker")).shuffleGrouping("bolt-2");
StormTopology stormTopology = builder.createTopology();
INimbus iNimbus = new INimbusTest();
Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 2000);
Config conf = createClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null);
conf.put(Config.TOPOLOGY_PRIORITY, 0);
conf.put(Config.TOPOLOGY_NAME, "testTopology");
conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
switch(schedulingLimitation) {
case WORKER_RESTRICTION_ONE_EXECUTOR:
conf.put(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER, true);
break;
case WORKER_RESTRICTION_ONE_COMPONENT:
conf.put(Config.TOPOLOGY_RAS_ONE_COMPONENT_PER_WORKER, true);
break;
}
TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(stormTopology), CURRENT_TIME, "user");
Topologies topologies = new Topologies(topo);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
scheduler = new ResourceAwareScheduler();
scheduler.prepare(conf, new StormMetricsRegistry());
scheduler.schedule(topologies, cluster);
for (Entry<String, SupervisorResources> entry : cluster.getSupervisorsResourcesMap().entrySet()) {
String supervisorId = entry.getKey();
SupervisorResources resources = entry.getValue();
assertTrue(resources.getTotalCpu() >= resources.getUsedCpu(), supervisorId);
assertTrue(resources.getTotalMem() >= resources.getUsedMem(), supervisorId);
}
int totalNumberOfTasks = spoutParallelism + boltParallelism * numBolts;
SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
TopologyResources topologyResources = cluster.getTopologyResourcesMap().get(topo.getId());
long numNodes = assignment.getSlotToExecutors().keySet().stream().map(WorkerSlot::getNodeId).distinct().count();
String assignmentString = "Assignments:\n\t" + assignment.getSlotToExecutors().entrySet().stream()
        .map(x -> String.format("Node=%s, components=%s",
                x.getKey().getNodeId(),
                x.getValue().stream().map(topo::getComponentFromExecutor).collect(Collectors.joining(","))))
        .collect(Collectors.joining("\n\t"));
if (schedulingLimitation == WorkerRestrictionType.WORKER_RESTRICTION_NONE) {
// Everything should fit in a single slot
double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
double totalExpectedOnHeap = (totalNumberOfTasks * memoryOnHeap) + sharedOnHeapWithinWorker;
double totalExpectedWorkerOffHeap = (totalNumberOfTasks * memoryOffHeap) + sharedOffHeapWithinWorker;
assertThat(assignment.getSlots().size(), is(1));
WorkerSlot ws = assignment.getSlots().iterator().next();
String nodeId = ws.getNodeId();
assertThat(assignment.getNodeIdToTotalSharedOffHeapNodeMemory().size(), is(1));
assertThat(assignment.getNodeIdToTotalSharedOffHeapNodeMemory().get(nodeId), closeTo(sharedOffHeapWithinNode, 0.01));
assertThat(assignment.getScheduledResources().size(), is(1));
WorkerResources resources = assignment.getScheduledResources().get(ws);
assertThat(resources.get_cpu(), closeTo(totalExpectedCPU, 0.01));
assertThat(resources.get_mem_on_heap(), closeTo(totalExpectedOnHeap, 0.01));
assertThat(resources.get_mem_off_heap(), closeTo(totalExpectedWorkerOffHeap, 0.01));
assertThat(resources.get_shared_mem_on_heap(), closeTo(sharedOnHeapWithinWorker, 0.01));
assertThat(resources.get_shared_mem_off_heap(), closeTo(sharedOffHeapWithinWorker, 0.01));
} else if (schedulingLimitation == WorkerRestrictionType.WORKER_RESTRICTION_ONE_EXECUTOR) {
double expectedMemOnHeap = (totalNumberOfTasks * memoryOnHeap) + 2 * sharedOnHeapWithinWorker;
double expectedMemOffHeap = (totalNumberOfTasks * memoryOffHeap) + 2 * sharedOffHeapWithinWorker + 2 * sharedOffHeapWithinNode;
double expectedMemSharedOnHeap = 2 * sharedOnHeapWithinWorker;
double expectedMemSharedOffHeap = 2 * sharedOffHeapWithinWorker + 2 * sharedOffHeapWithinNode;
double expectedMemNonSharedOnHeap = totalNumberOfTasks * memoryOnHeap;
double expectedMemNonSharedOffHeap = totalNumberOfTasks * memoryOffHeap;
assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(expectedMemOnHeap, 0.01));
assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(expectedMemOffHeap, 0.01));
assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(expectedMemSharedOnHeap, 0.01));
assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(expectedMemSharedOffHeap, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(expectedMemNonSharedOnHeap, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(expectedMemNonSharedOffHeap, 0.01));
double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
assertThat(topologyResources.getAssignedCpu(), closeTo(totalExpectedCPU, 0.01));
int numAssignedWorkers = cluster.getAssignedNumWorkers(topo);
assertThat(numAssignedWorkers, is(8));
assertThat(assignment.getSlots().size(), is(8));
assertThat(assignmentString, numNodes, is(2L));
} else if (schedulingLimitation == WorkerRestrictionType.WORKER_RESTRICTION_ONE_COMPONENT) {
double expectedMemOnHeap = (totalNumberOfTasks * memoryOnHeap) + sharedOnHeapWithinWorker;
double expectedMemOffHeap = (totalNumberOfTasks * memoryOffHeap) + sharedOffHeapWithinWorker + sharedOffHeapWithinNode;
double expectedMemSharedOnHeap = sharedOnHeapWithinWorker;
double expectedMemSharedOffHeap = sharedOffHeapWithinWorker + sharedOffHeapWithinNode;
double expectedMemNonSharedOnHeap = totalNumberOfTasks * memoryOnHeap;
double expectedMemNonSharedOffHeap = totalNumberOfTasks * memoryOffHeap;
assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(expectedMemOnHeap, 0.01));
assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(expectedMemOffHeap, 0.01));
assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(expectedMemSharedOnHeap, 0.01));
assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(expectedMemSharedOffHeap, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(expectedMemNonSharedOnHeap, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(expectedMemNonSharedOffHeap, 0.01));
double totalExpectedCPU = totalNumberOfTasks * cpuPercent;
assertThat(topologyResources.getAssignedCpu(), closeTo(totalExpectedCPU, 0.01));
int numAssignedWorkers = cluster.getAssignedNumWorkers(topo);
assertThat(numAssignedWorkers, is(4));
assertThat(assignment.getSlots().size(), is(4));
assertThat(numNodes, is(1L));
}
}
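The WorkerRestrictionType enum that drives this parameterized test is defined elsewhere in Storm's test sources and is not part of this excerpt. Judging from the three constants referenced above, it is presumably a plain enum along these lines (a sketch, not the verbatim Storm definition):

// Hypothetical reconstruction of the enum handed to @EnumSource; the real
// definition lives in Storm's test code and may differ in ordering or members.
enum WorkerRestrictionType {
    WORKER_RESTRICTION_NONE,          // no restriction: RAS packs executors freely
    WORKER_RESTRICTION_ONE_EXECUTOR,  // maps to Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER
    WORKER_RESTRICTION_ONE_COMPONENT  // maps to Config.TOPOLOGY_RAS_ONE_COMPONENT_PER_WORKER
}

With @EnumSource(WorkerRestrictionType.class), JUnit invokes the test once per constant, which is why the method body branches on schedulingLimitation to select the matching expectations.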
Use of org.junit.jupiter.params.provider.EnumSource in project storm by apache.
The class TestDefaultResourceAwareStrategy, method testMultipleSharedMemoryWithOneExecutorPerWorker.
/**
 * Test assigned memory for each shared memory type with oneExecutorPerWorker enabled.
 */
@ParameterizedTest
@EnumSource(SharedMemoryType.class)
public void testMultipleSharedMemoryWithOneExecutorPerWorker(SharedMemoryType memoryType) {
int spoutParallelism = 4;
double cpuPercent = 10;
double memoryOnHeap = 10;
double memoryOffHeap = 10;
double sharedOnHeapWithinWorker = 450;
double sharedOffHeapWithinNode = 600;
double sharedOffHeapWithinWorker = 400;
TopologyBuilder builder = new TopologyBuilder();
switch(memoryType) {
case SHARED_OFF_HEAP_NODE:
builder.setSpout("spout", new TestSpout(), spoutParallelism)
        .addSharedMemory(new SharedOffHeapWithinNode(sharedOffHeapWithinNode, "spout shared off heap within node"));
break;
case SHARED_OFF_HEAP_WORKER:
builder.setSpout("spout", new TestSpout(), spoutParallelism)
        .addSharedMemory(new SharedOffHeapWithinWorker(sharedOffHeapWithinWorker, "spout shared off heap within worker"));
break;
case SHARED_ON_HEAP_WORKER:
builder.setSpout("spout", new TestSpout(), spoutParallelism)
        .addSharedMemory(new SharedOnHeap(sharedOnHeapWithinWorker, "spout shared on heap within worker"));
break;
}
StormTopology stormTopology = builder.createTopology();
INimbus iNimbus = new INimbusTest();
Map<String, SupervisorDetails> supMap = genSupervisors(4, 4, 500, 1000);
Config conf = createClusterConfig(cpuPercent, memoryOnHeap, memoryOffHeap, null);
conf.put(Config.TOPOLOGY_PRIORITY, 0);
conf.put(Config.TOPOLOGY_NAME, "testTopology");
conf.put(Config.TOPOLOGY_WORKER_MAX_HEAP_SIZE_MB, 2000);
conf.put(Config.TOPOLOGY_RAS_ONE_EXECUTOR_PER_WORKER, true);
TopologyDetails topo = new TopologyDetails("testTopology-id", conf, stormTopology, 0, genExecsAndComps(stormTopology), CURRENT_TIME, "user");
Topologies topologies = new Topologies(topo);
Cluster cluster = new Cluster(iNimbus, new ResourceMetrics(new StormMetricsRegistry()), supMap, new HashMap<>(), topologies, conf);
scheduler = new ResourceAwareScheduler();
scheduler.prepare(conf, new StormMetricsRegistry());
scheduler.schedule(topologies, cluster);
TopologyResources topologyResources = cluster.getTopologyResourcesMap().get(topo.getId());
SchedulerAssignment assignment = cluster.getAssignmentById(topo.getId());
long numNodes = assignment.getSlotToExecutors().keySet().stream().map(WorkerSlot::getNodeId).distinct().count();
switch(memoryType) {
case SHARED_OFF_HEAP_NODE:
// 4 workers on a single node; OffHeapNode memory is shared once per node
assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap + sharedOffHeapWithinNode, 0.01));
assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(0, 0.01));
assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(sharedOffHeapWithinNode, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
assertThat(numNodes, is(1L));
assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
break;
case SHARED_OFF_HEAP_WORKER:
// 4 workers on 2 nodes; OffHeapWorker memory is not shared -- consumed 4x, once per worker
assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * (memoryOffHeap + sharedOffHeapWithinWorker), 0.01));
assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(0, 0.01));
assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(spoutParallelism * sharedOffHeapWithinWorker, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
assertThat(numNodes, is(2L));
assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
break;
case SHARED_ON_HEAP_WORKER:
// 4 workers on 2 nodes; onHeap memory is not shared -- consumed 4x, once per worker
assertThat(topologyResources.getAssignedMemOnHeap(), closeTo(spoutParallelism * (memoryOnHeap + sharedOnHeapWithinWorker), 0.01));
assertThat(topologyResources.getAssignedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
assertThat(topologyResources.getAssignedSharedMemOnHeap(), closeTo(spoutParallelism * sharedOnHeapWithinWorker, 0.01));
assertThat(topologyResources.getAssignedSharedMemOffHeap(), closeTo(0, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOnHeap(), closeTo(spoutParallelism * memoryOnHeap, 0.01));
assertThat(topologyResources.getAssignedNonSharedMemOffHeap(), closeTo(spoutParallelism * memoryOffHeap, 0.01));
assertThat(numNodes, is(2L));
assertThat(cluster.getAssignedNumWorkers(topo), is(spoutParallelism));
break;
}
}
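As above, the SharedMemoryType enum is not shown in this excerpt; from the switch cases it presumably enumerates the three shared-memory request types (a sketch, with names taken from the code above):

// Hypothetical reconstruction of the enum handed to @EnumSource.
enum SharedMemoryType {
    SHARED_OFF_HEAP_NODE,    // SharedOffHeapWithinNode: one allocation per node
    SHARED_OFF_HEAP_WORKER,  // SharedOffHeapWithinWorker: one allocation per worker
    SHARED_ON_HEAP_WORKER    // SharedOnHeap: one on-heap allocation per worker
}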
Use of org.junit.jupiter.params.provider.EnumSource in project neo4j by neo4j.
The class IndexCleanupIT, method mustClearIndexDirectoryOnDropWhileOnline.
@ParameterizedTest
@EnumSource(SchemaIndex.class)
void mustClearIndexDirectoryOnDropWhileOnline(SchemaIndex schemaIndex) throws IOException {
configureDb(schemaIndex);
createIndex(db, true);
Path[] providerDirectories = providerDirectories(fs, db);
for (Path providerDirectory : providerDirectories) {
assertTrue(fs.listFiles(providerDirectory).length > 0, "expected there to be at least one index per existing provider map");
}
dropAllIndexes();
assertNoIndexFilesExisting(providerDirectories);
}
Use of org.junit.jupiter.params.provider.EnumSource in project neo4j by neo4j.
The class ReadAheadChannelTest, method checksumIsCalculatedCorrectlyOverBuffersLargerThanReadAheadSize.
@ParameterizedTest
@EnumSource(Constructors.class)
void checksumIsCalculatedCorrectlyOverBuffersLargerThanReadAheadSize(Constructor constructor) throws Exception {
// given
Checksum checksum = CHECKSUM_FACTORY.get();
int checksumValue;
Path file = Path.of("foo.1");
int testSize = 100;
try (StoreChannel storeChannel = fileSystem.write(file)) {
ByteBuffer buffer = ByteBuffers.allocate(testSize + 4, INSTANCE);
for (int i = 0; i < testSize; i++) {
buffer.put((byte) i);
checksum.update(i);
}
checksumValue = (int) checksum.getValue();
buffer.putInt(checksumValue);
buffer.flip();
storeChannel.writeAll(buffer);
storeChannel.force(false);
}
ReadAheadChannel<StoreChannel> bufferedReader = constructor.apply(fileSystem.read(file), testSize / 2);
byte[] in = new byte[testSize];
bufferedReader.get(in, testSize);
for (int i = 0; i < testSize; i++) {
assertEquals(i, in[i]);
}
assertEquals(checksumValue, bufferedReader.endChecksumAndValidate());
}
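Note the pattern here: Constructors is an enum whose constants implement the Constructor functional interface, so @EnumSource can hand a different ReadAheadChannel construction strategy to each test invocation. A minimal self-contained illustration of that enum-as-strategy pattern (names are illustrative, not Neo4j's):

import static org.junit.jupiter.api.Assertions.assertEquals;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

class EnumAsStrategyTest {
    interface Doubler {
        int apply(int x);
    }

    // Each constant carries its own implementation of the interface.
    enum Doublers implements Doubler {
        SHIFT { public int apply(int x) { return x << 1; } },
        ADD   { public int apply(int x) { return x + x; } }
    }

    // Runs once per constant; the parameter is typed as the interface.
    @ParameterizedTest
    @EnumSource(Doublers.class)
    void allStrategiesAgree(Doubler doubler) {
        assertEquals(10, doubler.apply(5));
    }
}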
Use of org.junit.jupiter.params.provider.EnumSource in project neo4j by neo4j.
The class ReadAheadChannelTest, method shouldThrowExceptionForReadAfterEOFIfNotEnoughBytesExist.
@ParameterizedTest
@EnumSource(Constructors.class)
void shouldThrowExceptionForReadAfterEOFIfNotEnoughBytesExist(Constructor constructor) throws Exception {
// Given
Path bytesReadTestFile = Path.of("bytesReadTest.txt");
StoreChannel storeChannel = fileSystem.write(bytesReadTestFile);
ByteBuffer buffer = ByteBuffers.allocate(1, INSTANCE);
buffer.put((byte) 1);
buffer.flip();
storeChannel.writeAll(buffer);
storeChannel.force(false);
storeChannel.close();
storeChannel = fileSystem.read(bytesReadTestFile);
ReadAheadChannel<StoreChannel> channel = constructor.apply(storeChannel, DEFAULT_READ_AHEAD_SIZE);
assertEquals((byte) 1, channel.get());
assertThrows(ReadPastEndException.class, channel::get);
assertThrows(ReadPastEndException.class, channel::get);
}
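All of the examples above iterate over every constant of the source enum. For reference, @EnumSource can also restrict the constants by name; a minimal standalone sketch using a JDK enum:

import static org.junit.jupiter.api.Assertions.assertNotNull;
import java.time.temporal.ChronoUnit;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

class EnumSourceFilteringTest {
    // Runs exactly twice: once for DAYS and once for HOURS.
    // Adding mode = EnumSource.Mode.EXCLUDE would invert the selection.
    @ParameterizedTest
    @EnumSource(value = ChronoUnit.class, names = {"DAYS", "HOURS"})
    void runsOnlyForNamedConstants(ChronoUnit unit) {
        assertNotNull(unit.getDuration());
    }
}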