use of org.btrplace.model.view.network.Network in project scheduler by btrplace.
the class NetworkConverterTest method staticRoutingTest.
@Test
public void staticRoutingTest() throws JSONConverterException {
    Model mo = new DefaultModel();
    // Create a network view backed by manually defined (static) routes
    Network net = new Network(new StaticRouting());
    Switch s = net.newSwitch(1000);
    Node n1 = mo.newNode();
    Node n2 = mo.newNode();
    mo.getMapping().addOnlineNode(n1);
    mo.getMapping().addOnlineNode(n2);
    net.connect(1000, s, n1, n2);
    // Register a static route between n1 and n2, made of the two node-to-switch links
    Map<Link, Boolean> route = new LinkedHashMap<>();
    route.put(net.getConnectedLinks(n1).get(0), true);
    route.put(net.getConnectedLinks(n2).get(0), false);
    ((StaticRouting) net.getRouting()).setStaticRoute(new StaticRouting.NodesMap(n1, n2), route);
    mo.attach(net);
    // Serialize the model to JSON, then parse it back
    ModelConverter mc = new ModelConverter();
    JSONObject jo = mc.toJSON(mo);
    System.err.println(jo);
    Model mo2 = mc.fromJSON(jo);
    Network net2 = Network.get(mo2);
    // The de-serialized network view must match the original one
    Assert.assertTrue(net.getSwitches().equals(net2.getSwitches()));
    Assert.assertTrue(net.getLinks().equals(net2.getLinks()));
    Assert.assertTrue(net.getConnectedNodes().equals(net2.getConnectedNodes()));
    Map<StaticRouting.NodesMap, Map<Link, Boolean>> routes = ((StaticRouting) net.getRouting()).getStaticRoutes();
    Map<StaticRouting.NodesMap, Map<Link, Boolean>> routes2 = ((StaticRouting) net2.getRouting()).getStaticRoutes();
    for (StaticRouting.NodesMap nm : routes.keySet()) {
        for (StaticRouting.NodesMap nm2 : routes2.keySet()) {
            Assert.assertTrue(nm.equals(nm2));
            Assert.assertTrue(routes.get(nm).equals(routes2.get(nm2)));
        }
    }
}
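A side note, not part of the original test: the nested loops above compare every route of the first view against every route of the second one, which only works here because a single route was registered. A more direct check, a sketch that relies only on the NodesMap and route-map equality already exercised by the assertions above, would look up the (n1, n2) pair explicitly:
StaticRouting.NodesMap pair = new StaticRouting.NodesMap(n1, n2);
Assert.assertEquals(routes2.size(), 1);
for (Map.Entry<StaticRouting.NodesMap, Map<Link, Boolean>> e : routes2.entrySet()) {
    // The only registered route must be the (n1, n2) one and must survive the JSON round trip
    Assert.assertTrue(e.getKey().equals(pair));
    Assert.assertEquals(e.getValue(), route);
}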
use of org.btrplace.model.view.network.Network in project scheduler by btrplace.
the class CNetworkTest method defaultTest.
/**
 * Test the instantiation and the creation of the variables.
 *
 * @throws org.btrplace.scheduler.SchedulerException if an error occurs during the solving process (it should not)
 */
@Test
public void defaultTest() throws SchedulerException {
    // New default model
    Model mo = new DefaultModel();
    Mapping ma = mo.getMapping();
    // Create and boot 1 source and 1 destination node
    Node srcNode = mo.newNode(), dstNode = mo.newNode();
    ma.addOnlineNode(srcNode);
    ma.addOnlineNode(dstNode);
    // Attach a network view
    Network net = new Network();
    mo.attach(net);
    // Connect the nodes through a main non-blocking switch using 1 Gbit/s links
    Switch swMain = net.newSwitch();
    int bw = 1000;
    net.connect(bw, swMain, srcNode, dstNode);
    // Create and host 1 running VM on the source node
    VM vm = mo.newVM();
    ma.addRunningVM(vm, srcNode);
    // The VM consumes 6 GiB memory and has a memory intensive workload equivalent to "stress --vm 1000 --bytes 50K"
    int memUsed = 6000, hotDirtySize = 46, hotDirtyDuration = 2;
    double coldDirtyRate = 23.6;
    mo.getAttributes().put(vm, "memUsed", memUsed); // 6 GiB
    mo.getAttributes().put(vm, "hotDirtySize", hotDirtySize); // 46 MiB
    mo.getAttributes().put(vm, "hotDirtyDuration", hotDirtyDuration); // 2 sec.
    mo.getAttributes().put(vm, "coldDirtyRate", coldDirtyRate); // 23.6 MiB/sec.
    // Add constraints
    List<SatConstraint> cstrs = new ArrayList<>();
    // We force the migration to go on the destination node
    cstrs.add(new Fence(vm, Collections.singleton(dstNode)));
    // Try to solve using the custom Min MTTR objective for migration scheduling
    ReconfigurationPlan p = new DefaultChocoScheduler().solve(mo, cstrs, new MinMTTRMig());
    Assert.assertNotNull(p);
    // The switch is non-blocking
    Assert.assertEquals(swMain.getCapacity(), Integer.MAX_VALUE);
    // Check the migration path and bandwidth
    MigrateVM mig = (MigrateVM) p.getActions().stream().filter(s -> s instanceof MigrateVM).findFirst().get();
    Assert.assertTrue(net.getRouting().getPath(mig.getSourceNode(), mig.getDestinationNode()).containsAll(net.getLinks()));
    Assert.assertEquals(net.getRouting().getMaxBW(mig.getSourceNode(), mig.getDestinationNode()), bw);
    Assert.assertEquals(mig.getBandwidth(), bw);
    // Check the migration duration computation
    double bandwidth_octet = mig.getBandwidth() / 9, durationMin, durationColdPages, durationHotPages, durationTotal;
    durationMin = memUsed / bandwidth_octet;
    durationColdPages = ((hotDirtySize + ((durationMin - hotDirtyDuration) * coldDirtyRate)) / (bandwidth_octet - coldDirtyRate));
    durationHotPages = ((hotDirtySize / bandwidth_octet) * ((hotDirtySize / hotDirtyDuration) / (bandwidth_octet - (hotDirtySize / hotDirtyDuration))));
    durationTotal = durationMin + durationColdPages + durationHotPages;
    Assert.assertEquals((mig.getEnd() - mig.getStart()), (int) Math.round(durationTotal));
}
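A rough back-of-the-envelope check, not part of the original test: with the 1000 Mbit/s bandwidth chosen above, bandwidth_octet is about 111 MiB/s, so durationMin = 6000 / 111 ≈ 54.1 s, durationColdPages = (46 + (54.1 - 2) × 23.6) / (111 - 23.6) ≈ 14.6 s, and durationHotPages = (46 / 111) × ((46 / 2) / (111 - 23)) ≈ 0.1 s. The total is roughly 68.7 s, so the final assertion expects the migration to span about 69 seconds in the computed plan.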
use of org.btrplace.model.view.network.Network in project scheduler by btrplace.
the class CNetworkTest method testWithSwitchCapacity.
@Test
public void testWithSwitchCapacity() {
    Model mo = new DefaultModel();
    Node n1 = mo.newNode();
    Node n2 = mo.newNode();
    VM v = mo.newVM();
    mo.getMapping().on(n1, n2).run(n1, v);
    ShareableResource mem = new ShareableResource("mem", 10000, 5000);
    Network net = new Network();
    mo.attach(net);
    mo.attach(mem);
    // The VM to migrate uses 10000 MiB of memory
    mo.getAttributes().put(v, "memUsed", 10000);
    // A 1000 Mbit/s switch caps the bandwidth offered by the two 2000 Mbit/s links
    Switch sw = net.newSwitch(1000);
    net.connect(2000, sw, n1, n2);
    ChocoScheduler s = new DefaultChocoScheduler();
    // Force the migration of v to n2; a plan must still be computed despite the limited switch capacity
    ReconfigurationPlan p = s.solve(mo, Collections.singletonList(new Fence(v, n2)));
    Assert.assertNotNull(p);
}
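A hedged follow-up one could append to the test above (an assumption, not part of the original source): since the 1000 Mbit/s switch sits between two 2000 Mbit/s links, the bandwidth allocated to the migration should not exceed the switch capacity.
// Sketch: the scheduled migration bandwidth is expected to be capped by the switch
MigrateVM mig = (MigrateVM) p.getActions().stream().filter(a -> a instanceof MigrateVM).findFirst().get();
Assert.assertTrue(mig.getBandwidth() <= 1000);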
use of org.btrplace.model.view.network.Network in project scheduler by btrplace.
the class NetworkAndMigrations method run.
@Override
public void run() {
    // New default model
    Model mo = new DefaultModel();
    Mapping ma = mo.getMapping();
    // Create and boot 2 source nodes and 1 destination node
    Node srcNode1 = mo.newNode();
    Node srcNode2 = mo.newNode();
    Node dstNode = mo.newNode();
    ma.addOnlineNode(srcNode1);
    ma.addOnlineNode(srcNode2);
    ma.addOnlineNode(dstNode);
    // Create 4 VMs and host 2 VMs on each source node
    VM vm0 = mo.newVM();
    VM vm1 = mo.newVM();
    VM vm2 = mo.newVM();
    VM vm3 = mo.newVM();
    ma.addRunningVM(vm0, srcNode1);
    ma.addRunningVM(vm1, srcNode1);
    ma.addRunningVM(vm2, srcNode2);
    ma.addRunningVM(vm3, srcNode2);
    // Set the VM attributes 'memory used', 'hot dirty page size', 'hot dirty page duration' and 'cold dirty pages rate'
    // vm0 and vm3 are 'idle' VMs (with no special memory activity) but they still consume some memory
    mo.getAttributes().put(vm0, "memUsed", 2000); // 2 GiB
    mo.getAttributes().put(vm3, "memUsed", 2200); // 2.2 GiB
    // vm1 and vm2 consume memory and have a memory intensive workload equivalent to "stress --vm 1000 --bytes 50K"
    mo.getAttributes().put(vm1, "memUsed", 8000); // 8 GiB
    mo.getAttributes().put(vm1, "hotDirtySize", 56);
    mo.getAttributes().put(vm1, "hotDirtyDuration", 2);
    mo.getAttributes().put(vm1, "coldDirtyRate", 22.6);
    mo.getAttributes().put(vm2, "memUsed", 7500); // 7.5 GiB
    mo.getAttributes().put(vm2, "hotDirtySize", 56);
    mo.getAttributes().put(vm2, "hotDirtyDuration", 2);
    mo.getAttributes().put(vm2, "coldDirtyRate", 22.6);
    // Add placement constraints: we want to shut down the source nodes to force the VMs to migrate to the destination node
    List<SatConstraint> cstrs = new ArrayList<>();
    cstrs.add(new Offline(srcNode1));
    cstrs.add(new Offline(srcNode2));
    // Try to solve as is and show the computed plan
    ReconfigurationPlan p = new DefaultChocoScheduler().solve(mo, cstrs);
    System.out.println(p);
    // Set a default network view and try to solve again:
    // the nodes are connected to a non-blocking switch using 1 Gbit/s links
    Network net = Network.createDefaultNetwork(mo);
    p = new DefaultChocoScheduler().solve(mo, cstrs);
    System.out.println(p);
    // Create and attach a custom network view and try to solve again
    mo.detach(net);
    net = new Network();
    // Connect the nodes through a main non-blocking switch.
    // The source nodes are connected with 1 Gbit/sec. links while the destination node has a 10 Gbit/sec. link.
    Switch swMain = net.newSwitch();
    net.connect(1000, swMain, srcNode1, srcNode2);
    net.connect(10000, swMain, dstNode);
    mo.attach(net);
    p = new DefaultChocoScheduler().solve(mo, cstrs);
    System.out.println(p);
    System.out.flush();
}
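To compare the three runs beyond printing the raw plans, one could also log when each plan finishes. A minimal sketch, assuming org.btrplace.plan.event.Action is imported; the stream over Action.getEnd() mirrors the end-time checks used in the tests above, the rest is an assumption:
// Print the completion time of the last computed plan
int makespan = p.getActions().stream().mapToInt(Action::getEnd).max().orElse(0);
System.out.println("Plan completed at t=" + makespan);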
use of org.btrplace.model.view.network.Network in project scheduler by btrplace.
the class CDeadlineTest method testOk.
@Test
public void testOk() throws SchedulerException {
    // New default model
    Model mo = new DefaultModel();
    Mapping ma = mo.getMapping();
    // Create and boot 2 source nodes and 1 destination node
    Node srcNode1 = mo.newNode(), srcNode2 = mo.newNode(), dstNode = mo.newNode();
    ma.addOnlineNode(srcNode1);
    ma.addOnlineNode(srcNode2);
    ma.addOnlineNode(dstNode);
    // Attach a network view
    Network net = new Network();
    mo.attach(net);
    // Connect the nodes through a main non-blocking switch.
    // The destination node has twice the bandwidth of the source nodes.
    Switch swMain = net.newSwitch();
    net.connect(1000, swMain, srcNode1, srcNode2);
    net.connect(2000, swMain, dstNode);
    // Create and host 1 VM per source node
    VM vm1 = mo.newVM();
    VM vm2 = mo.newVM();
    ma.addRunningVM(vm1, srcNode1);
    ma.addRunningVM(vm2, srcNode2);
    // Attach CPU and memory resource views and set the node capacities and VM consumptions
    int mem_vm = 8, cpu_vm = 4, mem_src = 8, cpu_src = 4, mem_dst = 16, cpu_dst = 8;
    ShareableResource rcMem = new ShareableResource("mem", 0, 0), rcCPU = new ShareableResource("cpu", 0, 0);
    mo.attach(rcMem);
    mo.attach(rcCPU);
    // VMs
    rcMem.setConsumption(vm1, mem_vm).setConsumption(vm2, mem_vm);
    rcCPU.setConsumption(vm1, cpu_vm).setConsumption(vm2, cpu_vm);
    // Nodes
    rcMem.setCapacity(srcNode1, mem_src).setCapacity(srcNode2, mem_src).setCapacity(dstNode, mem_dst);
    rcCPU.setCapacity(srcNode1, cpu_src).setCapacity(srcNode2, cpu_src).setCapacity(dstNode, cpu_dst);
    // Set the VM attributes 'memory used', 'hot dirty page size', 'hot dirty page duration' and 'cold dirty pages rate'
    int vm_mu = 6000, vm_mds = 46, vm_mdd = 2;
    double vm_cdr = 23.6;
    // vm1 is an 'idle' VM (with no special memory activity) but it still consumes 6 GiB of memory
    mo.getAttributes().put(vm1, "memUsed", vm_mu);
    // vm2 consumes 6 GiB of memory and has a memory intensive workload equivalent to "stress --vm 1000 --bytes 50K"
    mo.getAttributes().put(vm2, "memUsed", vm_mu);
    mo.getAttributes().put(vm2, "hotDirtySize", vm_mds);
    mo.getAttributes().put(vm2, "hotDirtyDuration", vm_mdd);
    mo.getAttributes().put(vm2, "coldDirtyRate", vm_cdr);
    // Create constraints
    List<SatConstraint> cstrs = new ArrayList<>();
    // Placement constraints: we want to shut down the source nodes to force the migrations to the destination node
    cstrs.add(new Offline(srcNode1));
    cstrs.add(new Offline(srcNode2));
    // Set a relative deadline of 90s ("+00:01:30") for the migration of vm2
    Deadline dead = new Deadline(vm2, "+00:01:30");
    cstrs.add(dead);
    // Solve it using the MinMTTRMig objective, oriented toward migration scheduling
    ReconfigurationPlan p = new DefaultChocoScheduler().solve(mo, cstrs, new MinMTTRMig());
    // It works because the deadline leaves enough time to fully migrate vm2
    Assert.assertNotNull(p);
    // Check that the deadline is respected
    Action mig1 = p.getActions().stream().filter(s -> s instanceof MigrateVM && ((MigrateVM) s).getVM().equals(vm1)).findAny().get();
    Assert.assertTrue(mig1.getEnd() <= 90);
    // TODO: use methods on DeadlineChecker to verify that the action terminates at time ?
    Assert.assertTrue(dead.isSatisfied(p));
}
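The deadline actually targets vm2, so a natural complement (not in the original test, but built from the same pattern as the vm1 check above) would be to verify the end time of vm2's own migration:
Action mig2 = p.getActions().stream().filter(s -> s instanceof MigrateVM && ((MigrateVM) s).getVM().equals(vm2)).findAny().get();
Assert.assertTrue(mig2.getEnd() <= 90);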