Use of org.btrplace.model.constraint.Offline in project scheduler by btrplace.
The class UCC15, method decommissioning_40gb.
public SolvingStatistics decommissioning_40gb() throws SchedulerException {
    // Set nb of nodes and vms
    int nbNodesRack = 24;
    int nbSrcNodes = nbNodesRack * 8;
    int nbDstNodes = nbNodesRack * 4;
    int nbVMs = nbSrcNodes * 2;
    // Set mem + cpu for VMs and Nodes
    int memVM = 4, cpuVM = 1;
    int memSrcNode = 16, cpuSrcNode = 4;
    int memDstNode = 16, cpuDstNode = 4;
    // Set memoryUsed and dirtyRate (for all VMs)
    int tpl1MemUsed = 2000, tpl1MaxDirtySize = 5, tpl1MaxDirtyDuration = 3;
    // idle vm
    double tpl1DirtyRate = 0;
    int tpl2MemUsed = 4000, tpl2MaxDirtySize = 96, tpl2MaxDirtyDuration = 2;
    // stress --vm 1000 --bytes 70K
    double tpl2DirtyRate = 3;
    int tpl3MemUsed = 2000, tpl3MaxDirtySize = 96, tpl3MaxDirtyDuration = 2;
    // stress --vm 1000 --bytes 70K
    double tpl3DirtyRate = 3;
    int tpl4MemUsed = 4000, tpl4MaxDirtySize = 5, tpl4MaxDirtyDuration = 3;
    // idle vm
    double tpl4DirtyRate = 0;
    // New default model
    Model mo = new DefaultModel();
    Mapping ma = mo.getMapping();
    // Create online source nodes and offline destination nodes
    List<Node> srcNodes = new ArrayList<>(), dstNodes = new ArrayList<>();
    for (int i = 0; i < nbSrcNodes; i++) {
        srcNodes.add(mo.newNode());
        ma.addOnlineNode(srcNodes.get(i));
    }
    for (int i = 0; i < nbDstNodes; i++) {
        dstNodes.add(mo.newNode());
        ma.addOfflineNode(dstNodes.get(i));
    }
    // Set boot and shutdown times
    for (Node n : dstNodes) {
        mo.getAttributes().put(n, "boot", 120); // ~2 minutes to boot
    }
    for (Node n : srcNodes) {
        mo.getAttributes().put(n, "shutdown", 17); // ~17 seconds to shut down
    }
    // Create running VMs on src nodes
    List<VM> vms = new ArrayList<>();
    VM v;
    for (int i = 0; i < nbSrcNodes; i++) {
        if (i % 2 == 0) {
            v = mo.newVM();
            vms.add(v);
            mo.getAttributes().put(v, "memUsed", tpl1MemUsed);
            mo.getAttributes().put(v, "coldDirtyRate", tpl1DirtyRate);
            mo.getAttributes().put(v, "hotDirtySize", tpl1MaxDirtySize);
            mo.getAttributes().put(v, "hotDirtyDuration", tpl1MaxDirtyDuration);
            ma.addRunningVM(v, srcNodes.get(i));
            v = mo.newVM();
            vms.add(v);
            mo.getAttributes().put(v, "memUsed", tpl2MemUsed);
            mo.getAttributes().put(v, "coldDirtyRate", tpl2DirtyRate);
            mo.getAttributes().put(v, "hotDirtySize", tpl2MaxDirtySize);
            mo.getAttributes().put(v, "hotDirtyDuration", tpl2MaxDirtyDuration);
            ma.addRunningVM(v, srcNodes.get(i));
        } else {
            v = mo.newVM();
            vms.add(v);
            mo.getAttributes().put(v, "memUsed", tpl3MemUsed);
            mo.getAttributes().put(v, "coldDirtyRate", tpl3DirtyRate);
            mo.getAttributes().put(v, "hotDirtySize", tpl3MaxDirtySize);
            mo.getAttributes().put(v, "hotDirtyDuration", tpl3MaxDirtyDuration);
            ma.addRunningVM(v, srcNodes.get(i));
            v = mo.newVM();
            vms.add(v);
            mo.getAttributes().put(v, "memUsed", tpl4MemUsed);
            mo.getAttributes().put(v, "coldDirtyRate", tpl4DirtyRate);
            mo.getAttributes().put(v, "hotDirtySize", tpl4MaxDirtySize);
            mo.getAttributes().put(v, "hotDirtyDuration", tpl4MaxDirtyDuration);
            ma.addRunningVM(v, srcNodes.get(i));
        }
    }
    // Add resource decorators
    ShareableResource rcMem = new ShareableResource("mem", 0, 0);
    ShareableResource rcCPU = new ShareableResource("cpu", 0, 0);
    for (Node n : srcNodes) {
        rcMem.setCapacity(n, memSrcNode);
        rcCPU.setCapacity(n, cpuSrcNode);
    }
    for (Node n : dstNodes) {
        rcMem.setCapacity(n, memDstNode);
        rcCPU.setCapacity(n, cpuDstNode);
    }
    for (VM vm : vms) {
        rcMem.setConsumption(vm, memVM);
        rcCPU.setConsumption(vm, cpuVM);
    }
    mo.attach(rcMem);
    mo.attach(rcCPU);
    // Add a network view
    Network net = new Network();
    Switch swSrcRack1 = net.newSwitch();
    Switch swSrcRack2 = net.newSwitch();
    Switch swSrcRack3 = net.newSwitch();
    Switch swSrcRack4 = net.newSwitch();
    Switch swSrcRack5 = net.newSwitch();
    Switch swSrcRack6 = net.newSwitch();
    Switch swSrcRack7 = net.newSwitch();
    Switch swSrcRack8 = net.newSwitch();
    Switch swDstRack1 = net.newSwitch();
    Switch swDstRack2 = net.newSwitch();
    Switch swDstRack3 = net.newSwitch();
    Switch swDstRack4 = net.newSwitch();
    Switch swMain = net.newSwitch();
    net.connect(1000, swSrcRack1, srcNodes.subList(0, nbNodesRack));
    net.connect(1000, swSrcRack2, srcNodes.subList(nbNodesRack, nbNodesRack * 2));
    net.connect(1000, swSrcRack3, srcNodes.subList(nbNodesRack * 2, nbNodesRack * 3));
    net.connect(1000, swSrcRack4, srcNodes.subList(nbNodesRack * 3, nbNodesRack * 4));
    net.connect(1000, swSrcRack5, srcNodes.subList(nbNodesRack * 4, nbNodesRack * 5));
    net.connect(1000, swSrcRack6, srcNodes.subList(nbNodesRack * 5, nbNodesRack * 6));
    net.connect(1000, swSrcRack7, srcNodes.subList(nbNodesRack * 6, nbNodesRack * 7));
    net.connect(1000, swSrcRack8, srcNodes.subList(nbNodesRack * 7, nbNodesRack * 8));
    net.connect(1000, swDstRack1, dstNodes.subList(0, nbNodesRack));
    net.connect(1000, swDstRack2, dstNodes.subList(nbNodesRack, nbNodesRack * 2));
    net.connect(1000, swDstRack3, dstNodes.subList(nbNodesRack * 2, nbNodesRack * 3));
    net.connect(1000, swDstRack4, dstNodes.subList(nbNodesRack * 3, nbNodesRack * 4));
    net.connect(40000, swMain, swSrcRack1, swSrcRack2, swSrcRack3, swSrcRack4, swSrcRack5, swSrcRack6, swSrcRack7, swSrcRack8, swDstRack1, swDstRack2, swDstRack3, swDstRack4);
    mo.attach(net);
    // net.generateDot(path + "topology.dot", false);
    // Set parameters
    DefaultParameters ps = new DefaultParameters();
    ps.setVerbosity(0);
    ps.setTimeLimit(60);
    // ps.setMaxEnd(600);
    ps.doOptimize(false);
    // Migrate all VMs to destination nodes
    List<SatConstraint> cstrs = new ArrayList<>();
    int vm_num = 0;
    for (int i = 0; i < nbDstNodes; i++) {
        cstrs.add(new Fence(vms.get(vm_num), Collections.singleton(dstNodes.get(i))));
        cstrs.add(new Fence(vms.get(vm_num + 1), Collections.singleton(dstNodes.get(i))));
        cstrs.add(new Fence(vms.get(nbVMs - 1 - vm_num), Collections.singleton(dstNodes.get(i))));
        cstrs.add(new Fence(vms.get(nbVMs - 2 - vm_num), Collections.singleton(dstNodes.get(i))));
        vm_num += 2;
    }
    // Shutdown source nodes
    cstrs.addAll(srcNodes.stream().map(Offline::new).collect(Collectors.toList()));
    // Set a custom objective
    DefaultChocoScheduler sc = new DefaultChocoScheduler(ps);
    Instance i = new Instance(mo, cstrs, new MinMTTRMig());
    ReconfigurationPlan p;
    try {
        p = sc.solve(i);
        Assert.assertNotNull(p);
    } catch (Exception e) {
        e.printStackTrace();
    }
    // finally {
    return sc.getStatistics();
    // }
}
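The core of the scenario above is a two-constraint pattern: Fence pins each VM to its destination node while Offline forces every source node to shut down, and the objective then schedules the resulting migrations. A minimal sketch of that pattern, stripped of the network view and migration attributes (the node and VM names here are illustrative, not taken from UCC15), might look like:

    Model mo = new DefaultModel();
    Node src = mo.newNode();
    Node dst = mo.newNode();
    VM vm = mo.newVM();
    mo.getMapping().on(src, dst).run(src, vm);

    List<SatConstraint> cstrs = new ArrayList<>();
    // Pin the VM to the destination node...
    cstrs.add(new Fence(vm, Collections.singleton(dst)));
    // ...and force the source node to power off.
    cstrs.add(new Offline(src));

    ChocoScheduler sched = new DefaultChocoScheduler();
    ReconfigurationPlan plan = sched.solve(mo, cstrs);
    // Expected outcome: migrate vm to dst, then shut src down.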
Use of org.btrplace.model.constraint.Offline in project scheduler by btrplace.
The class IssuesTest, method testIssue89.
@Test
public static void testIssue89() throws Exception {
    final Model model = new DefaultModel();
    final Mapping mapping = model.getMapping();
    final Node node0 = model.newNode(0);
    final int[] ids0 = { 1, 45, 43, 40, 39, 38, 82, 80, 79, 78, 30, 75, 18, 16, 15, 14, 60, 9, 55, 54, 50, 48 };
    final Node node1 = model.newNode(1);
    final int[] ids1 = { 84, 83, 81, 77, 73, 71, 64, 63, 62, 57, 53, 52, 47, 46, 44, 41, 34, 31, 28, 25, 13, 8, 6, 4, 3, 0 };
    final Node node2 = model.newNode(2);
    final int[] ids2 = { 21, 67, 42, 36, 35, 33, 76, 74, 23, 69, 68, 20, 61, 12, 11, 10, 5, 51 };
    final Node node3 = model.newNode(3);
    final int[] ids3 = { 2, 66, 86, 85, 37, 32, 29, 27, 26, 72, 24, 70, 22, 19, 65, 17, 59, 58, 56, 7, 49 };
    final ShareableResource cpu = new ShareableResource("cpu", 45, 1);
    final ShareableResource mem = new ShareableResource("mem", 90, 2);
    populateNodeVm(model, mapping, node0, ids0);
    populateNodeVm(model, mapping, node1, ids1);
    populateNodeVm(model, mapping, node2, ids2);
    populateNodeVm(model, mapping, node3, ids3);
    model.attach(cpu);
    model.attach(mem);
    final Collection<SatConstraint> satConstraints = new ArrayList<>();
    // We want to cause Node 3 to go offline to see how the VMs hosted on that
    // node will get rebalanced.
    satConstraints.add(new Offline(node3));
    final OptConstraint optConstraint = new MinMTTR();
    DefaultChocoScheduler scheduler = new DefaultChocoScheduler();
    scheduler.doOptimize(false);
    scheduler.doRepair(true);
    scheduler.setTimeLimit(60000);
    ReconfigurationPlan plan = scheduler.solve(model, satConstraints, optConstraint);
    System.out.println(scheduler.getStatistics());
    Assert.assertTrue(plan.isApplyable());
    satConstraints.clear();
    // This is somewhat similar to making Node 3 go offline, by ensuring that
    // no VM can be hosted on that node any longer.
    satConstraints.addAll(mapping.getAllVMs().stream().map(vm -> new Ban(vm, Collections.singletonList(node3))).collect(Collectors.toList()));
    scheduler = new DefaultChocoScheduler();
    scheduler.doOptimize(false);
    scheduler.doRepair(true);
    plan = scheduler.solve(model, satConstraints, optConstraint);
    Assert.assertTrue(plan.isApplyable());
}
Use of org.btrplace.model.constraint.Offline in project scheduler by btrplace.
The class IssuesTest, method test16b.
/**
 * Unit test derived from Issue 16.
 *
 * @throws org.btrplace.scheduler.SchedulerException
 */
@Test
public void test16b() throws SchedulerException {
    Model model = new DefaultModel();
    Node n1 = model.newNode();
    Node n2 = model.newNode();
    Node n3 = model.newNode();
    Node n4 = model.newNode();
    VM vm1 = model.newVM();
    VM vm2 = model.newVM();
    VM vm3 = model.newVM();
    VM vm4 = model.newVM();
    VM vm5 = model.newVM();
    VM vm6 = model.newVM();
    model.getMapping().on(n1, n2, n3, n4).run(n1, vm1, vm2).run(n2, vm3, vm4).run(n3, vm5, vm6);
    Set<SatConstraint> ctrsC = new HashSet<>();
    Set<VM> vms1 = new HashSet<>(Arrays.asList(vm1, vm3, vm5));
    Set<VM> vms2 = new HashSet<>(Arrays.asList(vm2, vm4, vm6));
    ctrsC.add(new Spread(vms1));
    ctrsC.add(new Spread(vms2));
    ctrsC.add(new Fence(vm3, Collections.singleton(n1)));
    Offline off = new Offline(n2);
    ctrsC.add(off);
    ChocoScheduler cra = new DefaultChocoScheduler();
    ReconfigurationPlan dp = cra.solve(model, ctrsC);
    Assert.assertNotNull(dp);
}
Use of org.btrplace.model.constraint.Offline in project scheduler by btrplace.
The class OfflineConverterTest, method testViables.
@Test
public void testViables() throws JSONConverterException {
    Model mo = new DefaultModel();
    ConstraintsConverter conv = new ConstraintsConverter();
    conv.register(new OfflineConverter());
    Offline d = new Offline(mo.newNode());
    Assert.assertEquals(conv.fromJSON(mo, conv.toJSON(d)), d);
    System.out.println(conv.toJSON(d));
}
Use of org.btrplace.model.constraint.Offline in project scheduler by btrplace.
The class COfflineTest, method testUnsolvableProblem.
@Test
public void testUnsolvableProblem() throws SchedulerException {
    Model mo = new DefaultModel();
    VM vm1 = mo.newVM();
    Node n1 = mo.newNode();
    mo.getMapping().on(n1).run(n1, vm1);
    ChocoScheduler cra = new DefaultChocoScheduler();
    ReconfigurationPlan plan = cra.solve(mo, Collections.singleton(new Offline(n1)));
    Assert.assertNull(plan);
    SolvingStatistics stats = cra.getStatistics();
    Assert.assertTrue(stats.completed());
}
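The test above is unsolvable because the only node hosting a running VM is forced offline, leaving the VM nowhere to go. As a hedged counterpoint (a sketch of my own, not part of COfflineTest), adding a second online node should make the same Offline constraint satisfiable, since the VM can be relocated before n1 shuts down:

    Model mo = new DefaultModel();
    VM vm1 = mo.newVM();
    Node n1 = mo.newNode();
    Node n2 = mo.newNode();
    // Two online nodes this time: the VM can be relocated to n2.
    mo.getMapping().on(n1, n2).run(n1, vm1);
    ChocoScheduler cra = new DefaultChocoScheduler();
    ReconfigurationPlan plan = cra.solve(mo, Collections.singleton(new Offline(n1)));
    // A plan is expected here: migrate vm1 to n2, then turn n1 off.
    Assert.assertNotNull(plan);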