Use of org.platformlayer.ops.CommandEnvironment in project platformlayer by platformlayer.
The class NetworkBridge, method addChildren().
@Override
protected void addChildren() throws OpsException {
    // We would need proxy_arp if they were on the same IPV4 network
    // Do we need proxy_ndp ???
    // echo 1 > /proc/sys/net/ipv6/conf/eth0/proxy_ndp
    addChild(SysctlSetting.build("net.ipv4.ip_forward", "1"));
    addChild(SysctlSetting.build("net.ipv6.conf.all.forwarding", "1"));
    addChild(SysctlSetting.build("net.ipv6.conf.all.proxy_ndp", "1"));

    {
        File scriptPath = new File("/etc/network/if-up.d/nat-for-bridge");

        TemplatedFile nat = addChild(TemplatedFile.build(template, scriptPath));
        nat.setFileMode("0755");

        // Simulate an ifup run
        Command command = Command.build(scriptPath);

        CommandEnvironment env = new CommandEnvironment();
        env.put("IFACE", template.getPublicInterface());
        env.put("MODE", "start");
        env.put("ADDRFAM", "inet");
        command.setEnvironment(env);

        nat.setUpdateAction(command);
    }
}
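This snippet shows the basic CommandEnvironment pattern that repeats throughout these examples: build a Command, populate an environment map, attach it with setEnvironment, and hand the command to whatever will execute it. A minimal sketch of the same pattern in isolation (the script path, variable values, and the `target` reference are illustrative, not taken from NetworkBridge):

// Minimal sketch of the CommandEnvironment pattern; illustrative values only,
// and 'target' stands in for an OpsTarget that is in scope.
Command command = Command.build(new File("/usr/local/bin/example-hook"));

CommandEnvironment env = new CommandEnvironment();
env.put("IFACE", "eth0");
env.put("MODE", "start");

command.setEnvironment(env);
target.executeCommand(command);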
Use of org.platformlayer.ops.CommandEnvironment in project platformlayer by platformlayer.
The class AptPackageManager, method update().
public void update(OpsTarget target, boolean force) throws OpsException {
    if (!force && !isUpdateNeeded(target)) {
        log.info("apt-get update not needed; won't update");
        return;
    }

    log.info("Updating apt repositories");

    CommandEnvironment commandEnvironment = buildEnvironmentWithProxy(target);

    Command command = Command.build("apt-get --yes update");
    command = command.setEnvironment(commandEnvironment).setTimeout(TimeSpan.TEN_MINUTES);

    executeAptCommand(target, command);

    flushCache(target);
}
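buildEnvironmentWithProxy is not part of this listing; judging from the proxy helper used in DiskImageController below, it presumably resolves an HTTP proxy for package downloads and exposes it through environment variables. A hypothetical sketch of such a helper (the findHttpProxy call and the http_proxy variable are assumptions for illustration, not platformlayer code):

// Hypothetical sketch only; the real buildEnvironmentWithProxy is not shown in this listing.
private CommandEnvironment buildEnvironmentWithProxy(OpsTarget target) throws OpsException {
    CommandEnvironment env = new CommandEnvironment();
    String proxyUrl = findHttpProxy(target); // hypothetical helper that picks a reachable proxy
    if (proxyUrl != null) {
        env.put("http_proxy", proxyUrl); // apt-get honors http_proxy from its environment
    }
    return env;
}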
Use of org.platformlayer.ops.CommandEnvironment in project platformlayer by platformlayer.
The class AptPackageManager, method upgrade().
public void upgrade(OpsTarget target) throws OpsException {
    CommandEnvironment commandEnvironment = buildEnvironmentWithProxy(target);

    Command command = Command.build("apt-get --yes upgrade");
    target.executeCommand(command.setEnvironment(commandEnvironment).setTimeout(TimeSpan.TEN_MINUTES));

    flushCache(target);
}
Use of org.platformlayer.ops.CommandEnvironment in project platformlayer by platformlayer.
The class DiskImageController, method buildImage().
public void buildImage(DiskImage image) throws OpsException, IOException {
    // Assume the worst...
    opsContext.setFailure(true);

    MachineProvider targetCloud = cloudHelpers.getCloud(image.cloud);
    DiskImageRecipe recipe = platformLayer.getItem(image.recipeId, DiskImageRecipe.class);

    OperatingSystem operatingSystem = getRequestedOperatingSystem(recipe);
    String kernelPackage = packageHelpers.getDefaultKernelPackage(operatingSystem);

    String filesystem = "ext3";

    ImageFormat imageFormat = EnumUtils.valueOfCaseInsensitive(ImageFormat.class, image.format);
    boolean buildTar = imageFormat == ImageFormat.Tar;

    // TODO: This logic is not intrinsically correct
    // boolean supportCloudConfigDisk = imageFormat != ImageFormat.DiskQcow2;
    boolean supportCloudConfigDisk = true;
    boolean useConfigDriveSymlinks = false;
    String configDriveLabel = "config";

    List<String> packages = Lists.newArrayList();
    packages.add("openssh-server");
    // Needed for preseeding
    packages.add("debconf-utils");
    if (operatingSystem.getDistribution() == Distribution.Debian) {
        packages.add("locales");
    }
    // We always want some basics available in our images
    packages.add("curl");

    String hostname = "openstack";

    MachineCreationRequest request = new MachineCreationRequest();

    SshKey sshKey = service.getSshKey();

    // There are problems using LXC with debootstrap
    request.hostPolicy = new HostPolicy();
    request.hostPolicy.allowRunInContainer = false;

    // Null means 'use bootstrap image'
    request.recipeId = null;
    request.sshPublicKey = sshKey.getKeyPair().getPublic();
    request.sshPublicKeyName = service.getSshKeyName();

    request.securityGroups = Lists.newArrayList();
    String securityGroup = service.getSecurityGroupName();
    request.securityGroups.add(securityGroup);

    // We don't need a lot of memory to build a disk image (I think!)
    request.minimumMemoryMB = 256;

    Machine machine = cloud.createInstance(request, image.getKey());
    opsContext.takeOwnership(machine);

    machine = waitForAddress(machine);

    OpsTarget target = machine.getTarget(sshKey);
    waitForTarget(target);

    File tempDir = target.createTempDir();

    apt.update(target, true);

    // We need to install curl first so we can detect the performance of our proxies
    // Needed for proxy testing at least
    apt.install(target, "curl");

    CommandEnvironment httpProxyEnv = httpProxies.getHttpProxyEnvironment(target, Usage.SoftwarePackages, null);

    // For now, we assume that this image doesn't have debootstrap pre-installed
    apt.install(target, "debootstrap");

    // For transferring the file to a direct image server
    // debootstrap with LXC seems to have serious problems...
    boolean supportLxc = false;
    if (supportLxc) {
        apt.install(target, "fakechroot", "fakeroot");
    }

    Command command;
    File rootfsDir;
    File imageFile;
    File loopbackPartition = null;

    if (!buildTar) {
        apt.install(target, "mbr");
        apt.install(target, "parted");
        apt.install(target, "kpartx");
        apt.install(target, "extlinux");

        // Same with qemu-kvm
        // (needed for qemu-img convert ... a lot of extra stuff for just the
        // utils!)
        // packageHelpers.getPackageFor("qemu-img", operatingSystem);
        String qemuImgPackage = "qemu-utils";
        apt.install(target, qemuImgPackage);

        // Use local ephemeral storage...
        imageFile = new File(tempDir, "image.raw");
        command = Command.build("dd if=/dev/null bs=1M seek=8180 of={0}", imageFile);
        target.executeCommand(command);

        // Create partitions
        target.executeCommand(Command.build("parted -s {0} mklabel msdos", imageFile));
        target.executeCommand(Command.build("parted -s {0} mkpart primary 0% 100%", imageFile));
        target.executeCommand(Command.build("parted -s {0} set 1 boot on", imageFile));

        // Install Master Boot Record
        target.executeCommand(Command.build("install-mbr {0}", imageFile));

        // Mount the partitions
        // Hopefully it’s loop0p1...
        target.executeCommand(Command.build("modprobe dm-mod"));

        // boolean isMounted = false;
        //
        // {
        // ProcessExecution mountExecution = target.executeCommand(Command.build("mount", imageFile));
        // String stdout = mountExecution.getStdOut();
        // System.out.println(stdout);
        //
        // for (String line : Splitter.on('\n').split(stdout)) {
        // line = line.trim();
        // if (line.isEmpty()) {
        // continue;
        // }
        //
        // List<String> tokens = Lists.newArrayList(Splitter.on(' ').split(line));
        // if (tokens.size() < 3) {
        // throw new IllegalStateException("Cannot parse mount line: " + line);
        // }
        //
        // String mountDir = tokens.get(2);
        // if (mountDir.equals(mntDir.getAbsolutePath())) {
        // isMounted = true;
        // loopbackPartition = new File(tokens.get(0));
        // break;
        // }
        // }
        //
        // // /dev/sda1 on / type ext4 (rw,errors=remount-ro)
        // // tmpfs on /lib/init/rw type tmpfs (rw,nosuid,mode=0755)
        // // proc on /proc type proc (rw,noexec,nosuid,nodev)
        // // sysfs on /sys type sysfs (rw,noexec,nosuid,nodev)
        // // udev on /dev type tmpfs (rw,mode=0755)
        // // tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev)
        // // devpts on /dev/pts type devpts (rw,noexec,nosuid,gid=5,mode=620)
        // // /dev/mapper/loop0p1 on /tmp/8389210e66cd0df6/mnt type ext3 (rw)
        // // proc on /tmp/8389210e66cd0df6/mnt/proc type proc (rw)
        // }
        //
        // if (!isMounted)
        {
            ProcessExecution kpartxExecution = target.executeCommand(Command.build("kpartx -av {0}", imageFile));
            String stdout = kpartxExecution.getStdOut();
            List<String> tokens = Lists.newArrayList(Splitter.on(' ').split(stdout));
            if (tokens.size() != 9) {
                throw new IllegalStateException("Cannot parse kpartx stdout: " + stdout);
            }
            // add map loop6p1 (253:6): 0 16750592 linear /dev/loop6 2048
            String partitionDevice = tokens.get(2);
            if (!partitionDevice.startsWith("loop")) {
                throw new IllegalStateException("kpartx output does not look like a partition: " + stdout);
            }
            loopbackPartition = new File("/dev/mapper/" + partitionDevice);
        }

        // Format filesystem
        command = Command.build("yes | mkfs." + filesystem + " {0}", loopbackPartition);
        command.setTimeout(TimeSpan.FIVE_MINUTES);
        target.executeCommand(command);

        // Get this onto disk now, so we don't delay later commands
        target.executeCommand(Command.build("sync").setTimeout(TimeSpan.FIVE_MINUTES));

        // Don’t force a check based on dates
        target.executeCommand(Command.build("tune2fs -i 0 {0}", loopbackPartition).setTimeout(TimeSpan.FIVE_MINUTES));

        // Get this onto disk now, so we don't delay later commands
        target.executeCommand(Command.build("sync").setTimeout(TimeSpan.FIVE_MINUTES));

        // Mount on mnt/
        File mntDir = new File(tempDir, "mnt");
        target.executeCommand("mkdir {0}", mntDir);
        target.executeCommand(Command.build("mount {0} {1}", loopbackPartition, mntDir).setTimeout(TimeSpan.FIVE_MINUTES));

        rootfsDir = mntDir;
    } else {
        rootfsDir = new File(tempDir, "rootfs");
        imageFile = new File(tempDir, "image.tar.bz2");
    }

    if (buildTar) {
        apt.install(target, "bzip2");
    }

    if (supportLxc) {
        command = Command.build("fakechroot fakeroot debootstrap");
    } else {
        command = Command.build("debootstrap");
    }
    command.addLiteral("--verbose");
    command.addLiteral("--resolve-deps");
    if (supportLxc) {
        // Lxc has problems with mounting etc; fakechroot avoids this
        command.addLiteral("--variant=fakechroot");
        // command.addLiteral("--variant=minbase");
    }
    command.addQuoted("--include=", Joiner.on(",").join(packages));
    command.addLiteral(operatingSystem.getVersion());
    command.addFile(rootfsDir);
    // command.addQuoted(aptSource);
    command.setEnvironment(httpProxyEnv);
    command.setTimeout(TimeSpan.THIRTY_MINUTES);

    try {
        target.executeCommand(command);
    } catch (ProcessExecutionException e) {
        String debootstrapLog = target.readTextFile(new File(rootfsDir, "debootstrap/debootstrap.log"));
        log.warn("Debootstrap log: " + debootstrapLog);
        throw new OpsException("Error running debootstrap", e);
    }

    // TODO: Switch to ChrootOpsTarget, so we can move this stuff into utility functions
    ChrootOpsTarget chrootTarget = new ChrootOpsTarget(rootfsDir, new File("/tmp"), target);

    FileUpload.upload(target, new File(rootfsDir, "etc/hostname"), hostname);

    {
        // Stop services being started in the chroot
        String policy = ResourceUtils.get(getClass(), "usr.sbin.policy-rc.d");
        File policyFile = new File(rootfsDir, "usr/sbin/policy-rc.d");
        FileUpload.upload(target, policyFile, policy);
        target.chmod(policyFile, "755");
    }

    target.executeCommand("mount -t proc proc {0}", new File(rootfsDir, "proc"));

    apt.update(chrootTarget, true);

    target.executeCommand("chroot {0} locale-gen en_US.utf8", rootfsDir);
    target.executeCommand("chroot {0} /bin/bash -c \"DEBIAN_FRONTEND=noninteractive dpkg-reconfigure locales\"", rootfsDir);

    if (!buildTar) {
        {
            File kernelImgConf = new File(rootfsDir, "etc/kernel-img.conf");
            String preseedData = ResourceUtils.get(getClass(), "kernel-img.conf");
            FileUpload.upload(target, kernelImgConf, preseedData);
        }
        {
            File preseedTmpDir = target.createTempDir();
            File preseedFile = new File(preseedTmpDir, "kernel.preseed");
            String preseedData = ResourceUtils.get(getClass(), "kernel.preseed");
            FileUpload.upload(target, preseedFile, preseedData);
            target.executeCommand(Command.build("cat {0} | chroot {1} debconf-set-selections", preseedFile, rootfsDir));
            apt.install(chrootTarget, kernelPackage);
        }
    }

    preconfigurePackages(chrootTarget, recipe.configurePackage);

    if (recipe.repositoryKey != null) {
        addRepositoryKeys(chrootTarget, recipe.repositoryKey);
    }
    if (recipe.repository != null) {
        addRepositories(chrootTarget, recipe.repository);
        apt.update(chrootTarget, true);
    }

    if (recipe.addPackage != null) {
        apt.install(chrootTarget, recipe.addPackage);

        if (recipe.addPackage.contains("jenkins")) {
            // It looks like jenkins doesn't honor policy-rc.d (?)
            // TODO: Fix this monstrosity...
            log.warn("Hard-coding service stop after jenkins installation");
            target.executeCommand(Command.build("chroot {0} /etc/init.d/jenkins stop", rootfsDir));
        }
    }

    apt.upgrade(chrootTarget);
    apt.clean(chrootTarget);

    if (!buildTar) {
        String uuid;
        {
            ProcessExecution uuidExecution = target.executeCommand("blkid -o value -s UUID {0}", loopbackPartition);
            uuid = uuidExecution.getStdOut().trim();
        }

        // Set up /etc/fstab
        String fstab = "# /etc/fstab: static file system information.\n";
        // TODO: Swap
        fstab += "proc\t/proc\tproc\tnodev,noexec,nosuid\t0\t0\n";
        // fstab += "/dev/sda1\t/\t" + filesystem +
        // "\terrors=remount-ro\t0\t1\n";
        fstab += String.format("UUID=%s\t/\t%s\terrors=remount-ro\t0\t1\n", uuid, filesystem);
        if (supportCloudConfigDisk) {
            if (useConfigDriveSymlinks) {
                // Use configuration from cloud_config mount
                target.mkdir(new File(rootfsDir, "media/config"));
                fstab += "/dev/disk/by-label/" + configDriveLabel + "\t/media/config\tudf,iso9660\tro\t0\t0\n";
            }
        }
        FileUpload.upload(target, new File(rootfsDir, "etc/fstab"), fstab);
        log.info("fstab = " + fstab);

        // Set up extlinux
        {
            ProcessExecution kernelExecution = target.executeCommand("chroot {0} find boot/ -name \"vmlinuz-*\"", rootfsDir);
            List<String> kernels = Lists.newArrayList();
            for (String kernel : kernelExecution.getStdOut().split("\n")) {
                kernel = kernel.trim();
                if (kernel.isEmpty()) {
                    continue;
                }
                kernels.add(kernel);
            }
            if (kernels.size() > 1) {
                throw new IllegalStateException("Multiple kernels found");
            } else if (kernels.size() != 1) {
                throw new IllegalStateException("No kernels found");
            }

            ProcessExecution initrdExecution = target.executeCommand("chroot {0} find boot/ -name \"initrd*\"", rootfsDir);
            List<String> initrds = Lists.newArrayList();
            for (String initrd : initrdExecution.getStdOut().split("\n")) {
                initrd = initrd.trim();
                if (initrd.isEmpty()) {
                    continue;
                }
                if (initrd.endsWith(".bak")) {
                    continue;
                }
                initrds.add(initrd);
            }
            if (initrds.size() > 1) {
                throw new IllegalStateException("Multiple initrds found");
            } else if (initrds.size() != 1) {
                throw new IllegalStateException("No initrds found");
            }

            String conf = String.format("default linux\ntimeout 1\n\nlabel linux\nkernel %s\nappend initrd=%s root=UUID=%s ro quiet", kernels.get(0), initrds.get(0), uuid);
            FileUpload.upload(target, new File(rootfsDir, "extlinux.conf"), conf);
            log.info("extlinux.conf = " + conf);
        }

        target.executeCommand(Command.build("extlinux --install {0}", rootfsDir).setTimeout(TimeSpan.FIVE_MINUTES));
    }

    if (supportCloudConfigDisk) {
        if (useConfigDriveSymlinks) {
            target.rm(new File(rootfsDir, "etc/network/interfaces"));
            target.executeCommand("ln -s /media/config/etc/network/interfaces {0}", new File(rootfsDir, "etc/network/interfaces"));

            target.mkdir(new File(rootfsDir, "root/.ssh"));
            target.executeCommand("ln -s /media/config/root/.ssh/authorized_keys {0}", new File(rootfsDir, "root/.ssh/authorized_keys"));
        } else {
            String initScript = ResourceUtils.get(getClass(), "openstack-config");
            File initScriptFile = new File(rootfsDir, "etc/init.d/openstack-config");
            FileUpload.upload(target, initScriptFile, initScript);
            target.executeCommand("chmod +x {0}", initScriptFile);
            chrootTarget.executeCommand("/usr/sbin/update-rc.d openstack-config defaults");
        }
    }

    {
        // Remove policy file
        File policyFile = new File(rootfsDir, "usr/sbin/policy-rc.d");
        target.rm(policyFile);
    }

    target.executeCommand("sync");
    target.executeCommand("umount {0}", new File(rootfsDir, "proc"));

    if (!buildTar) {
        target.executeCommand("sync");
        target.executeCommand("umount {0}", rootfsDir);
        target.executeCommand("sync");
        target.executeCommand("kpartx -d {0}", imageFile);
        target.executeCommand("sync");
    }

    if (buildTar) {
        Command compress = Command.build("cd {0}; tar jcf {1} .", rootfsDir, imageFile);
        target.executeCommand(compress.setTimeout(TimeSpan.FIFTEEN_MINUTES));
    }

    FilesystemInfo imageInfo = target.getFilesystemInfoFile(imageFile);

    File uploadImageFile;
    if (!buildTar) {
        boolean isQcow2 = imageFormat == ImageFormat.DiskQcow2;
        if (isQcow2) {
            // We create the image as a raw image (making use of sparse files)
            // and then convert it to qcow2. This is a little less efficient, but
            // has a few advantages...
            // 1) We can support different formats
            // 2) The final image is defragmented
            // 3) Mounting a qcow2 image (or other image formats) is tricky vs
            // loopback mount
            uploadImageFile = new File(imageFile.getParentFile(), "image.qcow2");
            command = Command.build("qemu-img convert -f raw -O qcow2 {0} {1}", imageFile, uploadImageFile);
            command.setTimeout(TimeSpan.THIRTY_MINUTES);
            target.executeCommand(command);
        } else {
            uploadImageFile = new File(imageFile.getParentFile(), "image.raw.gz");
            command = Command.build("gzip -c --best {0} > {1}", imageFile, uploadImageFile);
            command.setTimeout(TimeSpan.THIRTY_MINUTES);
            target.executeCommand(command);
        }
    } else {
        uploadImageFile = imageFile;
    }

    String imageId;
    // Upload & tag the image with the recipe ID
    {
        Tags tags = new Tags();
        tags.add(Tag.buildParentTag(recipe.getKey()));
        tags.add(imageFormat.toTag());
        imageId = cloud.getImageStore(targetCloud).uploadImage(target, tags, uploadImageFile, imageInfo.size);
    }

    // Tag the recipe with the image ID
    {
        TagChanges tagChanges = new TagChanges();
        tagChanges.addTags.add(Tag.IMAGE_ID.build(imageId));
        platformLayer.changeTags(image.getKey(), tagChanges);
    }

    // Our pessimism proved unfounded...
    opsContext.setFailure(false);
}
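Most of the in-image configuration above goes through the ChrootOpsTarget, which lets the same Command/CommandEnvironment API run inside the image root. A condensed sketch of that pattern, reusing the proxy environment resolved on the host (the package name and timeout are illustrative):

// Illustrative only: run an apt command inside the freshly debootstrapped root,
// reusing the proxy environment that was resolved on the host.
ChrootOpsTarget chrootTarget = new ChrootOpsTarget(rootfsDir, new File("/tmp"), target);

Command install = Command.build("apt-get --yes install {0}", "openssh-server");
install.setEnvironment(httpProxyEnv);
install.setTimeout(TimeSpan.TEN_MINUTES);

chrootTarget.executeCommand(install);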
Use of org.platformlayer.ops.CommandEnvironment in project platformlayer by platformlayer.
The class DownloadFileByHash, method uploadFile().
@Override
protected void uploadFile(OpsTarget target, File remoteFilePath) throws IOException, OpsException {
    target.mkdir(remoteFilePath.getParentFile());

    Md5Hash resolved = getResolved(target);

    CasStoreObject casObject;
    CasStoreMap casStoreMap = cas.getCasStoreMap(target);
    try {
        casObject = casStoreMap.findArtifact(new OpsCasTarget(target), resolved);
    } catch (Exception e) {
        throw new OpsException("Error while resolving artifact:" + getHumanName(), e);
    }

    if (url != null && casObject == null) {
        target.mkdir(remoteFilePath.getParentFile());

        CurlRequest curlRequest = new CurlRequest(url);
        curlRequest.bareRequest = true;

        CommandEnvironment commandEnvironment = httpProxies.getHttpProxyEnvironment(target, Usage.General, url);

        Command curlCommand = curlRequest.toCommand();
        curlCommand.addLiteral(">");
        curlCommand.addFile(remoteFilePath);
        curlCommand.setEnvironment(commandEnvironment);
        curlCommand.setTimeout(TimeSpan.FIVE_MINUTES);

        // TODO: Can we cache into CAS instead??
        log.info("Not found in CAS system; downloading directly: " + url);
        target.executeCommand(curlCommand);
    } else {
        if (casObject == null) {
            throw new OpsException("Unable to find artifact: " + getHumanName());
        }

        log.info("Doing a CAS copy from " + casObject + " to target");
        cas.copyObject(casStoreMap, casObject, new OpsCasTarget(target), remoteFilePath, true);
    }
}
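Here the proxy environment is requested per usage type: package installs ask for Usage.SoftwarePackages, while this ad-hoc download asks for Usage.General with the concrete URL. A condensed sketch of the proxied download on its own (the URL and destination path are placeholders):

// Illustrative placeholders; the real values come from the artifact being fetched.
String url = "http://example.com/artifact.tar.gz";
File remoteFilePath = new File("/tmp/artifact.tar.gz");

CurlRequest curlRequest = new CurlRequest(url);
curlRequest.bareRequest = true;

Command curlCommand = curlRequest.toCommand();
curlCommand.addLiteral(">");
curlCommand.addFile(remoteFilePath);
curlCommand.setEnvironment(httpProxies.getHttpProxyEnvironment(target, Usage.General, url));
curlCommand.setTimeout(TimeSpan.FIVE_MINUTES);

target.executeCommand(curlCommand);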