Use of org.platformlayer.images.model.DiskImageRecipe in project platformlayer by platformlayer.
The class InstanceBuilder, method buildPersistentInstanceTemplate:
private PersistentInstance buildPersistentInstanceTemplate() throws OpsException {
SshKey sshKey = service.getSshKey();
String securityGroup = service.getSecurityGroupName();
DiskImageRecipe recipeTemplate = diskImageRecipe.get();
if (recipeTemplate.getKey() == null) {
// TODO: Something nicer than a UUID
String recipeId = UUID.randomUUID().toString();
recipeTemplate.setKey(PlatformLayerKey.fromId(recipeId));
}
DiskImageRecipe recipe = imageFactory.getOrCreateRecipe(recipeTemplate);
PersistentInstance persistentInstanceTemplate = new PersistentInstance();
persistentInstanceTemplate.setDnsName(dnsName);
persistentInstanceTemplate.setSshPublicKey(SshKeys.serialize(sshKey.getKeyPair().getPublic()));
persistentInstanceTemplate.setSecurityGroup(securityGroup);
persistentInstanceTemplate.setMinimumRam(minimumMemoryMb);
persistentInstanceTemplate.setCloud(cloud);
persistentInstanceTemplate.setHostPolicy(hostPolicy);
persistentInstanceTemplate.setRecipe(recipe.getKey());
String id = dnsName;
if (Strings.isNullOrEmpty(id)) {
id = UUID.randomUUID().toString();
}
persistentInstanceTemplate.setKey(PlatformLayerKey.fromId(id));
for (int publicPort : publicPorts) {
persistentInstanceTemplate.getPublicPorts().add(publicPort);
}
return persistentInstanceTemplate;
}
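The builder stores only the recipe's key on the instance template (setRecipe(recipe.getKey())), so the DiskImageRecipe supplied through diskImageRecipe is normally prepared beforehand. A minimal sketch of such a template, assuming DiskImageRecipe is a plain model class with a no-arg constructor and a setOperatingSystem counterpart to the getOperatingSystem() call used elsewhere on this page (both are assumptions, not confirmed by these snippets):

DiskImageRecipe recipeTemplate = new DiskImageRecipe();  // assumed no-arg constructor
OperatingSystemRecipe os = new OperatingSystemRecipe();
os.setDistribution("debian");
os.setVersion("squeeze");
recipeTemplate.setOperatingSystem(os);                   // assumed setter
// Leaving the key null lets buildPersistentInstanceTemplate() assign a UUID-based key.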
Use of org.platformlayer.images.model.DiskImageRecipe in project platformlayer by platformlayer.
The class OpenstackCloudContext, method createInstance:
public Server createInstance(OpenstackCloud cloud, String serverName, MachineCreationRequest request) throws OpsException {
OpenstackComputeClient computeClient = getComputeClient(cloud);
try {
Image foundImage = null;
CloudBehaviours cloudBehaviours = new CloudBehaviours(cloud);
if (!cloudBehaviours.canUploadImages()) {
// For now, we presume this is the HP cloud and hard-code the name
// if (!cloudBehaviours.isHpCloud()) {
// throw new UnsupportedOperationException();
// }
DiskImageRecipe recipe = null;
if (request.recipeId != null) {
recipe = platformLayerClient.getItem(request.recipeId, DiskImageRecipe.class);
}
OperatingSystemRecipe operatingSystem = null;
if (recipe != null) {
operatingSystem = recipe.getOperatingSystem();
}
log.info("Listing images to pick best image");
Iterable<Image> images = computeClient.root().images().list();
if (cloudBehaviours.isHpCloud()) {
// TODO: We need a better solution here!!
Set<String> imageNames = Sets.newHashSet("Debian Squeeze 6.0.3 Server 64-bit 20120123");
log.warn("Hard coding image name (presuming HP cloud)");
// TODO: Match OS
for (Image image : images) {
if (imageNames.contains(image.getName())) {
foundImage = image;
break;
}
}
} else if (cloudBehaviours.isRackspaceCloud()) {
if (operatingSystem == null) {
operatingSystem = new OperatingSystemRecipe();
operatingSystem.setDistribution("debian");
operatingSystem.setVersion("squeeze");
}
for (Image image : images) {
boolean matchesDistribution = false;
boolean matchesVersion = false;
for (Image.ImageMetadata.ImageMetadataItem item : image.getMetadata()) {
if (item.getKey().equals("os_distro")) {
if (operatingSystem != null && operatingSystem.getDistribution() != null) {
if (Comparisons.equalsIgnoreCase(operatingSystem.getDistribution(), item.getValue())) {
matchesDistribution = true;
}
}
}
if (item.getKey().equals("os_version")) {
if (operatingSystem != null && operatingSystem.getVersion() != null) {
if (Comparisons.equalsIgnoreCase(operatingSystem.getVersion(), item.getValue())) {
matchesVersion = true;
} else if (Comparisons.equalsIgnoreCase(operatingSystem.getDistribution(), "debian")) {
if (Comparisons.equalsIgnoreCase(operatingSystem.getVersion(), "squeeze") && Comparisons.equalsIgnoreCase(item.getValue(), "6")) {
matchesVersion = true;
} else {
matchesVersion = false;
}
} else if (Comparisons.equalsIgnoreCase(operatingSystem.getDistribution(), "ubuntu")) {
if (Comparisons.equalsIgnoreCase(operatingSystem.getVersion(), "lucid") && Comparisons.equalsIgnoreCase(item.getValue(), "10.04LTS")) {
matchesVersion = true;
} else {
matchesVersion = false;
}
} else {
matchesVersion = false;
}
}
}
}
if (matchesDistribution && matchesVersion) {
foundImage = image;
break;
}
}
} else {
for (Image image : images) {
boolean isMatch = true;
for (Image.ImageMetadata.ImageMetadataItem item : image.getMetadata()) {
if (item.getKey().equals(Tag.IMAGE_OS_DISTRIBUTION)) {
if (operatingSystem != null && operatingSystem.getDistribution() != null) {
if (!Comparisons.equalsIgnoreCase(operatingSystem.getDistribution(), item.getValue())) {
isMatch = false;
}
}
}
if (item.getKey().equals(Tag.IMAGE_OS_VERSION)) {
if (operatingSystem != null && operatingSystem.getVersion() != null) {
if (!Comparisons.equalsIgnoreCase(operatingSystem.getVersion(), item.getValue())) {
isMatch = false;
}
}
}
}
if (isMatch) {
foundImage = image;
break;
}
}
}
if (foundImage == null) {
throw new IllegalArgumentException("Could not find image");
}
} else {
List<ImageFormat> formats = Collections.singletonList(ImageFormat.DiskQcow2);
CloudImage image = imageFactory.getOrCreateImageId(cloud, formats, request.recipeId);
String imageId = image.getId();
log.info("Getting image details for image: " + imageId);
foundImage = computeClient.root().images().image(imageId).show();
if (foundImage == null) {
throw new IllegalArgumentException("Could not find image: " + imageId);
}
}
SecurityGroup createdSecurityGroup = null;
if (cloudBehaviours.supportsSecurityGroups()) {
SecurityGroup createTemplate = new SecurityGroup();
createTemplate.setName(SECURITY_GROUP_PREFIX + serverName);
createTemplate.setDescription("Security group for instance: " + serverName);
try {
log.info("Creating security group: " + createTemplate.getName());
createdSecurityGroup = computeClient.root().securityGroups().create(createTemplate);
} catch (OpenstackException e) {
for (SecurityGroup candidate : computeClient.root().securityGroups().list()) {
if (Objects.equal(candidate.getName(), createTemplate.getName())) {
createdSecurityGroup = candidate;
break;
}
}
if (createdSecurityGroup != null) {
// Ignore
log.warn("Ignoring 'security group already exists' error: " + e.getMessage());
} else {
throw new OpsException("Error creating security group", e);
}
}
{
CreateSecurityGroupRuleRequest newRule = new CreateSecurityGroupRuleRequest();
newRule.setCidr("0.0.0.0/0");
newRule.setFromPort(22);
newRule.setToPort(22);
newRule.setIpProtocol("tcp");
newRule.setParentGroupId(createdSecurityGroup.getId());
try {
log.info("Creating security group rule for port: " + newRule.getToPort());
SecurityGroupRule createdRule = computeClient.root().securityGroupRules().create(newRule);
} catch (OpenstackException e) {
String message = e.getMessage();
if (message != null && message.contains("This rule already exists")) {
log.warn("Ignoring 'rule already exists': " + e.getMessage());
} else {
throw new OpsException("Error creating security group access", e);
}
}
}
}
AsyncServerOperation createServerOperation;
{
ServerForCreate create = new ServerForCreate();
create.setName(serverName);
if (request.sshPublicKey != null) {
if (cloudBehaviours.supportsPublicKeys()) {
OpenstackCloudHelpers cloudHelpers = new OpenstackCloudHelpers();
KeyPair keyPair = cloudHelpers.ensurePublicKeyUploaded(computeClient, request.sshPublicKeyName, request.sshPublicKey);
create.setKeyName(keyPair.getName());
} else if (cloudBehaviours.supportsFileInjection()) {
String fileContents = SshKeys.serialize(request.sshPublicKey);
create.addUploadFile("/root/.ssh/authorized_keys", Utf8.getBytes(fileContents));
} else {
throw new OpsException("No supported SSH key mechanism on cloud");
}
}
create.setImageRef(foundImage.getId());
Flavor flavor = getClosestInstanceType(computeClient, request);
if (flavor == null) {
throw new OpsException("Cannot determine instance type for request");
}
create.setFlavorRef(flavor.getId());
if (request.securityGroups != null) {
// TODO: Reimplement if needed
throw new UnsupportedOperationException();
}
if (createdSecurityGroup != null) {
ServerForCreate.SecurityGroup serverSecurityGroup = new ServerForCreate.SecurityGroup();
serverSecurityGroup.setName(createdSecurityGroup.getName());
create.getSecurityGroups().add(serverSecurityGroup);
}
create.setConfigDrive(cloudBehaviours.useConfigDrive());
log.info("Launching new server: " + create.getName());
createServerOperation = computeClient.createServer(create);
}
log.info("Waiting for server to be ready");
Server server = createServerOperation.waitComplete();
Server instanceInfo = null;
String stateName = null;
while (true) {
instanceInfo = getInstanceInfo(computeClient, server.getId());
stateName = instanceInfo.getStatus();
log.info("Instance state: " + stateName);
//
if (stateName.equals("ERROR")) {
break;
}
if (stateName.equals("ACTIVE")) {
break;
}
Thread.sleep(1000);
}
// Even if the machine is in 'error' state, we still want to associate it with us
if (request.tags != null) {
Server newServerInfo = new Server();
Metadata metadata = new Metadata();
for (Tag tag : request.tags) {
Metadata.Item meta = new Metadata.Item();
meta.setKey(tag.getKey());
meta.setValue(tag.getValue());
metadata.getItems().add(meta);
}
newServerInfo.setMetadata(metadata);
log.info("Tagging server: " + server.getId());
computeClient.root().servers().server(server.getId()).update(newServerInfo);
}
return server;
} catch (InterruptedException e) {
ExceptionUtils.handleInterrupted(e);
throw new OpsException("Error building server", e);
} catch (OpenstackException e) {
throw new OpsException("Error building server", e);
}
}
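The Rackspace branch above matches images against the recipe's OperatingSystemRecipe via the os_distro and os_version metadata, with special cases mapping Debian "squeeze" to "6" and Ubuntu "lucid" to "10.04LTS". A sketch of that logic pulled into a single helper; the method name and its extraction are illustrative, not part of the original class:

// Illustrative helper: same matching rules as the Rackspace branch in createInstance.
private boolean matchesOperatingSystem(Image image, OperatingSystemRecipe os) {
    if (os == null) {
        return false;
    }
    boolean matchesDistribution = false;
    boolean matchesVersion = false;
    for (Image.ImageMetadata.ImageMetadataItem item : image.getMetadata()) {
        if (item.getKey().equals("os_distro") && os.getDistribution() != null
                && Comparisons.equalsIgnoreCase(os.getDistribution(), item.getValue())) {
            matchesDistribution = true;
        }
        if (item.getKey().equals("os_version") && os.getVersion() != null) {
            if (Comparisons.equalsIgnoreCase(os.getVersion(), item.getValue())) {
                matchesVersion = true;
            } else if (Comparisons.equalsIgnoreCase(os.getDistribution(), "debian")) {
                // Rackspace reports Debian squeeze as "6"
                matchesVersion = Comparisons.equalsIgnoreCase(os.getVersion(), "squeeze")
                        && Comparisons.equalsIgnoreCase(item.getValue(), "6");
            } else if (Comparisons.equalsIgnoreCase(os.getDistribution(), "ubuntu")) {
                // Rackspace reports Ubuntu lucid as "10.04LTS"
                matchesVersion = Comparisons.equalsIgnoreCase(os.getVersion(), "lucid")
                        && Comparisons.equalsIgnoreCase(item.getValue(), "10.04LTS");
            }
        }
    }
    return matchesDistribution && matchesVersion;
}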
Use of org.platformlayer.images.model.DiskImageRecipe in project platformlayer by platformlayer.
The class GoogleComputeClient, method createInstance:
public Instance createInstance(GoogleCloud cloud, MachineCreationRequest request, PublicKey sshPublicKey) throws OpsException {
try {
Image foundImage = null;
{
DiskImageRecipe recipe = null;
if (request.recipeId != null) {
recipe = platformLayerClient.getItem(request.recipeId, DiskImageRecipe.class);
}
OperatingSystemRecipe operatingSystem = null;
if (recipe != null) {
operatingSystem = recipe.getOperatingSystem();
}
log.info("Listing images to pick best image");
Iterable<Image> images = listImages(PROJECTID_GOOGLE);
// TODO: We need a better solution here!!
log.warn("Hard coding image names");
Set<String> imageNames = Sets.newHashSet("ubuntu-12-04-v20120621");
for (Image image : images) {
if (imageNames.contains(image.getName())) {
foundImage = image;
break;
}
}
if (foundImage == null) {
throw new IllegalArgumentException("Could not find image");
}
}
// GCE requires that the name comply with RFC1035, which I think means a valid DNS
// For now, just use a UUID, with a pl- prefix so it doesn't start with a number
// TODO: Fix this!
String instanceName = "pl-" + UUID.randomUUID().toString();
Operation createServerOperation;
{
Instance create = new Instance();
create.setName(instanceName);
create.setZone(buildZoneUrl(projectId, ZONE_US_CENTRAL1_A));
{
NetworkInterface networkInterface = new NetworkInterface();
networkInterface.setNetwork(buildNetworkUrl(projectId, "default"));
AccessConfig networkAccessConfig = new AccessConfig();
networkAccessConfig.setType("ONE_TO_ONE_NAT");
networkInterface.setAccessConfigs(Lists.newArrayList(networkAccessConfig));
create.setNetworkInterfaces(Lists.newArrayList(networkInterface));
}
Metadata metadata = new Metadata();
metadata.setItems(Lists.<Items>newArrayList());
create.setMetadata(metadata);
if (request.tags != null) {
for (Tag tag : request.tags) {
Metadata.Items meta = new Metadata.Items();
meta.setKey(tag.getKey());
meta.setValue(tag.getValue());
metadata.getItems().add(meta);
}
}
if (request.sshPublicKey != null) {
Metadata.Items meta = new Metadata.Items();
meta.setKey("sshKeys");
meta.setValue(USER_NAME + ":" + OpenSshUtils.serialize(sshPublicKey));
metadata.getItems().add(meta);
}
create.setImage(foundImage.getSelfLink());
MachineType flavor = getClosestInstanceType(request);
if (flavor == null) {
throw new OpsException("Cannot determine machine type for request");
}
create.setMachineType(flavor.getSelfLink());
if (request.securityGroups != null) {
// TODO: Reimplement if needed
throw new UnsupportedOperationException();
}
// if (createdSecurityGroup != null) {
// ServerForCreate.SecurityGroup serverSecurityGroup = new ServerForCreate.SecurityGroup();
// serverSecurityGroup.setName(createdSecurityGroup.getName());
// create.getSecurityGroups().add(serverSecurityGroup);
// }
// create.setConfigDrive(cloudBehaviours.useConfigDrive());
log.info("Launching new server: " + instanceName);
try {
createServerOperation = compute.instances().insert(projectId, create).execute();
} catch (IOException e) {
throw new OpsException("Error launching new instance", e);
}
}
log.info("Waiting for server to be ready");
createServerOperation = waitComplete(createServerOperation, 10, TimeUnit.MINUTES);
Instance created;
InstanceState state = null;
while (true) {
created = findInstanceByName(instanceName);
state = InstanceState.get(created);
log.info("Instance state: " + state);
if (state.isRunning()) {
break;
}
Thread.sleep(1000);
}
return created;
} catch (InterruptedException e) {
ExceptionUtils.handleInterrupted(e);
throw new OpsException("Error building server", e);
} catch (TimeoutException e) {
throw new OpsException("Timeout waiting for server build", e);
}
}
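Both the OpenStack and Google createInstance methods repeat the same lookup: if the request carries a recipeId, fetch the DiskImageRecipe and read its OperatingSystemRecipe. A sketch of that step as a shared helper, assuming recipeId is a PlatformLayerKey (as the getItem calls suggest); the helper itself is not part of the original code:

// Illustrative helper: resolves the requested OS, or null when no recipe is set.
private OperatingSystemRecipe findRequestedOperatingSystem(PlatformLayerKey recipeId) throws OpsException {
    if (recipeId == null) {
        return null;
    }
    DiskImageRecipe recipe = platformLayerClient.getItem(recipeId, DiskImageRecipe.class);
    return recipe != null ? recipe.getOperatingSystem() : null;
}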
Use of org.platformlayer.images.model.DiskImageRecipe in project platformlayer by platformlayer.
The class DiskImageController, method buildImage:
public void buildImage(DiskImage image) throws OpsException, IOException {
// Assume the worst...
opsContext.setFailure(true);
MachineProvider targetCloud = cloudHelpers.getCloud(image.cloud);
DiskImageRecipe recipe = platformLayer.getItem(image.recipeId, DiskImageRecipe.class);
OperatingSystem operatingSystem = getRequestedOperatingSystem(recipe);
String kernelPackage = packageHelpers.getDefaultKernelPackage(operatingSystem);
String filesystem = "ext3";
ImageFormat imageFormat = EnumUtils.valueOfCaseInsensitive(ImageFormat.class, image.format);
boolean buildTar = imageFormat == ImageFormat.Tar;
// TODO: This logic is not intrinsically correct
// boolean supportCloudConfigDisk = imageFormat != ImageFormat.DiskQcow2;
boolean supportCloudConfigDisk = true;
boolean useConfigDriveSymlinks = false;
String configDriveLabel = "config";
List<String> packages = Lists.newArrayList();
packages.add("openssh-server");
// Needed for preseeding
packages.add("debconf-utils");
if (operatingSystem.getDistribution() == Distribution.Debian) {
packages.add("locales");
}
// We always want some basics available in our images
packages.add("curl");
String hostname = "openstack";
MachineCreationRequest request = new MachineCreationRequest();
SshKey sshKey = service.getSshKey();
// There are problems using LXC with debootstrap
request.hostPolicy = new HostPolicy();
request.hostPolicy.allowRunInContainer = false;
// Null means 'use bootstrap image'
request.recipeId = null;
request.sshPublicKey = sshKey.getKeyPair().getPublic();
request.sshPublicKeyName = service.getSshKeyName();
request.securityGroups = Lists.newArrayList();
String securityGroup = service.getSecurityGroupName();
request.securityGroups.add(securityGroup);
// We don't need a lot of memory to build a disk image (I think!)
request.minimumMemoryMB = 256;
Machine machine = cloud.createInstance(request, image.getKey());
opsContext.takeOwnership(machine);
machine = waitForAddress(machine);
OpsTarget target = machine.getTarget(sshKey);
waitForTarget(target);
File tempDir = target.createTempDir();
apt.update(target, true);
// We need to install curl first so we can detect the performance of our proxies
// Needed for proxy testing at least
apt.install(target, "curl");
CommandEnvironment httpProxyEnv = httpProxies.getHttpProxyEnvironment(target, Usage.SoftwarePackages, null);
// For now, we assume that this image doesn't have debootstrap pre-installed
apt.install(target, "debootstrap");
// For transferring the file to a direct image server
// debootstrap with LXC seems to have serious problems...
boolean supportLxc = false;
if (supportLxc) {
apt.install(target, "fakechroot", "fakeroot");
}
Command command;
File rootfsDir;
File imageFile;
File loopbackPartition = null;
if (!buildTar) {
apt.install(target, "mbr");
apt.install(target, "parted");
apt.install(target, "kpartx");
apt.install(target, "extlinux");
// Same with qemu-kvm
// (needed for qemu-img convert ... a lot of extra stuff for just the
// utils!)
// packageHelpers.getPackageFor("qemu-img", operatingSystem);
String qemuImgPackage = "qemu-utils";
apt.install(target, qemuImgPackage);
// Use local ephemeral storage...
imageFile = new File(tempDir, "image.raw");
command = Command.build("dd if=/dev/null bs=1M seek=8180 of={0}", imageFile);
target.executeCommand(command);
// Create partitions
target.executeCommand(Command.build("parted -s {0} mklabel msdos", imageFile));
target.executeCommand(Command.build("parted -s {0} mkpart primary 0% 100%", imageFile));
target.executeCommand(Command.build("parted -s {0} set 1 boot on", imageFile));
// Install Master Boot Record
target.executeCommand(Command.build("install-mbr {0}", imageFile));
// Mount the partitions
// Hopefully it’s loop0p1...
target.executeCommand(Command.build("modprobe dm-mod"));
// boolean isMounted = false;
//
// {
// ProcessExecution mountExecution = target.executeCommand(Command.build("mount", imageFile));
// String stdout = mountExecution.getStdOut();
// System.out.println(stdout);
//
// for (String line : Splitter.on('\n').split(stdout)) {
// line = line.trim();
// if (line.isEmpty()) {
// continue;
// }
//
// List<String> tokens = Lists.newArrayList(Splitter.on(' ').split(line));
// if (tokens.size() < 3) {
// throw new IllegalStateException("Cannot parse mount line: " + line);
// }
//
// String mountDir = tokens.get(2);
// if (mountDir.equals(mntDir.getAbsolutePath())) {
// isMounted = true;
// loopbackPartition = new File(tokens.get(0));
// break;
// }
// }
//
// // /dev/sda1 on / type ext4 (rw,errors=remount-ro)
// // tmpfs on /lib/init/rw type tmpfs (rw,nosuid,mode=0755)
// // proc on /proc type proc (rw,noexec,nosuid,nodev)
// // sysfs on /sys type sysfs (rw,noexec,nosuid,nodev)
// // udev on /dev type tmpfs (rw,mode=0755)
// // tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev)
// // devpts on /dev/pts type devpts (rw,noexec,nosuid,gid=5,mode=620)
// // /dev/mapper/loop0p1 on /tmp/8389210e66cd0df6/mnt type ext3 (rw)
// // proc on /tmp/8389210e66cd0df6/mnt/proc type proc (rw)
// }
//
// if (!isMounted)
{
ProcessExecution kpartxExecution = target.executeCommand(Command.build("kpartx -av {0}", imageFile));
String stdout = kpartxExecution.getStdOut();
List<String> tokens = Lists.newArrayList(Splitter.on(' ').split(stdout));
if (tokens.size() != 9) {
throw new IllegalStateException("Cannot parse kpartx stdout: " + stdout);
}
// add map loop6p1 (253:6): 0 16750592 linear /dev/loop6 2048
String partitionDevice = tokens.get(2);
if (!partitionDevice.startsWith("loop")) {
throw new IllegalStateException("kpartx output does not look like a partition: " + stdout);
}
loopbackPartition = new File("/dev/mapper/" + partitionDevice);
}
// Format filesystem
command = Command.build("yes | mkfs." + filesystem + " {0}", loopbackPartition);
command.setTimeout(TimeSpan.FIVE_MINUTES);
target.executeCommand(command);
// Get this onto disk now, so we don't delay later commands
target.executeCommand(Command.build("sync").setTimeout(TimeSpan.FIVE_MINUTES));
// Don’t force a check based on dates
target.executeCommand(Command.build("tune2fs -i 0 {0}", loopbackPartition).setTimeout(TimeSpan.FIVE_MINUTES));
// Get this onto disk now, so we don't delay later commands
target.executeCommand(Command.build("sync").setTimeout(TimeSpan.FIVE_MINUTES));
// Mount on mnt/
File mntDir = new File(tempDir, "mnt");
target.executeCommand("mkdir {0}", mntDir);
target.executeCommand(Command.build("mount {0} {1}", loopbackPartition, mntDir).setTimeout(TimeSpan.FIVE_MINUTES));
rootfsDir = mntDir;
} else {
rootfsDir = new File(tempDir, "rootfs");
imageFile = new File(tempDir, "image.tar.bz2");
}
if (buildTar) {
apt.install(target, "bzip2");
}
if (supportLxc) {
command = Command.build("fakechroot fakeroot debootstrap");
} else {
command = Command.build("debootstrap");
}
command.addLiteral("--verbose");
command.addLiteral("--resolve-deps");
if (supportLxc) {
// Lxc has problems with mounting etc; fakechroot avoids this
command.addLiteral("--variant=fakechroot");
// command.addLiteral("--variant=minbase");
}
command.addQuoted("--include=", Joiner.on(",").join(packages));
command.addLiteral(operatingSystem.getVersion());
command.addFile(rootfsDir);
// command.addQuoted(aptSource);
command.setEnvironment(httpProxyEnv);
command.setTimeout(TimeSpan.THIRTY_MINUTES);
try {
target.executeCommand(command);
} catch (ProcessExecutionException e) {
String debootstrapLog = target.readTextFile(new File(rootfsDir, "debootstrap/debootstrap.log"));
log.warn("Debootstrap log: " + debootstrapLog);
throw new OpsException("Error running debootstrap", e);
}
// TODO: Switch to ChrootOpsTarget, so we can move this stuff into utility functions
ChrootOpsTarget chrootTarget = new ChrootOpsTarget(rootfsDir, new File("/tmp"), target);
FileUpload.upload(target, new File(rootfsDir, "etc/hostname"), hostname);
{
// Stop services being started in the chroot
String policy = ResourceUtils.get(getClass(), "usr.sbin.policy-rc.d");
File policyFile = new File(rootfsDir, "usr/sbin/policy-rc.d");
FileUpload.upload(target, policyFile, policy);
target.chmod(policyFile, "755");
}
target.executeCommand("mount -t proc proc {0}", new File(rootfsDir, "proc"));
apt.update(chrootTarget, true);
target.executeCommand("chroot {0} locale-gen en_US.utf8", rootfsDir);
target.executeCommand("chroot {0} /bin/bash -c \"DEBIAN_FRONTEND=noninteractive dpkg-reconfigure locales\"", rootfsDir);
if (!buildTar) {
{
File kernelImgConf = new File(rootfsDir, "etc/kernel-img.conf");
String preseedData = ResourceUtils.get(getClass(), "kernel-img.conf");
FileUpload.upload(target, kernelImgConf, preseedData);
}
{
File preseedTmpDir = target.createTempDir();
File preseedFile = new File(preseedTmpDir, "kernel.preseed");
String preseedData = ResourceUtils.get(getClass(), "kernel.preseed");
FileUpload.upload(target, preseedFile, preseedData);
target.executeCommand(Command.build("cat {0} | chroot {1} debconf-set-selections", preseedFile, rootfsDir));
apt.install(chrootTarget, kernelPackage);
}
}
preconfigurePackages(chrootTarget, recipe.configurePackage);
if (recipe.repositoryKey != null) {
addRepositoryKeys(chrootTarget, recipe.repositoryKey);
}
if (recipe.repository != null) {
addRepositories(chrootTarget, recipe.repository);
apt.update(chrootTarget, true);
}
if (recipe.addPackage != null) {
apt.install(chrootTarget, recipe.addPackage);
if (recipe.addPackage.contains("jenkins")) {
// It looks like jenkins doesn't honor policy-rc.d (?)
// TODO: Fix this monstrosity...
log.warn("Hard-coding service stop after jenkins installation");
target.executeCommand(Command.build("chroot {0} /etc/init.d/jenkins stop", rootfsDir));
}
}
apt.upgrade(chrootTarget);
apt.clean(chrootTarget);
if (!buildTar) {
String uuid;
{
ProcessExecution uuidExecution = target.executeCommand("blkid -o value -s UUID {0}", loopbackPartition);
uuid = uuidExecution.getStdOut().trim();
}
// Set up /etc/fstab
String fstab = "# /etc/fstab: static file system information.\n";
// TODO: Swap
fstab += "proc\t/proc\tproc\tnodev,noexec,nosuid\t0\t0\n";
// fstab += "/dev/sda1\t/\t" + filesystem +
// "\terrors=remount-ro\t0\t1\n";
fstab += String.format("UUID=%s\t/\t%s\terrors=remount-ro\t0\t1\n", uuid, filesystem);
if (supportCloudConfigDisk) {
if (useConfigDriveSymlinks) {
// Use configuration from cloud_config mount
target.mkdir(new File(rootfsDir, "media/config"));
fstab += "/dev/disk/by-label/" + configDriveLabel + "\t/media/config\tudf,iso9660\tro\t0\t0\n";
}
}
FileUpload.upload(target, new File(rootfsDir, "etc/fstab"), fstab);
log.info("fstab = " + fstab);
// Set up extlinux
{
ProcessExecution kernelExecution = target.executeCommand("chroot {0} find boot/ -name \"vmlinuz-*\"", rootfsDir);
List<String> kernels = Lists.newArrayList();
for (String kernel : kernelExecution.getStdOut().split("\n")) {
kernel = kernel.trim();
if (kernel.isEmpty()) {
continue;
}
kernels.add(kernel);
}
if (kernels.size() > 1) {
throw new IllegalStateException("Multiple kernels found");
} else if (kernels.size() != 1) {
throw new IllegalStateException("No kernels found");
}
ProcessExecution initrdExecution = target.executeCommand("chroot {0} find boot/ -name \"initrd*\"", rootfsDir);
List<String> initrds = Lists.newArrayList();
for (String initrd : initrdExecution.getStdOut().split("\n")) {
initrd = initrd.trim();
if (initrd.isEmpty()) {
continue;
}
if (initrd.endsWith(".bak")) {
continue;
}
initrds.add(initrd);
}
if (initrds.size() > 1) {
throw new IllegalStateException("Multiple initrds found");
} else if (initrds.size() != 1) {
throw new IllegalStateException("No initrds found");
}
String conf = String.format("default linux\ntimeout 1\n\nlabel linux\nkernel %s\nappend initrd=%s root=UUID=%s ro quiet", kernels.get(0), initrds.get(0), uuid);
FileUpload.upload(target, new File(rootfsDir, "extlinux.conf"), conf);
log.info("extlinux.conf = " + conf);
}
target.executeCommand(Command.build("extlinux --install {0}", rootfsDir).setTimeout(TimeSpan.FIVE_MINUTES));
}
if (supportCloudConfigDisk) {
if (useConfigDriveSymlinks) {
target.rm(new File(rootfsDir, "etc/network/interfaces"));
target.executeCommand("ln -s /media/config/etc/network/interfaces {0}", new File(rootfsDir, "etc/network/interfaces"));
target.mkdir(new File(rootfsDir, "root/.ssh"));
target.executeCommand("ln -s /media/config/root/.ssh/authorized_keys {0}", new File(rootfsDir, "root/.ssh/authorized_keys"));
} else {
String initScript = ResourceUtils.get(getClass(), "openstack-config");
File initScriptFile = new File(rootfsDir, "etc/init.d/openstack-config");
FileUpload.upload(target, initScriptFile, initScript);
target.executeCommand("chmod +x {0}", initScriptFile);
chrootTarget.executeCommand("/usr/sbin/update-rc.d openstack-config defaults");
}
}
{
// Remove policy file
File policyFile = new File(rootfsDir, "usr/sbin/policy-rc.d");
target.rm(policyFile);
}
target.executeCommand("sync");
target.executeCommand("umount {0}", new File(rootfsDir, "proc"));
if (!buildTar) {
target.executeCommand("sync");
target.executeCommand("umount {0}", rootfsDir);
target.executeCommand("sync");
target.executeCommand("kpartx -d {0}", imageFile);
target.executeCommand("sync");
}
if (buildTar) {
Command compress = Command.build("cd {0}; tar jcf {1} .", rootfsDir, imageFile);
target.executeCommand(compress.setTimeout(TimeSpan.FIFTEEN_MINUTES));
}
FilesystemInfo imageInfo = target.getFilesystemInfoFile(imageFile);
File uploadImageFile;
if (!buildTar) {
boolean isQcow2 = imageFormat == ImageFormat.DiskQcow2;
if (isQcow2) {
// We create the image as a raw image (making use of sparse files)
// and then convert it to qcow2. This is a little less efficient, but
// has a few advantages...
// 1) We can support different formats
// 2) The final image is defragmented
// 3) Mounting a qcow2 image (or other image formats) is tricky vs
// loopback mount
uploadImageFile = new File(imageFile.getParentFile(), "image.qcow2");
command = Command.build("qemu-img convert -f raw -O qcow2 {0} {1}", imageFile, uploadImageFile);
command.setTimeout(TimeSpan.THIRTY_MINUTES);
target.executeCommand(command);
} else {
uploadImageFile = new File(imageFile.getParentFile(), "image.raw.gz");
command = Command.build("gzip -c --best {0} > {1}", imageFile, uploadImageFile);
command.setTimeout(TimeSpan.THIRTY_MINUTES);
target.executeCommand(command);
}
} else {
uploadImageFile = imageFile;
}
String imageId;
// Upload & tag the image with the recipe ID
{
Tags tags = new Tags();
tags.add(Tag.buildParentTag(recipe.getKey()));
tags.add(imageFormat.toTag());
imageId = cloud.getImageStore(targetCloud).uploadImage(target, tags, uploadImageFile, imageInfo.size);
}
// Tag the recipe with the image ID
{
TagChanges tagChanges = new TagChanges();
tagChanges.addTags.add(Tag.IMAGE_ID.build(imageId));
platformLayer.changeTags(image.getKey(), tagChanges);
}
// Our pessimism proved unfounded...
opsContext.setFailure(false);
}
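buildImage ends by linking the artifacts in both directions through tags: the uploaded image carries a parent tag pointing at the DiskImageRecipe plus a format tag, and the DiskImage item is then tagged with the resulting image id. A condensed restatement of that round trip, using only the calls that appear above:

// Upload the image, tagged with its source recipe and format...
Tags imageTags = new Tags();
imageTags.add(Tag.buildParentTag(recipe.getKey()));
imageTags.add(imageFormat.toTag());
String imageId = cloud.getImageStore(targetCloud).uploadImage(target, imageTags, uploadImageFile, imageInfo.size);
// ...then record the image id back on the DiskImage item.
TagChanges tagChanges = new TagChanges();
tagChanges.addTags.add(Tag.IMAGE_ID.build(imageId));
platformLayer.changeTags(image.getKey(), tagChanges);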
Use of org.platformlayer.images.model.DiskImageRecipe in project platformlayer by platformlayer.
The class DiskImageController, method build:
@Handler
public void build(DiskImage image) throws OpsException, IOException {
String imageId = Tag.IMAGE_ID.findUnique(image.getTags());
if (imageId == null) {
// Check for existing image
MachineProvider targetCloud = cloudHelpers.getCloud(image.cloud);
DiskImageRecipe recipe = platformLayer.getItem(image.recipeId, DiskImageRecipe.class);
ImageStore imageStore = cloud.getImageStore(targetCloud);
List<CloudImage> existingImages = imageStore.findImages(Collections.<Tag>emptyList());
for (CloudImage existingImage : existingImages) {
// TODO: Fetch the parent, fetch the description, see if it's a match??
log.info("Image found, but not know whether we can re-use: " + existingImage);
}
}
if (imageId == null) {
buildImage(image);
}
}
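The loop above only logs candidate images; the TODO notes that it should check whether an existing image was built from the same recipe. A hedged sketch of one way to do that, reusing the parent tag written during upload in buildImage and assuming ImageStore.findImages filters by the tags it is given (the empty-list call above suggests, but does not confirm, that behaviour):

// Sketch only: assumes findImages(tags) returns images carrying all of the given tags.
List<Tag> filter = Collections.singletonList(Tag.buildParentTag(recipe.getKey()));
List<CloudImage> builtFromRecipe = imageStore.findImages(filter);
if (!builtFromRecipe.isEmpty()) {
    CloudImage existing = builtFromRecipe.get(0);
    log.info("Re-using image built from the same recipe: " + existing);
    // e.g. tag the DiskImage with the existing id so buildImage() is skipped:
    // platformLayer.changeTags(image.getKey(), TagChanges with Tag.IMAGE_ID.build(existing.getId()));
}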