Example usage of com.microsoft.azure.management.resources.ResourceGroup in the azure-sdk-for-java project (Azure), taken from the class VirtualMachineManagedDiskOperationsTests, method canUpdateVirtualMachineByAddingAndRemovingManagedDisks:
@Test
public void canUpdateVirtualMachineByAddingAndRemovingManagedDisks() {
    final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
    final String uname = "juser";
    final String password = "123tEst!@|ac";
    // Create a VM with a mix of implicitly created (CreateOption: EMPTY) and
    // explicitly created (CreateOption: ATTACH) managed data disks, then verify
    // that update() can remove one disk and add another.
    final String vmName1 = "myvm1";
    final String explicitlyCreatedEmptyDiskName1 = generateRandomResourceName(vmName1 + "_mdisk_", 25);
    final String explicitlyCreatedEmptyDiskName2 = generateRandomResourceName(vmName1 + "_mdisk_", 25);
    final String explicitlyCreatedEmptyDiskName3 = generateRandomResourceName(vmName1 + "_mdisk_", 25);

    ResourceGroup resourceGroup = resourceManager.resourceGroups()
            .define(RG_NAME)
            .withRegion(region)
            .create();

    // Three 150 GB empty data-disk definitions; creation is deferred until the
    // VM create() call resolves the Creatable graph.
    Creatable<Disk> creatableEmptyDisk1 = computeManager.disks()
            .define(explicitlyCreatedEmptyDiskName1)
            .withRegion(region)
            .withExistingResourceGroup(resourceGroup)
            .withData()
            .withSizeInGB(150);
    Creatable<Disk> creatableEmptyDisk2 = computeManager.disks()
            .define(explicitlyCreatedEmptyDiskName2)
            .withRegion(region)
            .withExistingResourceGroup(resourceGroup)
            .withData()
            .withSizeInGB(150);
    Creatable<Disk> creatableEmptyDisk3 = computeManager.disks()
            .define(explicitlyCreatedEmptyDiskName3)
            .withRegion(region)
            .withExistingResourceGroup(resourceGroup)
            .withData()
            .withSizeInGB(150);

    VirtualMachine virtualMachine1 = computeManager.virtualMachines()
            .define(vmName1)
            .withRegion(region)
            .withExistingResourceGroup(resourceGroup)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(publicIpDnsLabel)
            .withPopularLinuxImage(linuxImage)
            .withRootUsername(uname)
            .withRootPassword(password)
            .withNewDataDisk(100)                                        // CreateOption: EMPTY, implicit LUN
            .withNewDataDisk(100, 1, CachingTypes.READ_WRITE)            // CreateOption: EMPTY
            .withNewDataDisk(creatableEmptyDisk1)                        // CreateOption: ATTACH, implicit LUN
            .withNewDataDisk(creatableEmptyDisk2, 2, CachingTypes.NONE)  // CreateOption: ATTACH
            .withNewDataDisk(creatableEmptyDisk3, 3, CachingTypes.NONE)  // CreateOption: ATTACH
            .withDataDiskDefaultCachingType(CachingTypes.READ_ONLY)
            .withDataDiskDefaultStorageAccountType(StorageAccountTypes.STANDARD_LRS)
            .withSize(VirtualMachineSizeTypes.STANDARD_D5_V2)
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .create();

    // Remove the disk at LUN 1 and add a new 100 GB empty disk at LUN 6.
    virtualMachine1.update()
            .withoutDataDisk(1)
            .withNewDataDisk(100, 6, CachingTypes.READ_WRITE)  // CreateOption: EMPTY
            .apply();

    Map<Integer, VirtualMachineDataDisk> dataDisks = virtualMachine1.dataDisks();
    Assert.assertNotNull(dataDisks);
    // Removed one disk and added another, so the total stays at 5.
    // Note: JUnit convention is assertEquals(expected, actual).
    Assert.assertEquals(5, dataDisks.size());
    Assert.assertTrue(dataDisks.containsKey(6));
    Assert.assertFalse(dataDisks.containsKey(1));
}
Example usage of com.microsoft.azure.management.resources.ResourceGroup in the azure-sdk-for-java project (Azure), taken from the class VirtualMachineManagedDiskOperationsTests, method canCreateUpdateVirtualMachineWithEmptyManagedDataDisks:
@Test
public void canCreateUpdateVirtualMachineWithEmptyManagedDataDisks() {
    final String publicIpDnsLabel = generateRandomResourceName("pip", 20);
    final String uname = "juser";
    final String password = "123tEst!@|ac";
    // Create a VM with implicit (CreateOption: EMPTY) and explicit
    // (CreateOption: ATTACH) managed data disks, then check the per-disk
    // caching overrides and the VM-level defaults for the rest.
    final String vmName1 = "myvm1";
    final String explicitlyCreatedEmptyDiskName1 = generateRandomResourceName(vmName1 + "_mdisk_", 25);
    final String explicitlyCreatedEmptyDiskName2 = generateRandomResourceName(vmName1 + "_mdisk_", 25);
    final String explicitlyCreatedEmptyDiskName3 = generateRandomResourceName(vmName1 + "_mdisk_", 25);

    ResourceGroup resourceGroup = resourceManager.resourceGroups()
            .define(RG_NAME)
            .withRegion(region)
            .create();

    // Three 150 GB empty data-disk definitions, created alongside the VM.
    Creatable<Disk> creatableEmptyDisk1 = computeManager.disks()
            .define(explicitlyCreatedEmptyDiskName1)
            .withRegion(region)
            .withExistingResourceGroup(resourceGroup)
            .withData()
            .withSizeInGB(150);
    Creatable<Disk> creatableEmptyDisk2 = computeManager.disks()
            .define(explicitlyCreatedEmptyDiskName2)
            .withRegion(region)
            .withExistingResourceGroup(resourceGroup)
            .withData()
            .withSizeInGB(150);
    Creatable<Disk> creatableEmptyDisk3 = computeManager.disks()
            .define(explicitlyCreatedEmptyDiskName3)
            .withRegion(region)
            .withExistingResourceGroup(resourceGroup)
            .withData()
            .withSizeInGB(150);

    VirtualMachine virtualMachine = computeManager.virtualMachines()
            .define(vmName1)
            .withRegion(region)
            .withExistingResourceGroup(resourceGroup)
            .withNewPrimaryNetwork("10.0.0.0/28")
            .withPrimaryPrivateIPAddressDynamic()
            .withNewPrimaryPublicIPAddress(publicIpDnsLabel)
            .withPopularLinuxImage(linuxImage)
            .withRootUsername(uname)
            .withRootPassword(password)
            .withNewDataDisk(100)                                        // CreateOption: EMPTY, implicit LUN
            .withNewDataDisk(100, 1, CachingTypes.READ_ONLY)             // CreateOption: EMPTY
            .withNewDataDisk(creatableEmptyDisk1)                        // CreateOption: ATTACH, implicit LUN
            .withNewDataDisk(creatableEmptyDisk2, 2, CachingTypes.NONE)  // CreateOption: ATTACH
            .withNewDataDisk(creatableEmptyDisk3, 3, CachingTypes.NONE)  // CreateOption: ATTACH
            .withSize(VirtualMachineSizeTypes.STANDARD_D5_V2)
            .withOSDiskCaching(CachingTypes.READ_WRITE)
            .create();

    Assert.assertTrue(virtualMachine.isManagedDiskEnabled());
    // There should not be any un-managed data disks.
    // Note: JUnit convention is assertEquals(expected, actual); the original
    // calls had the arguments swapped, which garbles failure messages.
    Assert.assertNotNull(virtualMachine.unmanagedDataDisks());
    Assert.assertEquals(0, virtualMachine.unmanagedDataDisks().size());

    // Validate the managed data disks.
    Map<Integer, VirtualMachineDataDisk> dataDisks = virtualMachine.dataDisks();
    Assert.assertNotNull(dataDisks);
    Assert.assertEquals(5, dataDisks.size());

    Assert.assertTrue(dataDisks.containsKey(1));
    VirtualMachineDataDisk dataDiskLun1 = dataDisks.get(1);
    Assert.assertNotNull(dataDiskLun1.id());
    Assert.assertEquals(CachingTypes.READ_ONLY, dataDiskLun1.cachingType());
    Assert.assertEquals(100, dataDiskLun1.size());

    Assert.assertTrue(dataDisks.containsKey(2));
    VirtualMachineDataDisk dataDiskLun2 = dataDisks.get(2);
    Assert.assertNotNull(dataDiskLun2.id());
    Assert.assertEquals(CachingTypes.NONE, dataDiskLun2.cachingType());
    Assert.assertEquals(150, dataDiskLun2.size());

    Assert.assertTrue(dataDisks.containsKey(3));
    VirtualMachineDataDisk dataDiskLun3 = dataDisks.get(3);
    Assert.assertNotNull(dataDiskLun3.id());
    Assert.assertEquals(CachingTypes.NONE, dataDiskLun3.cachingType());
    Assert.assertEquals(150, dataDiskLun3.size());

    // Disks without explicit settings must have picked up the VM-level defaults.
    for (VirtualMachineDataDisk dataDisk : dataDisks.values()) {
        if (dataDisk.lun() != 1 && dataDisk.lun() != 2 && dataDisk.lun() != 3) {
            Assert.assertEquals(CachingTypes.READ_WRITE, dataDisk.cachingType());
            Assert.assertEquals(StorageAccountTypes.STANDARD_LRS, dataDisk.storageAccountType());
        }
    }

    // Updating and adding disk as part of VM Update seems consistency failing, CRP is aware of
    // this, hence until it is fixed comment-out the test
    //
    // {
    //   "startTime": "2017-01-26T05:48:59.9290573+00:00",
    //   "endTime": "2017-01-26T05:49:02.2884052+00:00",
    //   "status": "Failed",
    //   "error": {
    //     "code": "InternalExecutionError",
    //     "message": "An internal execution error occurred."
    //   },
    //   "name": "bc8072a7-38bb-445b-ae59-f16cf125342c"
    // }
    //
    // virtualMachine.deallocate();
    //
    // virtualMachine.update()
    //         .withDataDiskUpdated(1, 200)
    //         .withDataDiskUpdated(2, 200, CachingTypes.READ_WRITE)
    //         .withNewDataDisk(60)
    //         .apply();
    //
    // Assert.assertTrue(virtualMachine.isManagedDiskEnabled());
    // // There should not be any un-managed data disks
    // //
    // Assert.assertNotNull(virtualMachine.unmanagedDataDisks());
    // Assert.assertEquals(virtualMachine.unmanagedDataDisks().size(), 0);
    //
    // // Validate the managed data disks
    // //
    // dataDisks = virtualMachine.dataDisks();
    // Assert.assertNotNull(dataDisks);
    // Assert.assertTrue(dataDisks.size() == 6);
    // Assert.assertTrue(dataDisks.containsKey(1));
    // dataDiskLun1 = dataDisks.get(1);
    // Assert.assertNotNull(dataDiskLun1.id());
    // Assert.assertEquals(dataDiskLun1.cachingType(), CachingTypes.READ_ONLY);
    // Assert.assertEquals(dataDiskLun1.size(), 200); // 100 -> 200
    //
    // Assert.assertTrue(dataDisks.containsKey(2));
    // dataDiskLun2 = dataDisks.get(2);
    // Assert.assertNotNull(dataDiskLun2.id());
    // Assert.assertEquals(dataDiskLun2.cachingType(), CachingTypes.READ_WRITE); // NONE -> READ_WRITE
    // Assert.assertEquals(dataDiskLun2.size(), 200); // 150 -> 200
    //
    // Assert.assertTrue(dataDisks.containsKey(3));
    // dataDiskLun3 = dataDisks.get(3);
    // Assert.assertNotNull(dataDiskLun3.id());
    // Assert.assertEquals(dataDiskLun3.cachingType(), CachingTypes.NONE);
    // Assert.assertEquals(dataDiskLun3.size(), 150);
    //
    // // Ensure defaults of other disks are not affected
    // for (VirtualMachineDataDisk dataDisk : dataDisks.values()) {
    //     if (dataDisk.lun() != 1 && dataDisk.lun() != 3) {
    //         Assert.assertEquals(dataDisk.cachingType(), CachingTypes.READ_WRITE);
    //         Assert.assertEquals(dataDisk.storageAccountType(), StorageAccountTypes.STANDARD_LRS);
    //     }
    // }
}
Example usage of com.microsoft.azure.management.resources.ResourceGroup in the azure-sdk-for-java project (Azure), taken from the class ManagedDiskOperationsTests, method canOperateOnManagedDiskFromDisk:
@Test
public void canOperateOnManagedDiskFromDisk() {
    final String diskName1 = generateRandomResourceName("md-1", 20);
    final String diskName2 = generateRandomResourceName("md-2", 20);

    ResourceGroup resourceGroup = resourceManager.resourceGroups()
            .define(RG_NAME)
            .withRegion(region)
            .create();

    // Create an empty 100 GB managed disk.
    Disk emptyDisk = computeManager.disks()
            .define(diskName1)
            .withRegion(region)
            .withExistingResourceGroup(resourceGroup.name())
            .withData()
            .withSizeInGB(100)
            .create();

    // Create a second managed disk by copying the first (CreateOption: COPY),
    // growing it to 200 GB with an explicit Standard LRS SKU.
    Disk disk = computeManager.disks()
            .define(diskName2)
            .withRegion(region)
            .withExistingResourceGroup(resourceGroup.name())
            .withData()
            .fromDisk(emptyDisk)
            .withSizeInGB(200)
            .withSku(DiskSkuTypes.STANDARD_LRS)
            .create();

    // Re-fetch by id and verify the copy's properties.
    // Note: JUnit convention is assertEquals(expected, actual).
    disk = computeManager.disks().getById(disk.id());
    Assert.assertNotNull(disk.id());
    Assert.assertTrue(disk.name().equalsIgnoreCase(diskName2));
    Assert.assertEquals(DiskSkuTypes.STANDARD_LRS, disk.sku());
    Assert.assertEquals(DiskCreateOption.COPY, disk.creationMethod());
    Assert.assertFalse(disk.isAttachedToVirtualMachine());
    Assert.assertEquals(200, disk.sizeInGB());
    Assert.assertNull(disk.osType());
    Assert.assertNotNull(disk.source());
    Assert.assertEquals(CreationSourceType.COPIED_FROM_DISK, disk.source().type());
    Assert.assertTrue(disk.source().sourceId().equalsIgnoreCase(emptyDisk.id()));

    computeManager.disks().deleteById(emptyDisk.id());
    computeManager.disks().deleteById(disk.id());
}
Example usage of com.microsoft.azure.management.resources.ResourceGroup in the azure-sdk-for-java project (Azure), taken from the class ManageSimpleTrafficManager, method runSample:
/**
 * Main function which runs the actual sample: creates a batch of Linux VMs
 * across two regions behind a performance-routed Traffic Manager profile,
 * then switches the profile to priority-based routing. All resources are
 * placed in one resource group that is deleted in the finally block.
 * @param azure instance of the azure client
 * @return true if sample runs successfully
 */
public static boolean runSample(Azure azure) {
final String rgName = SdkContext.randomResourceName("rgCOPD", 24);
final String userName = "tirekicker";
// Sample-only public SSH key; no secret material is embedded here.
final String sshKey = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfSPC2K7LZcFKEO+/t3dzmQYtrJFZNxOsbVgOVKietqHyvmYGHEC0J2wPdAqQ/63g/hhAEFRoyehM+rbeDri4txB3YFfnOK58jqdkyXzupWqXzOrlKY4Wz9SKjjN765+dqUITjKRIaAip1Ri137szRg71WnrmdP3SphTRlCx1Bk2nXqWPsclbRDCiZeF8QOTi4JqbmJyK5+0UqhqYRduun8ylAwKKQJ1NJt85sYIHn9f1Rfr6Tq2zS0wZ7DHbZL+zB5rSlAr8QyUdg/GQD+cmSs6LvPJKL78d6hMGk84ARtFo4A79ovwX/Fj01znDQkU6nJildfkaolH2rWFG/qttD azjava@javalib.com";
final int vmCountPerRegion = 2;
// Two regions, two VMs each -> four VMs total behind the traffic manager.
Set<Region> regions = new HashSet<>(Arrays.asList(Region.US_EAST, Region.US_WEST));
try {
//=============================================================
// Create a shared resource group for all the resources so they can all be deleted together
//
ResourceGroup resourceGroup = azure.resourceGroups().define(rgName).withRegion(Region.US_EAST).create();
System.out.println("Created a new resource group - " + resourceGroup.id());
// Prepare a batch of creatable VM definitions to put behind the traffic manager.
// Definitions are collected first so they can be created in a single batch call below.
//
List<Creatable<VirtualMachine>> creatableVirtualMachines = new ArrayList<>();
for (Region region : regions) {
String linuxVMNamePrefix = SdkContext.randomResourceName("vm", 15);
for (int i = 0; i < vmCountPerRegion; i++) {
//=============================================================
// Create a virtual machine in its own virtual network
// (each VM also gets its own public IP, labeled with the VM name)
String vmName = String.format("%s-%d", linuxVMNamePrefix, i);
Creatable<VirtualMachine> vmDefinition = azure.virtualMachines().define(vmName).withRegion(region).withExistingResourceGroup(resourceGroup).withNewPrimaryNetwork("10.0.0.0/29").withPrimaryPrivateIPAddressDynamic().withNewPrimaryPublicIPAddress(vmName).withPopularLinuxImage(KnownLinuxVirtualMachineImage.UBUNTU_SERVER_16_04_LTS).withRootUsername(userName).withSsh(sshKey).withSize(VirtualMachineSizeTypes.STANDARD_A1);
creatableVirtualMachines.add(vmDefinition);
}
}
//=============================================================
// Create the VMs !!
// Batch creation lets the SDK provision the VMs (and their networks/IPs) in parallel.
StopWatch stopwatch = new StopWatch();
System.out.println("Creating the virtual machines...");
stopwatch.start();
Collection<VirtualMachine> virtualMachines = azure.virtualMachines().create(creatableVirtualMachines).values();
stopwatch.stop();
System.out.println(String.format("Created virtual machines in %d seconds.", stopwatch.getTime() / 1000));
//=============================================================
// Create 1 traffic manager profile
//
String trafficManagerName = SdkContext.randomResourceName("tra", 15);
TrafficManagerProfile.DefinitionStages.WithEndpoint profileWithEndpoint = azure.trafficManagerProfiles().define(trafficManagerName).withExistingResourceGroup(resourceGroup).withLeafDomainLabel(trafficManagerName).withPerformanceBasedRouting();
// Attach each VM's public IP as an Azure endpoint; every attach() returns the
// same in-progress definition, so only the final value of profileWithCreate is used.
TrafficManagerProfile.DefinitionStages.WithCreate profileWithCreate = null;
int routingPriority = 1;
for (VirtualMachine vm : virtualMachines) {
String endpointName = SdkContext.randomResourceName("ep", 15);
profileWithCreate = profileWithEndpoint.defineAzureTargetEndpoint(endpointName).toResourceId(vm.getPrimaryPublicIPAddressId()).withRoutingPriority(routingPriority++).attach();
}
stopwatch.reset();
stopwatch.start();
TrafficManagerProfile trafficManagerProfile = profileWithCreate.create();
stopwatch.stop();
System.out.println(String.format("Created a traffic manager profile %s\n in %d seconds.", trafficManagerProfile.id(), stopwatch.getTime() / 1000));
//=============================================================
// Modify the traffic manager to use priority based routing
//
trafficManagerProfile.update().withPriorityBasedRouting().apply();
System.out.println("Modified the traffic manager to use priority-based routing.");
return true;
} catch (Exception f) {
System.out.println(f.getMessage());
f.printStackTrace();
} finally {
// Best-effort cleanup: delete the whole resource group regardless of outcome.
try {
System.out.println("Deleting Resource Group: " + rgName);
azure.resourceGroups().deleteByName(rgName);
System.out.println("Deleted Resource Group: " + rgName);
} catch (NullPointerException npe) {
// NOTE(review): the NPE catch presumably signals "nothing was created";
// relying on an NPE for that is fragile — consider an explicit flag.
System.out.println("Did not create any resources in Azure. No clean up is necessary");
} catch (Exception g) {
g.printStackTrace();
}
}
// Reached only when the try block threw before returning true.
return false;
}
Example usage of com.microsoft.azure.management.resources.ResourceGroup in the azure-sdk-for-java project (Azure), taken from the class RedisCacheOperationsTests, method canCRUDRedisCache:
@Test
public void canCRUDRedisCache() throws Exception {
// Create: three caches (Basic, Premium with shards/patch-schedule, Premium with
// config + non-SSL port) provisioned in a single batch call.
Creatable<ResourceGroup> resourceGroups = resourceManager.resourceGroups().define(RG_NAME_SECOND).withRegion(Region.US_CENTRAL);
Creatable<RedisCache> redisCacheDefinition1 = redisManager.redisCaches().define(RR_NAME).withRegion(Region.ASIA_EAST).withNewResourceGroup(RG_NAME).withBasicSku();
Creatable<RedisCache> redisCacheDefinition2 = redisManager.redisCaches().define(RR_NAME_SECOND).withRegion(Region.US_CENTRAL).withNewResourceGroup(resourceGroups).withPremiumSku().withShardCount(10).withPatchSchedule(DayOfWeek.SUNDAY, 10, Period.minutes(302));
Creatable<RedisCache> redisCacheDefinition3 = redisManager.redisCaches().define(RR_NAME_THIRD).withRegion(Region.US_CENTRAL).withNewResourceGroup(resourceGroups).withPremiumSku(2).withRedisConfiguration("maxclients", "2").withNonSslPort();
CreatedResources<RedisCache> batchRedisCaches = redisManager.redisCaches().create(redisCacheDefinition1, redisCacheDefinition2, redisCacheDefinition3);
// NOTE(review): storageAccount is only referenced by the commented-out
// export/import block at the end of this test; it is otherwise unused.
StorageAccount storageAccount = storageManager.storageAccounts().define(SA_NAME).withRegion(Region.US_CENTRAL).withExistingResourceGroup(RG_NAME_SECOND).create();
RedisCache redisCache = batchRedisCaches.get(redisCacheDefinition1.key());
RedisCache redisCachePremium = batchRedisCaches.get(redisCacheDefinition3.key());
Assert.assertEquals(RG_NAME, redisCache.resourceGroupName());
Assert.assertEquals(SkuName.BASIC, redisCache.sku().name());
// List by Resource Group: RG_NAME contains exactly the one Basic cache.
List<RedisCache> redisCaches = redisManager.redisCaches().listByResourceGroup(RG_NAME);
boolean found = false;
for (RedisCache existingRedisCache : redisCaches) {
if (existingRedisCache.name().equals(RR_NAME)) {
found = true;
}
}
Assert.assertTrue(found);
Assert.assertEquals(1, redisCaches.size());
// List all Redis resources in the subscription; at least the three just created.
redisCaches = redisManager.redisCaches().list();
found = false;
for (RedisCache existingRedisCache : redisCaches) {
if (existingRedisCache.name().equals(RR_NAME)) {
found = true;
}
}
Assert.assertTrue(found);
Assert.assertTrue(redisCaches.size() >= 3);
// Get by resource group + name and compare with the batch-created instance.
RedisCache redisCacheGet = redisManager.redisCaches().getByResourceGroup(RG_NAME, RR_NAME);
Assert.assertNotNull(redisCacheGet);
Assert.assertEquals(redisCache.id(), redisCacheGet.id());
Assert.assertEquals(redisCache.provisioningState(), redisCacheGet.provisioningState());
// Get Keys: both access keys must be present.
RedisAccessKeys redisKeys = redisCache.keys();
Assert.assertNotNull(redisKeys);
Assert.assertNotNull(redisKeys.primaryKey());
Assert.assertNotNull(redisKeys.secondaryKey());
// Regen key: regenerating PRIMARY then SECONDARY. The assertions below
// depend on this exact call order — regenerating PRIMARY changes only the
// primary key, regenerating SECONDARY changes only the secondary key.
RedisAccessKeys oldKeys = redisCache.refreshKeys();
RedisAccessKeys updatedPrimaryKey = redisCache.regenerateKey(RedisKeyType.PRIMARY);
RedisAccessKeys updatedSecondaryKey = redisCache.regenerateKey(RedisKeyType.SECONDARY);
Assert.assertNotNull(oldKeys);
Assert.assertNotNull(updatedPrimaryKey);
Assert.assertNotNull(updatedSecondaryKey);
Assert.assertNotEquals(oldKeys.primaryKey(), updatedPrimaryKey.primaryKey());
Assert.assertEquals(oldKeys.secondaryKey(), updatedPrimaryKey.secondaryKey());
Assert.assertNotEquals(oldKeys.secondaryKey(), updatedSecondaryKey.secondaryKey());
Assert.assertNotEquals(updatedPrimaryKey.secondaryKey(), updatedSecondaryKey.secondaryKey());
Assert.assertEquals(updatedPrimaryKey.primaryKey(), updatedSecondaryKey.primaryKey());
// Update to STANDARD Sku from BASIC SKU
redisCache = redisCache.update().withStandardSku().apply();
Assert.assertEquals(SkuName.STANDARD, redisCache.sku().name());
Assert.assertEquals(SkuFamily.C, redisCache.sku().family());
// Downgrading back to Basic must be rejected by the service.
try {
redisCache.update().withBasicSku(1).apply();
fail();
} catch (CloudException e) {
// expected since Sku downgrade is not supported
}
// Refresh
redisCache.refresh();
// delete
redisManager.redisCaches().deleteById(redisCache.id());
// Premium SKU Functionality: exercised on the third (Premium) cache.
RedisCachePremium premiumCache = redisCachePremium.asPremium();
Assert.assertEquals(SkuFamily.P, premiumCache.sku().family());
// Redis configuration update: set, remove one key, then clear all.
premiumCache.update().withRedisConfiguration("maxclients", "3").apply();
premiumCache.update().withoutRedisConfiguration("maxclients").apply();
premiumCache.update().withoutRedisConfiguration().apply();
premiumCache.update().withPatchSchedule(DayOfWeek.MONDAY, 1).withPatchSchedule(DayOfWeek.TUESDAY, 5).apply();
// Reboot
premiumCache.forceReboot(RebootType.ALL_NODES);
// Patch Schedule: two entries were added above; after deletion a re-fetched
// cache reports no schedule (null, not an empty list).
List<ScheduleEntry> patchSchedule = premiumCache.listPatchSchedules();
Assert.assertEquals(2, patchSchedule.size());
premiumCache.deletePatchSchedule();
patchSchedule = redisManager.redisCaches().getById(premiumCache.id()).asPremium().listPatchSchedules();
Assert.assertNull(patchSchedule);
// currently throws because SAS url of the container should be provided as
// {"error":{
// "code":"InvalidRequestBody",
// "message": "One of the SAS URIs provided could not be used for the following reason:
// The SAS token is poorly formatted.\r\nRequestID=ed105089-b93b-427e-9cbb-d78ed80d23b0",
// "target":null}}
// com.microsoft.azure.CloudException: One of the SAS URIs provided could not be used for the following reason: The SAS token is poorly formatted.
/*premiumCache.exportData(storageAccount.name(),"snapshot1");
premiumCache.importData(Arrays.asList("snapshot1"));*/
}
Aggregations