Use of com.google.api.services.compute.Compute in project cloudbreak by Hortonworks: class TagsUtil, method checkTagsGcp.
/**
 * Verifies that each GCP instance in {@code instanceIdList} carries the expected tags.
 * <p>
 * Builds an authenticated {@link Compute} client from the given P12 service-account key,
 * fetches every instance, parses its GCE tag items of the form {@code key-value} into a map,
 * and delegates the comparison against {@code tagsToCheckMap} to {@code checkTags}.
 *
 * @param applicationContext Spring context used to resolve the P12 key resource
 * @param applicationName    application name reported to the Compute API
 * @param projectId          GCP project that owns the instances
 * @param serviceAccountId   service-account e-mail used for authentication
 * @param p12File            classpath location of the Base64-encoded P12 key file
 * @param availabilityZone   zone the instances live in
 * @param instanceIdList     names/ids of the instances whose tags are checked
 * @param tagsToCheckMap     expected key/value tag pairs
 * @throws Exception on authentication, transport, or tag-mismatch failures
 */
protected static void checkTagsGcp(ApplicationContext applicationContext, String applicationName, String projectId, String serviceAccountId, String p12File, String availabilityZone, Iterable<String> instanceIdList, Map<String, String> tagsToCheckMap) throws Exception {
    String serviceAccountPrivateKey = ResourceUtil.readBase64EncodedContentFromResource(applicationContext, p12File);
    HttpTransport httpTransport = GoogleNetHttpTransport.newTrustedTransport();
    // "notasecret" is the fixed password Google uses for generated P12 service-account keys.
    PrivateKey privateKey = SecurityUtils.loadPrivateKeyFromKeyStore(SecurityUtils.getPkcs12KeyStore(), new ByteArrayInputStream(Base64.decodeBase64(serviceAccountPrivateKey)), "notasecret", "privatekey", "notasecret");
    JacksonFactory jsonFactory = JacksonFactory.getDefaultInstance();
    GoogleCredential googleCredential = new GoogleCredential.Builder()
            .setTransport(httpTransport)
            .setJsonFactory(jsonFactory)
            .setServiceAccountId(serviceAccountId)
            .setServiceAccountScopes(Collections.singletonList(ComputeScopes.COMPUTE))
            .setServiceAccountPrivateKey(privateKey)
            .build();
    Compute compute = new Builder(httpTransport, jsonFactory, null)
            .setApplicationName(applicationName)
            .setHttpRequestInitializer(googleCredential)
            .build();
    Instances instances = compute.instances();
    for (String id : instanceIdList) {
        Get response = instances.get(projectId, availabilityZone, id);
        com.google.api.services.compute.model.Instance instance = response.execute();
        Tags gcpTags = instance.getTags();
        // Re-created per instance, so no explicit clear() is needed at the end of the loop.
        Map<String, String> extractedTags = new HashMap<>();
        List<String> tagList = gcpTags.getItems();
        for (String item : tagList) {
            // BUG FIX: split on the FIRST '-' only, so tag values that themselves contain
            // a '-' (e.g. "owner-john-doe") are no longer truncated. The previous unbounded
            // split stored only the segment between the first and second dash.
            String[] keyValue = item.split("-", 2);
            if (keyValue.length > 1) {
                extractedTags.put(keyValue[0], keyValue[1]);
            }
        }
        checkTags(tagsToCheckMap, extractedTags);
    }
}
Use of com.google.api.services.compute.Compute in project cloudbreak by Hortonworks: class GcpCreateVirtualNetworkTest, method createNetwork.
/**
 * Creates a GCP VPC network (and, depending on {@code networkType}, a subnetwork) directly
 * through the Compute API, then registers a matching Cloudbreak network resource via the REST
 * endpoint and stores its id in the integration-test context under NETWORK_ID.
 */
@Test
// BUG FIX: the fourth @Parameters entry was "resourceGroupName" (an Azure copy-paste leftover)
// while the bound method argument is subnetRegion, so a subnetRegion value supplied from the
// TestNG suite XML was never injected. The label now matches the argument it binds.
@Parameters({ "networkName", "description", "publicInAccount", "subnetRegion", "vpcName", "vpcSubnet", "subnetCIDR", "networkType" })
public void createNetwork(String networkName, @Optional("") String description, @Optional("false") boolean publicInAccount, @Optional("europe-west1") String subnetRegion, @Optional("it-vpc") String vpcName, @Optional("it-vpc-subnet") String vpcSubnet, @Optional("10.0.36.0/24") String subnetCIDR, NetworkType networkType) throws Exception {
    String serviceAccountPrivateKey = ResourceUtil.readBase64EncodedContentFromResource(applicationContext, defaultP12File);
    HttpTransport httpTransport = GoogleNetHttpTransport.newTrustedTransport();
    // "notasecret" is the fixed password Google uses for generated P12 service-account keys.
    PrivateKey privateKey = SecurityUtils.loadPrivateKeyFromKeyStore(SecurityUtils.getPkcs12KeyStore(), new ByteArrayInputStream(Base64.decodeBase64(serviceAccountPrivateKey)), "notasecret", "privatekey", "notasecret");
    JacksonFactory jsonFactory = JacksonFactory.getDefaultInstance();
    GoogleCredential googleCredential = new GoogleCredential.Builder()
            .setTransport(httpTransport)
            .setJsonFactory(jsonFactory)
            .setServiceAccountId(defaultServiceAccountId)
            .setServiceAccountScopes(Collections.singletonList(ComputeScopes.COMPUTE))
            .setServiceAccountPrivateKey(privateKey)
            .build();
    Compute compute = new Builder(httpTransport, jsonFactory, null)
            .setApplicationName(defaultName)
            .setHttpRequestInitializer(googleCredential)
            .build();
    Network gcpNetwork = new Network();
    gcpNetwork.setName(vpcName);
    // Legacy networks have a single global range; all other types use explicit subnetworks.
    if (!LAGACY_NETWORK.equals(networkType)) {
        gcpNetwork.setAutoCreateSubnetworks(false);
    }
    Networks.Insert networkInsert = compute.networks().insert(defaultProjectId, gcpNetwork);
    Operation networkInsertResponse = networkInsert.execute();
    if (networkInsertResponse.getHttpErrorStatusCode() != null) {
        throw new IllegalStateException("gcp network operation failed: " + networkInsertResponse.getHttpErrorMessage());
    }
    waitOperation(compute, networkInsertResponse);
    if (EXISTING_SUBNET_IN_EXISTING_NETWORK.equals(networkType)) {
        Subnetwork gcpSubnet = new Subnetwork();
        gcpSubnet.setName(vpcSubnet);
        gcpSubnet.setIpCidrRange(subnetCIDR);
        gcpSubnet.setNetwork(String.format("https://www.googleapis.com/compute/v1/projects/%s/global/networks/%s", defaultProjectId, vpcName));
        Insert subNetworkInsert = compute.subnetworks().insert(defaultProjectId, subnetRegion, gcpSubnet);
        Operation subNetInsertResponse = subNetworkInsert.execute();
        if (subNetInsertResponse.getHttpErrorStatusCode() != null) {
            throw new IllegalStateException("gcp subnetwork operation failed: " + subNetInsertResponse.getHttpErrorMessage());
        }
        // CONSISTENCY FIX: wait for the subnetwork operation just like the network insert above;
        // otherwise the Cloudbreak request below may reference a not-yet-provisioned subnet.
        waitOperation(compute, subNetInsertResponse);
    }
    NetworkRequest networkRequest = new NetworkRequest();
    networkRequest.setName(networkName);
    networkRequest.setDescription(description);
    if (NEW_SUBNET_IN_EXISTING_NETWORK.equals(networkType)) {
        networkRequest.setSubnetCIDR(subnetCIDR);
    }
    Map<String, Object> map = new HashMap<>();
    map.put("networkId", vpcName);
    if (EXISTING_SUBNET_IN_EXISTING_NETWORK.equals(networkType)) {
        map.put("subnetId", vpcSubnet);
    }
    networkRequest.setParameters(map);
    networkRequest.setCloudPlatform("GCP");
    String id = getCloudbreakClient().networkEndpoint().postPrivate(networkRequest).getId().toString();
    getItContext().putContextParam(CloudbreakITContextConstants.NETWORK_ID, id, true);
}
Use of com.google.api.services.compute.Compute in project druid by druid-io: class GceAutoScaler, method terminateWithIds.
/**
 * Terminates the instances in the list of IDs provided by the caller.
 * <p>
 * Issues a deleteInstances request against the managed instance group, waits for the GCE
 * operation to finish, then polls the running-instance list until the expected number of
 * machines has disappeared (bounded by {@code RUNNING_INSTANCES_MAX_RETRIES}).
 *
 * @param ids names of the instances to terminate; an empty list is a no-op
 * @return the instances observed to have gone away, or empty data on failure
 */
@Override
public AutoScalingData terminateWithIds(List<String> ids) {
    log.info("Asked to terminate IDs: [%s]", String.join(",", ids));
    if (ids.isEmpty()) {
        return new AutoScalingData(new ArrayList<>());
    }
    try {
        final String project = envConfig.getProjectId();
        final String zone = envConfig.getZoneName();
        final String managedInstanceGroupName = envConfig.getManagedInstanceGroupName();
        List<String> before = getRunningInstances();
        InstanceGroupManagersDeleteInstancesRequest requestBody = new InstanceGroupManagersDeleteInstancesRequest();
        requestBody.setInstances(namesToInstances(ids));
        Compute computeService = createComputeService();
        Compute.InstanceGroupManagers.DeleteInstances request = computeService.instanceGroupManagers().deleteInstances(project, zone, managedInstanceGroupName, requestBody);
        Operation response = request.execute();
        Operation.Error err = waitForOperationEnd(computeService, response);
        if (err == null || err.isEmpty()) {
            // Deletion is asynchronous on GCE's side: poll until the running-instance count
            // has dropped by the number of requested terminations (with a bounded
            // certain amount of retries in checking)
            List<String> after = null;
            for (int i = 0; i < RUNNING_INSTANCES_MAX_RETRIES; i++) {
                after = getRunningInstances();
                if (after.size() == (before.size() - ids.size())) {
                    break;
                }
                log.info("Machines not down yet, waiting");
                Thread.sleep(POLL_INTERVAL_MS);
            }
            // keep only the ones no more present
            before.removeAll(after);
            return new AutoScalingData(before);
        } else {
            log.error("Unable to terminate instances: %s", err.toPrettyString());
        }
    } catch (InterruptedException ie) {
        // BUG FIX: restore the interrupt status instead of swallowing it in the generic
        // handler, so the owning executor/caller can observe the interruption.
        Thread.currentThread().interrupt();
        log.error(ie, "Interrupted while waiting for instances to terminate.");
    } catch (Exception e) {
        log.error(e, "Unable to terminate any instances.");
    }
    return new AutoScalingData(new ArrayList<>());
}
Use of com.google.api.services.compute.Compute in project druid by druid-io: class GceAutoScaler, method provision.
/**
 * When called resizes envConfig.getManagedInstanceGroupName() increasing it by creating
 * envConfig.getNumInstances() new workers (unless the maximum is reached). Return the
 * IDs of the workers created.
 * <p>
 * The resize is asynchronous on GCE's side, so after the operation completes this polls the
 * running-instance list until the target size is reached (bounded by
 * {@code RUNNING_INSTANCES_MAX_RETRIES}).
 *
 * @return the IDs of the newly observed instances, or empty data on failure / nothing to scale
 */
@Override
public AutoScalingData provision() {
    final String project = envConfig.getProjectId();
    final String zone = envConfig.getZoneName();
    final int numInstances = envConfig.getNumInstances();
    final String managedInstanceGroupName = envConfig.getManagedInstanceGroupName();
    try {
        List<String> before = getRunningInstances();
        log.debug("Existing instances [%s]", String.join(",", before));
        // Cap the target size at the configured maximum number of workers.
        int toSize = Math.min(before.size() + numInstances, getMaxNumWorkers());
        if (before.size() >= toSize) {
            // nothing to scale
            return new AutoScalingData(new ArrayList<>());
        }
        log.info("Asked to provision instances, will resize to %d", toSize);
        Compute computeService = createComputeService();
        Compute.InstanceGroupManagers.Resize request = computeService.instanceGroupManagers().resize(project, zone, managedInstanceGroupName, toSize);
        Operation response = request.execute();
        Operation.Error err = waitForOperationEnd(computeService, response);
        if (err == null || err.isEmpty()) {
            // Poll until the group has grown to the target size (with a bounded
            // certain amount of retries in checking)
            List<String> after = null;
            for (int i = 0; i < RUNNING_INSTANCES_MAX_RETRIES; i++) {
                after = getRunningInstances();
                if (after.size() == toSize) {
                    break;
                }
                log.info("Machines not up yet, waiting");
                Thread.sleep(POLL_INTERVAL_MS);
            }
            // these should be the new ones
            after.removeAll(before);
            log.info("Added instances [%s]", String.join(",", after));
            return new AutoScalingData(after);
        } else {
            log.error("Unable to provision instances: %s", err.toPrettyString());
        }
    } catch (InterruptedException ie) {
        // BUG FIX: restore the interrupt status instead of swallowing it in the generic
        // handler, so the owning executor/caller can observe the interruption.
        Thread.currentThread().interrupt();
        log.error(ie, "Interrupted while waiting for instances to come up.");
    } catch (Exception e) {
        log.error(e, "Unable to provision any gce instances.");
    }
    return new AutoScalingData(new ArrayList<>());
}
Use of com.google.api.services.compute.Compute in project elasticsearch by Elastic: class GceInstancesServiceImpl, method client.
/**
 * Returns a GCE {@link Compute} client authenticated via the GCE metadata server's
 * compute credential, building a new one only when the cached client is stale.
 * <p>
 * Caching: if {@code refreshInterval} is set, a negative interval means "cache forever",
 * otherwise the cached client is reused until the interval elapses. The refresh interval is
 * derived from the token lifetime (expiry minus one second) once a token has been obtained.
 * Synchronized so concurrent callers never build two clients at once.
 *
 * @return the cached or freshly built Compute client
 * @throws IllegalArgumentException if the client cannot be built (cause preserved)
 */
public synchronized Compute client() {
    if (refreshInterval != null && refreshInterval.millis() != 0) {
        // refreshInterval < 0 disables expiry entirely; otherwise reuse until it elapses.
        if (client != null && (refreshInterval.millis() < 0 || (System.currentTimeMillis() - lastRefresh) < refreshInterval.millis())) {
            if (logger.isTraceEnabled())
                logger.trace("using cache to retrieve client");
            return client;
        }
        lastRefresh = System.currentTimeMillis();
    }
    try {
        gceJsonFactory = new JacksonFactory();
        logger.info("starting GCE discovery service");
        // Forcing Google Token API URL as set in GCE SDK to
        // http://metadata/computeMetadata/v1/instance/service-accounts/default/token
        // See https://developers.google.com/compute/docs/metadata#metadataserver
        String tokenServerEncodedUrl = GceMetadataService.GCE_HOST.get(settings) + "/computeMetadata/v1/instance/service-accounts/default/token";
        ComputeCredential credential = new ComputeCredential.Builder(getGceHttpTransport(), gceJsonFactory).setTokenServerEncodedUrl(tokenServerEncodedUrl).build();
        // hack around code messiness in GCE code
        // TODO: get this fixed
        Access.doPrivilegedIOException(credential::refreshToken);
        logger.debug("token [{}] will expire in [{}] s", credential.getAccessToken(), credential.getExpiresInSeconds());
        // Re-derive the cache lifetime from the token lifetime, minus a one-second safety margin.
        if (credential.getExpiresInSeconds() != null) {
            refreshInterval = TimeValue.timeValueSeconds(credential.getExpiresInSeconds() - 1);
        }
        Compute.Builder builder = new Compute.Builder(getGceHttpTransport(), gceJsonFactory, null).setApplicationName(VERSION).setRootUrl(GCE_ROOT_URL.get(settings));
        // Optionally wrap the credential in a retrying initializer, bounded by max_wait when > 0.
        if (RETRY_SETTING.exists(settings)) {
            TimeValue maxWait = MAX_WAIT_SETTING.get(settings);
            RetryHttpInitializerWrapper retryHttpInitializerWrapper;
            if (maxWait.getMillis() > 0) {
                retryHttpInitializerWrapper = new RetryHttpInitializerWrapper(credential, maxWait);
            } else {
                retryHttpInitializerWrapper = new RetryHttpInitializerWrapper(credential);
            }
            builder.setHttpRequestInitializer(retryHttpInitializerWrapper);
        } else {
            builder.setHttpRequestInitializer(credential);
        }
        this.client = builder.build();
    } catch (Exception e) {
        logger.warn("unable to start GCE discovery service", e);
        throw new IllegalArgumentException("unable to start GCE discovery service", e);
    }
    return this.client;
}
Aggregations