Use of com.google.api.client.http.HttpHeaders in project google-api-java-client by google.
The class OAuth2Utils, method runningOnComputeEngine.
static boolean runningOnComputeEngine(HttpTransport transport, SystemEnvironmentProvider environment) {
  // If the environment has requested that we do no GCE checks, return immediately.
  if (Boolean.parseBoolean(environment.getEnv("NO_GCE_CHECK"))) {
    return false;
  }
  GenericUrl tokenUrl = new GenericUrl(getMetadataServerUrl(environment));
  for (int i = 1; i <= MAX_COMPUTE_PING_TRIES; ++i) {
    try {
      HttpRequest request = transport.createRequestFactory().buildGetRequest(tokenUrl);
      request.setConnectTimeout(COMPUTE_PING_CONNECTION_TIMEOUT_MS);
      HttpResponse response = request.execute();
      try {
        HttpHeaders headers = response.getHeaders();
        return headersContainValue(headers, "Metadata-Flavor", "Google");
      } finally {
        response.disconnect();
      }
    } catch (SocketTimeoutException expected) {
      // Ignore the timeout without logging it; timing out is the expected failure mode in non-GCE environments.
    } catch (IOException e) {
      LOGGER.log(Level.WARNING, "Failed to detect whether we are running on Google Compute Engine.", e);
    }
  }
  return false;
}
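For reference, a minimal, hypothetical sketch of the same detection idea exercised against the library's mock transport (from the google-http-client testing artifact). The class name GceCheckSketch and the stubbed URL are illustrative, not part of the source above:

import com.google.api.client.http.GenericUrl;
import com.google.api.client.http.HttpResponse;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.testing.http.MockHttpTransport;
import com.google.api.client.testing.http.MockLowLevelHttpResponse;

public class GceCheckSketch {
  public static void main(String[] args) throws Exception {
    // Stub a response carrying the header the real metadata server always sets.
    MockLowLevelHttpResponse metadataResponse = new MockLowLevelHttpResponse()
        .addHeader("Metadata-Flavor", "Google");
    HttpTransport transport = new MockHttpTransport.Builder()
        .setLowLevelHttpResponse(metadataResponse)
        .build();
    HttpResponse response = transport.createRequestFactory()
        .buildGetRequest(new GenericUrl("http://metadata.google.internal"))
        .execute();
    try {
      // Mirrors the headersContainValue check above.
      boolean onGce = "Google".equals(
          response.getHeaders().getFirstHeaderStringValue("Metadata-Flavor"));
      System.out.println("On GCE: " + onGce);
    } finally {
      response.disconnect();
    }
  }
}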
Use of com.google.api.client.http.HttpHeaders in project google-api-java-client by google.
The class BatchUnparsedResponse, method parseAndCallback.
/**
 * Parse an object into a new instance of the data class using
 * {@link HttpResponse#parseAs(java.lang.reflect.Type)}.
 */
private <T, E> void parseAndCallback(RequestInfo<T, E> requestInfo, int statusCode, HttpResponse response) throws IOException {
  BatchCallback<T, E> callback = requestInfo.callback;
  HttpHeaders responseHeaders = response.getHeaders();
  HttpUnsuccessfulResponseHandler unsuccessfulResponseHandler = requestInfo.request.getUnsuccessfulResponseHandler();
  BackOffPolicy backOffPolicy = requestInfo.request.getBackOffPolicy();
  // Reset backOff flag.
  backOffRequired = false;
  if (HttpStatusCodes.isSuccess(statusCode)) {
    if (callback == null) {
      // No point in parsing if there is no callback.
      return;
    }
    T parsed = getParsedDataClass(requestInfo.dataClass, response, requestInfo);
    callback.onSuccess(parsed, responseHeaders);
  } else {
    HttpContent content = requestInfo.request.getContent();
    boolean retrySupported = retryAllowed && (content == null || content.retrySupported());
    boolean errorHandled = false;
    boolean redirectRequest = false;
    if (unsuccessfulResponseHandler != null) {
      errorHandled = unsuccessfulResponseHandler.handleResponse(requestInfo.request, response, retrySupported);
    }
    if (!errorHandled) {
      if (requestInfo.request.handleRedirect(response.getStatusCode(), response.getHeaders())) {
        redirectRequest = true;
      } else if (retrySupported && backOffPolicy != null && backOffPolicy.isBackOffRequired(response.getStatusCode())) {
        backOffRequired = true;
      }
    }
    if (retrySupported && (errorHandled || backOffRequired || redirectRequest)) {
      unsuccessfulRequestInfos.add(requestInfo);
    } else {
      if (callback == null) {
        // No point in parsing if there is no callback.
        return;
      }
      E parsed = getParsedDataClass(requestInfo.errorClass, response, requestInfo);
      callback.onFailure(parsed, responseHeaders);
    }
  }
}
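For context, a hedged sketch of the calling side that feeds this method: a request queued on a BatchRequest with a JsonBatchCallback, whose onSuccess/onFailure parseAndCallback ultimately invokes with the parsed body and the response headers. The storage client and bucket name here are illustrative:

BatchRequest batch = storage.batch();
storage.objects().list("example-bucket").queue(batch, new JsonBatchCallback<Objects>() {
  @Override
  public void onSuccess(Objects objects, HttpHeaders responseHeaders) {
    // Reached for a 2xx part; `objects` was parsed via HttpResponse#parseAs.
    System.out.println("Listed objects: " + objects.getItems());
  }

  @Override
  public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) {
    // Reached for a non-2xx part once retry/redirect handling has given up.
    System.err.println("List failed: " + e.getMessage());
  }
});
batch.execute();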
Use of com.google.api.client.http.HttpHeaders in project beam by apache.
The class FhirIOTestUtil, method tearDownTempBucket.
public static void tearDownTempBucket() throws IOException {
  GoogleCredentials credentials = GoogleCredentials.getApplicationDefault();
  HttpRequestInitializer requestInitializer = request -> {
    HttpHeaders requestHeaders = request.getHeaders();
    if (!credentials.hasRequestMetadata()) {
      return;
    }
    URI uri = null;
    if (request.getUrl() != null) {
      uri = request.getUrl().toURI();
    }
    Map<String, List<String>> credentialHeaders = credentials.getRequestMetadata(uri);
    if (credentialHeaders == null) {
      return;
    }
    // Copy the credential headers (e.g. Authorization) onto the outgoing request.
    for (Map.Entry<String, List<String>> entry : credentialHeaders.entrySet()) {
      String headerName = entry.getKey();
      List<String> requestValues = new ArrayList<>(entry.getValue());
      requestHeaders.put(headerName, requestValues);
    }
    // 1 minute connect timeout
    request.setConnectTimeout(60000);
    // 1 minute read timeout
    request.setReadTimeout(60000);
  };
  Storage storage = new Storage.Builder(new NetHttpTransport(), new JacksonFactory(), requestInitializer).build();
  List<StorageObject> blobs = storage.objects().list(DEFAULT_TEMP_BUCKET).execute().getItems();
  if (blobs != null) {
    for (StorageObject blob : blobs) {
      // Objects.Delete takes the object name (not getId(), which also encodes the
      // bucket and generation), and the built request must be executed to be sent.
      storage.objects().delete(DEFAULT_TEMP_BUCKET, blob.getName()).execute();
    }
  }
}
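The hand-rolled initializer above duplicates what google-auth-library's HttpCredentialsAdapter provides. A hedged alternative sketch, assuming google-auth-library-oauth2-http is on the classpath; the adapter attaches the credential headers, so only the timeouts still need setting by hand:

GoogleCredentials credentials = GoogleCredentials.getApplicationDefault();
HttpCredentialsAdapter adapter = new HttpCredentialsAdapter(credentials);
HttpRequestInitializer requestInitializer = request -> {
  adapter.initialize(request);  // attaches the Authorization (and related) headers
  request.setConnectTimeout(60000);  // keep the 1 minute timeouts from above
  request.setReadTimeout(60000);
};
Storage storage = new Storage.Builder(new NetHttpTransport(), new JacksonFactory(), requestInitializer).build();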
Use of com.google.api.client.http.HttpHeaders in project elasticsearch by elastic.
The class GceMetadataService, method metadata.
public String metadata(String metadataPath) throws IOException, URISyntaxException {
    // Forcing Google Token API URL as set in GCE SDK to
    // http://metadata/computeMetadata/v1/instance/service-accounts/default/token
    // See https://developers.google.com/compute/docs/metadata#metadataserver
    final URI urlMetadataNetwork = new URI(GCE_HOST.get(settings)).resolve("/computeMetadata/v1/instance/").resolve(metadataPath);
    logger.debug("get metadata from [{}]", urlMetadataNetwork);
    HttpHeaders headers;
    try {
        // hack around code messiness in GCE code
        // TODO: get this fixed
        headers = Access.doPrivileged(HttpHeaders::new);
        GenericUrl genericUrl = Access.doPrivileged(() -> new GenericUrl(urlMetadataNetwork));
        // This header is required to query metadata: https://cloud.google.com/compute/docs/metadata
        headers.put("Metadata-Flavor", "Google");
        HttpResponse response = Access.doPrivilegedIOException(
            () -> getGceHttpTransport().createRequestFactory().buildGetRequest(genericUrl).setHeaders(headers).execute());
        String metadata = response.parseAsString();
        logger.debug("metadata found [{}]", metadata);
        return metadata;
    } catch (Exception e) {
        throw new IOException("failed to fetch metadata from [" + urlMetadataNetwork + "]", e);
    }
}
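A minimal standalone sketch of the same query without Elasticsearch's security-manager (Access) wrappers; the metadata path "hostname" is illustrative:

HttpHeaders headers = new HttpHeaders();
// Required by the metadata server: https://cloud.google.com/compute/docs/metadata
headers.put("Metadata-Flavor", "Google");
HttpResponse response = new NetHttpTransport().createRequestFactory()
    .buildGetRequest(new GenericUrl("http://metadata.google.internal/computeMetadata/v1/instance/hostname"))
    .setHeaders(headers)
    .execute();
try {
    System.out.println(response.parseAsString());
} finally {
    response.disconnect();
}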
Use of com.google.api.client.http.HttpHeaders in project elasticsearch by elastic.
The class GoogleCloudStorageBlobStore, method deleteBlobs.
/**
 * Deletes multiple blobs in the given bucket (uses a batch request to perform this).
 *
 * @param blobNames names of the blobs to delete
 */
void deleteBlobs(Collection<String> blobNames) throws IOException {
    if (blobNames == null || blobNames.isEmpty()) {
        return;
    }
    if (blobNames.size() == 1) {
        deleteBlob(blobNames.iterator().next());
        return;
    }
    final List<Storage.Objects.Delete> deletions = new ArrayList<>();
    final Iterator<String> blobs = blobNames.iterator();
    SocketAccess.doPrivilegedVoidIOException(() -> {
        while (blobs.hasNext()) {
            // Create a delete request for each blob to delete
            deletions.add(client.objects().delete(bucket, blobs.next()));
            if (blobs.hasNext() == false || deletions.size() == MAX_BATCHING_REQUESTS) {
                try {
                    // Deletions are executed using a batch request
                    BatchRequest batch = client.batch();
                    // Used to track successful deletions
                    CountDown countDown = new CountDown(deletions.size());
                    for (Storage.Objects.Delete delete : deletions) {
                        // Queue the delete request in the batch
                        delete.queue(batch, new JsonBatchCallback<Void>() {
                            @Override
                            public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException {
                                logger.error("failed to delete blob [{}] in bucket [{}]: {}", delete.getObject(), delete.getBucket(), e.getMessage());
                            }

                            @Override
                            public void onSuccess(Void aVoid, HttpHeaders responseHeaders) throws IOException {
                                countDown.countDown();
                            }
                        });
                    }
                    batch.execute();
                    if (countDown.isCountedDown() == false) {
                        throw new IOException("Failed to delete all [" + deletions.size() + "] blobs");
                    }
                } finally {
                    // Clear the pending deletions for the next batch, whether this one succeeded or not
                    deletions.clear();
                }
            }
        }
    });
}
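A hedged usage sketch (blobStore stands for an instance of the class above; the object names are illustrative). Because the batch is flushed every MAX_BATCHING_REQUESTS entries, the collection can be arbitrarily large without ever building one oversized request:

blobStore.deleteBlobs(Arrays.asList("index-0/segment_1", "index-0/segment_2"));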