Use of org.apache.http.protocol.HttpContext in project oxCore by GluuFederation.
The class WebDavDocumentStoreProvider, method saveDocument:
@Override
public boolean saveDocument(String path, String documentContent, Charset charset) {
    // Hard-coded toggle: the first branch uses the shared HttpClient directly; the else branch
    // (an older client-based implementation) is unreachable as written.
    if (true) {
        log.debug("Save document: '{}'", path);
        String normalizedPath = getNormalizedPath(path);
        try {
            HttpPut method = new HttpPut(baseServerUrl + "/" + normalizedPath);
            HttpEntity entity = new StringEntity(documentContent, charset);
            method.setEntity(entity);
            HttpContext requestLocalContext = new BasicHttpContext(context);
            HttpResponse response = httpClient.execute(method, requestLocalContext);
            int statusCode = response.getStatusLine().getStatusCode();
            return statusCode == HttpStatus.SC_CREATED || statusCode == HttpStatus.SC_NO_CONTENT;
        } catch (IOException ex) {
            log.error("Failed to write document to file '{}'", path, ex);
        }
        return false;
    } else {
        log.debug("Save document: '{}'", path);
        String normalizedPath = getNormalizedPath(path);
        try {
            String url = baseServerUrl + "/" + normalizedPath;
            client.put(url, IOUtils.toInputStream(documentContent));
            return true;
        } catch (IOException ex) {
            log.error("Failed to write document to file '{}'", path, ex);
        }
        return false;
    }
}
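The method above depends on fields (httpClient, baseServerUrl, and the shared context that each request wraps in a request-local BasicHttpContext) that the provider initializes elsewhere. A minimal sketch of what that wiring could look like with preemptive Basic auth; the class name, host, and credentials below are illustrative assumptions, not taken from the oxCore source:

import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.AuthCache;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.impl.auth.BasicScheme;
import org.apache.http.impl.client.BasicAuthCache;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.HttpContext;

class WebDavClientSetupSketch {

    CloseableHttpClient httpClient;
    HttpContext context;

    // Hypothetical setup; host, port, and credentials are placeholders.
    void init() {
        HttpHost target = new HttpHost("dav.example.org", 443, "https");
        CredentialsProvider credsProvider = new BasicCredentialsProvider();
        credsProvider.setCredentials(new AuthScope("dav.example.org", 443),
                new UsernamePasswordCredentials("user", "secret"));
        // Pre-populating the auth cache enables preemptive Basic auth for every request.
        AuthCache authCache = new BasicAuthCache();
        authCache.put(target, new BasicScheme());
        httpClient = HttpClients.custom().setDefaultCredentialsProvider(credsProvider).build();
        // Shared parent context; saveDocument wraps it in a request-local BasicHttpContext.
        context = new BasicHttpContext();
        context.setAttribute(HttpClientContext.AUTH_CACHE, authCache);
    }
}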
Use of org.apache.http.protocol.HttpContext in project oxCore by GluuFederation.
The class WebDavDocumentStoreProvider, method saveDocumentStream:
@Override
public boolean saveDocumentStream(String path, InputStream documentStream) {
    log.debug("Save document from stream: '{}'", path);
    String normalizedPath = getNormalizedPath(path);
    try {
        HttpPut method = new HttpPut(baseServerUrl + "/" + normalizedPath);
        HttpEntity entity = new InputStreamEntity(documentStream);
        method.setEntity(entity);
        HttpContext requestLocalContext = new BasicHttpContext(context);
        HttpResponse response = httpClient.execute(method, requestLocalContext);
        return response.getStatusLine().getStatusCode() == HttpStatus.SC_CREATED;
    } catch (IOException ex) {
        log.error("Failed to write document from stream to file '{}'", path, ex);
    }
    return false;
}
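Note that an InputStreamEntity built without an explicit length is streamed with chunked transfer encoding. If the caller knows the length up front, a variant like the following (a hypothetical helper, not part of the provider) sends a plain Content-Length header instead:

import java.io.InputStream;

import org.apache.http.HttpEntity;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.InputStreamEntity;

class StreamEntityHelper {

    // Hypothetical helper: supplying the length lets the client send a Content-Length
    // header instead of using chunked transfer encoding.
    static void setStreamEntity(HttpPut method, InputStream documentStream, long contentLength) {
        HttpEntity entity = new InputStreamEntity(documentStream, contentLength, ContentType.APPLICATION_OCTET_STREAM);
        method.setEntity(entity);
    }
}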
Use of org.apache.http.protocol.HttpContext in project hive by apache.
The class HiveConnection, method getHttpClient:
private CloseableHttpClient getHttpClient(Boolean useSsl) throws SQLException {
    boolean isCookieEnabled = sessConfMap.get(JdbcConnectionParams.COOKIE_AUTH) == null || (!JdbcConnectionParams.COOKIE_AUTH_FALSE.equalsIgnoreCase(sessConfMap.get(JdbcConnectionParams.COOKIE_AUTH)));
    String cookieName = sessConfMap.get(JdbcConnectionParams.COOKIE_NAME) == null ? JdbcConnectionParams.DEFAULT_COOKIE_NAMES_HS2 : sessConfMap.get(JdbcConnectionParams.COOKIE_NAME);
    CookieStore cookieStore = isCookieEnabled ? new BasicCookieStore() : null;
    HttpClientBuilder httpClientBuilder = null;
    // Request interceptor for any request pre-processing logic
    HttpRequestInterceptor requestInterceptor;
    Map<String, String> additionalHttpHeaders = new HashMap<String, String>();
    Map<String, String> customCookies = new HashMap<String, String>();
    // Retrieve the additional HTTP headers
    for (Map.Entry<String, String> entry : sessConfMap.entrySet()) {
        String key = entry.getKey();
        if (key.startsWith(JdbcConnectionParams.HTTP_HEADER_PREFIX)) {
            additionalHttpHeaders.put(key.substring(JdbcConnectionParams.HTTP_HEADER_PREFIX.length()), entry.getValue());
        }
        if (key.startsWith(JdbcConnectionParams.HTTP_COOKIE_PREFIX)) {
            customCookies.put(key.substring(JdbcConnectionParams.HTTP_COOKIE_PREFIX.length()), entry.getValue());
        }
    }
    // Configure the http client for Kerberos/password based authentication
    if (isKerberosAuthMode()) {
        if (assumeSubject) {
            // With this option, we're assuming that the external application
            // using the JDBC driver has already done a JAAS Kerberos login
            AccessControlContext context = AccessController.getContext();
            loggedInSubject = Subject.getSubject(context);
            if (loggedInSubject == null) {
                throw new SQLException("The Subject is not set");
            }
        }
        /**
         * Add an interceptor which sets the appropriate header in the request.
         * It performs the Kerberos authentication and obtains the final service ticket,
         * to send to the server before every request.
         * In https mode, the entire exchange is encrypted.
         */
        requestInterceptor = new HttpKerberosRequestInterceptor(sessConfMap.get(JdbcConnectionParams.AUTH_PRINCIPAL), host, getServerHttpUrl(useSsl), loggedInSubject, cookieStore, cookieName, useSsl, additionalHttpHeaders, customCookies);
    } else if (isBrowserAuthMode()) {
        requestInterceptor = new HttpSamlAuthRequestInterceptor(browserClient, cookieStore, cookieName, useSsl, additionalHttpHeaders, customCookies);
    } else {
        // Check for a delegation token; if present, add it in the header
        String tokenStr = getClientDelegationToken(sessConfMap);
        if (tokenStr != null) {
            requestInterceptor = new HttpTokenAuthInterceptor(tokenStr, cookieStore, cookieName, useSsl, additionalHttpHeaders, customCookies);
        } else {
            /**
             * Add an interceptor to pass the username/password in the header.
             * In https mode, the entire exchange is encrypted.
             */
            requestInterceptor = new HttpBasicAuthInterceptor(getUserName(), getPassword(), cookieStore, cookieName, useSsl, additionalHttpHeaders, customCookies);
        }
    }
    // Configure the http client for cookie based authentication
    if (isCookieEnabled) {
        // Create an http client with a retry mechanism for when the server returns a status code of 401
        httpClientBuilder = HttpClients.custom().setDefaultCookieStore(cookieStore).setServiceUnavailableRetryStrategy(new ServiceUnavailableRetryStrategy() {

            @Override
            public boolean retryRequest(final HttpResponse response, final int executionCount, final HttpContext context) {
                int statusCode = response.getStatusLine().getStatusCode();
                boolean sentCredentials = context.getAttribute(Utils.HIVE_SERVER2_SENT_CREDENTIALS) != null && context.getAttribute(Utils.HIVE_SERVER2_SENT_CREDENTIALS).equals(Utils.HIVE_SERVER2_CONST_TRUE);
                boolean ret = statusCode == 401 && executionCount <= 1 && !sentCredentials;
                // Flag the retry in the context so it can be picked up by the request interceptor
                if (ret) {
                    context.setAttribute(Utils.HIVE_SERVER2_RETRY_KEY, Utils.HIVE_SERVER2_CONST_TRUE);
                }
                return ret;
            }

            @Override
            public long getRetryInterval() {
                // Immediate retry
                return 0;
            }
        });
    } else {
        httpClientBuilder = HttpClientBuilder.create();
    }
    // Beeline <------> LB <------> Reverse Proxy <-----> Hiveserver2
    // In deployments like the above, the LoadBalancer (LB) can be configured with an idle timeout after which the LB
    // sends a TCP RST to the client (Beeline) and the backend (Reverse Proxy). If the user is connected to Beeline,
    // sits idle for some time, and resubmits a query after the idle timeout, there is a broken pipe between Beeline
    // and the LB. When Beeline tries to submit the query, one of two things happens: it either hangs or times out
    // (if socketTimeout is defined in the JDBC params). The hang is caused by the default infinite socket timeout,
    // from which there is no auto-recovery (the user has to manually interrupt the query). If the socketTimeout JDBC
    // param was specified, Beeline will receive a SocketTimeoutException (Read Timeout) or NoHttpResponseException,
    // both of which can be retried if maxRetries is also specified by the user (JDBC param).
    // The following retry handler handles the above cases in addition to retries for idempotent and unsent requests.
    httpClientBuilder.setRetryHandler(new HttpRequestRetryHandler() {

        // This handler is mostly a copy of DefaultHttpRequestRetryHandler, except that it also retries some exceptions
        // which could be thrown when an idle timeout from an intermediate proxy triggers a connection reset.
        private final List<Class<? extends IOException>> nonRetriableClasses = Arrays.asList(InterruptedIOException.class, UnknownHostException.class, ConnectException.class, SSLException.class);

        // Socket exceptions can happen because of a timeout, a broken pipe, or the server not responding, in which
        // case it is better to reopen the connection and retry if the user specified maxRetries
        private final List<Class<? extends IOException>> retriableClasses = Arrays.asList(SocketTimeoutException.class, SocketException.class, NoHttpResponseException.class);

        @Override
        public boolean retryRequest(IOException exception, int executionCount, HttpContext context) {
            Args.notNull(exception, "Exception parameter");
            Args.notNull(context, "HTTP context");
            if (executionCount > maxRetries) {
                // Do not retry if over max retry count
                LOG.error("Max retries (" + maxRetries + ") exhausted.", exception);
                return false;
            }
            if (this.retriableClasses.contains(exception.getClass())) {
                LOG.info("Retrying " + exception.getClass() + " as it is in the retriable classes list.");
                return true;
            }
            if (this.nonRetriableClasses.contains(exception.getClass())) {
                LOG.info("Not retrying as the class (" + exception.getClass() + ") is a non-retriable class.");
                return false;
            } else {
                for (final Class<? extends IOException> rejectException : this.nonRetriableClasses) {
                    if (rejectException.isInstance(exception)) {
                        LOG.info("Not retrying as the class (" + exception.getClass() + ") is an instance of a non-retriable class.");
                        return false;
                    }
                }
            }
            final HttpClientContext clientContext = HttpClientContext.adapt(context);
            final HttpRequest request = clientContext.getRequest();
            if (requestIsAborted(request)) {
                LOG.info("Not retrying as the request is aborted.");
                return false;
            }
            if (handleAsIdempotent(request)) {
                LOG.info("Retrying idempotent request. Attempt " + executionCount + " of " + maxRetries);
                // Retry if the request is considered idempotent
                return true;
            }
            if (!clientContext.isRequestSent()) {
                LOG.info("Retrying unsent request. Attempt " + executionCount + " of " + maxRetries);
                // Retry if the request has not been fully sent yet
                return true;
            }
            LOG.info("Not retrying as the request is not idempotent and has already been sent.");
            // Otherwise do not retry
            return false;
        }

        // Entity-enclosing requests handle the "Expect: 100-continue" handshake; if the server received the header
        // and is still waiting for the body, such requests can still be retried via the unsent-request check above.
        // Requests that do not enclose an entity are treated as idempotent here, as long as they are not aborted.
        protected boolean handleAsIdempotent(final HttpRequest request) {
            return !(request instanceof HttpEntityEnclosingRequest);
        }

        // Checks if the request got aborted
        protected boolean requestIsAborted(final HttpRequest request) {
            HttpRequest req = request;
            if (request instanceof RequestWrapper) {
                // RequestWrapper does not forward isAborted() to the original request, so unwrap it
                req = ((RequestWrapper) request).getOriginal();
            }
            return (req instanceof HttpUriRequest && ((HttpUriRequest) req).isAborted());
        }
    });
    if (isBrowserAuthMode()) {
        httpClientBuilder.setRedirectStrategy(new HiveJdbcSamlRedirectStrategy(browserClient));
    }
    // Add the request interceptor to the client builder
    httpClientBuilder.addInterceptorFirst(requestInterceptor);
    // Add an interceptor to add in an XSRF header
    httpClientBuilder.addInterceptorLast(new XsrfHttpRequestInterceptor());
    // Set the specified timeout (socketTimeout jdbc param) for the http connection as well
    RequestConfig config = RequestConfig.custom().setConnectTimeout(loginTimeout * 1000).setConnectionRequestTimeout(loginTimeout * 1000).setSocketTimeout(loginTimeout * 1000).build();
    httpClientBuilder.setDefaultRequestConfig(config);
    // Configure http client for SSL
    if (useSsl) {
        String useTwoWaySSL = sessConfMap.get(JdbcConnectionParams.USE_TWO_WAY_SSL);
        String sslTrustStorePath = sessConfMap.get(JdbcConnectionParams.SSL_TRUST_STORE);
        String sslTrustStorePassword = sessConfMap.get(JdbcConnectionParams.SSL_TRUST_STORE_PASSWORD);
        KeyStore sslTrustStore;
        SSLConnectionSocketFactory socketFactory;
        SSLContext sslContext;
        /**
         * The code within the try block throws: SSLInitializationException, KeyStoreException,
         * IOException, NoSuchAlgorithmException, CertificateException, KeyManagementException &
         * UnrecoverableKeyException. We don't want the client to retry on any of these,
         * hence we catch all and throw a SQLException.
         */
        try {
            if (useTwoWaySSL != null && useTwoWaySSL.equalsIgnoreCase(JdbcConnectionParams.TRUE)) {
                socketFactory = getTwoWaySSLSocketFactory();
            } else if (sslTrustStorePath == null || sslTrustStorePath.isEmpty()) {
                // Create a default socket factory based on standard JSSE trust material
                socketFactory = SSLConnectionSocketFactory.getSocketFactory();
            } else {
                // Pick trust store config from the given path
                String trustStoreType = sessConfMap.get(JdbcConnectionParams.SSL_TRUST_STORE_TYPE);
                if (trustStoreType == null || trustStoreType.isEmpty()) {
                    trustStoreType = KeyStore.getDefaultType();
                }
                sslTrustStore = KeyStore.getInstance(trustStoreType);
                try (FileInputStream fis = new FileInputStream(sslTrustStorePath)) {
                    sslTrustStore.load(fis, sslTrustStorePassword.toCharArray());
                }
                sslContext = SSLContexts.custom().loadTrustMaterial(sslTrustStore, null).build();
                socketFactory = new SSLConnectionSocketFactory(sslContext, new DefaultHostnameVerifier(null));
            }
            final Registry<ConnectionSocketFactory> registry = RegistryBuilder.<ConnectionSocketFactory>create().register("https", socketFactory).build();
            httpClientBuilder.setConnectionManager(new BasicHttpClientConnectionManager(registry));
        } catch (Exception e) {
            String msg = "Could not create an https connection to " + jdbcUriString + ". " + e.getMessage();
            throw new SQLException(msg, " 08S01", e);
        }
    }
    return httpClientBuilder.build();
}
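The builder returns the CloseableHttpClient that the driver wires into its Thrift-over-HTTP transport. Purely as an illustration (this is not the driver's actual transport code, and the URL is a placeholder), a method like the following, sitting inside HiveConnection, would execute a request with a per-call HttpClientContext that the retry handler and ServiceUnavailableRetryStrategy registered above can also see:

import java.sql.SQLException;

import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.impl.client.CloseableHttpClient;

// Hypothetical helper inside HiveConnection; getHttpClient is the private method shown above.
private void executeWithContextSketch() throws Exception {
    CloseableHttpClient client = getHttpClient(true);
    HttpClientContext callContext = HttpClientContext.create();
    HttpPost post = new HttpPost("https://hs2.example.com:10001/cliservice");
    try (CloseableHttpResponse response = client.execute(post, callContext)) {
        int statusCode = response.getStatusLine().getStatusCode();
        // Attributes set on callContext by the interceptors or retry strategy remain visible here.
        if (statusCode != 200) {
            throw new SQLException("HTTP transport returned status " + statusCode);
        }
    }
}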
Use of org.apache.http.protocol.HttpContext in project warn-report by saaavsaaa.
The class WebRequestClient, method setCookies:
private static void setCookies(Cookie[] cookies) {
    // Create a local context
    HttpContext localContext = new BasicHttpContext();
    for (Cookie one : cookies) {
        cs.addCookie(one);
    }
    // Bind the local cookie store to the local context
    localContext.setAttribute(HttpClientContext.COOKIE_STORE, cs);
}
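As shown, the localContext built here goes out of scope when the method returns, so the binding only matters if the same context object is passed to HttpClient#execute. A minimal sketch of that wiring (the client construction and request are assumptions, not taken from warn-report):

import java.io.IOException;

import org.apache.http.HttpResponse;
import org.apache.http.client.CookieStore;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.protocol.BasicHttpContext;
import org.apache.http.protocol.HttpContext;

class CookieContextSketch {

    // Sketch only: the cookie store bound to the context is consulted and updated
    // by the client for every request executed with that same context.
    static HttpResponse getWithCookies(String url, CookieStore cs) throws IOException {
        HttpContext localContext = new BasicHttpContext();
        localContext.setAttribute(HttpClientContext.COOKIE_STORE, cs);
        CloseableHttpClient client = HttpClients.createDefault();
        return client.execute(new HttpGet(url), localContext);
    }
}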