Use of com.microsoft.azure.storage.OperationContext in the Apache Camel project,
in the class QueueServiceUtil, method getRequestOptions.
/**
 * Resolves the {@code QueueServiceRequestOptions} for this exchange.
 * <p>
 * A fully prepared options object supplied via the
 * {@code QUEUE_SERVICE_REQUEST_OPTIONS} header wins outright; otherwise a new
 * one is assembled from the individual request-options and operation-context
 * headers (either of which may be absent, leaving that field {@code null}).
 *
 * @param exchange the Camel exchange carrying the option headers
 * @return the options to use for the queue operation; never {@code null}
 */
public static QueueServiceRequestOptions getRequestOptions(Exchange exchange) {
    QueueServiceRequestOptions options = exchange.getIn()
            .getHeader(QueueServiceConstants.QUEUE_SERVICE_REQUEST_OPTIONS, QueueServiceRequestOptions.class);
    if (options == null) {
        // No pre-built options header: compose one from the per-field headers.
        options = new QueueServiceRequestOptions();
        options.setOpContext(exchange.getIn()
                .getHeader(QueueServiceConstants.OPERATION_CONTEXT, OperationContext.class));
        options.setRequestOpts(exchange.getIn()
                .getHeader(QueueServiceConstants.QUEUE_REQUEST_OPTIONS, QueueRequestOptions.class));
    }
    return options;
}
Use of com.microsoft.azure.storage.OperationContext in the Apache Camel project,
in the class BlobServiceUtil, method getRequestOptions.
/**
 * Resolves the {@code BlobServiceRequestOptions} for this exchange.
 * <p>
 * A fully prepared options object supplied via the
 * {@code BLOB_SERVICE_REQUEST_OPTIONS} header takes precedence; otherwise a
 * new one is assembled from the access-condition, request-options and
 * operation-context headers (any of which may be absent, leaving that field
 * {@code null}).
 *
 * @param exchange the Camel exchange carrying the option headers
 * @return the options to use for the blob operation; never {@code null}
 */
public static BlobServiceRequestOptions getRequestOptions(Exchange exchange) {
    BlobServiceRequestOptions options = exchange.getIn()
            .getHeader(BlobServiceConstants.BLOB_SERVICE_REQUEST_OPTIONS, BlobServiceRequestOptions.class);
    if (options == null) {
        // No pre-built options header: compose one from the per-field headers.
        options = new BlobServiceRequestOptions();
        options.setAccessCond(exchange.getIn()
                .getHeader(BlobServiceConstants.ACCESS_CONDITION, AccessCondition.class));
        options.setOpContext(exchange.getIn()
                .getHeader(BlobServiceConstants.OPERATION_CONTEXT, OperationContext.class));
        options.setRequestOpts(exchange.getIn()
                .getHeader(BlobServiceConstants.BLOB_REQUEST_OPTIONS, BlobRequestOptions.class));
    }
    return options;
}
Use of com.microsoft.azure.storage.OperationContext in the Apache Hadoop project,
in the class AzureNativeFileSystemStore, method getInstrumentedContext.
/**
 * Creates a new {@link OperationContext} for an Azure Storage operation, with
 * listeners hooked to it that update this file system's metrics.
 *
 * @param bindConcurrentOOBIo
 *          - bind to intercept send request call backs to handle OOB I/O.
 *
 * @return The OperationContext object to use.
 */
private OperationContext getInstrumentedContext(boolean bindConcurrentOOBIo) {
    OperationContext context = new OperationContext();

    if (selfThrottlingEnabled) {
        SelfThrottlingIntercept.hook(context, selfThrottlingReadFactor, selfThrottlingWriteFactor);
    }

    // bandwidthGaugeUpdater is null when we are configured to skip Azure metrics.
    if (bandwidthGaugeUpdater != null) {
        ResponseReceivedMetricUpdater.hook(context, instrumentation, bandwidthGaugeUpdater);
    }

    // The conditional header on all Azure blob storage read requests.
    if (bindConcurrentOOBIo) {
        SendRequestIntercept.bind(storageInteractionLayer.getCredentials(), context, true);
    }

    // Test hooks may wrap or replace the context entirely.
    if (testHookOperationContext != null) {
        context = testHookOperationContext.modifyOperationContext(context);
    }

    ErrorMetricUpdater.hook(context, instrumentation);
    return context;
}
Use of com.microsoft.azure.storage.OperationContext in the Apache Hadoop project,
in the class TestBlobDataValidation, method testCheckBlockMd5.
/**
 * Writes then reads a small test file while a response listener verifies that
 * the block-level Content-MD5 header is set (or not) as configured.
 *
 * @param expectMd5Checked whether the MD5 field is expected on GET/PUT responses
 * @throws Exception on any filesystem or assertion failure
 */
private void testCheckBlockMd5(final boolean expectMd5Checked) throws Exception {
    assumeNotNull(testAccount);
    Path testFilePath = new Path("/testFile");
    // Add a hook to check that for GET/PUT requests we set/don't set
    // the block-level MD5 field as configured. I tried to do clever
    // testing by also messing with the raw data to see if we actually
    // validate the data as expected, but the HttpURLConnection wasn't
    // pluggable enough for me to do that.
    testAccount.getFileSystem().getStore().addTestHookToOperationContext(new TestHookOperationContext() {
        @Override
        public OperationContext modifyOperationContext(OperationContext original) {
            original.getResponseReceivedEventHandler().addListener(new ContentMD5Checker(expectMd5Checked));
            return original;
        }
    });
    // try-with-resources: the original leaked the streams when write()/read()
    // threw (e.g. when the MD5 checker listener fails the request).
    try (OutputStream outStream = testAccount.getFileSystem().create(testFilePath)) {
        outStream.write(new byte[] { 5, 15 });
    }
    try (InputStream inStream = testAccount.getFileSystem().open(testFilePath)) {
        byte[] inBuf = new byte[100];
        while (inStream.read(inBuf) > 0) {
            // Drain the stream; the hooked listener performs the actual check.
        }
    }
}
Aggregations