Use of com.amazonaws.services.s3.model.Region in project hadoop by apache.
From class S3AUtils, the method translateException:
/**
 * Translate an exception raised in an operation into an IOException.
 * The specific type of IOException depends on the class of
 * {@link AmazonClientException} passed in, and any status codes included
 * in the operation. That is: HTTP error codes are examined and can be
 * used to build a more specific response.
 * @param operation the operation being attempted
 * @param path the path operated on (may be null)
 * @param exception the Amazon exception raised
 * @return an IOException which wraps the caught exception.
 */
@SuppressWarnings("ThrowableInstanceNeverThrown")
public static IOException translateException(String operation,
    String path,
    AmazonClientException exception) {
  String message = String.format("%s%s: %s",
      operation,
      path != null ? (" on " + path) : "",
      exception);
  if (!(exception instanceof AmazonServiceException)) {
    if (containsInterruptedException(exception)) {
      return (IOException) new InterruptedIOException(message)
          .initCause(exception);
    }
    return new AWSClientIOException(message, exception);
  } else {
    IOException ioe;
    AmazonServiceException ase = (AmazonServiceException) exception;
    // this exception is non-null if the service exception is an S3 one
    AmazonS3Exception s3Exception = ase instanceof AmazonS3Exception
        ? (AmazonS3Exception) ase
        : null;
    int status = ase.getStatusCode();
    switch (status) {
      case 301:
        if (s3Exception != null) {
          if (s3Exception.getAdditionalDetails() != null
              && s3Exception.getAdditionalDetails().containsKey(ENDPOINT_KEY)) {
            message = String.format("Received permanent redirect response to "
                + "endpoint %s. This likely indicates that the S3 endpoint "
                + "configured in %s does not match the AWS region containing "
                + "the bucket.",
                s3Exception.getAdditionalDetails().get(ENDPOINT_KEY), ENDPOINT);
          }
          ioe = new AWSS3IOException(message, s3Exception);
        } else {
          ioe = new AWSServiceIOException(message, ase);
        }
        break;
      // permissions
      case 401:
      case 403:
        ioe = new AccessDeniedException(path, null, message);
        ioe.initCause(ase);
        break;
      // the object isn't there
      case 404:
      case 410:
        ioe = new FileNotFoundException(message);
        ioe.initCause(ase);
        break;
      // out of range. This may happen if an object is overwritten with
      // a shorter one while it is being read.
      case 416:
        ioe = new EOFException(message);
        break;
      default:
        // no specific exit code. Choose an IOE subclass based on the class
        // of the caught exception
        ioe = s3Exception != null
            ? new AWSS3IOException(message, s3Exception)
            : new AWSServiceIOException(message, ase);
        break;
    }
    return ioe;
  }
}
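For context, a minimal sketch of how a caller might use this translator: wrap an SDK call and convert any AmazonClientException before it escapes. The surrounding method, bucket, and key names here are illustrative, not Hadoop's actual call sites.

// Hypothetical call site (names illustrative): convert SDK failures
// into the IOException hierarchy expected by FileSystem callers.
ObjectMetadata getMetadataTranslated(AmazonS3 s3, String bucket, String key)
    throws IOException {
  try {
    return s3.getObjectMetadata(bucket, key);
  } catch (AmazonClientException e) {
    throw S3AUtils.translateException("getObjectMetadata", key, e);
  }
}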
Use of com.amazonaws.services.s3.model.Region in project jackrabbit-oak by apache.
From class S3Backend, the method init:
public void init(CachingDataStore store, String homeDir, Properties prop)
    throws DataStoreException {
  ClassLoader contextClassLoader =
      Thread.currentThread().getContextClassLoader();
  try {
    startTime = new Date();
    Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
    LOG.debug("init");
    this.store = store;
    s3ReqDecorator = new S3RequestDecorator(prop);
    s3service = Utils.openService(prop);
    if (bucket == null || "".equals(bucket.trim())) {
      bucket = prop.getProperty(S3Constants.S3_BUCKET);
      // Alternately check if the 'container' property is set
      if (Strings.isNullOrEmpty(bucket)) {
        bucket = prop.getProperty(S3Constants.S3_CONTAINER);
      }
    }
    String region = prop.getProperty(S3Constants.S3_REGION);
    Region s3Region = null;
    if (StringUtils.isNullOrEmpty(region)) {
      // No region configured: fall back to the region of the EC2 instance
      // this process is running on, if any.
      com.amazonaws.regions.Region ec2Region = Regions.getCurrentRegion();
      if (ec2Region != null) {
        s3Region = Region.fromValue(ec2Region.getName());
      } else {
        throw new AmazonClientException("parameter [" + S3Constants.S3_REGION
            + "] not configured and cannot be derived from environment");
      }
    } else {
      if (Utils.DEFAULT_AWS_BUCKET_REGION.equals(region)) {
        s3Region = Region.US_Standard;
      } else if (Region.EU_Ireland.toString().equals(region)) {
        s3Region = Region.EU_Ireland;
      } else {
        s3Region = Region.fromValue(region);
      }
    }
    if (!s3service.doesBucketExist(bucket)) {
      s3service.createBucket(bucket, s3Region);
      LOG.info("Created bucket [{}] in [{}] ", bucket, region);
    } else {
      LOG.info("Using bucket [{}] in [{}] ", bucket, region);
    }
    int writeThreads = 10;
    String writeThreadsStr = prop.getProperty(S3Constants.S3_WRITE_THREADS);
    if (writeThreadsStr != null) {
      writeThreads = Integer.parseInt(writeThreadsStr);
    }
    LOG.info("Using thread pool of [{}] threads in S3 transfer manager.",
        writeThreads);
    tmx = new TransferManager(s3service,
        (ThreadPoolExecutor) Executors.newFixedThreadPool(writeThreads,
            new NamedThreadFactory("s3-transfer-manager-worker")));
    int asyncWritePoolSize = 10;
    String maxConnsStr = prop.getProperty(S3Constants.S3_MAX_CONNS);
    if (maxConnsStr != null) {
      asyncWritePoolSize = Integer.parseInt(maxConnsStr) - writeThreads;
    }
    asyncWriteExecuter = (ThreadPoolExecutor) Executors.newFixedThreadPool(
        asyncWritePoolSize, new NamedThreadFactory("s3-write-worker"));
    String renameKeyProp = prop.getProperty(S3Constants.S3_RENAME_KEYS);
    boolean renameKeyBool = (renameKeyProp == null || "".equals(renameKeyProp))
        ? false
        : Boolean.parseBoolean(renameKeyProp);
    LOG.info("Rename keys [{}]", renameKeyBool);
    if (renameKeyBool) {
      renameKeys();
    }
    LOG.debug("S3 Backend initialized in [{}] ms",
        System.currentTimeMillis() - startTime.getTime());
  } catch (Exception e) {
    LOG.debug("error", e);
    Map<String, String> filteredMap = Maps.newHashMap();
    if (prop != null) {
      // Strip the AWS credentials before echoing the configuration
      // back in the exception message.
      filteredMap = Maps.filterKeys(Maps.fromProperties(prop),
          new Predicate<String>() {

            @Override
            public boolean apply(String input) {
              return !input.equals(S3Constants.ACCESS_KEY)
                  && !input.equals(S3Constants.SECRET_KEY);
            }
          });
    }
    throw new DataStoreException("Could not initialize S3 from "
        + filteredMap, e);
  } finally {
    if (contextClassLoader != null) {
      Thread.currentThread().setContextClassLoader(contextClassLoader);
    }
  }
}
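To make the initialization contract concrete, here is a hedged configuration sketch. It reuses the S3Constants keys read above; the concrete values, and the store and homeDir variables, are assumptions for illustration only.

// Minimal sketch, not Jackrabbit's documented setup: build the Properties
// that init() consumes, using the same S3Constants keys referenced above.
Properties props = new Properties();
props.setProperty(S3Constants.ACCESS_KEY, "<access-key>");   // illustrative
props.setProperty(S3Constants.SECRET_KEY, "<secret-key>");   // illustrative
props.setProperty(S3Constants.S3_BUCKET, "oak-blobs");       // illustrative
props.setProperty(S3Constants.S3_REGION, "eu-west-1");
props.setProperty(S3Constants.S3_WRITE_THREADS, "10");
S3Backend backend = new S3Backend();
backend.init(store, homeDir, props);  // store and homeDir supplied by the caller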
Use of com.amazonaws.services.s3.model.Region in project presto by prestodb.
From class PrestoS3FileSystem, the method createAmazonS3Client:
private AmazonS3Client createAmazonS3Client(URI uri,
    Configuration hadoopConfig,
    ClientConfiguration clientConfig) {
  AWSCredentialsProvider credentials =
      getAwsCredentialsProvider(uri, hadoopConfig);
  Optional<EncryptionMaterialsProvider> emp =
      createEncryptionMaterialsProvider(hadoopConfig);
  AmazonS3Client client;
  String signerType = hadoopConfig.get(S3_SIGNER_TYPE);
  if (signerType != null) {
    clientConfig.withSignerOverride(signerType);
  }
  if (emp.isPresent()) {
    client = new AmazonS3EncryptionClient(credentials, emp.get(),
        clientConfig, new CryptoConfiguration(), METRIC_COLLECTOR);
  } else {
    client = new AmazonS3Client(credentials, clientConfig, METRIC_COLLECTOR);
  }
  // use local region when running inside of EC2
  if (pinS3ClientToCurrentRegion) {
    Region region = Regions.getCurrentRegion();
    if (region != null) {
      client.setRegion(region);
    }
  }
  String endpoint = hadoopConfig.get(S3_ENDPOINT);
  if (endpoint != null) {
    client.setEndpoint(endpoint);
  }
  return client;
}
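A hedged sketch of the Hadoop-side configuration this factory reads. S3_SIGNER_TYPE and S3_ENDPOINT are the constants used above; the signer and endpoint values shown are examples only. Note that because setEndpoint runs after setRegion, an explicitly configured endpoint overrides the region-derived one.

// Sketch only: wire up the optional settings consumed by
// createAmazonS3Client via the Hadoop Configuration.
Configuration conf = new Configuration();
conf.set(S3_SIGNER_TYPE, "AWSS3V4SignerType");        // example signer override
conf.set(S3_ENDPOINT, "s3.us-west-2.amazonaws.com");  // example explicit endpoint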
Use of com.amazonaws.services.s3.model.Region in project YCSB by brianfrankcooper.
From class S3Client, the method init:
/**
 * Initialize any state for the storage.
 * Called once per S3 instance; if the client is not null it is reused.
 */
@Override
public void init() throws DBException {
  final int count = INIT_COUNT.incrementAndGet();
  synchronized (S3Client.class) {
    Properties propsCL = getProperties();
    int recordcount = Integer.parseInt(propsCL.getProperty("recordcount"));
    int operationcount = Integer.parseInt(propsCL.getProperty("operationcount"));
    int numberOfOperations = 0;
    if (recordcount > 0) {
      if (recordcount > operationcount) {
        numberOfOperations = recordcount;
      } else {
        numberOfOperations = operationcount;
      }
    } else {
      numberOfOperations = operationcount;
    }
    if (count <= numberOfOperations) {
      String accessKeyId = null;
      String secretKey = null;
      String endPoint = null;
      String region = null;
      String maxErrorRetry = null;
      String maxConnections = null;
      String protocol = null;
      BasicAWSCredentials s3Credentials;
      ClientConfiguration clientConfig;
      if (s3Client != null) {
        System.out.println("Reusing the same client");
        return;
      }
      try {
        // Load connection parameters from s3.properties, falling back to
        // the YCSB command-line properties for each key.
        InputStream propFile = S3Client.class.getClassLoader()
            .getResourceAsStream("s3.properties");
        Properties props = new Properties(System.getProperties());
        props.load(propFile);
        accessKeyId = props.getProperty("s3.accessKeyId");
        if (accessKeyId == null) {
          accessKeyId = propsCL.getProperty("s3.accessKeyId");
        }
        System.out.println(accessKeyId);
        secretKey = props.getProperty("s3.secretKey");
        if (secretKey == null) {
          secretKey = propsCL.getProperty("s3.secretKey");
        }
        System.out.println(secretKey);
        endPoint = props.getProperty("s3.endPoint");
        if (endPoint == null) {
          endPoint = propsCL.getProperty("s3.endPoint", "s3.amazonaws.com");
        }
        System.out.println(endPoint);
        region = props.getProperty("s3.region");
        if (region == null) {
          region = propsCL.getProperty("s3.region", "us-east-1");
        }
        System.out.println(region);
        maxErrorRetry = props.getProperty("s3.maxErrorRetry");
        if (maxErrorRetry == null) {
          maxErrorRetry = propsCL.getProperty("s3.maxErrorRetry", "15");
        }
        maxConnections = props.getProperty("s3.maxConnections");
        if (maxConnections == null) {
          maxConnections = propsCL.getProperty("s3.maxConnections");
        }
        protocol = props.getProperty("s3.protocol");
        if (protocol == null) {
          protocol = propsCL.getProperty("s3.protocol", "HTTPS");
        }
        sse = props.getProperty("s3.sse");
        if (sse == null) {
          sse = propsCL.getProperty("s3.sse", "false");
        }
        String ssec = props.getProperty("s3.ssec");
        if (ssec == null) {
          ssec = propsCL.getProperty("s3.ssec", null);
        } else {
          ssecKey = new SSECustomerKey(ssec);
        }
      } catch (Exception e) {
        System.err.println("The properties file does not exist: " + e.toString());
        e.printStackTrace();
      }
      try {
        System.out.println("Initializing the S3 connection");
        s3Credentials = new BasicAWSCredentials(accessKeyId, secretKey);
        clientConfig = new ClientConfiguration();
        clientConfig.setMaxErrorRetry(Integer.parseInt(maxErrorRetry));
        if (protocol.equals("HTTP")) {
          clientConfig.setProtocol(Protocol.HTTP);
        } else {
          clientConfig.setProtocol(Protocol.HTTPS);
        }
        if (maxConnections != null) {
          clientConfig.setMaxConnections(Integer.parseInt(maxConnections));
        }
        s3Client = new AmazonS3Client(s3Credentials, clientConfig);
        s3Client.setRegion(Region.getRegion(Regions.fromName(region)));
        s3Client.setEndpoint(endPoint);
        System.out.println("Connection successfully initialized");
      } catch (Exception e) {
        System.err.println("Could not connect to S3 storage: " + e.toString());
        e.printStackTrace();
        throw new DBException(e);
      }
    } else {
      System.err.println("The number of threads must be less than or equal to "
          + "the number of operations");
      throw new DBException(new Error(
          "The number of threads must be less than or equal to the number of operations"));
    }
  }
}
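For reference, a sample s3.properties sketch matching the keys read above; every value is illustrative.

# Illustrative s3.properties; keys mirror those read in init() above.
s3.accessKeyId=<access-key>
s3.secretKey=<secret-key>
s3.endPoint=s3.amazonaws.com
s3.region=us-east-1
s3.maxErrorRetry=15
s3.maxConnections=50
s3.protocol=HTTPS
s3.sse=false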
Use of com.amazonaws.services.s3.model.Region in project quickutil by quickutil.
From class AWSS3Util, the method buildClient:
/**
 * Get a client instance.
 *
 * @param s3Name the S3 configuration name used to look up credentials in bucketMap
 * @return a configured AmazonS3 client
 */
public static AmazonS3 buildClient(String s3Name) {
  AmazonS3ClientBuilder s3Builder = AmazonS3ClientBuilder.standard();
  s3Builder.setCredentials(new AWSStaticCredentialsProvider(
      new BasicAWSCredentials(bucketMap.get(s3Name).get("access_key"),
          bucketMap.get(s3Name).get("secret_key"))));
  s3Builder.setEndpointConfiguration(new EndpointConfiguration(
      bucketMap.get(s3Name).get("endpoint"),
      bucketMap.get(s3Name).get("region")));
  return s3Builder.build();
}
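A hedged usage sketch: the name "reports" must be a key present in bucketMap, and the bucket and object key are illustrative.

// Hypothetical usage of buildClient; "reports" must exist in bucketMap.
AmazonS3 client = AWSS3Util.buildClient("reports");
client.putObject("example-bucket", "exports/report.csv",
    new File("/tmp/report.csv"));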