Use of com.amazonaws.services.s3.model.Bucket in project stocator by SparkTC.
The class COSInputStream, method reopen.
/**
 * Opens up the stream at specified target position and for given length.
 *
 * @param reason reason for reopen
 * @param targetPos target position
 * @param length length requested
 * @throws IOException on any failure to open the object
 */
private synchronized void reopen(String reason, long targetPos, long length) throws IOException {
    if (wrappedStream != null) {
        closeStream("reopen(" + reason + ")", contentRangeFinish, false);
    }
    contentRangeFinish = calculateRequestLimit(inputPolicy, targetPos, length, contentLength, readahead);
    LOG.debug("reopen({}) for {} range[{}-{}], length={}," + " streamPosition={}, nextReadPosition={}",
        uri, reason, targetPos, contentRangeFinish, length, pos, nextReadPos);
    try {
        GetObjectRequest request = new GetObjectRequest(bucket, key)
            .withRange(targetPos, contentRangeFinish - 1);
        wrappedStream = client.getObject(request).getObjectContent();
        contentRangeStart = targetPos;
        if (wrappedStream == null) {
            throw new IOException("Null IO stream from reopen of (" + reason + ") " + uri);
        }
    } catch (AmazonClientException e) {
        throw COSUtils.translateException("Reopen at position " + targetPos, uri, e);
    }
    pos = targetPos;
}
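The core SDK call here is the ranged GetObjectRequest, which asks S3 for an inclusive byte range rather than the whole object. Below is a minimal, self-contained sketch of that pattern only; the bucket name, key, and byte offsets are hypothetical, and it assumes credentials come from the default provider chain via AmazonS3ClientBuilder rather than stocator's wrapped client.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3ObjectInputStream;

public class RangedReadSketch {
    public static void main(String[] args) throws Exception {
        AmazonS3 s3 = AmazonS3ClientBuilder.standard().build();

        long targetPos = 1024;          // first byte to read (hypothetical)
        long contentRangeFinish = 2048; // exclusive end, mirroring reopen()

        // The HTTP Range header is inclusive, hence the "- 1" as in reopen().
        GetObjectRequest request = new GetObjectRequest("example-bucket", "example-key")
            .withRange(targetPos, contentRangeFinish - 1);

        try (S3ObjectInputStream in = s3.getObject(request).getObjectContent()) {
            byte[] buf = new byte[4096];
            int n;
            while ((n = in.read(buf)) != -1)
                System.out.println("read " + n + " bytes");
        }
    }
}

The try-with-resources block stands in for the explicit closeStream(...) bookkeeping that COSInputStream does when it reopens at a new position.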
Use of com.amazonaws.services.s3.model.Bucket in project ignite by apache.
The class S3CheckpointSpi, method spiStart.
/**
 * {@inheritDoc}
 */
@SuppressWarnings({ "BusyWait" })
@Override
public void spiStart(String igniteInstanceName) throws IgniteSpiException {
    // Start SPI start stopwatch.
    startStopwatch();

    assertParameter(cred != null, "awsCredentials != null");

    if (log.isDebugEnabled()) {
        log.debug(configInfo("awsCredentials", cred));
        log.debug(configInfo("clientConfiguration", cfg));
        log.debug(configInfo("bucketNameSuffix", bucketNameSuffix));
        log.debug(configInfo("bucketEndpoint", bucketEndpoint));
        log.debug(configInfo("SSEAlgorithm", sseAlg));
    }

    if (cfg == null)
        U.warn(log, "Amazon client configuration is not set (will use default).");

    if (F.isEmpty(bucketNameSuffix)) {
        U.warn(log, "Bucket name suffix is null or empty (will use default bucket name).");
        bucketName = BUCKET_NAME_PREFIX + DFLT_BUCKET_NAME_SUFFIX;
    }
    else
        bucketName = BUCKET_NAME_PREFIX + bucketNameSuffix;

    s3 = cfg != null ? new AmazonS3Client(cred, cfg) : new AmazonS3Client(cred);

    if (!F.isEmpty(bucketEndpoint))
        s3.setEndpoint(bucketEndpoint);

    if (!s3.doesBucketExist(bucketName)) {
        try {
            s3.createBucket(bucketName);

            if (log.isDebugEnabled())
                log.debug("Created S3 bucket: " + bucketName);

            while (!s3.doesBucketExist(bucketName)) {
                try {
                    U.sleep(200);
                }
                catch (IgniteInterruptedCheckedException e) {
                    throw new IgniteSpiException("Thread has been interrupted.", e);
                }
            }
        }
        catch (AmazonClientException e) {
            try {
                if (!s3.doesBucketExist(bucketName))
                    throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            }
            catch (AmazonClientException ignored) {
                throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            }
        }
    }

    Collection<S3TimeData> s3TimeDataLst = new LinkedList<>();

    try {
        ObjectListing list = s3.listObjects(bucketName);

        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                S3CheckpointData data = read(sum.getKey());

                if (data != null) {
                    s3TimeDataLst.add(new S3TimeData(data.getExpireTime(), data.getKey()));

                    if (log.isDebugEnabled())
                        log.debug("Registered existing checkpoint from key: " + data.getKey());
                }
            }

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    }
    catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    }
    catch (IgniteCheckedException e) {
        throw new IgniteSpiException("Failed to marshal/unmarshal objects in bucket: " + bucketName, e);
    }

    // Track expiration only for data created by this node.
    timeoutWrk = new S3TimeoutWorker();

    timeoutWrk.add(s3TimeDataLst);
    timeoutWrk.start();

    registerMBean(igniteInstanceName, new S3CheckpointSpiMBeanImpl(this), S3CheckpointSpiMBean.class);

    // Ack ok start.
    if (log.isDebugEnabled())
        log.debug(startInfo());
}
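The bucket bootstrap in spiStart (check existence, create, then poll until the bucket becomes visible) is worth isolating, since S3 bucket creation is eventually consistent. A minimal sketch of just that part follows; the bucket name is hypothetical, credentials are assumed to come from the default provider chain, and doesBucketExistV2 is used as the non-deprecated variant of the doesBucketExist call seen above.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class EnsureBucketSketch {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 s3 = AmazonS3ClientBuilder.standard().build();

        String bucketName = "ignite-checkpoint-example"; // hypothetical bucket name

        if (!s3.doesBucketExistV2(bucketName)) {
            s3.createBucket(bucketName);

            // Creation is eventually consistent; poll until visible, as spiStart() does.
            while (!s3.doesBucketExistV2(bucketName))
                Thread.sleep(200);
        }
    }
}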
Use of com.amazonaws.services.s3.model.Bucket in project ignite by apache.
The class TcpDiscoveryS3IpFinder, method getRegisteredAddresses.
/**
 * {@inheritDoc}
 */
@Override
public Collection<InetSocketAddress> getRegisteredAddresses() throws IgniteSpiException {
    initClient();

    Collection<InetSocketAddress> addrs = new LinkedList<>();

    try {
        ObjectListing list = s3.listObjects(bucketName);

        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                String key = sum.getKey();

                StringTokenizer st = new StringTokenizer(key, DELIM);

                if (st.countTokens() != 2)
                    U.error(log, "Failed to parse S3 entry due to invalid format: " + key);
                else {
                    String addrStr = st.nextToken();
                    String portStr = st.nextToken();

                    int port = -1;

                    try {
                        port = Integer.parseInt(portStr);
                    }
                    catch (NumberFormatException e) {
                        U.error(log, "Failed to parse port for S3 entry: " + key, e);
                    }

                    if (port != -1) {
                        try {
                            addrs.add(new InetSocketAddress(addrStr, port));
                        }
                        catch (IllegalArgumentException e) {
                            U.error(log, "Failed to parse port for S3 entry: " + key, e);
                        }
                    }
                }
            }

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    }
    catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to list objects in the bucket: " + bucketName, e);
    }

    return addrs;
}
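Both Ignite snippets page through a bucket with listObjects followed by listNextBatchOfObjects until isTruncated() returns false. A self-contained sketch of that pagination loop, printing the keys of a hypothetical bucket and assuming default-chain credentials:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class ListAllKeysSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.standard().build();

        ObjectListing list = s3.listObjects("example-bucket"); // hypothetical bucket

        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries())
                System.out.println(sum.getKey() + " (" + sum.getSize() + " bytes)");

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list); // fetch the next page of results
            else
                break;
        }
    }
}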
Use of com.amazonaws.services.s3.model.Bucket in project ignite by apache.
The class S3CheckpointSpiSelfTest, method afterSpiStopped.
/**
 * @throws Exception If error.
 */
@Override
protected void afterSpiStopped() throws Exception {
    AWSCredentials cred = new BasicAWSCredentials(IgniteS3TestSuite.getAccessKey(), IgniteS3TestSuite.getSecretKey());

    AmazonS3 s3 = new AmazonS3Client(cred);

    String bucketName = S3CheckpointSpi.BUCKET_NAME_PREFIX + "unit-test-bucket";

    try {
        ObjectListing list = s3.listObjects(bucketName);

        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries())
                s3.deleteObject(bucketName, sum.getKey());

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    }
    catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    }
}
Use of com.amazonaws.services.s3.model.Bucket in project elasticsearch by elastic.
The class AbstractS3SnapshotRestoreTest, method cleanRepositoryFiles.
/**
 * Deletes content of the repository files in the bucket
 */
public void cleanRepositoryFiles(String basePath) {
    Settings settings = internalCluster().getInstance(Settings.class);
    Settings[] buckets = {
        settings.getByPrefix("repositories.s3."),
        settings.getByPrefix("repositories.s3.private-bucket."),
        settings.getByPrefix("repositories.s3.remote-bucket."),
        settings.getByPrefix("repositories.s3.external-bucket.")
    };
    for (Settings bucket : buckets) {
        String bucketName = bucket.get("bucket");

        // We check that settings has been set in elasticsearch.yml integration test file
        // as described in README
        assertThat("Your settings in elasticsearch.yml are incorrects. Check README file.", bucketName, notNullValue());

        AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(Settings.EMPTY, null, randomBoolean(), null);
        try {
            ObjectListing prevListing = null;
            // From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html
            // we can do at most 1K objects per delete
            // We don't know the bucket name until first object listing
            DeleteObjectsRequest multiObjectDeleteRequest = null;
            ArrayList<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<DeleteObjectsRequest.KeyVersion>();
            while (true) {
                ObjectListing list;
                if (prevListing != null) {
                    list = client.listNextBatchOfObjects(prevListing);
                } else {
                    list = client.listObjects(bucketName, basePath);
                    multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
                }
                for (S3ObjectSummary summary : list.getObjectSummaries()) {
                    keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
                    // Every 500 objects batch the delete request
                    if (keys.size() > 500) {
                        multiObjectDeleteRequest.setKeys(keys);
                        client.deleteObjects(multiObjectDeleteRequest);
                        multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
                        keys.clear();
                    }
                }
                if (list.isTruncated()) {
                    prevListing = list;
                } else {
                    break;
                }
            }
            if (!keys.isEmpty()) {
                multiObjectDeleteRequest.setKeys(keys);
                client.deleteObjects(multiObjectDeleteRequest);
            }
        } catch (Exception ex) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to delete S3 repository [{}]", bucketName), ex);
        }
    }
}
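The batching in cleanRepositoryFiles flushes a DeleteObjectsRequest roughly every 500 keys, staying well under the 1,000-key limit of the multi-object delete API. A stripped-down sketch of that batching outside the Elasticsearch test harness follows; the bucket name and prefix are hypothetical and the client again assumes default-chain credentials.

import java.util.ArrayList;
import java.util.List;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class BatchedDeleteSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.standard().build();

        String bucketName = "example-bucket"; // hypothetical bucket
        String prefix = "snapshots/";         // hypothetical key prefix

        List<KeyVersion> keys = new ArrayList<>();
        ObjectListing list = s3.listObjects(bucketName, prefix);

        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                keys.add(new KeyVersion(sum.getKey()));

                // Flush a batch before approaching the 1,000-key cap per request.
                if (keys.size() >= 500) {
                    s3.deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(keys));
                    keys.clear();
                }
            }
            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }

        // Delete whatever keys remain after the last page.
        if (!keys.isEmpty())
            s3.deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(keys));
    }
}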