Use of com.amazonaws.AmazonClientException in project amazon-cognito-android by aws.
Class CognitoSyncStorage, method unsubscribeFromDataset.
@Override
public void unsubscribeFromDataset(String datasetName, String deviceId) {
    String identityId = provider.getIdentityId();
    UnsubscribeFromDatasetRequest request = new UnsubscribeFromDatasetRequest()
            .withIdentityPoolId(provider.getIdentityPoolId())
            .withIdentityId(identityId)
            .withDatasetName(datasetName)
            .withDeviceId(deviceId);
    try {
        client.unsubscribeFromDataset(request);
    } catch (AmazonClientException ace) {
        // Log the SDK failure and rethrow it as a storage-level exception.
        Log.e(TAG, "Failed to unsubscribe from dataset", ace);
        throw new UnsubscribeFailedException("Failed to unsubscribe from dataset", ace);
    }
}
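The wrapper converts the SDK's unchecked AmazonClientException into a domain-specific UnsubscribeFailedException, so callers only deal with one exception type. A minimal caller sketch; the names storage, deviceId, and TAG are assumptions for illustration, not part of the snippet above:

// Hypothetical caller; `storage`, `deviceId`, and TAG are assumed to exist.
try {
    storage.unsubscribeFromDataset("myDataset", deviceId);
} catch (UnsubscribeFailedException e) {
    // The original AmazonClientException is preserved as the cause.
    Log.w(TAG, "Unsubscribe failed; will retry later", e);
}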
Use of com.amazonaws.AmazonClientException in project amazon-cognito-android by aws.
Class CognitoSyncStorage, method getDatasets.
/*
 * (non-Javadoc)
 * @see com.amazonaws.cognitov2.RemoteStorage#listDatasets()
 */
@Override
public List<DatasetMetadata> getDatasets() {
    List<DatasetMetadata> datasets = new ArrayList<DatasetMetadata>();
    String nextToken = null;
    do {
        ListDatasetsRequest request = new ListDatasetsRequest();
        appendUserAgent(request, userAgent);
        request.setIdentityPoolId(identityPoolId);
        // A page size large enough to reduce the number of requests
        request.setMaxResults("64");
        request.setNextToken(nextToken);
        ListDatasetsResult result = null;
        try {
            request.setIdentityId(getIdentityId());
            result = client.listDatasets(request);
        } catch (AmazonClientException ace) {
            throw handleException(ace, "Failed to list dataset metadata");
        }
        for (com.amazonaws.services.cognitosync.model.Dataset dataset : result.getDatasets()) {
            datasets.add(modelToDatasetMetadata(dataset));
        }
        nextToken = result.getNextToken();
    } while (nextToken != null);
    return datasets;
}
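The do/while loop above is the usual token-based pagination pattern: pass null on the first call, then echo each nextToken back until the service stops returning one. A generic sketch of the same pattern, where PageFetcher and Page are hypothetical helpers rather than SDK types:

import java.util.ArrayList;
import java.util.List;

// Hypothetical abstraction over one page of a paginated API.
interface PageFetcher<T> {
    Page<T> fetch(String nextToken); // nextToken is null on the first call
}

class Page<T> {
    final List<T> items;
    final String nextToken; // null when no more pages remain
    Page(List<T> items, String nextToken) {
        this.items = items;
        this.nextToken = nextToken;
    }
}

class Pagination {
    static <T> List<T> fetchAll(PageFetcher<T> fetcher) {
        List<T> all = new ArrayList<T>();
        String token = null;
        do {
            Page<T> page = fetcher.fetch(token);
            all.addAll(page.items);
            token = page.nextToken;
        } while (token != null);
        return all;
    }
}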
Use of com.amazonaws.AmazonClientException in project stocator by SparkTC.
Class COSAPIClient, method rename.
@Override
public boolean rename(String hostName, String srcPath, String dstPath) throws IOException {
    LOG.debug("Rename path {} to {}", srcPath, dstPath);
    Path src = new Path(srcPath);
    Path dst = new Path(dstPath);
    String srcKey = pathToKey(src);
    String dstKey = pathToKey(dst);
    if (srcKey.isEmpty()) {
        throw new IOException("Rename failed " + srcPath + " to " + dstPath + ": source is root directory");
    }
    if (dstKey.isEmpty()) {
        throw new IOException("Rename failed " + srcPath + " to " + dstPath + ": dest is root directory");
    }
    // Get the source file status; this raises a FileNotFoundException if
    // there is no source file.
    FileStatus srcStatus = getFileStatus(hostName, src, "rename");
    if (srcKey.equals(dstKey)) {
        LOG.debug("rename: src and dest refer to the same file or directory: {}", dstPath);
        throw new IOException("source " + srcPath + " and dest " + dstPath + " refer to the same file or directory");
    }
    FileStatus dstStatus = null;
    try {
        dstStatus = getFileStatus(hostName, dst, "rename");
        // The destination exists; check whether it can be the target of the rename.
        if (srcStatus.isDirectory()) {
            if (dstStatus.isFile()) {
                throw new IOException("source " + srcPath + " and dest " + dstPath + ": source is a directory and dest is a file");
            }
            // At this point the destination is an empty directory.
        } else {
            // Source is a file; the destination directory may be empty or not.
            if (dstStatus.isFile()) {
                throw new IOException("source " + srcPath + " and dest " + dstPath + ": cannot rename onto an existing file");
            }
        }
    } catch (FileNotFoundException e) {
        LOG.debug("rename: destination path {} not found", dstPath);
    }
    if (srcStatus.isFile()) {
        LOG.debug("rename: renaming file {} to {}", src, dst);
        long length = srcStatus.getLen();
        if (dstStatus != null && dstStatus.isDirectory()) {
            // Copy the file under the destination directory, keeping its name.
            String newDstKey = dstKey;
            if (!newDstKey.endsWith("/")) {
                newDstKey = newDstKey + "/";
            }
            String filename = srcKey.substring(pathToKey(src.getParent()).length() + 1);
            newDstKey = newDstKey + filename;
            copyFile(srcKey, newDstKey, length);
        } else {
            copyFile(srcKey, dstKey, srcStatus.getLen());
        }
        delete(hostName, src, false);
    } else {
        // Source is a directory: list every object under the source prefix.
        ObjectListing objects = mClient.listObjects(mBucket, srcKey);
        List<S3ObjectSummary> summaries = objects.getObjectSummaries();
        while (objects.isTruncated()) {
            objects = mClient.listNextBatchOfObjects(objects);
            summaries.addAll(objects.getObjectSummaries());
        }
        // Batch copy using TransferManager: build a list of copy requests.
        List<CopyObjectRequest> copyRequests = new ArrayList<>();
        List<KeyVersion> keysToDelete = new ArrayList<>();
        for (S3ObjectSummary objectSummary : summaries) {
            String newSrcKey = objectSummary.getKey();
            keysToDelete.add(new KeyVersion(newSrcKey));
            // Just in case there are still folders returned as objects
            if (newSrcKey.endsWith("/")) {
                LOG.debug("rename: {} is a folder and will be ignored", newSrcKey);
                continue;
            }
            long length = objectSummary.getSize();
            if (dstStatus == null || dstStatus.isDirectory()) {
                String newDstKey = dstKey;
                if (!newDstKey.endsWith("/")) {
                    newDstKey = newDstKey + "/";
                }
                String filename = newSrcKey.substring(pathToKey(src).length() + 1);
                newDstKey = newDstKey + filename;
                CopyObjectRequest copyObjectRequest = new CopyObjectRequest(mBucket, newSrcKey, mBucket, newDstKey);
                ObjectMetadata srcmd = getObjectMetadata(newSrcKey);
                if (srcmd != null) {
                    copyObjectRequest.setNewObjectMetadata(srcmd);
                }
                copyRequests.add(copyObjectRequest);
            } else {
                throw new IOException("Unexpected dstStatus");
            }
        }
        // Submit the copy jobs to the TransferManager; the listener counts
        // the latch down as each copy finishes.
        CountDownLatch doneSignal = new CountDownLatch(copyRequests.size());
        List<Copy> copyJobs = new ArrayList<>();
        for (CopyObjectRequest request : copyRequests) {
            request.setGeneralProgressListener(new CopyCompleteListener(
                    request.getSourceBucketName() + "/" + request.getSourceKey(),
                    request.getDestinationBucketName() + "/" + request.getDestinationKey(),
                    doneSignal));
            copyJobs.add(transfers.copy(request));
        }
        try {
            doneSignal.await();
        } catch (AmazonClientException e) {
            throw new IOException("Couldn't wait for all copies to be finished");
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted copying objects, cancelling");
        }
        // Delete the source objects in one batch request and verify that the
        // object versions were successfully deleted.
        DeleteObjectsRequest keysDeleteRequest = new DeleteObjectsRequest(mBucket)
                .withKeys(keysToDelete)
                .withQuiet(false);
        DeleteObjectsResult deletedKeys = mClient.deleteObjects(keysDeleteRequest);
        int successfulDeletes = deletedKeys.getDeletedObjects().size();
        LOG.debug("{} objects successfully deleted", successfulDeletes);
    }
    if (!src.getParent().equals(dst.getParent())) {
        LOG.debug("{} is not equal to {}. Going to create directory {}", src.getParent(), dst.getParent(), src.getParent());
        createDirectoryIfNecessary(hostName, src.getParent());
    }
    return true;
}
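The CopyCompleteListener used above is defined elsewhere in stocator; its job is to count the latch down once per finished copy so that doneSignal.await() returns when the whole batch is done. A minimal sketch of such a listener, using only the AWS SDK's ProgressListener API; the field names and exact behavior are assumptions, and the real stocator class may differ:

import java.util.concurrent.CountDownLatch;
import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;

// Illustrative sketch of a per-copy completion listener.
class CopyCompleteListener implements ProgressListener {
    private final String srcPath;
    private final String dstPath;
    private final CountDownLatch doneSignal;

    CopyCompleteListener(String srcPath, String dstPath, CountDownLatch doneSignal) {
        this.srcPath = srcPath;
        this.dstPath = dstPath;
        this.doneSignal = doneSignal;
    }

    @Override
    public void progressChanged(ProgressEvent progressEvent) {
        ProgressEventType type = progressEvent.getEventType();
        if (type == ProgressEventType.TRANSFER_COMPLETED_EVENT
                || type == ProgressEventType.TRANSFER_FAILED_EVENT) {
            doneSignal.countDown();
        }
    }
}

Counting down on failure as well keeps rename() from blocking forever when a copy fails; whether the real listener does this is not shown in the snippet above.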
Use of com.amazonaws.AmazonClientException in project stocator by SparkTC.
Class COSInputStream, method reopen.
/**
 * Opens the stream at the specified target position for the given length.
 *
 * @param reason reason for reopen
 * @param targetPos target position
 * @param length length requested
 * @throws IOException on any failure to open the object
 */
private synchronized void reopen(String reason, long targetPos, long length) throws IOException {
    if (wrappedStream != null) {
        closeStream("reopen(" + reason + ")", contentRangeFinish, false);
    }
    contentRangeFinish = calculateRequestLimit(inputPolicy, targetPos, length, contentLength, readahead);
    LOG.debug("reopen({}) for {} range[{}-{}], length={}, streamPosition={}, nextReadPosition={}",
            uri, reason, targetPos, contentRangeFinish, length, pos, nextReadPos);
    try {
        // Ranged GET: request only the bytes [targetPos, contentRangeFinish).
        GetObjectRequest request = new GetObjectRequest(bucket, key)
                .withRange(targetPos, contentRangeFinish - 1);
        wrappedStream = client.getObject(request).getObjectContent();
        contentRangeStart = targetPos;
        if (wrappedStream == null) {
            throw new IOException("Null IO stream from reopen of (" + reason + ") " + uri);
        }
    } catch (AmazonClientException e) {
        throw COSUtils.translateException("Reopen at position " + targetPos, uri, e);
    }
    pos = targetPos;
}
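calculateRequestLimit decides where the ranged GET should end, trading extra bytes read ahead against the cost of another reopen. A simplified sketch of a sequential-read policy; the real stocator method also takes the input policy into account, so this signature and logic are assumptions:

// Simplified sketch: extend each request by the readahead window, capped at EOF.
static long calculateRequestLimit(long targetPos, long length, long contentLength, long readahead) {
    // Read at least `length` bytes, but round up to the readahead window.
    long rangeLimit = targetPos + Math.max(length, readahead);
    return Math.min(rangeLimit, contentLength);
}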
Use of com.amazonaws.AmazonClientException in project stocator by SparkTC.
Class COSOutputStream, method close.
@Override
public void close() throws IOException {
    if (closed.getAndSet(true)) {
        return;
    }
    mBackupOutputStream.close();
    LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
    try {
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(mBackupFile.length());
        om.setContentType(mContentType);
        om.setUserMetadata(mMetadata);
        // Set If-None-Match to ensure the write is atomic: the PUT fails if
        // the key already exists.
        if (mAvoidOverwrite) {
            LOG.debug("Avoid Overwrite - setting If-None-Match header");
            om.setHeader("If-None-Match", "*");
        }
        PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
        putObjectRequest.setMetadata(om);
        Upload upload = transfers.upload(putObjectRequest);
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException(e.toString()).initCause(e);
    } catch (AmazonClientException e) {
        throw new IOException(String.format("saving output %s %s", mKey, e));
    } finally {
        if (!mBackupFile.delete()) {
            LOG.warn("Could not delete temporary cos file: {}", mBackupFile);
        }
        super.close();
    }
    LOG.debug("OutputStream for key '{}' upload complete", mKey);
}
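With If-None-Match: * set, the object store rejects the PUT when the key already exists, typically with HTTP 412 Precondition Failed. A standalone sketch of the same conditional-PUT idea; the method name, parameters, and the 412 check are illustrative assumptions, not stocator code:

import java.io.File;
import java.io.IOException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

// Hypothetical helper: upload only if the key does not already exist.
static void putIfAbsent(AmazonS3 s3, String bucket, String key, File file) throws IOException {
    ObjectMetadata om = new ObjectMetadata();
    // Conditional PUT: fail instead of overwriting an existing object.
    om.setHeader("If-None-Match", "*");
    PutObjectRequest put = new PutObjectRequest(bucket, key, file);
    put.setMetadata(om);
    try {
        s3.putObject(put);
    } catch (AmazonServiceException e) {
        if (e.getStatusCode() == 412) {
            // Precondition failed: another writer created the object first.
            throw new IOException("Object already exists: " + key, e);
        }
        throw e;
    }
}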