Example 96 with AmazonClientException

use of com.amazonaws.AmazonClientException in project amazon-cognito-android by aws.

the class CognitoSyncStorage method unsubscribeFromDataset.

@Override
public void unsubscribeFromDataset(String datasetName, String deviceId) {
    String identityId = provider.getIdentityId();
    UnsubscribeFromDatasetRequest request = new UnsubscribeFromDatasetRequest()
            .withIdentityPoolId(provider.getIdentityPoolId())
            .withIdentityId(identityId)
            .withDatasetName(datasetName)
            .withDeviceId(deviceId);
    try {
        client.unsubscribeFromDataset(request);
    } catch (AmazonClientException ace) {
        Log.e(TAG, "Failed to unsubscribe from dataset", ace);
        throw new UnsubscribeFailedException("Failed to unsubscribe from dataset", ace);
    }
}
Also used : UnsubscribeFromDatasetRequest(com.amazonaws.services.cognitosync.model.UnsubscribeFromDatasetRequest) UnsubscribeFailedException(com.amazonaws.mobileconnectors.cognito.exceptions.UnsubscribeFailedException) AmazonClientException(com.amazonaws.AmazonClientException)
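
The example wraps the unchecked AmazonClientException in a domain-specific UnsubscribeFailedException so callers can react without depending on SDK exception types. Below is a minimal standalone sketch of the same catch pattern against the low-level Cognito Sync client; the pool, identity, dataset and device identifiers are illustrative placeholders, not values from the example above.

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.cognitosync.AmazonCognitoSyncClient;
import com.amazonaws.services.cognitosync.model.UnsubscribeFromDatasetRequest;

public class UnsubscribeSketch {
    // All identifiers below are placeholders for illustration only.
    static void unsubscribe(AmazonCognitoSyncClient client) {
        UnsubscribeFromDatasetRequest request = new UnsubscribeFromDatasetRequest()
                .withIdentityPoolId("us-east-1:example-pool-id")
                .withIdentityId("us-east-1:example-identity-id")
                .withDatasetName("exampleDataset")
                .withDeviceId("example-device-id");
        try {
            client.unsubscribeFromDataset(request);
        } catch (AmazonServiceException ase) {
            // The service rejected the call; status and error codes are available.
            System.err.println("Service error " + ase.getStatusCode() + ": " + ase.getErrorCode());
        } catch (AmazonClientException ace) {
            // The request never reached the service (e.g. a connectivity problem).
            System.err.println("Client-side failure: " + ace.getMessage());
        }
    }
}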

Example 97 with AmazonClientException

use of com.amazonaws.AmazonClientException in project amazon-cognito-android by aws.

the class CognitoSyncStorage method getDatasets.

/*
     * (non-Javadoc)
     * @see com.amazonaws.cognitov2.RemoteStorage#listDatasets()
     */
@Override
public List<DatasetMetadata> getDatasets() {
    List<DatasetMetadata> datasets = new ArrayList<DatasetMetadata>();
    String nextToken = null;
    do {
        ListDatasetsRequest request = new ListDatasetsRequest();
        appendUserAgent(request, userAgent);
        request.setIdentityPoolId(identityPoolId);
        // a large enough number to reduce # of requests
        request.setMaxResults("64");
        request.setNextToken(nextToken);
        ListDatasetsResult result = null;
        try {
            request.setIdentityId(getIdentityId());
            result = client.listDatasets(request);
        } catch (AmazonClientException ace) {
            throw handleException(ace, "Failed to list dataset metadata");
        }
        for (com.amazonaws.services.cognitosync.model.Dataset dataset : result.getDatasets()) {
            datasets.add(modelToDatasetMetadata(dataset));
        }
        nextToken = result.getNextToken();
    } while (nextToken != null);
    return datasets;
}
Also used : DatasetMetadata(com.amazonaws.mobileconnectors.cognito.DatasetMetadata) AmazonClientException(com.amazonaws.AmazonClientException) ArrayList(java.util.ArrayList) ListDatasetsResult(com.amazonaws.services.cognitosync.model.ListDatasetsResult) ListDatasetsRequest(com.amazonaws.services.cognitosync.model.ListDatasetsRequest)
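
The method above pages through ListDatasets results using the nextToken returned by each call until the token is null. The following is a minimal sketch of that pagination pattern against the low-level client, assuming the identity pool and identity IDs are supplied by the caller; the maxResults tuning and the handleException wrapper from the example are omitted.

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.cognitosync.AmazonCognitoSyncClient;
import com.amazonaws.services.cognitosync.model.Dataset;
import com.amazonaws.services.cognitosync.model.ListDatasetsRequest;
import com.amazonaws.services.cognitosync.model.ListDatasetsResult;
import java.util.ArrayList;
import java.util.List;

public class ListDatasetsSketch {
    // Pool and identity IDs are placeholders supplied by the caller.
    static List<Dataset> listAll(AmazonCognitoSyncClient client,
                                 String identityPoolId, String identityId) {
        List<Dataset> all = new ArrayList<Dataset>();
        String nextToken = null;
        do {
            ListDatasetsRequest request = new ListDatasetsRequest();
            request.setIdentityPoolId(identityPoolId);
            request.setIdentityId(identityId);
            // null on the first iteration; the previous page's token afterwards
            request.setNextToken(nextToken);
            ListDatasetsResult result;
            try {
                result = client.listDatasets(request);
            } catch (AmazonClientException ace) {
                // Surface the failure rather than returning a partial list.
                throw new RuntimeException("Failed to list dataset metadata", ace);
            }
            all.addAll(result.getDatasets());
            nextToken = result.getNextToken();
        } while (nextToken != null); // the service returns null when no pages remain
        return all;
    }
}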

Example 98 with AmazonClientException

use of com.amazonaws.AmazonClientException in project stocator by SparkTC.

the class COSAPIClient method rename.

@Override
public boolean rename(String hostName, String srcPath, String dstPath) throws IOException {
    LOG.debug("Rename path {} to {}", srcPath, dstPath);
    Path src = new Path(srcPath);
    Path dst = new Path(dstPath);
    String srcKey = pathToKey(src);
    String dstKey = pathToKey(dst);
    if (srcKey.isEmpty()) {
        throw new IOException("Rename failed " + srcPath + " to " + dstPath + " source is root directory");
    }
    if (dstKey.isEmpty()) {
        throw new IOException("Rename failed " + srcPath + " to " + dstPath + " dest is root directory");
    }
    // get the source file status; this raises a FNFE if there is no source
    // file.
    FileStatus srcStatus = getFileStatus(hostName, src, "rename");
    if (srcKey.equals(dstKey)) {
        LOG.debug("rename: src and dest refer to the same file or directory: {}", dstPath);
        throw new IOException("source + " + srcPath + "and dest " + dstPath + " refer to the same file or directory");
    }
    FileStatus dstStatus = null;
    try {
        dstStatus = getFileStatus(hostName, dst, "rename");
        // the destination exists; the only question is whether or not
        // it can be the destination of the rename.
        if (srcStatus.isDirectory()) {
            if (dstStatus.isFile()) {
                throw new IOException("source + " + srcPath + "and dest " + dstPath + "source is a directory and dest is a file");
            }
        // at this point the destination is an empty directory
        } else {
            // empty or not
            if (dstStatus.isFile()) {
                throw new IOException("source + " + srcPath + "and dest " + dstPath + "Cannot rename onto an existing file");
            }
        }
    } catch (FileNotFoundException e) {
        LOG.debug("rename: destination path {} not found", dstPath);
    }
    if (srcStatus.isFile()) {
        LOG.debug("rename: renaming file {} to {}", src, dst);
        long length = srcStatus.getLen();
        if (dstStatus != null && dstStatus.isDirectory()) {
            String newDstKey = dstKey;
            if (!newDstKey.endsWith("/")) {
                newDstKey = newDstKey + "/";
            }
            String filename = srcKey.substring(pathToKey(src.getParent()).length() + 1);
            newDstKey = newDstKey + filename;
            copyFile(srcKey, newDstKey, length);
        } else {
            copyFile(srcKey, dstKey, srcStatus.getLen());
        }
        delete(hostName, src, false);
    } else {
        ObjectListing objects = mClient.listObjects(mBucket, srcKey);
        List<S3ObjectSummary> summaries = objects.getObjectSummaries();
        while (objects.isTruncated()) {
            objects = mClient.listNextBatchOfObjects(objects);
            summaries.addAll(objects.getObjectSummaries());
        }
        // Batch copy using TransferManager
        // Build a list of copyRequests
        List<CopyObjectRequest> copyRequests = new ArrayList<>();
        List<KeyVersion> keysToDelete = new ArrayList<>();
        for (S3ObjectSummary objectSummary : summaries) {
            String newSrcKey = objectSummary.getKey();
            keysToDelete.add(new KeyVersion(newSrcKey));
            // Just in case there are still folders returned as objects
            if (newSrcKey.endsWith("/")) {
                LOG.debug("rename: {} is folder and will be ignored", newSrcKey);
                continue;
            }
            long length = objectSummary.getSize();
            if ((dstStatus != null && dstStatus.isDirectory()) || (dstStatus == null)) {
                String newDstKey = dstKey;
                if (!newDstKey.endsWith("/")) {
                    newDstKey = newDstKey + "/";
                }
                String filename = newSrcKey.substring(pathToKey(src).length() + 1);
                newDstKey = newDstKey + filename;
                CopyObjectRequest copyObjectRequest = new CopyObjectRequest(mBucket, newSrcKey, mBucket, newDstKey);
                ObjectMetadata srcmd = getObjectMetadata(newSrcKey);
                if (srcmd != null) {
                    copyObjectRequest.setNewObjectMetadata(srcmd);
                }
                copyRequests.add(copyObjectRequest);
            } else {
                throw new IOException("Unexpected dstStatus");
            }
        }
        // Submit the copy jobs to transfermanager
        CountDownLatch doneSignal = new CountDownLatch(copyRequests.size());
        List<Copy> copyJobs = new ArrayList<>();
        for (CopyObjectRequest request : copyRequests) {
            request.setGeneralProgressListener(new CopyCompleteListener(request.getSourceBucketName() + "/" + request.getSourceKey(), request.getDestinationBucketName() + "/" + request.getDestinationKey(), doneSignal));
            copyJobs.add(transfers.copy(request));
        }
        try {
            doneSignal.await();
        } catch (AmazonClientException e) {
            throw new IOException("Couldn't wait for all copies to be finished");
        } catch (InterruptedException e) {
            throw new InterruptedIOException("Interrupted copying objects, cancelling");
        }
        // Delete source objects
        DeleteObjectsRequest keysDeleteRequest = new DeleteObjectsRequest(mBucket).withKeys(keysToDelete).withQuiet(false);
        // Verify that the object versions were successfully deleted.
        DeleteObjectsResult deletedKeys = mClient.deleteObjects(keysDeleteRequest);
        int successfulDeletes = deletedKeys.getDeletedObjects().size();
        LOG.debug("{} objects successfully deleted", successfulDeletes);
    }
    if (!(src.getParent().equals(dst.getParent()))) {
        LOG.debug("{} is not equal to {}. Going to create directory {}", src.getParent(), dst.getParent(), src.getParent());
        createDirectoryIfNecessary(hostName, src.getParent());
    }
    return true;
}
Also used : StocatorPath(com.ibm.stocator.fs.common.StocatorPath) Path(org.apache.hadoop.fs.Path) InterruptedIOException(java.io.InterruptedIOException) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) KeyVersion(com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion) AmazonClientException(com.amazonaws.AmazonClientException) FileNotFoundException(java.io.FileNotFoundException) ArrayList(java.util.ArrayList) ObjectListing(com.amazonaws.services.s3.model.ObjectListing) S3ObjectSummary(com.amazonaws.services.s3.model.S3ObjectSummary) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DeleteObjectsResult(com.amazonaws.services.s3.model.DeleteObjectsResult) CountDownLatch(java.util.concurrent.CountDownLatch) DeleteObjectsRequest(com.amazonaws.services.s3.model.DeleteObjectsRequest) CopyObjectRequest(com.amazonaws.services.s3.model.CopyObjectRequest) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata)
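
The batch copy above counts a CountDownLatch down once per copy request through a CopyCompleteListener, whose implementation is not shown here. Below is a minimal sketch of such a progress listener, assuming it only needs to release the latch when a transfer completes or fails; the class name and logging are illustrative.

import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;
import java.util.concurrent.CountDownLatch;

// Hypothetical stand-in for the CopyCompleteListener referenced above.
public class CopyCompleteListenerSketch implements ProgressListener {
    private final String source;
    private final String destination;
    private final CountDownLatch doneSignal;

    public CopyCompleteListenerSketch(String source, String destination,
                                      CountDownLatch doneSignal) {
        this.source = source;
        this.destination = destination;
        this.doneSignal = doneSignal;
    }

    @Override
    public void progressChanged(ProgressEvent progressEvent) {
        ProgressEventType type = progressEvent.getEventType();
        if (type == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
            // One copy finished; rename() proceeds once every copy has counted down.
            doneSignal.countDown();
        } else if (type == ProgressEventType.TRANSFER_FAILED_EVENT) {
            // Count down on failure too, so doneSignal.await() cannot hang forever.
            System.err.println("Copy failed: " + source + " -> " + destination);
            doneSignal.countDown();
        }
    }
}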

Example 99 with AmazonClientException

use of com.amazonaws.AmazonClientException in project stocator by SparkTC.

the class COSInputStream method reopen.

/**
 * Opens up the stream at specified target position and for given length.
 *
 * @param reason reason for reopen
 * @param targetPos target position
 * @param length length requested
 * @throws IOException on any failure to open the object
 */
private synchronized void reopen(String reason, long targetPos, long length) throws IOException {
    if (wrappedStream != null) {
        closeStream("reopen(" + reason + ")", contentRangeFinish, false);
    }
    contentRangeFinish = calculateRequestLimit(inputPolicy, targetPos, length, contentLength, readahead);
    LOG.debug("reopen({}) for {} range[{}-{}], length={}," + " streamPosition={}, nextReadPosition={}", uri, reason, targetPos, contentRangeFinish, length, pos, nextReadPos);
    try {
        GetObjectRequest request = new GetObjectRequest(bucket, key).withRange(targetPos, contentRangeFinish - 1);
        wrappedStream = client.getObject(request).getObjectContent();
        contentRangeStart = targetPos;
        if (wrappedStream == null) {
            throw new IOException("Null IO stream from reopen of (" + reason + ") " + uri);
        }
    } catch (AmazonClientException e) {
        throw COSUtils.translateException("Reopen at position " + targetPos, uri, e);
    }
    pos = targetPos;
}
Also used : AmazonClientException(com.amazonaws.AmazonClientException) IOException(java.io.IOException) GetObjectRequest(com.amazonaws.services.s3.model.GetObjectRequest)
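
The reopen logic issues a ranged GetObject and converts any AmazonClientException into an IOException through COSUtils.translateException. The following is a minimal standalone sketch of that ranged read, with a plain IOException wrapper standing in for the translation helper; the bucket and key are placeholders supplied by the caller.

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import java.io.IOException;

public class RangedReadSketch {
    // Minimal ranged GET; the IOException wrapping is a simplification of
    // the COSUtils.translateException call used in the example above.
    static S3ObjectInputStream openRange(AmazonS3 client, String bucket, String key,
                                         long targetPos, long contentRangeFinish)
            throws IOException {
        try {
            GetObjectRequest request = new GetObjectRequest(bucket, key)
                    // The HTTP Range header is inclusive on both ends, hence the -1.
                    .withRange(targetPos, contentRangeFinish - 1);
            return client.getObject(request).getObjectContent();
        } catch (AmazonClientException e) {
            throw new IOException("Reopen at position " + targetPos + " failed", e);
        }
    }
}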

Example 100 with AmazonClientException

use of com.amazonaws.AmazonClientException in project stocator by SparkTC.

the class COSOutputStream method close.

@Override
public void close() throws IOException {
    if (closed.getAndSet(true)) {
        return;
    }
    mBackupOutputStream.close();
    LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
    try {
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(mBackupFile.length());
        om.setContentType(mContentType);
        om.setUserMetadata(mMetadata);
        // to ensure the write is atomic
        if (mAvoidOverwrite) {
            LOG.debug("Avoid Overwrite - setting If-None-Match header");
            om.setHeader("If-None-Match", "*");
        }
        PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
        putObjectRequest.setMetadata(om);
        Upload upload = transfers.upload(putObjectRequest);
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException(e.toString()).initCause(e);
    } catch (AmazonClientException e) {
        throw new IOException(String.format("saving output %s", mKey), e);
    } finally {
        if (!mBackupFile.delete()) {
            LOG.warn("Could not delete temporary cos file: {}", mBackupOutputStream);
        }
        super.close();
    }
    LOG.debug("OutputStream for key '{}' upload complete", mKey);
}
Also used : InterruptedIOException(java.io.InterruptedIOException) AmazonClientException(com.amazonaws.AmazonClientException) Upload(com.amazonaws.services.s3.transfer.Upload) IOException(java.io.IOException) InterruptedIOException(java.io.InterruptedIOException) ObjectMetadata(com.amazonaws.services.s3.model.ObjectMetadata) PutObjectRequest(com.amazonaws.services.s3.model.PutObjectRequest)
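
close() hands the backup file to a TransferManager, blocks on waitForUploadResult, and maps InterruptedException and AmazonClientException onto I/O exceptions. Below is a minimal standalone sketch of that upload-and-wait pattern; unlike the example, it builds its own TransferManager, and the bucket, key and content type are placeholders.

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.File;
import java.io.IOException;
import java.io.InterruptedIOException;

public class UploadSketch {
    // Bucket, key and content type are placeholders; the example above uses a
    // pre-configured TransferManager field instead of building one locally.
    static void uploadAndWait(AmazonS3 s3, String bucket, String key, File file)
            throws IOException {
        TransferManager transfers = TransferManagerBuilder.standard()
                .withS3Client(s3)
                .build();
        try {
            ObjectMetadata om = new ObjectMetadata();
            om.setContentLength(file.length());
            om.setContentType("application/octet-stream");
            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, file);
            putObjectRequest.setMetadata(om);
            Upload upload = transfers.upload(putObjectRequest);
            // Block until the (possibly multipart) upload completes.
            upload.waitForUploadResult();
        } catch (InterruptedException e) {
            // Preserve the interrupt and report it as an I/O-level interruption.
            Thread.currentThread().interrupt();
            throw (InterruptedIOException) new InterruptedIOException(e.toString()).initCause(e);
        } catch (AmazonClientException e) {
            throw new IOException("saving output " + key, e);
        } finally {
            transfers.shutdownNow(false); // keep the caller-owned S3 client alive
        }
    }
}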

Aggregations

AmazonClientException (com.amazonaws.AmazonClientException): 202
IOException (java.io.IOException): 70
AmazonServiceException (com.amazonaws.AmazonServiceException): 32
ArrayList (java.util.ArrayList): 32
ObjectMetadata (com.amazonaws.services.s3.model.ObjectMetadata): 23
ObjectListing (com.amazonaws.services.s3.model.ObjectListing): 19
S3ObjectSummary (com.amazonaws.services.s3.model.S3ObjectSummary): 17
HashMap (java.util.HashMap): 16
PutObjectRequest (com.amazonaws.services.s3.model.PutObjectRequest): 14
Test (org.junit.Test): 14
SienaException (siena.SienaException): 12
AWSCredentials (com.amazonaws.auth.AWSCredentials): 11
AmazonS3Client (com.amazonaws.services.s3.AmazonS3Client): 11
GetObjectRequest (com.amazonaws.services.s3.model.GetObjectRequest): 11
ListObjectsRequest (com.amazonaws.services.s3.model.ListObjectsRequest): 11
AmazonDynamoDB (com.amazonaws.services.dynamodbv2.AmazonDynamoDB): 10
AmazonS3Exception (com.amazonaws.services.s3.model.AmazonS3Exception): 10
InterruptedIOException (java.io.InterruptedIOException): 10
DeleteObjectsRequest (com.amazonaws.services.s3.model.DeleteObjectsRequest): 9
File (java.io.File): 9