Usage of com.haulmont.cuba.core.app.filestorage.amazon.auth.AWS4SignerForChunkedUpload in the cuba project by cuba-platform:
the saveStream method of class AmazonS3FileStorage.
/**
 * Uploads the contents of {@code inputStream} to Amazon S3 using the
 * AWS Signature V4 chunked-upload protocol ({@code aws-chunked} content encoding).
 * The payload is sent as a series of individually signed chunks, terminated by
 * a signed zero-length chunk.
 *
 * @param fileDescr   descriptor of the file being stored; its size must be set
 * @param inputStream source of the file data; not closed by this method
 * @return the number of bytes stored, as reported by {@code fileDescr.getSize()}
 * @throws FileStorageException if S3 returns a non-OK status or an I/O error
 *                              occurs while sending the request
 */
@Override
public long saveStream(FileDescriptor fileDescr, InputStream inputStream) throws FileStorageException {
    Preconditions.checkNotNullArgument(fileDescr.getSize());

    int chunkSize = amazonS3Config.getChunkSize();
    long fileSize = fileDescr.getSize();
    URL amazonUrl = getAmazonUrl(fileDescr);

    // Markers telling S3 the upload arrives as a series of signed chunks:
    // -- 'x-amz-content-sha256' is the fixed marker indicating chunked upload
    // -- 'content-length' is the total request size in bytes, INCLUDING per-chunk headers
    // -- 'x-amz-decoded-content-length' is the actual payload size, LESS chunk headers
    Map<String, String> headers = new HashMap<>();
    headers.put("x-amz-storage-class", "REDUCED_REDUNDANCY");
    headers.put("x-amz-content-sha256", AWS4SignerForChunkedUpload.STREAMING_BODY_SHA256);
    headers.put("content-encoding", "aws-chunked");
    headers.put("x-amz-decoded-content-length", String.valueOf(fileSize));

    AWS4SignerForChunkedUpload signer = new AWS4SignerForChunkedUpload(
            amazonUrl, "PUT", "s3", amazonS3Config.getRegionName());

    // How big the overall request stream becomes once each chunk's
    // signature header is added.
    long totalLength = AWS4SignerForChunkedUpload.calculateChunkedContentLength(fileSize, chunkSize);
    headers.put("content-length", String.valueOf(totalLength));

    String authorization = signer.computeSignature(headers,
            null, // no query parameters
            AWS4SignerForChunkedUpload.STREAMING_BODY_SHA256,
            amazonS3Config.getAccessKey(),
            amazonS3Config.getSecretAccessKey());

    // Place the computed signature into a formatted 'Authorization' header
    // and call S3.
    headers.put("Authorization", authorization);

    try {
        HttpURLConnection connection = HttpUtils.createHttpConnection(amazonUrl, "PUT", headers);

        byte[] buffer = new byte[chunkSize];
        // try-with-resources guarantees the request stream is closed even when
        // reading or signing fails mid-upload (previously it leaked on error).
        try (DataOutputStream outputStream = new DataOutputStream(connection.getOutputStream())) {
            int bytesRead;
            while ((bytesRead = IOUtils.read(inputStream, buffer, 0, chunkSize)) > 0) {
                // Wrap the raw bytes into a signed chunk and send it.
                byte[] chunk = signer.constructSignedChunk(bytesRead, buffer);
                outputStream.write(chunk);
                outputStream.flush();
            }
            // A signed zero-length chunk terminates the upload.
            byte[] finalChunk = signer.constructSignedChunk(0, buffer);
            outputStream.write(finalChunk);
            outputStream.flush();
        }

        HttpUtils.HttpResponse httpResponse = HttpUtils.executeHttpRequest(connection);
        if (!httpResponse.isStatusOk()) {
            String message = String.format("Could not save file %s. %s",
                    getFileName(fileDescr), getInputStreamContent(httpResponse));
            throw new FileStorageException(FileStorageException.Type.IO_EXCEPTION, message);
        }
    } catch (IOException e) {
        // Surface I/O failures through the declared exception type (with the
        // cause preserved) instead of an undeclared raw RuntimeException.
        throw new FileStorageException(FileStorageException.Type.IO_EXCEPTION,
                "Error when sending chunked upload request: " + getFileName(fileDescr), e);
    }
    return fileDescr.getSize();
}
Aggregations