Use of com.amazonaws.services.s3.transfer.Download in project Synapse-Stack-Builder by Sage-Bionetworks:
the class ArtifactProcessing, method createOrGetApplicationVersion.
/**
 * Create the Elastic Beanstalk application version if it does not already exist.
 * When the version is missing, the artifact is first downloaded from Artifactory,
 * uploaded to S3, and then registered as a new application version.
 *
 * @param appPrfix the application prefix used to resolve the version label,
 *        S3 path, and Artifactory URL from the configuration.
 * @return the description of the existing or newly created application version.
 * @throws IOException if the artifact cannot be downloaded.
 */
public ApplicationVersionDescription createOrGetApplicationVersion(String appPrfix) throws IOException {
    String s3Path = config.getVersionPath(appPrfix);
    final String versionLabel = config.getVersionLabel(appPrfix);
    String fileURL = config.getArtifactoryUrl(appPrfix);
    // First determine if this version already exists.
    log.debug(String.format("Creating version: %1$s using: %2$s ", versionLabel, fileURL));
    DescribeApplicationVersionsResult results = beanstalkClient.describeApplicationVersions(new DescribeApplicationVersionsRequest().withApplicationName(config.getElasticBeanstalkApplicationName()).withVersionLabels(versionLabel));
    if (results.getApplicationVersions().size() < 1) {
        log.debug(String.format("Version: %1$s does not already exist so it will be created...", versionLabel));
        // The version does not exist yet: download the artifact, then upload it to S3.
        File temp = null;
        String key = s3Path + versionLabel;
        try {
            // First download the file from Artifactory.
            temp = downloadFile(fileURL);
            // Now upload it to S3, logging progress periodically.
            final long start = System.currentTimeMillis();
            log.debug("Starting to upload file " + fileURL + " to S3...");
            s3Client.putObject(new PutObjectRequest(config.getStackConfigS3BucketName(), key, temp).withProgressListener(new ProgressListener() {

                // Timestamp of the last progress log entry, used to throttle logging.
                private volatile long lastUpdate = start;

                public void progressChanged(ProgressEvent progressEvent) {
                    // The progress data they give us never seems to change, so we just show the elapsed time.
                    long now = System.currentTimeMillis();
                    long lastUpdateElapase = now - lastUpdate;
                    long totalElapse = now - start;
                    // Throttle: log at most once every two seconds.
                    if (lastUpdateElapase > 2 * 1000) {
                        // Log the event
                        log.debug(String.format("Uploading %1$s to S3. Elapse time: %2$tM:%2$tS:%2$tL ", versionLabel, totalElapse));
                        lastUpdate = now;
                    }
                }
            }));
        } finally {
            // Clean up the temporary artifact file.
            if (temp != null) {
                temp.delete();
            }
        }
        // The S3 location for this file.
        S3Location location = new S3Location(config.getStackConfigS3BucketName(), key);
        // We need to create this version, pointing at the uploaded source bundle.
        beanstalkClient.createApplicationVersion(new CreateApplicationVersionRequest().withApplicationName(config.getElasticBeanstalkApplicationName()).withAutoCreateApplication(false).withSourceBundle(location).withVersionLabel(versionLabel));
        // Re-describe so we return the freshly created version.
        results = beanstalkClient.describeApplicationVersions(new DescribeApplicationVersionsRequest().withApplicationName(config.getElasticBeanstalkApplicationName()).withVersionLabels(versionLabel));
    } else {
        log.debug(String.format("Version: %1$s already exists.", versionLabel));
    }
    return results.getApplicationVersions().get(0);
}
Use of com.amazonaws.services.s3.transfer.Download in project Synapse-Stack-Builder by Sage-Bionetworks:
the class SSLSetup, method getCertificateStringFromS3.
/**
 * Download a certificate file from S3 directly into a string, skipping an
 * intermediate file.
 *
 * @param key the S3 object key of the certificate.
 * @return the certificate contents decoded as UTF-8.
 * @throws AmazonClientException if the object content cannot be read.
 */
public String getCertificateStringFromS3(String key) {
    // For this case we do not write to file first; the object is buffered in memory.
    S3Object s3Object = s3Client.getObject(new GetObjectRequest(config.getDefaultS3BucketName(), key));
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    try {
        // Copy the object content into memory in 10 KB chunks.
        byte[] buffer = new byte[1024 * 10];
        int bytesRead;
        while ((bytesRead = s3Object.getObjectContent().read(buffer)) > -1) {
            outputStream.write(buffer, 0, bytesRead);
        }
        // Go right to string.
        return new String(outputStream.toByteArray(), "UTF-8");
    } catch (IOException e) {
        // Abort the stream so the SDK does not try to drain the remaining bytes.
        s3Object.getObjectContent().abort();
        // Corrected message: nothing is written to disk here; the in-memory read failed.
        throw new AmazonClientException("Unable to read object contents from S3: " + e.getMessage(), e);
    } finally {
        try {
            outputStream.close();
        } catch (Exception ignored) {
            // Closing a ByteArrayOutputStream cannot meaningfully fail; best-effort.
        }
        try {
            s3Object.getObjectContent().close();
        } catch (Exception ignored) {
            // Best-effort close of the S3 content stream.
        }
    }
}
Use of com.amazonaws.services.s3.transfer.Download in project Synapse-Stack-Builder by Sage-Bionetworks:
the class StackDefaults, method loadStackDefaultsFromS3.
/**
 * Connect to S3 and download the default properties for this stack.
 * Ensures both the stack-config and default buckets exist (bucket creation is
 * idempotent), downloads the default properties file to a temp file, parses it,
 * and validates the expected keys are present.
 *
 * @return the properties loaded from the default properties file in S3.
 * @throws IOException if the downloaded file cannot be parsed as a properties file.
 */
public Properties loadStackDefaultsFromS3() throws IOException {
    // Create the config bucket.
    String bucketName = config.getStackConfigS3BucketName();
    log.info("Creating S3 Bucket: " + bucketName);
    // This call is idempotent and will only actually create the bucket if it does not already exist.
    s3Client.createBucket(bucketName);
    // This is the bucket where we expect to find the properties.
    bucketName = config.getDefaultS3BucketName();
    log.info("Creating S3 Bucket: " + bucketName);
    // This call is idempotent and will only actually create the bucket if it does not already exist.
    s3Client.createBucket(bucketName);
    String fileName = config.getDefaultPropertiesFileName();
    File temp = File.createTempFile("DefaultProps", ".properties");
    try {
        // Download the file to the temp file FIRST, then open the stream.
        // (Previously the FileInputStream was opened before the download, so it
        // could read an empty or stale file if the SDK replaced the file on disk.)
        s3Client.getObject(new GetObjectRequest(bucketName, fileName), temp);
        FileInputStream in = new FileInputStream(temp);
        try {
            Properties props = new Properties();
            props.load(in);
            // Did we get the expected properties?
            validateProperties(bucketName, fileName, props);
            // Done
            return props;
        } finally {
            in.close();
        }
    } catch (IOException e) {
        log.error("Failed to read the '" + fileName + "' downloaded from S3 bucket: '" + bucketName + "'. Expected the file to be a java.util.Properties file");
        throw e;
    } catch (AmazonClientException e) {
        log.error("Failed to download the '" + fileName + "' from S3 bucket: '" + bucketName + "' make sure the file exists and try again.");
        throw e;
    } finally {
        // Delete the temp file.
        temp.delete();
    }
}
Use of com.amazonaws.services.s3.transfer.Download in project dataverse by IQSS:
the class S3AccessIO, method generateTemporaryS3Url.
/**
 * Generate a pre-signed, time-limited S3 GET URL for the main file of this DataFile.
 * Only valid when the wrapped DvObject is a DataFile; other DvObject types raise
 * an IOException.
 *
 * @return the pre-signed URL as a string.
 * @throws IOException if s3 is not initialised or dvObject is not a DataFile.
 */
public String generateTemporaryS3Url() throws IOException {
    // A. 1 hour by default seems like an OK number. Making it configurable seems like a good idea too. -- L.A.
    if (s3 == null) {
        throw new IOException("ERROR: s3 not initialised. ");
    }
    if (dvObject instanceof DataFile) {
        key = getMainFileKey();
        java.util.Date expiration = new java.util.Date();
        long msec = expiration.getTime();
        // Convert minutes to milliseconds. The previous code multiplied by 1000
        // only, which treated the configured minutes as seconds.
        msec += 60 * 1000 * getUrlExpirationMinutes();
        expiration.setTime(msec);
        GeneratePresignedUrlRequest generatePresignedUrlRequest = new GeneratePresignedUrlRequest(bucketName, key);
        // Default.
        generatePresignedUrlRequest.setMethod(HttpMethod.GET);
        generatePresignedUrlRequest.setExpiration(expiration);
        ResponseHeaderOverrides responseHeaders = new ResponseHeaderOverrides();
        // responseHeaders.setContentDisposition("attachment; filename="+this.getDataFile().getDisplayName());
        // Encode the file name explicitly specifying the encoding as UTF-8:
        // (otherwise S3 may not like non-ASCII characters!)
        // Most browsers are happy with just "filename="+URLEncoder.encode(this.getDataFile().getDisplayName(), "UTF-8")
        // in the header. But Firefox appears to require that "UTF8" is
        // specified explicitly, as below.
        // RFC 5987 filename* values use percent-encoding; URLEncoder produces
        // "+" for spaces, which is invalid there, so convert it to "%20".
        responseHeaders.setContentDisposition("attachment; filename*=UTF-8''" + URLEncoder.encode(this.getDataFile().getDisplayName(), "UTF-8").replace("+", "%20"));
        // - without it, download will work, but Firefox will leave the special
        // characters in the file name encoded. For example, the file name
        // will look like "1976%E2%80%932016.txt" instead of "1976–2016.txt",
        // where the dash is the "long dash", represented by a 3-byte UTF8
        // character "\xE2\x80\x93"
        responseHeaders.setContentType(this.getDataFile().getContentType());
        generatePresignedUrlRequest.setResponseHeaders(responseHeaders);
        URL s = s3.generatePresignedUrl(generatePresignedUrlRequest);
        return s.toString();
    } else if (dvObject instanceof Dataset) {
        throw new IOException("Data Access: GenerateTemporaryS3Url: Invalid DvObject type : Dataset");
    } else if (dvObject instanceof Dataverse) {
        throw new IOException("Data Access: Invalid DvObject type : Dataverse");
    } else {
        throw new IOException("Data Access: Invalid DvObject type");
    }
}
Use of com.amazonaws.services.s3.transfer.Download in project molgenis by molgenis:
the class AmazonBucketIngester, method ingest.
/**
 * Downloads a file from an Amazon S3 bucket and imports it into the database.
 *
 * @param jobExecutionID       identifier of the job execution; used for the download location and file metadata.
 * @param targetEntityTypeName name of the entity type to import into; also used to rename the downloaded file/sheet.
 * @param bucket               name of the S3 bucket.
 * @param key                  key (or expression, when isExpression is true) of the object in the bucket.
 * @param extension            file extension used when renaming the downloaded file.
 * @param accessKey            AWS access key.
 * @param secretKey            AWS secret key.
 * @param region               AWS region of the bucket.
 * @param isExpression         whether {@code key} is a matching expression rather than a literal key.
 * @param progress             progress reporter for this job (3 steps: connect, download, import).
 * @return metadata of the downloaded file.
 * @throws MolgenisDataException wrapping any failure during download or import.
 */
public FileMeta ingest(String jobExecutionID, String targetEntityTypeName, String bucket, String key, String extension, String accessKey, String secretKey, String region, boolean isExpression, Progress progress) {
FileMeta fileMeta;
try {
progress.setProgressMax(3);
progress.progress(0, "Connection to Amazon Bucket with accessKey '" + accessKey + "'");
// Step 1: connect and download the object to the local file store.
AmazonS3 client = amazonBucketClient.getClient(accessKey, secretKey, region);
progress.progress(1, "downloading...");
File file = amazonBucketClient.downloadFile(client, fileStore, jobExecutionID, bucket, key, extension, isExpression, targetEntityTypeName);
// For a single-sheet Excel file, rename the sheet so the import targets the requested entity type.
if (targetEntityTypeName != null && ExcelUtils.isExcelFile(file.getName())) {
if (ExcelUtils.getNumberOfSheets(file) == 1) {
ExcelUtils.renameSheet(targetEntityTypeName, file, 0);
} else {
throw new MolgenisDataException("Amazon Bucket imports to a specified entityType are only possible with CSV files or Excel files with one sheet");
}
}
progress.progress(2, "Importing...");
ImportService importService = importServiceFactory.getImportService(file.getName());
// Copy to a file named after the target entity type, since the importer derives the entity name from the file name.
// NOTE(review): if targetEntityTypeName is null, this produces a file literally named "null.<ext>" — confirm callers always pass a name here.
File renamed = new File(String.format("%s%s%s.%s", file.getParent(), File.separatorChar, targetEntityTypeName, extension));
Files.copy(file.toPath(), renamed.toPath(), StandardCopyOption.REPLACE_EXISTING);
RepositoryCollection repositoryCollection = fileRepositoryCollectionFactory.createFileRepositoryCollection(renamed);
EntityImportReport report = importService.doImport(repositoryCollection, DatabaseAction.ADD_UPDATE_EXISTING, "base");
progress.status("Download and import from Amazon Bucket done.");
progress.progress(3, "Successfully imported " + report.getNrImportedEntitiesMap().keySet().toString() + " entities.");
// File metadata is created from the originally downloaded file, not the renamed copy.
fileMeta = createFileMeta(jobExecutionID, file);
} catch (Exception e) {
// Wrap any failure (download, rename, import) in the domain exception type.
throw new MolgenisDataException(e);
}
return fileMeta;
}
Aggregations