use of com.amazonaws.services.s3.AmazonS3Client in project bender by Nextdoor.
the class S3SnsNotifier method main.
public static void main(String[] args) throws ParseException, InterruptedException, IOException {
formatter = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").withZoneUTC();
/*
* Parse cli arguments
*/
Options options = new Options();
options.addOption(Option.builder().longOpt("bucket").hasArg().required().desc("Name of S3 bucket to list s3 objects from").build());
options.addOption(Option.builder().longOpt("key-file").hasArg().required().desc("Local file of S3 keys to process").build());
options.addOption(Option.builder().longOpt("sns-arn").hasArg().required().desc("SNS arn to publish to").build());
options.addOption(Option.builder().longOpt("throttle-ms").hasArg().desc("Amount of ms to wait between publishing to SNS").build());
options.addOption(Option.builder().longOpt("processed-file").hasArg().desc("Local file to use to store procssed S3 object names").build());
options.addOption(Option.builder().longOpt("skip-processed").hasArg(false).desc("Whether to skip S3 objects that have been processed").build());
options.addOption(Option.builder().longOpt("dry-run").hasArg(false).desc("If set do not publish to SNS").build());
CommandLineParser parser = new DefaultParser();
CommandLine cmd = parser.parse(options, args);
String bucket = cmd.getOptionValue("bucket");
String keyFile = cmd.getOptionValue("key-file");
String snsArn = cmd.getOptionValue("sns-arn");
String processedFile = cmd.getOptionValue("processed-file", null);
boolean skipProcessed = cmd.hasOption("skip-processed");
dryRun = cmd.hasOption("dry-run");
long throttle = Long.parseLong(cmd.getOptionValue("throttle-ms", "-1"));
if (processedFile != null) {
File file = new File(processedFile);
if (!file.exists()) {
logger.debug("creating local file to store processed s3 object names: " + processedFile);
file.createNewFile();
}
}
/*
* Import S3 keys that have been processed
*/
if (skipProcessed && processedFile != null) {
try (BufferedReader br = new BufferedReader(new FileReader(processedFile))) {
String line;
while ((line = br.readLine()) != null) {
alreadyPublished.add(line.trim());
}
}
}
/*
* Setup writer for file containing processed S3 keys
*/
FileWriter fw = null;
BufferedWriter bw = null;
if (processedFile != null) {
fw = new FileWriter(processedFile, true);
bw = new BufferedWriter(fw);
}
/*
* Create clients
*/
AmazonS3Client s3Client = new AmazonS3Client();
AmazonSNSClient snsClient = new AmazonSNSClient();
/*
* Get S3 object list
*/
try (BufferedReader br = new BufferedReader(new FileReader(keyFile))) {
String line;
while ((line = br.readLine()) != null) {
String key = line.trim();
if (alreadyPublished.contains(key)) {
logger.info("skipping " + key);
continue;
}
ObjectMetadata om = s3Client.getObjectMetadata(bucket, key);
S3EventNotification s3Notification = getS3Notification(key, bucket, om.getContentLength());
String json = s3Notification.toJson();
/*
* Publish to SNS
*/
if (publish(snsArn, json, snsClient, key) && processedFile != null) {
bw.write(key + "\n");
bw.flush();
}
if (throttle != -1) {
Thread.sleep(throttle);
}
}
}
if (processedFile != null) {
bw.close();
fw.close();
}
}
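The publish helper called above is not shown in this snippet. Below is a minimal sketch of what it might look like, assuming the standard SNS PublishRequest API and the dryRun and logger fields already referenced in main; the body is an illustration, not the project's actual implementation.

private static boolean publish(String arn, String msg, AmazonSNSClient snsClient, String s3Key) {
  // Honor the --dry-run flag parsed in main: log instead of publishing
  if (dryRun) {
    logger.info("dry run: would have published notification for " + s3Key);
    return false;
  }
  logger.info("publishing notification for " + s3Key);
  // com.amazonaws.services.sns.model.PublishRequest(topicArn, message)
  snsClient.publish(new PublishRequest(arn, msg));
  return true;
}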
use of com.amazonaws.services.s3.AmazonS3Client in project sandbox by irof.
the class S3MultipartUploadTest method 比較用の通常のputObject (an ordinary putObject, for comparison).
@Ignore
@Test
public void 比較用の通常のputObject() throws Exception {
byte[] bytes = new byte[5 * Constants.MB + 1]; // one byte over 5 MB, the S3 minimum multipart part size
ObjectMetadata meta = new ObjectMetadata();
meta.setContentLength(bytes.length);
PutObjectRequest putObjectRequest = new PutObjectRequest("irof-sandbox", "S3MultipartUpload/basic-5MB.dat", new ByteArrayInputStream(bytes), meta);
PutObjectResult result = new AmazonS3Client().putObject(putObjectRequest);
logger.info(result.getETag());
}
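For contrast, the multipart path this test compares against could be sketched with the low-level multipart API as follows; the object key and single-part layout are assumptions for illustration.

AmazonS3Client s3 = new AmazonS3Client();
byte[] bytes = new byte[5 * Constants.MB + 1];
// Start the multipart upload and obtain an upload id
InitiateMultipartUploadResult init = s3.initiateMultipartUpload(
    new InitiateMultipartUploadRequest("irof-sandbox", "S3MultipartUpload/multipart-5MB.dat"));
// Upload a single part; real code would loop over chunks of at least 5 MB
UploadPartResult part = s3.uploadPart(new UploadPartRequest()
    .withBucketName("irof-sandbox")
    .withKey("S3MultipartUpload/multipart-5MB.dat")
    .withUploadId(init.getUploadId())
    .withPartNumber(1)
    .withInputStream(new ByteArrayInputStream(bytes))
    .withPartSize(bytes.length));
// Complete the upload with the collected part ETags
s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
    "irof-sandbox", "S3MultipartUpload/multipart-5MB.dat",
    init.getUploadId(), Arrays.asList(part.getPartETag())));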
use of com.amazonaws.services.s3.AmazonS3Client in project stocator by CODAIT.
the class COSAPIClient method initiate.
@Override
public void initiate(String scheme) throws IOException, ConfigurationParseException {
mCachedSparkOriginated = new HashMap<String, Boolean>();
mCachedSparkJobsStatus = new HashMap<String, Boolean>();
schemaProvided = scheme;
Properties props = ConfigurationHandler.initialize(filesystemURI, conf, scheme);
// Set bucket name property
int cacheSize = conf.getInt(CACHE_SIZE, GUAVA_CACHE_SIZE_DEFAULT);
memoryCache = MemoryCache.getInstance(cacheSize);
mBucket = props.getProperty(COS_BUCKET_PROPERTY);
workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(filesystemURI, getWorkingDirectory());
LOG.trace("Working directory set to {}", workingDir);
fModeAutomaticDelete = "true".equals(props.getProperty(FMODE_AUTOMATIC_DELETE_COS_PROPERTY, "false"));
mIsV2Signer = "true".equals(props.getProperty(V2_SIGNER_TYPE_COS_PROPERTY, "false"));
// Define COS client
String accessKey = props.getProperty(ACCESS_KEY_COS_PROPERTY);
String secretKey = props.getProperty(SECRET_KEY_COS_PROPERTY);
if (accessKey == null) {
throw new ConfigurationParseException("Access KEY is empty. Please provide valid access key");
}
if (secretKey == null) {
throw new ConfigurationParseException("Secret KEY is empty. Please provide valid secret key");
}
BasicAWSCredentials creds = new BasicAWSCredentials(accessKey, secretKey);
ClientConfiguration clientConf = new ClientConfiguration();
int maxThreads = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_THREADS, DEFAULT_MAX_THREADS);
if (maxThreads < 2) {
LOG.warn(MAX_THREADS + " must be at least 2: forcing to 2.");
maxThreads = 2;
}
int totalTasks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS);
long keepAliveTime = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
threadPoolExecutor = BlockingThreadPoolExecutorService.newInstance(maxThreads, maxThreads + totalTasks, keepAliveTime, TimeUnit.SECONDS, "s3a-transfer-shared");
unboundedThreadPool = new ThreadPoolExecutor(maxThreads, Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), BlockingThreadPoolExecutorService.newDaemonThreadFactory("s3a-transfer-unbounded"));
boolean secureConnections = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS);
clientConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
String proxyHost = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_HOST, "");
int proxyPort = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, PROXY_PORT, -1);
if (!proxyHost.isEmpty()) {
clientConf.setProxyHost(proxyHost);
if (proxyPort >= 0) {
clientConf.setProxyPort(proxyPort);
} else {
if (secureConnections) {
LOG.warn("Proxy host set without port. Using HTTPS default 443");
clientConf.setProxyPort(443);
} else {
LOG.warn("Proxy host set without port. Using HTTP default 80");
clientConf.setProxyPort(80);
}
}
String proxyUsername = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_USERNAME);
String proxyPassword = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_PASSWORD);
if ((proxyUsername == null) != (proxyPassword == null)) {
String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD + " set without the other.";
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
clientConf.setProxyUsername(proxyUsername);
clientConf.setProxyPassword(proxyPassword);
clientConf.setProxyDomain(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_DOMAIN));
clientConf.setProxyWorkstation(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_WORKSTATION));
if (LOG.isDebugEnabled()) {
LOG.debug("Using proxy server {}:{} as user {} on " + "domain {} as workstation {}", clientConf.getProxyHost(), clientConf.getProxyPort(), String.valueOf(clientConf.getProxyUsername()), clientConf.getProxyDomain(), clientConf.getProxyWorkstation());
}
} else if (proxyPort >= 0) {
String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
LOG.error(msg);
throw new IllegalArgumentException(msg);
}
initConnectionSettings(conf, clientConf);
if (mIsV2Signer) {
clientConf.withSignerOverride("S3SignerType");
}
mClient = new AmazonS3Client(creds, clientConf);
final String serviceUrl = props.getProperty(ENDPOINT_URL_COS_PROPERTY);
if (serviceUrl != null && !serviceUrl.equals(amazonDefaultEndpoint)) {
mClient.setEndpoint(serviceUrl);
}
mClient.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
// Set block size property
String mBlockSizeString = props.getProperty(BLOCK_SIZE_COS_PROPERTY, "128");
mBlockSize = Long.parseLong(mBlockSizeString) * 1024L * 1024L;
bufferDirectory = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, BUFFER_DIR);
bufferDirectoryKey = Utils.getConfigKey(conf, FS_COS, FS_ALT_KEYS, BUFFER_DIR);
LOG.trace("Buffer directory is set to {} for the key {}", bufferDirectory, bufferDirectoryKey);
boolean autoCreateBucket = "true".equalsIgnoreCase(props.getProperty(AUTO_BUCKET_CREATE_COS_PROPERTY, "false"));
partSize = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
multiPartThreshold = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, MIN_MULTIPART_THRESHOLD, DEFAULT_MIN_MULTIPART_THRESHOLD);
readAhead = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, READAHEAD_RANGE, DEFAULT_READAHEAD_RANGE);
LOG.debug(READAHEAD_RANGE + ":" + readAhead);
inputPolicy = COSInputPolicy.getPolicy(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, INPUT_FADVISE, INPUT_FADV_NORMAL));
initTransferManager();
maxKeys = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
flatListingFlag = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FLAT_LISTING, DEFAULT_FLAT_LISTING);
if (autoCreateBucket) {
try {
boolean bucketExist = mClient.doesBucketExist(mBucket);
if (bucketExist) {
LOG.trace("Bucket {} exists", mBucket);
} else {
LOG.trace("Bucket {} doesn`t exists and autocreate", mBucket);
String mRegion = props.getProperty(REGION_COS_PROPERTY);
if (mRegion == null) {
mClient.createBucket(mBucket);
} else {
LOG.trace("Creating bucket {} in region {}", mBucket, mRegion);
mClient.createBucket(mBucket, mRegion);
}
}
} catch (AmazonServiceException ase) {
/*
* We ignore the BucketAlreadyExists exception since multiple processes or threads
* might try to create the bucket in parallel, so it is expected that some of
* them will fail to create it.
*/
if (!ase.getErrorCode().equals("BucketAlreadyExists")) {
LOG.error(ase.getMessage());
throw ase;
}
} catch (Exception e) {
LOG.error(e.getMessage());
throw e;
}
}
initMultipartUploads(conf);
enableMultiObjectsDelete = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, ENABLE_MULTI_DELETE, true);
blockUploadEnabled = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD, DEFAULT_FAST_UPLOAD);
if (blockUploadEnabled) {
blockOutputBuffer = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD_BUFFER, DEFAULT_FAST_UPLOAD_BUFFER);
partSize = COSUtils.ensureOutputParameterInRange(MULTIPART_SIZE, partSize);
blockFactory = COSDataBlocks.createFactory(this, blockOutputBuffer);
blockOutputActiveBlocks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD_ACTIVE_BLOCKS, DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS);
LOG.debug("Using COSBlockOutputStream with buffer = {}; block={};" + " queue limit={}", blockOutputBuffer, partSize, blockOutputActiveBlocks);
} else {
LOG.debug("Using COSOutputStream");
}
}
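Stripped of the configuration plumbing, the client wiring performed by initiate() reduces to roughly the following sketch; the credential strings and endpoint are placeholders, not values from the project.

BasicAWSCredentials creds = new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY"); // placeholders
ClientConfiguration clientConf = new ClientConfiguration();
clientConf.setProtocol(Protocol.HTTPS);
AmazonS3Client client = new AmazonS3Client(creds, clientConf);
client.setEndpoint("s3.example-endpoint.cloud"); // hypothetical COS endpoint
// Path-style addressing, as initiate() sets above
client.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());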
use of com.amazonaws.services.s3.AmazonS3Client in project opentest by mcdcorp.
the class GetS3Object method run.
@Override
public void run() {
super.run();
String awsCredentialsProfile = this.readStringArgument("awsProfile", "default");
String bucket = this.readStringArgument("bucket");
String objectKey = this.readStringArgument("objectKey");
String targetFilePath = this.readStringArgument("targetFile");
Boolean overwrite = this.readBooleanArgument("overwrite", false);
AmazonS3 s3Client = new AmazonS3Client(new ProfileCredentialsProvider(awsCredentialsProfile));
S3Object object = s3Client.getObject(new GetObjectRequest(bucket, objectKey));
InputStream objectDataStream = object.getObjectContent();
if (targetFilePath != null) {
File targetFile = new File(targetFilePath);
if (!targetFile.isAbsolute()) {
targetFile = Paths.get(this.getActor().getTempDir().getAbsolutePath(), targetFilePath).toFile();
}
targetFile.getParentFile().mkdirs();
try {
if (overwrite) {
Files.copy(objectDataStream, targetFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
} else {
Files.copy(objectDataStream, targetFile.toPath());
}
} catch (Exception ex) {
throw new RuntimeException(String.format("Failed to transfer data from the input stream into file %s", targetFilePath), ex);
}
this.writeArgument("targetFile", targetFile.getAbsolutePath());
} else {
// TODO: Make targetFile arg optional so this branch can execute.
// Read data in memory and write it to an output value
}
}
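The TODO branch above could be filled in along these lines: drain the object stream into memory and expose it as an output value. The output argument name content is hypothetical.

try {
  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  byte[] chunk = new byte[8192];
  int bytesRead;
  while ((bytesRead = objectDataStream.read(chunk)) != -1) {
    buffer.write(chunk, 0, bytesRead);
  }
  this.writeArgument("content", buffer.toString("UTF-8")); // "content" is an assumed name
} catch (IOException ex) {
  throw new RuntimeException("Failed to read S3 object data into memory", ex);
}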
use of com.amazonaws.services.s3.AmazonS3Client in project opentest by mcdcorp.
the class PutS3Object method run.
@Override
public void run() {
super.run();
String awsCredentialsProfile = this.readStringArgument("awsProfile", "default");
String bucket = this.readStringArgument("bucket");
String objectKey = this.readStringArgument("objectKey");
String sourceFilePath = this.readStringArgument("sourceFile");
File sourceFile = new File(sourceFilePath);
if (sourceFile.exists()) {
try {
AmazonS3 s3Client = new AmazonS3Client(new ProfileCredentialsProvider(awsCredentialsProfile));
s3Client.putObject(new PutObjectRequest(bucket, objectKey, sourceFile));
} catch (Exception ex) {
throw new RuntimeException(String.format("Failed to upload file \"%s\" to S3 bucket \"%s\"", sourceFilePath, bucket), ex);
}
} else {
throw new RuntimeException(String.format("Source file \"%s\" doesn't exist", sourceFilePath));
}
}
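A closely related variant uploads in-memory content instead of a file. The sketch below is an assumption built on the same client; when streaming, setting the content length up front keeps the SDK from buffering the whole stream to size it.

byte[] data = "example payload".getBytes(StandardCharsets.UTF_8); // hypothetical content
ObjectMetadata meta = new ObjectMetadata();
meta.setContentLength(data.length);
AmazonS3 s3Client = new AmazonS3Client(new ProfileCredentialsProvider("default"));
s3Client.putObject(new PutObjectRequest(bucket, objectKey, new ByteArrayInputStream(data), meta));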