Use of S3AsyncClient in project aws-doc-sdk-examples by awsdocs:
the `main` method of class `CreateBucketAsync`. (Note: this snippet uses the AWS SDK for Java v2 `software.amazon.awssdk.services.s3.S3AsyncClient`, not `org.jclouds.s3.S3AsyncClient` as the index label suggests — verify the label.)
/**
 * Entry point: creates an Amazon S3 bucket with the name supplied on the command line.
 *
 * <p>Usage: {@code <bucketName>} — the bucket name must be globally unique, or the
 * service returns an error.
 */
public static void main(String[] args) throws URISyntaxException {
    final String USAGE = "\n" + "Usage:\n" + " <bucketName> \n\n" + "Where:\n" + " bucketName - the name of the bucket to create. The bucket name must be unique, or an error occurs.\n\n";

    if (args.length != 1) {
        System.out.println(USAGE);
        System.exit(1);
    }

    String bucketName = args[0];
    System.out.format("Creating a bucket named %s\n", bucketName);

    Region region = Region.US_EAST_1;
    S3AsyncClient s3AsyncClient = S3AsyncClient.builder().region(region).build();
    try {
        createBucket(s3AsyncClient, bucketName);
    } finally {
        // Release the client's HTTP connection pool and event-loop resources.
        // The original never closed the client (resource leak); the sibling
        // GetObjectDataAsync example closes its client, so do the same here.
        s3AsyncClient.close();
    }
}
Use of S3AsyncClient in project aws-doc-sdk-examples by awsdocs:
the `main` method of class `S3AsyncOps` (AWS SDK for Java v2, despite the jclouds index label).
/**
 * Entry point: asynchronously uploads a local file to an Amazon S3 bucket.
 *
 * <p>Usage: {@code S3AsyncOps <bucketName> <key> <path>} — the target bucket,
 * the object key, and the local file to upload.
 */
public static void main(String[] args) {
    final String USAGE = "\n" + "Usage:\n" + " S3AsyncOps <bucketName> <key> <path>\n\n" + "Where:\n" + " bucketName - the name of the Amazon S3 bucket (for example, bucket1). \n\n" + " key - the name of the object (for example, book.pdf). \n" + " path - the local path to the file (for example, C:/AWS/book.pdf). \n";

    if (args.length != 3) {
        System.out.println(USAGE);
        System.exit(1);
    }

    String bucketName = args[0];
    String key = args[1];
    String path = args[2];

    Region region = Region.US_WEST_2;
    S3AsyncClient client = S3AsyncClient.builder().region(region).build();

    // Describe the upload target (bucket + key).
    PutObjectRequest objectRequest = PutObjectRequest.builder().bucket(bucketName).key(key).build();

    // Kick off the asynchronous upload of the local file.
    CompletableFuture<PutObjectResponse> upload = client.putObject(objectRequest, AsyncRequestBody.fromFile(Paths.get(path)));

    upload.whenComplete((response, throwable) -> {
        try {
            if (throwable != null) {
                // The upload failed; report the cause.
                throwable.printStackTrace();
            } else {
                System.out.println("Object uploaded. Details: " + response);
            }
        } finally {
            // Only close the client when you are completely done with it
            client.close();
        }
    });

    // Block until the upload (and its completion callback) has finished.
    upload.join();
}
Use of S3AsyncClient in project aws-doc-sdk-examples by awsdocs:
the `main` method of class `GetObjectDataAsync` (AWS SDK for Java v2, despite the jclouds index label).
/**
 * Entry point: downloads an object from an Amazon S3 bucket and writes it to a local file.
 *
 * <p>Usage: {@code <bucketName> <keyName> <path>} — the source bucket, the
 * object key, and the local path the file is written to.
 */
public static void main(String[] args) {
    final String USAGE = "\n" + "Usage:\n" + " <bucketName> <keyName> <path>\n\n" + "Where:\n" + " bucketName - the Amazon S3 bucket name. \n\n" + " keyName - the key name. \n\n" + " path - the path where the file is written to. \n\n";

    if (args.length != 3) {
        System.out.println(USAGE);
        System.exit(1);
    }

    final String bucketName = args[0];
    final String keyName = args[1];
    final String path = args[2];

    // Build an asynchronous client for us-east-1, fetch the object,
    // then release the client's resources.
    S3AsyncClient s3AsyncClient = S3AsyncClient.builder().region(Region.US_EAST_1).build();
    getObject(s3AsyncClient, bucketName, keyName, path);
    s3AsyncClient.close();
}
Use of org.jclouds.s3.S3AsyncClient in project legacy-jclouds-examples by jclouds:
the `main` method of class `MainApp`.
/**
 * Entry point: demonstrates the provider-portable jclouds BlobStore API and then,
 * where the backend supports it, drops down to the provider-specific API
 * (S3, Swift, Azure Blob, or Atmos).
 *
 * <p>Expected args: {@code <provider> <identity> <credential> <containerName>}.
 *
 * @throws IOException              if the underlying blobstore I/O fails
 * @throws IllegalArgumentException if too few arguments are supplied or the
 *                                  provider is not in the supported list
 */
public static void main(String[] args) throws IOException {
    if (args.length < PARAMETERS)
        throw new IllegalArgumentException(INVALID_SYNTAX);

    // Args
    String provider = args[0];
    // note that you can check if a provider is present ahead of time
    checkArgument(contains(allKeys, provider), "provider %s not in supported list: %s", provider, allKeys);
    String identity = args[1];
    String credential = args[2];
    String containerName = args[3];

    // Init
    BlobStoreContext context = ContextBuilder.newBuilder(provider).credentials(identity, credential).buildView(BlobStoreContext.class);
    try {
        // Create Container
        BlobStore blobStore = context.getBlobStore();
        blobStore.createContainerInLocation(null, containerName);

        // Add Blob
        Blob blob = blobStore.blobBuilder("test").payload("testdata").build();
        blobStore.putBlob(containerName, blob);

        // List Container
        for (StorageMetadata resourceMd : blobStore.list()) {
            if (resourceMd.getType() == StorageType.CONTAINER || resourceMd.getType() == StorageType.FOLDER) {
                // Use Map API
                Map<String, InputStream> containerMap = context.createInputStreamMap(resourceMd.getName());
                System.out.printf(" %s: %s entries%n", resourceMd.getName(), containerMap.size());
            }
        }

        // Use Provider API — unwrap to the backend-specific sync client when recognized
        if (context.getBackendType().getRawType().equals(RestContext.class)) {
            RestContext<?, ?> rest = context.unwrap();
            if (rest.getApi() instanceof S3Client) {
                RestContext<S3Client, S3AsyncClient> providerContext = context.unwrap();
                providerContext.getApi().getBucketLogging(containerName);
            } else if (rest.getApi() instanceof SwiftClient) {
                RestContext<SwiftClient, SwiftAsyncClient> providerContext = context.unwrap();
                providerContext.getApi().getObjectInfo(containerName, "test");
            } else if (rest.getApi() instanceof AzureBlobClient) {
                RestContext<AzureBlobClient, AzureBlobAsyncClient> providerContext = context.unwrap();
                providerContext.getApi().getBlobProperties(containerName, "test");
            } else if (rest.getApi() instanceof AtmosClient) {
                RestContext<AtmosClient, AtmosAsyncClient> providerContext = context.unwrap();
                providerContext.getApi().getSystemMetadata(containerName + "/test");
            }
        }
    } finally {
        // Close connection
        context.close();
    }
    // Exit only after the context has been closed. The original called
    // System.exit(0) inside the finally block, which terminated the JVM with a
    // success status before any exception from the try block could propagate —
    // failures were silently reported as success.
    System.exit(0);
}
Use of S3AsyncClient in project uploader by smoketurner:
the `run` method of class `UploaderApplication` (AWS SDK for Java v2 preview client, despite the jclouds index label).
/**
 * Wires together the Netty upload server and the asynchronous S3 client,
 * registering every component with the Dropwizard lifecycle so shutdown is
 * handled in order.
 *
 * <p>Registration order matters here: the event loop groups are created and
 * lifecycle-managed first because they are shared between the Netty server
 * accepting requests and the S3 client uploading batches.
 *
 * @param configuration application configuration (Netty and AWS sections)
 * @param environment   Dropwizard environment used for lifecycle and Jersey registration
 * @throws Exception if any component fails to initialize
 */
@Override
public void run(@Nonnull final UploaderConfiguration configuration, @Nonnull final Environment environment) throws Exception {
final NettyConfiguration nettyConfig = configuration.getNetty();
final AwsConfiguration awsConfig = configuration.getAws();
// we create the event loop groups first so we can share them between
// the Netty server receiving the requests and the AWS S3 client
// uploading the batches to S3.
final EventLoopGroup bossGroup = Netty.newBossEventLoopGroup();
final EventLoopGroup workerGroup = Netty.newWorkerEventLoopGroup();
// Managed wrappers shut the groups down gracefully when the app stops.
environment.lifecycle().manage(new EventLoopGroupManager(bossGroup));
environment.lifecycle().manage(new EventLoopGroupManager(workerGroup));
final Size maxUploadSize = awsConfig.getMaxUploadSize();
// Point the SDK's Netty HTTP client at the shared worker group.
final EventLoopGroupConfiguration eventLoopConfig = EventLoopGroupConfiguration.builder().eventLoopGroup(workerGroup).build();
final NettySdkHttpClientFactory nettyFactory = NettySdkHttpClientFactory.builder().eventLoopGroupConfiguration(eventLoopConfig).build();
final ClientAsyncHttpConfiguration httpConfig = ClientAsyncHttpConfiguration.builder().httpClientFactory(nettyFactory).build();
// build the asynchronous S3 client with the configured credentials
// provider and region and use the same Netty event group as the server.
final S3AsyncClient s3 = S3AsyncClient.builder().credentialsProvider(awsConfig.getCredentials()).region(awsConfig.getRegion()).asyncHttpConfiguration(httpConfig).build();
// Close the S3 client automatically on application shutdown.
environment.lifecycle().manage(new AutoCloseableManager(s3));
final Uploader uploader = new Uploader(s3, awsConfig);
final UploadInitializer initializer = new UploadInitializer(nettyConfig, uploader, maxUploadSize.toBytes());
final ServerBootstrap bootstrap = new ServerBootstrap();
// Start the server
final ChannelFuture future = bootstrap.group(bossGroup, workerGroup).handler(new LoggingHandler(LogLevel.INFO)).option(ChannelOption.SO_BACKLOG, 128).channel(Netty.serverChannelType()).childOption(ChannelOption.SO_KEEPALIVE, true).childHandler(initializer).bind(nettyConfig.getListenPort());
// Manage the bind future so the channel is closed on shutdown.
environment.lifecycle().manage(new ChannelFutureManager(future));
// Resources
environment.jersey().register(new BatchResource(uploader));
environment.jersey().register(new PingResource());
environment.jersey().register(new VersionResource());
}
Aggregations