Use of software.amazon.awssdk.services.s3.S3AsyncClient in project legacy-jclouds-examples by jclouds: class MainApp, method main.
public static void main(String[] args) throws IOException {
    if (args.length < PARAMETERS)
        throw new IllegalArgumentException(INVALID_SYNTAX);
    // Args
    String provider = args[0];
    // note that you can check if a provider is present ahead of time
    checkArgument(contains(allKeys, provider), "provider %s not in supported list: %s", provider, allKeys);
    String identity = args[1];
    String credential = args[2];
    String containerName = args[3];
    // Init
    BlobStoreContext context = ContextBuilder.newBuilder(provider)
            .credentials(identity, credential)
            .buildView(BlobStoreContext.class);
    try {
        // Create Container
        BlobStore blobStore = context.getBlobStore();
        blobStore.createContainerInLocation(null, containerName);
        // Add Blob
        Blob blob = blobStore.blobBuilder("test").payload("testdata").build();
        blobStore.putBlob(containerName, blob);
        // List Container
        for (StorageMetadata resourceMd : blobStore.list()) {
            if (resourceMd.getType() == StorageType.CONTAINER || resourceMd.getType() == StorageType.FOLDER) {
                // Use Map API
                Map<String, InputStream> containerMap = context.createInputStreamMap(resourceMd.getName());
                System.out.printf(" %s: %s entries%n", resourceMd.getName(), containerMap.size());
            }
        }
        // Use Provider API
        if (context.getBackendType().getRawType().equals(RestContext.class)) {
            RestContext<?, ?> rest = context.unwrap();
            if (rest.getApi() instanceof S3Client) {
                RestContext<S3Client, S3AsyncClient> providerContext = context.unwrap();
                providerContext.getApi().getBucketLogging(containerName);
            } else if (rest.getApi() instanceof SwiftClient) {
                RestContext<SwiftClient, SwiftAsyncClient> providerContext = context.unwrap();
                providerContext.getApi().getObjectInfo(containerName, "test");
            } else if (rest.getApi() instanceof AzureBlobClient) {
                RestContext<AzureBlobClient, AzureBlobAsyncClient> providerContext = context.unwrap();
                providerContext.getApi().getBlobProperties(containerName, "test");
            } else if (rest.getApi() instanceof AtmosClient) {
                RestContext<AtmosClient, AtmosAsyncClient> providerContext = context.unwrap();
                providerContext.getApi().getSystemMetadata(containerName + "/test");
            }
        }
    } finally {
        // Close connection
        context.close();
        System.exit(0);
    }
}
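Note that this legacy snippet actually exercises jclouds' own S3AsyncClient interface, not the AWS SDK v2 class of the same name. For comparison, here is a minimal sketch of the same create-bucket/put-object flow with software.amazon.awssdk.services.s3.S3AsyncClient; the bucket and key names are illustrative, and region/credentials are assumed to come from the default provider chain.

import java.util.concurrent.CompletableFuture;

import software.amazon.awssdk.core.async.AsyncRequestBody;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.model.CreateBucketRequest;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;
import software.amazon.awssdk.services.s3.model.PutObjectResponse;

public class S3AsyncClientSketch {
    public static void main(String[] args) {
        // The client is AutoCloseable; region and credentials resolve from the default chain.
        try (S3AsyncClient s3 = S3AsyncClient.create()) {
            // Create the bucket, then upload a small string payload once creation completes.
            CompletableFuture<PutObjectResponse> done = s3
                    .createBucket(CreateBucketRequest.builder().bucket("example-bucket").build())
                    .thenCompose(r -> s3.putObject(
                            PutObjectRequest.builder().bucket("example-bucket").key("test").build(),
                            AsyncRequestBody.fromString("testdata")));
            done.join(); // block only for the sake of the example
        }
    }
}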
Use of software.amazon.awssdk.services.s3.S3AsyncClient in project flink by apache: class AWSServicesTestUtils, method listBucketObjects.
public static List<S3Object> listBucketObjects(S3AsyncClient s3, String bucketName)
        throws ExecutionException, InterruptedException {
    ListObjectsRequest listObjects = ListObjectsRequest.builder().bucket(bucketName).build();
    CompletableFuture<ListObjectsResponse> res = s3.listObjects(listObjects);
    return res.get().contents();
}
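A single listObjects call returns at most 1,000 keys, which is fine for a test bucket but can silently truncate larger listings. For completeness, a sketch of the same helper using the SDK's built-in paginator; the class and method names here are illustrative, while listObjectsV2Paginator and contents() are standard GA SDK v2 APIs.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;

import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.S3Object;

public final class S3ListingSketch {
    // Collects every key in the bucket, following continuation tokens automatically.
    public static List<S3Object> listAllBucketObjects(S3AsyncClient s3, String bucketName)
            throws ExecutionException, InterruptedException {
        ListObjectsV2Request request = ListObjectsV2Request.builder().bucket(bucketName).build();
        List<S3Object> objects = new ArrayList<>();
        // contents() flattens the paginated responses into a stream of S3Object summaries;
        // the publisher issues follow-up requests as pages are consumed.
        s3.listObjectsV2Paginator(request).contents().subscribe(objects::add).get();
        return objects;
    }
}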
Use of software.amazon.awssdk.services.s3.S3AsyncClient in project uploader by smoketurner: class UploaderApplication, method run.
@Override
public void run(@Nonnull final UploaderConfiguration configuration,
        @Nonnull final Environment environment) throws Exception {
    final NettyConfiguration nettyConfig = configuration.getNetty();
    final AwsConfiguration awsConfig = configuration.getAws();
    // We create the event loop groups first so we can share them between
    // the Netty server receiving the requests and the AWS S3 client
    // uploading the batches to S3.
    final EventLoopGroup bossGroup = Netty.newBossEventLoopGroup();
    final EventLoopGroup workerGroup = Netty.newWorkerEventLoopGroup();
    environment.lifecycle().manage(new EventLoopGroupManager(bossGroup));
    environment.lifecycle().manage(new EventLoopGroupManager(workerGroup));
    final Size maxUploadSize = awsConfig.getMaxUploadSize();
    final EventLoopGroupConfiguration eventLoopConfig = EventLoopGroupConfiguration.builder()
            .eventLoopGroup(workerGroup).build();
    final NettySdkHttpClientFactory nettyFactory = NettySdkHttpClientFactory.builder()
            .eventLoopGroupConfiguration(eventLoopConfig).build();
    final ClientAsyncHttpConfiguration httpConfig = ClientAsyncHttpConfiguration.builder()
            .httpClientFactory(nettyFactory).build();
    // Build the asynchronous S3 client with the configured credentials
    // provider and region, and use the same Netty event loop group as the server.
    final S3AsyncClient s3 = S3AsyncClient.builder()
            .credentialsProvider(awsConfig.getCredentials())
            .region(awsConfig.getRegion())
            .asyncHttpConfiguration(httpConfig)
            .build();
    environment.lifecycle().manage(new AutoCloseableManager(s3));
    final Uploader uploader = new Uploader(s3, awsConfig);
    final UploadInitializer initializer = new UploadInitializer(nettyConfig, uploader, maxUploadSize.toBytes());
    final ServerBootstrap bootstrap = new ServerBootstrap();
    // Start the server
    final ChannelFuture future = bootstrap.group(bossGroup, workerGroup)
            .handler(new LoggingHandler(LogLevel.INFO))
            .option(ChannelOption.SO_BACKLOG, 128)
            .channel(Netty.serverChannelType())
            .childOption(ChannelOption.SO_KEEPALIVE, true)
            .childHandler(initializer)
            .bind(nettyConfig.getListenPort());
    environment.lifecycle().manage(new ChannelFutureManager(future));
    // Resources
    environment.jersey().register(new BatchResource(uploader));
    environment.jersey().register(new PingResource());
    environment.jersey().register(new VersionResource());
}
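NettySdkHttpClientFactory, ClientAsyncHttpConfiguration, and the asyncHttpConfiguration builder method appear to come from a preview build of the AWS SDK for Java v2 and did not survive to GA. In the GA SDK the same event-loop sharing is expressed through NettyNioAsyncHttpClient; a rough sketch of the equivalent wiring, assuming the same workerGroup and AwsConfiguration from the snippet above:

import io.netty.channel.EventLoopGroup;

import software.amazon.awssdk.http.async.SdkAsyncHttpClient;
import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient;
import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup;
import software.amazon.awssdk.services.s3.S3AsyncClient;

// Reuse the server's worker EventLoopGroup for the S3 client's HTTP transport.
SdkAsyncHttpClient httpClient = NettyNioAsyncHttpClient.builder()
        .eventLoopGroup(SdkEventLoopGroup.create(workerGroup))
        .build();

S3AsyncClient s3 = S3AsyncClient.builder()
        .credentialsProvider(awsConfig.getCredentials())
        .region(awsConfig.getRegion())
        .httpClient(httpClient)
        .build();

Sharing one event loop group this way avoids allocating a second set of I/O threads for the SDK, which is the point the original comment makes.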
Use of software.amazon.awssdk.services.s3.S3AsyncClient in project flink by apache: class KinesisFirehoseTableITTest, method readFromS3.
private List<Order> readFromS3() throws Exception {
    Deadline deadline = Deadline.fromNow(Duration.ofMinutes(1));
    List<S3Object> ordersObjects;
    List<Order> orders;
    do {
        Thread.sleep(1000);
        ordersObjects = listBucketObjects(s3AsyncClient, BUCKET_NAME);
        orders = readObjectsFromS3Bucket(
                s3AsyncClient,
                ordersObjects,
                BUCKET_NAME,
                responseBytes -> fromJson(new String(responseBytes.asByteArrayUnsafe()), Order.class));
    } while (deadline.hasTimeLeft() && orders.size() < NUM_ELEMENTS);
    return orders;
}
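The sleep-then-poll loop bounded by a Deadline is the standard way to wait for eventually delivered records in these tests. The pattern generalizes; a sketch of a reusable helper (the name pollUntil and its signature are illustrative, Deadline is Flink's org.apache.flink.api.common.time.Deadline):

import java.time.Duration;
import java.util.function.Predicate;
import java.util.function.Supplier;

import org.apache.flink.api.common.time.Deadline;

// Polls once per second until the predicate accepts the value or the deadline
// expires; returns the last polled value either way, so callers can still assert on it.
static <T> T pollUntil(Supplier<T> poll, Predicate<T> done, Duration timeout)
        throws InterruptedException {
    Deadline deadline = Deadline.fromNow(timeout);
    T value;
    do {
        Thread.sleep(1000);
        value = poll.get();
    } while (deadline.hasTimeLeft() && !done.test(value));
    return value;
}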
Use of software.amazon.awssdk.services.s3.S3AsyncClient in project flink by apache: class KinesisFirehoseSinkITCase, method firehoseSinkWritesCorrectDataToMockAWSServices.
@Test
public void firehoseSinkWritesCorrectDataToMockAWSServices() throws Exception {
    LOG.info("1 - Creating the bucket for Firehose to deliver into...");
    createBucket(s3AsyncClient, BUCKET_NAME);
    LOG.info("2 - Creating the IAM Role for Firehose to write into the s3 bucket...");
    createIAMRole(iamAsyncClient, ROLE_NAME);
    LOG.info("3 - Creating the Firehose delivery stream...");
    createDeliveryStream(STREAM_NAME, BUCKET_NAME, ROLE_ARN, firehoseAsyncClient);
    KinesisFirehoseSink<String> kdsSink = KinesisFirehoseSink.<String>builder()
            .setSerializationSchema(new SimpleStringSchema())
            .setDeliveryStreamName(STREAM_NAME)
            .setMaxBatchSize(1)
            .setFirehoseClientProperties(createConfig(mockFirehoseContainer.getEndpoint()))
            .build();
    KinesisFirehoseTestUtils.getSampleDataGenerator(env, NUMBER_OF_ELEMENTS).sinkTo(kdsSink);
    env.execute("Integration Test");
    List<S3Object> objects = listBucketObjects(
            createS3Client(mockFirehoseContainer.getEndpoint(), httpClient), BUCKET_NAME);
    assertThat(objects.size()).isEqualTo(NUMBER_OF_ELEMENTS);
    assertThat(readObjectsFromS3Bucket(
            s3AsyncClient, objects, BUCKET_NAME,
            response -> new String(response.asByteArrayUnsafe())))
            .containsAll(KinesisFirehoseTestUtils.getSampleData(NUMBER_OF_ELEMENTS));
}
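The createS3Client helper points the client at the mock container's endpoint rather than the real AWS service; in the SDK v2 that is done with endpointOverride. A minimal sketch of what such a helper typically looks like (the method name, dummy credentials, and region choice are illustrative; the original helper also receives an httpClient, omitted here):

import java.net.URI;

import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.s3.S3AsyncClient;

// Builds an S3AsyncClient against a mock endpoint; the mock ignores the
// credential values, so static dummies keep the test hermetic.
static S3AsyncClient createMockS3Client(String endpoint) {
    return S3AsyncClient.builder()
            .endpointOverride(URI.create(endpoint))
            .region(Region.US_EAST_1)
            .credentialsProvider(StaticCredentialsProvider.create(
                    AwsBasicCredentials.create("accessKey", "secretKey")))
            .build();
}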