Use of org.jclouds.s3.S3AsyncClient in project uploader by smoketurner.
The class UploaderApplication, method run:
@Override
public void run(@Nonnull final UploaderConfiguration configuration, @Nonnull final Environment environment) throws Exception {
    final NettyConfiguration nettyConfig = configuration.getNetty();
    final AwsConfiguration awsConfig = configuration.getAws();

    // We create the event loop groups first so we can share them between
    // the Netty server receiving the requests and the AWS S3 client
    // uploading the batches to S3.
    final EventLoopGroup bossGroup = Netty.newBossEventLoopGroup();
    final EventLoopGroup workerGroup = Netty.newWorkerEventLoopGroup();
    environment.lifecycle().manage(new EventLoopGroupManager(bossGroup));
    environment.lifecycle().manage(new EventLoopGroupManager(workerGroup));

    final Size maxUploadSize = awsConfig.getMaxUploadSize();

    final EventLoopGroupConfiguration eventLoopConfig = EventLoopGroupConfiguration.builder()
            .eventLoopGroup(workerGroup)
            .build();
    final NettySdkHttpClientFactory nettyFactory = NettySdkHttpClientFactory.builder()
            .eventLoopGroupConfiguration(eventLoopConfig)
            .build();
    final ClientAsyncHttpConfiguration httpConfig = ClientAsyncHttpConfiguration.builder()
            .httpClientFactory(nettyFactory)
            .build();

    // Build the asynchronous S3 client with the configured credentials
    // provider and region, and use the same Netty event loop group as the server.
    final S3AsyncClient s3 = S3AsyncClient.builder()
            .credentialsProvider(awsConfig.getCredentials())
            .region(awsConfig.getRegion())
            .asyncHttpConfiguration(httpConfig)
            .build();
    environment.lifecycle().manage(new AutoCloseableManager(s3));

    final Uploader uploader = new Uploader(s3, awsConfig);
    final UploadInitializer initializer = new UploadInitializer(nettyConfig, uploader, maxUploadSize.toBytes());

    final ServerBootstrap bootstrap = new ServerBootstrap();

    // Start the server
    final ChannelFuture future = bootstrap.group(bossGroup, workerGroup)
            .handler(new LoggingHandler(LogLevel.INFO))
            .option(ChannelOption.SO_BACKLOG, 128)
            .channel(Netty.serverChannelType())
            .childOption(ChannelOption.SO_KEEPALIVE, true)
            .childHandler(initializer)
            .bind(nettyConfig.getListenPort());
    environment.lifecycle().manage(new ChannelFutureManager(future));

    // Register the Jersey resources
    environment.jersey().register(new BatchResource(uploader));
    environment.jersey().register(new PingResource());
    environment.jersey().register(new VersionResource());
}
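EventLoopGroupManager, AutoCloseableManager, and ChannelFutureManager are project classes that are not shown in this excerpt. As a rough illustration of the pattern, a minimal Dropwizard Managed wrapper for a shared Netty EventLoopGroup could look like the sketch below; the class name matches the one used above, but the body is an assumption for illustration, not the project's actual code.

import io.dropwizard.lifecycle.Managed;
import io.netty.channel.EventLoopGroup;

// Hypothetical sketch: wraps a shared Netty EventLoopGroup so Dropwizard
// shuts it down when the application stops. The real EventLoopGroupManager
// in the uploader project may differ.
public class EventLoopGroupManager implements Managed {

    private final EventLoopGroup group;

    public EventLoopGroupManager(final EventLoopGroup group) {
        this.group = group;
    }

    @Override
    public void start() throws Exception {
        // Nothing to do: the group was created eagerly in run().
    }

    @Override
    public void stop() throws Exception {
        // Block until pending tasks are drained and the event loop threads exit.
        group.shutdownGracefully().syncUninterruptibly();
    }
}

The same pattern presumably applies to AutoCloseableManager, which would simply call close() on the wrapped S3 client in stop().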
Use of org.jclouds.s3.S3AsyncClient in project flink by apache.
The class KinesisFirehoseTableITTest, method readFromS3:
private List<Order> readFromS3() throws Exception {
    Deadline deadline = Deadline.fromNow(Duration.ofMinutes(1));
    List<S3Object> ordersObjects;
    List<Order> orders;
    do {
        Thread.sleep(1000);
        ordersObjects = listBucketObjects(s3AsyncClient, BUCKET_NAME);
        orders = readObjectsFromS3Bucket(
                s3AsyncClient,
                ordersObjects,
                BUCKET_NAME,
                responseBytes -> fromJson(new String(responseBytes.asByteArrayUnsafe()), Order.class));
    } while (deadline.hasTimeLeft() && orders.size() < NUM_ELEMENTS);
    return orders;
}
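The helpers listBucketObjects and readObjectsFromS3Bucket come from Flink's AWS test utilities and are not shown in this excerpt. A rough sketch of how such helpers could be written against the AWS SDK v2 S3AsyncClient follows; the method signatures and the enclosing S3ReadSketch class are assumptions for illustration, not Flink's actual implementation.

import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;

import software.amazon.awssdk.core.ResponseBytes;
import software.amazon.awssdk.core.async.AsyncResponseTransformer;
import software.amazon.awssdk.services.s3.S3AsyncClient;
import software.amazon.awssdk.services.s3.model.GetObjectRequest;
import software.amazon.awssdk.services.s3.model.GetObjectResponse;
import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
import software.amazon.awssdk.services.s3.model.S3Object;

// Hypothetical helpers; Flink's real test utilities may differ in signature and behavior.
public final class S3ReadSketch {

    // List every object currently stored in the bucket.
    static List<S3Object> listBucketObjects(S3AsyncClient s3, String bucket) {
        ListObjectsV2Request request = ListObjectsV2Request.builder().bucket(bucket).build();
        return s3.listObjectsV2(request).join().contents();
    }

    // Download each listed object and map its bytes to a domain value.
    static <T> List<T> readObjectsFromS3Bucket(
            S3AsyncClient s3,
            List<S3Object> objects,
            String bucket,
            Function<ResponseBytes<GetObjectResponse>, T> mapper) {
        return objects.stream()
                .map(obj -> GetObjectRequest.builder().bucket(bucket).key(obj.key()).build())
                .map(request -> s3.getObject(request, AsyncResponseTransformer.toBytes()))
                .map(future -> mapper.apply(future.join()))
                .collect(Collectors.toList());
    }
}

Blocking on join() is reasonable here because the calling test already polls synchronously inside the do/while loop above.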