Use of com.amazonaws.services.s3.model.ObjectListing in project h2o-2 by h2oai.
Class ImportS3, method processListing.
public void processListing(ObjectListing listing, JsonArray succ, JsonArray fail) {
  for (S3ObjectSummary obj : listing.getObjectSummaries()) {
    try {
      Key k = PersistS3.loadKey(obj);
      JsonObject o = new JsonObject();
      o.addProperty(KEY, k.toString());
      o.addProperty(FILE, obj.getKey());
      o.addProperty(VALUE_SIZE, obj.getSize());
      succ.add(o);
    } catch (IOException e) {
      JsonObject o = new JsonObject();
      o.addProperty(FILE, obj.getKey());
      o.addProperty(ERROR, e.getMessage());
      fail.add(o);
    }
  }
}
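The method above ties the listing traversal to H2O's PersistS3 and Gson types. As a minimal, self-contained sketch of the same per-summary iteration without those dependencies (the default-credential client and the bucket name below are illustrative assumptions, not part of the H2O code):

import java.util.LinkedHashMap;
import java.util.Map;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class ListingSummary {
  // Collects key -> size for one page of results; pagination is left to the caller.
  static Map<String, Long> summarize(ObjectListing listing) {
    Map<String, Long> sizes = new LinkedHashMap<>();
    for (S3ObjectSummary obj : listing.getObjectSummaries())
      sizes.put(obj.getKey(), obj.getSize());
    return sizes;
  }

  public static void main(String[] args) {
    AmazonS3 s3 = new AmazonS3Client();                 // default credential chain (assumption)
    ObjectListing first = s3.listObjects("my-bucket");  // hypothetical bucket name
    System.out.println(summarize(first));
  }
}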
Use of com.amazonaws.services.s3.model.ObjectListing in project h2o-2 by h2oai.
Class ImportS3, method serve.
@Override
protected Response serve() {
  String bucket = _bucket.value();
  Log.info("ImportS3 processing (" + bucket + ")");
  JsonObject json = new JsonObject();
  JsonArray succ = new JsonArray();
  JsonArray fail = new JsonArray();
  AmazonS3 s3 = PersistS3.getClient();
  // Page through the bucket: listObjects returns the first batch,
  // listNextBatchOfObjects fetches the rest while the listing is truncated.
  ObjectListing currentList = s3.listObjects(bucket);
  processListing(currentList, succ, fail);
  while (currentList.isTruncated()) {
    currentList = s3.listNextBatchOfObjects(currentList);
    processListing(currentList, succ, fail);
  }
  json.add(NUM_SUCCEEDED, new JsonPrimitive(succ.size()));
  json.add(SUCCEEDED, succ);
  json.add(NUM_FAILED, new JsonPrimitive(fail.size()));
  json.add(FAILED, fail);
  DKV.write_barrier();
  Response r = Response.done(json);
  r.setBuilder(SUCCEEDED + "." + KEY, new KeyCellBuilder());
  return r;
}
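Both H2O methods lean on the AWS SDK v1 pagination contract: listObjects returns at most one page of summaries, and listNextBatchOfObjects must be called for as long as isTruncated() reports more results. A minimal sketch of that loop on its own, with the client construction and bucket name assumed for illustration:

import java.util.ArrayList;
import java.util.List;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class ListAllKeys {
  // Returns every key in the bucket, following truncated listings to the end.
  static List<String> listAllKeys(AmazonS3 s3, String bucket) {
    List<String> keys = new ArrayList<>();
    ObjectListing listing = s3.listObjects(bucket);
    while (true) {
      for (S3ObjectSummary sum : listing.getObjectSummaries())
        keys.add(sum.getKey());
      if (!listing.isTruncated())
        break;
      listing = s3.listNextBatchOfObjects(listing);
    }
    return keys;
  }

  public static void main(String[] args) {
    AmazonS3 s3 = new AmazonS3Client();                          // default credential chain (assumption)
    listAllKeys(s3, "my-bucket").forEach(System.out::println);   // hypothetical bucket
  }
}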
Use of com.amazonaws.services.s3.model.ObjectListing in project ignite by apache.
Class S3CheckpointSpi, method spiStart.
/** {@inheritDoc} */
@SuppressWarnings({"BusyWait"})
@Override
public void spiStart(String igniteInstanceName) throws IgniteSpiException {
    // Start SPI start stopwatch.
    startStopwatch();
    assertParameter(cred != null, "awsCredentials != null");
    if (log.isDebugEnabled()) {
        log.debug(configInfo("awsCredentials", cred));
        log.debug(configInfo("clientConfiguration", cfg));
        log.debug(configInfo("bucketNameSuffix", bucketNameSuffix));
        log.debug(configInfo("bucketEndpoint", bucketEndpoint));
        log.debug(configInfo("SSEAlgorithm", sseAlg));
    }
    if (cfg == null)
        U.warn(log, "Amazon client configuration is not set (will use default).");
    if (F.isEmpty(bucketNameSuffix)) {
        U.warn(log, "Bucket name suffix is null or empty (will use default bucket name).");
        bucketName = BUCKET_NAME_PREFIX + DFLT_BUCKET_NAME_SUFFIX;
    }
    else
        bucketName = BUCKET_NAME_PREFIX + bucketNameSuffix;
    s3 = cfg != null ? new AmazonS3Client(cred, cfg) : new AmazonS3Client(cred);
    if (!F.isEmpty(bucketEndpoint))
        s3.setEndpoint(bucketEndpoint);
    if (!s3.doesBucketExist(bucketName)) {
        try {
            s3.createBucket(bucketName);
            if (log.isDebugEnabled())
                log.debug("Created S3 bucket: " + bucketName);
            // Bucket creation is eventually consistent: poll until it becomes visible.
            while (!s3.doesBucketExist(bucketName))
                try {
                    U.sleep(200);
                }
                catch (IgniteInterruptedCheckedException e) {
                    throw new IgniteSpiException("Thread has been interrupted.", e);
                }
        }
        catch (AmazonClientException e) {
            try {
                if (!s3.doesBucketExist(bucketName))
                    throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            }
            catch (AmazonClientException ignored) {
                throw new IgniteSpiException("Failed to create bucket: " + bucketName, e);
            }
        }
    }
    Collection<S3TimeData> s3TimeDataLst = new LinkedList<>();
    try {
        // Scan every existing checkpoint in the bucket, paging through truncated listings.
        ObjectListing list = s3.listObjects(bucketName);
        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                S3CheckpointData data = read(sum.getKey());
                if (data != null) {
                    s3TimeDataLst.add(new S3TimeData(data.getExpireTime(), data.getKey()));
                    if (log.isDebugEnabled())
                        log.debug("Registered existing checkpoint from key: " + data.getKey());
                }
            }
            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    }
    catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    }
    catch (IgniteCheckedException e) {
        throw new IgniteSpiException("Failed to marshal/unmarshal objects in bucket: " + bucketName, e);
    }
    // Track expiration only for data created by this node.
    timeoutWrk = new S3TimeoutWorker();
    timeoutWrk.add(s3TimeDataLst);
    timeoutWrk.start();
    registerMBean(igniteInstanceName, new S3CheckpointSpiMBeanImpl(this), S3CheckpointSpiMBean.class);
    // Ack ok start.
    if (log.isDebugEnabled())
        log.debug(startInfo());
}
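One detail worth noting in the bootstrap above: bucket creation in S3 is not immediately visible everywhere, so the SPI polls doesBucketExist after createBucket until the bucket appears. A stripped-down sketch of that idiom, using a plain Thread.sleep and an assumed timeout in place of Ignite's U.sleep and IgniteSpiException:

import com.amazonaws.AmazonClientException;
import com.amazonaws.services.s3.AmazonS3;

public class BucketBootstrap {
    // Creates the bucket if absent and waits (bounded) for it to become visible.
    static void ensureBucket(AmazonS3 s3, String bucket, long timeoutMs) throws InterruptedException {
        if (s3.doesBucketExist(bucket))
            return;
        try {
            s3.createBucket(bucket);
        }
        catch (AmazonClientException e) {
            if (!s3.doesBucketExist(bucket))
                throw e; // creation really failed
        }
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!s3.doesBucketExist(bucket)) {
            if (System.currentTimeMillis() > deadline)
                throw new IllegalStateException("Bucket did not become visible: " + bucket);
            Thread.sleep(200);
        }
    }
}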
Use of com.amazonaws.services.s3.model.ObjectListing in project ignite by apache.
Class TcpDiscoveryS3IpFinder, method getRegisteredAddresses.
/** {@inheritDoc} */
@Override
public Collection<InetSocketAddress> getRegisteredAddresses() throws IgniteSpiException {
    initClient();
    Collection<InetSocketAddress> addrs = new LinkedList<>();
    try {
        ObjectListing list = s3.listObjects(bucketName);
        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                String key = sum.getKey();
                StringTokenizer st = new StringTokenizer(key, DELIM);
                if (st.countTokens() != 2)
                    U.error(log, "Failed to parse S3 entry due to invalid format: " + key);
                else {
                    String addrStr = st.nextToken();
                    String portStr = st.nextToken();
                    int port = -1;
                    try {
                        port = Integer.parseInt(portStr);
                    }
                    catch (NumberFormatException e) {
                        U.error(log, "Failed to parse port for S3 entry: " + key, e);
                    }
                    if (port != -1)
                        try {
                            addrs.add(new InetSocketAddress(addrStr, port));
                        }
                        catch (IllegalArgumentException e) {
                            U.error(log, "Failed to parse port for S3 entry: " + key, e);
                        }
                }
            }
            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    }
    catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to list objects in the bucket: " + bucketName, e);
    }
    return addrs;
}
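Each object key here is expected to encode an address and a port separated by the class's DELIM constant. A small sketch of just that parsing step in isolation, assuming a "#" delimiter for illustration and returning null instead of logging on malformed keys:

import java.net.InetSocketAddress;
import java.util.StringTokenizer;

public class EntryParser {
    private static final String DELIM = "#"; // assumed delimiter, for illustration only

    // Expects keys of the form "<address><DELIM><port>".
    static InetSocketAddress parse(String key) {
        StringTokenizer st = new StringTokenizer(key, DELIM);
        if (st.countTokens() != 2)
            return null;
        String addr = st.nextToken();
        try {
            int port = Integer.parseInt(st.nextToken());
            return new InetSocketAddress(addr, port);
        }
        catch (IllegalArgumentException e) { // also covers NumberFormatException from parseInt
            return null;
        }
    }

    public static void main(String[] args) {
        System.out.println(parse("192.168.1.10#47500")); // address:port
        System.out.println(parse("malformed-key"));      // null
    }
}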
Use of com.amazonaws.services.s3.model.ObjectListing in project ignite by apache.
Class S3CheckpointSpiSelfTest, method afterSpiStopped.
/**
 * @throws Exception If error.
 */
@Override
protected void afterSpiStopped() throws Exception {
    AWSCredentials cred = new BasicAWSCredentials(IgniteS3TestSuite.getAccessKey(), IgniteS3TestSuite.getSecretKey());
    AmazonS3 s3 = new AmazonS3Client(cred);
    String bucketName = S3CheckpointSpi.BUCKET_NAME_PREFIX + "unit-test-bucket";
    try {
        // Remove every checkpoint object left behind by the test, page by page.
        ObjectListing list = s3.listObjects(bucketName);
        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries())
                s3.deleteObject(bucketName, sum.getKey());
            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    }
    catch (AmazonClientException e) {
        throw new IgniteSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    }
}
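The tear-down above issues one deleteObject call per key. Where many checkpoint objects accumulate, the SDK's batch API can remove up to 1,000 keys per request; the following is a hedged sketch of the same page-by-page cleanup using DeleteObjectsRequest (the helper name and the batching choice are assumptions, not part of the Ignite test):

import java.util.ArrayList;
import java.util.List;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class BucketCleaner {
    // Deletes every object in the bucket, batching one listing page per delete request.
    static void emptyBucket(AmazonS3 s3, String bucket) {
        ObjectListing list = s3.listObjects(bucket);
        while (true) {
            List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>();
            for (S3ObjectSummary sum : list.getObjectSummaries())
                keys.add(new DeleteObjectsRequest.KeyVersion(sum.getKey()));
            if (!keys.isEmpty())
                s3.deleteObjects(new DeleteObjectsRequest(bucket).withKeys(keys));
            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    }
}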