Usage of com.amazonaws.services.s3.AmazonS3 in project simplejpa (by appoxy):
class Save, method persistOnly.
/**
 * Writes a single entity's persistent state to SimpleDB, storing any @Lob
 * fields as serialized objects in S3. Fires the PrePersist/PreUpdate entity
 * listener before the write and PostPersist/PostUpdate after. Cascades
 * persistence to related entities when @ManyToOne/@OneToMany declare
 * CascadeType.ALL or CascadeType.PERSIST. Optimistic locking is applied via
 * the @Version field (a conditional put on the version attribute).
 *
 * @param o  the entity to persist (may be a CGLIB-enhanced proxy)
 * @param id the SimpleDB item name for this entity
 * @throws AmazonClientException     on SimpleDB/S3 client failure
 * @throws IllegalAccessException    if a property accessor is not accessible
 * @throws InvocationTargetException if a property accessor throws
 * @throws IOException               if serializing a @Lob field fails
 */
protected void persistOnly(Object o, String id) throws AmazonClientException, IllegalAccessException, InvocationTargetException, IOException {
    long start = System.currentTimeMillis();
    em.invokeEntityListener(o, newObject ? PrePersist.class : PreUpdate.class);
    AnnotationInfo ai = em.getFactory().getAnnotationManager().getAnnotationInfo(o);
    UpdateCondition expected = null;
    PersistentProperty versionField = null;
    Long nextVersion = -1L;
    String domainName;
    if (ai.getRootClass() != null) {
        domainName = em.getOrCreateDomain(ai.getRootClass());
    } else {
        domainName = em.getOrCreateDomain(o.getClass());
    }
    // now set attributes
    List<ReplaceableAttribute> attsToPut = new ArrayList<ReplaceableAttribute>();
    List<Attribute> attsToDelete = new ArrayList<Attribute>();
    if (ai.getDiscriminatorValue() != null) {
        attsToPut.add(new ReplaceableAttribute(EntityManagerFactoryImpl.DTYPE, ai.getDiscriminatorValue(), true));
    }
    // If the entity is a CGLIB-enhanced proxy, grab its LazyInterceptor so we
    // can later see which fields were explicitly nulled since loading.
    LazyInterceptor interceptor = null;
    if (o instanceof Factory) {
        Factory factory = (Factory) o;
        interceptor = (LazyInterceptor) factory.getCallback(0);
    }
    for (PersistentProperty field : ai.getPersistentProperties()) {
        Object ob = field.getProperty(o);
        String columnName = field.getColumnName();
        if (ob == null) {
            // null value: schedule the attribute for deletion (used in the
            // non-enhanced path below) and skip the put entirely.
            attsToDelete.add(new Attribute(columnName, null));
            continue;
        }
        if (field.isForeignKeyRelationship()) {
            // store the id(s) of the related object(s), not the object itself
            if (Collection.class.isAssignableFrom(field.getRawClass())) {
                for (Object each : (Collection) ob) {
                    String id2 = em.getId(each);
                    attsToPut.add(new ReplaceableAttribute(columnName, id2, true));
                }
            } else {
                String id2 = em.getId(ob);
                attsToPut.add(new ReplaceableAttribute(columnName, id2, true));
                // cascade persistence when @ManyToOne declares ALL or PERSIST
                boolean persistRelationship = false;
                ManyToOne a = field.getGetter().getAnnotation(ManyToOne.class);
                if (a != null && a.cascade() != null) {
                    for (CascadeType type : a.cascade()) {
                        if (CascadeType.ALL == type || CascadeType.PERSIST == type) {
                            persistRelationship = true;
                        }
                    }
                }
                if (persistRelationship) {
                    em.persist(ob);
                }
            }
        } else if (field.isVersioned()) {
            // optimistic locking: bump the version and make the put
            // conditional on the previously-read version still being current
            // (or on the attribute not existing yet for a fresh entity).
            Long curVersion = Long.parseLong("" + ob);
            nextVersion = (1 + curVersion);
            attsToPut.add(new ReplaceableAttribute(columnName, em.padOrConvertIfRequired(nextVersion), true));
            if (curVersion > 0) {
                expected = new UpdateCondition(columnName, em.padOrConvertIfRequired(curVersion), true);
            } else {
                expected = new UpdateCondition().withName(columnName).withExists(false);
            }
            versionField = field;
        } else if (field.isInverseRelationship()) {
            // FORCING BI-DIRECTIONAL RIGHT NOW SO JUST IGNORE
            // ... except for cascading persistence down to all items in the
            // OneToMany collection
            boolean persistRelationship = false;
            OneToMany a = field.getGetter().getAnnotation(OneToMany.class);
            // FIX: null-guard the annotation lookup. The original dereferenced
            // a.cascade() unconditionally and threw NPE when the getter carried
            // no @OneToMany annotation (the @ManyToOne branch above guards).
            if (a != null && a.cascade() != null) {
                for (CascadeType type : a.cascade()) {
                    if (CascadeType.ALL == type || CascadeType.PERSIST == type) {
                        persistRelationship = true;
                    }
                }
            }
            if (persistRelationship && ob instanceof Collection) {
                for (Object _item : (Collection) ob) {
                    // persist each item in the collection
                    em.persist(_item);
                }
            }
        } else if (field.isLob()) {
            // large objects go to S3; SimpleDB stores only the S3 object key
            // todo: need to make sure we only store to S3 if it's changed,
            // too slow.
            logger.fine("putting lob to s3");
            long start3 = System.currentTimeMillis();
            AmazonS3 s3 = em.getS3Service();
            String bucketName = em.getS3BucketName();
            String s3ObjectId = id + "-" + field.getFieldName();
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bos);
            out.writeObject(ob);
            // FIX: close (and thereby flush) the ObjectOutputStream BEFORE
            // reading the bytes. The original called toByteArray() first,
            // which could upload a truncated payload while trailing bytes sat
            // in the stream's internal block-data buffer.
            out.close();
            byte[] contentBytes = bos.toByteArray();
            InputStream input = new ByteArrayInputStream(contentBytes);
            s3.putObject(bucketName, s3ObjectId, input, null);
            em.statsS3Put(System.currentTimeMillis() - start3);
            logger.finer("setting lobkeyattribute=" + columnName + " - " + s3ObjectId);
            attsToPut.add(new ReplaceableAttribute(columnName, s3ObjectId, true));
        } else if (field.getEnumType() != null) {
            String toSet = getEnumValue(field, o);
            attsToPut.add(new ReplaceableAttribute(columnName, toSet, true));
        } else if (field.isId()) {
            // the id is the SimpleDB item name itself; never stored as an attribute
            continue;
        } else if (Collection.class.isInstance(ob)) {
            for (Object each : ((Collection) ob)) {
                String toSet = each != null ? em.padOrConvertIfRequired(each) : "";
                // todo: throw an exception if this is going to exceed
                // maximum size, suggest using @Lob
                attsToPut.add(new ReplaceableAttribute(columnName, toSet, true));
            }
        } else {
            // ob is known non-null here (checked at the top of the loop), so
            // the original's redundant null ternary is dropped.
            String toSet = em.padOrConvertIfRequired(ob);
            // todo: throw an exception if this is going to exceed maximum
            // size, suggest using @Lob
            attsToPut.add(new ReplaceableAttribute(columnName, toSet, true));
        }
    }
    // Now finally send it for storage (if we have attributes to add)
    long start2 = System.currentTimeMillis();
    long duration2;
    if (!attsToPut.isEmpty()) {
        this.em.getSimpleDb().putAttributes(new PutAttributesRequest().withDomainName(domainName).withItemName(id).withAttributes(attsToPut).withExpected(expected));
        duration2 = System.currentTimeMillis() - start2;
        if (logger.isLoggable(Level.FINE))
            logger.fine("putAttributes time=" + (duration2));
        em.statsAttsPut(attsToPut.size(), duration2);
        // only bump the in-memory version once the conditional put succeeded
        if (null != versionField)
            versionField.setProperty(o, nextVersion);
    }
    /*
     * Check for nulled attributes so we can send a delete call. Don't
     * delete attributes if this is a new object AND don't delete atts if
     * it's not dirty AND don't delete if no nulls were set (nulledField on
     * LazyInterceptor)
     */
    if (interceptor != null) {
        if (interceptor.getNulledFields() != null && interceptor.getNulledFields().size() > 0) {
            List<Attribute> attsToDelete2 = new ArrayList<Attribute>();
            for (String s : interceptor.getNulledFields().keySet()) {
                String columnName = ai.getPersistentProperty(s).getColumnName();
                attsToDelete2.add(new Attribute(columnName, null));
            }
            start2 = System.currentTimeMillis();
            this.em.getSimpleDb().deleteAttributes(new DeleteAttributesRequest().withDomainName(domainName).withItemName(id).withAttributes(attsToDelete2));
            // todo: what about lobs? need to delete from s3
            duration2 = System.currentTimeMillis() - start2;
            logger.fine("deleteAttributes time=" + (duration2));
            em.statsAttsDeleted(attsToDelete2.size(), duration2);
        } else {
            logger.fine("deleteAttributes time= no nulled fields, nothing to delete.");
        }
    } else {
        if (!newObject && attsToDelete.size() > 0) {
            // not enhanced, but still have to deal with deleted attributes
            start2 = System.currentTimeMillis();
            this.em.getSimpleDb().deleteAttributes(new DeleteAttributesRequest().withDomainName(domainName).withItemName(id).withAttributes(attsToDelete));
            // todo: what about lobs? need to delete from s3
            duration2 = System.currentTimeMillis() - start2;
            logger.fine("deleteAttributes time=" + (duration2));
            em.statsAttsDeleted(attsToDelete.size(), duration2);
        }
    }
    if (interceptor != null) {
        // reset the interceptor since we're all synced with the db now
        interceptor.reset();
    }
    em.invokeEntityListener(o, newObject ? PostPersist.class : PostUpdate.class);
    if (logger.isLoggable(Level.FINE))
        logger.fine("persistOnly time=" + (System.currentTimeMillis() - start));
}
Usage of com.amazonaws.services.s3.AmazonS3 in project simplejpa (by appoxy):
class EntityManagerSimpleJPA, method getObjectFromS3.
/**
 * Downloads and deserializes a Java object previously stored to S3 under the
 * given key (as written by the @Lob persistence path).
 *
 * @param idOnS3 the S3 object key to fetch from the configured bucket
 * @return the deserialized object
 * @throws AmazonClientException  on S3 client failure
 * @throws IOException            if reading or deserializing fails
 * @throws ClassNotFoundException if the serialized class is not on the classpath
 */
public Object getObjectFromS3(String idOnS3) throws AmazonClientException, IOException, ClassNotFoundException {
    long start = System.currentTimeMillis();
    AmazonS3 s3 = factory.getS3Service();
    S3Object s3o = s3.getObject(factory.getS3BucketName(), idOnS3);
    logger.fine("got s3object=" + s3o);
    Object ret = null;
    // FIX: close the ObjectInputStream itself (which closes the wrapped S3
    // content stream). The original leaked the reader and only closed the raw
    // content stream in the finally block.
    ObjectInputStream reader = null;
    try {
        reader = new ObjectInputStream(new BufferedInputStream(s3o.getObjectContent()));
        ret = reader.readObject();
    } finally {
        if (reader != null) {
            reader.close();
        } else {
            // reader construction failed; still release the HTTP connection
            s3o.getObjectContent().close();
        }
    }
    statsS3Get(System.currentTimeMillis() - start);
    return ret;
}
Usage of com.amazonaws.services.s3.AmazonS3 in project crate (by crate):
class S3ClientHelper, method client.
/**
 * Returns a cached AmazonS3 client for the given credential pair, creating
 * and caching a new one on first use. Credentials may be null (anonymous /
 * default-chain access, depending on initClient).
 *
 * @param accessKey AWS access key, or null
 * @param secretKey AWS secret key, or null
 * @return a client bound to these credentials
 * @throws IOException if client initialization fails
 */
private AmazonS3 client(@Nullable String accessKey, @Nullable String secretKey) throws IOException {
    final int cacheKey = hash(accessKey, secretKey);
    AmazonS3 cached = clientMap.get(cacheKey);
    if (cached != null) {
        return cached;
    }
    AmazonS3 fresh = initClient(accessKey, secretKey);
    clientMap.put(cacheKey, fresh);
    return fresh;
}
Usage of com.amazonaws.services.s3.AmazonS3 in project elasticsearch (by elastic):
class AbstractS3SnapshotRestoreTest, method cleanRepositoryFiles.
/**
 * Deletes the content of the repository files in the bucket.
 *
 * Iterates every configured S3 repository bucket variant
 * ({@code repositories.s3.*} settings prefixes) and removes all objects
 * under {@code basePath} using paged listings and batched multi-object
 * delete requests.
 *
 * @param basePath key prefix under which the repository files live
 */
public void cleanRepositoryFiles(String basePath) {
Settings settings = internalCluster().getInstance(Settings.class);
// one Settings view per configured bucket variant (default, private,
// remote, external)
Settings[] buckets = { settings.getByPrefix("repositories.s3."), settings.getByPrefix("repositories.s3.private-bucket."), settings.getByPrefix("repositories.s3.remote-bucket."), settings.getByPrefix("repositories.s3.external-bucket.") };
for (Settings bucket : buckets) {
String bucketName = bucket.get("bucket");
// We check that settings has been set in elasticsearch.yml integration test file
// as described in README
assertThat("Your settings in elasticsearch.yml are incorrects. Check README file.", bucketName, notNullValue());
AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(Settings.EMPTY, null, randomBoolean(), null);
try {
ObjectListing prevListing = null;
//From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html
//we can do at most 1K objects per delete
//We don't know the bucket name until first object listing
DeleteObjectsRequest multiObjectDeleteRequest = null;
ArrayList<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<DeleteObjectsRequest.KeyVersion>();
// page through the listing; prevListing == null means first page
while (true) {
ObjectListing list;
if (prevListing != null) {
list = client.listNextBatchOfObjects(prevListing);
} else {
list = client.listObjects(bucketName, basePath);
// delete request targets the bucket name reported by the listing
multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
}
for (S3ObjectSummary summary : list.getObjectSummaries()) {
keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
//Every 500 objects batch the delete request
if (keys.size() > 500) {
multiObjectDeleteRequest.setKeys(keys);
client.deleteObjects(multiObjectDeleteRequest);
// start a fresh request/key batch for the next chunk
multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
keys.clear();
}
}
if (list.isTruncated()) {
prevListing = list;
} else {
break;
}
}
// flush any keys left over from the final partial batch
if (!keys.isEmpty()) {
multiObjectDeleteRequest.setKeys(keys);
client.deleteObjects(multiObjectDeleteRequest);
}
} catch (Exception ex) {
// best-effort cleanup: log and continue with the next bucket
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to delete S3 repository [{}]", bucketName), ex);
}
}
}
Usage of com.amazonaws.services.s3.AmazonS3 in project elasticsearch (by elastic):
class AbstractS3SnapshotRestoreTest, method testEncryption.
// Integration test: snapshots indices into an S3 repository configured with
// server-side encryption, verifies every stored object reports SSE AES256,
// then exercises restore after deletes/closes and after full index deletion.
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
public void testEncryption() {
Client client = client();
logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
// repository with server-side encryption enabled and a small random chunk size
Settings repositorySettings = Settings.builder().put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath).put(S3Repository.Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000)).put(S3Repository.Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true).build();
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo").setType("s3").setSettings(repositorySettings).get();
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
createIndex("test-idx-1", "test-idx-2", "test-idx-3");
ensureGreen();
logger.info("--> indexing some data");
// 100 docs per index so hit counts below are deterministic
for (int i = 0; i < 100; i++) {
index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
}
refresh();
assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
logger.info("--> snapshot");
// snapshot only test-idx-1 and test-idx-2 (test-idx-3 excluded)
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
Settings settings = internalCluster().getInstance(Settings.class);
Settings bucket = settings.getByPrefix("repositories.s3.");
AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(repositorySettings, null, randomBoolean(), null);
String bucketName = bucket.get("bucket");
logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath);
// every object written by the snapshot must report SSE algorithm AES256
List<S3ObjectSummary> summaries = s3Client.listObjects(bucketName, basePath).getObjectSummaries();
for (S3ObjectSummary summary : summaries) {
assertThat(s3Client.getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(), equalTo("AES256"));
}
logger.info("--> delete some data");
// mutate the live indices so a successful restore is observable
for (int i = 0; i < 50; i++) {
client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
}
for (int i = 50; i < 100; i++) {
client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
}
for (int i = 0; i < 100; i += 2) {
client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
}
refresh();
assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(50L));
assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(50L));
assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(50L));
logger.info("--> close indices");
client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();
logger.info("--> restore all indices from the snapshot");
RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
ensureGreen();
// idx-1 and idx-2 restored to 100 docs; idx-3 was never snapshotted so it
// keeps its post-delete count of 50
assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(50L));
// Test restore after index deletion
logger.info("--> delete indices");
cluster().wipeIndices("test-idx-1", "test-idx-2");
logger.info("--> restore one index after deletion");
restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
ensureGreen();
assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
// only test-idx-1 was restored; test-idx-2 stays deleted
assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
}
Aggregations