use of org.apache.jackrabbit.core.data.util.NamedThreadFactory in project jackrabbit by apache.
the class TestLocalCache method testConcurrentInitWithStore.
/**
 * Test concurrent initialization of a {@link LocalCache} while entries are
 * being stored into it.
 */
public void testConcurrentInitWithStore() {
    try {
        AsyncUploadCache pendingFiles = new AsyncUploadCache();
        pendingFiles.init(tempDirPath, cacheDirPath, 100);
        pendingFiles.reset();
        LocalCache cache = new LocalCache(cacheDirPath, tempDirPath, 10000000, 0.95, 0.70, pendingFiles);
        Random random = new Random(12345);
        int fileUploads = 1000;
        Map<String, byte[]> byteMap = new HashMap<String, byte[]>(fileUploads);
        byte[] data;
        // Populate the cache with deterministic random entries, then close it
        // so the next LocalCache instance has an on-disk state to rebuild from.
        for (int i = 0; i < fileUploads; i++) {
            data = new byte[100];
            random.nextBytes(data);
            String key = "a" + i;
            byteMap.put(key, data);
            cache.store(key, new ByteArrayInputStream(byteMap.get(key)));
        }
        cache.close();
        ExecutorService executor = Executors.newFixedThreadPool(10, new NamedThreadFactory("localcache-store-worker"));
        // Re-initialize the cache and store into it concurrently while it is
        // still rebuilding its state.
        cache = new LocalCache(cacheDirPath, tempDirPath, 10000000, 0.95, 0.70, pendingFiles);
        executor.execute(new StoreWorker(cache, byteMap));
        executor.shutdown();
        // awaitTermination returns false on timeout, so loop until the worker is done.
        while (!executor.awaitTermination(15, TimeUnit.SECONDS)) {
        }
    } catch (Exception e) {
        LOG.error("error:", e);
        fail(e.getMessage());
    }
}
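The StoreWorker used by the test is not part of this snippet. Below is a minimal sketch of what such a worker might look like, assuming LocalCache.store accepts a key and an InputStream as in the test above; the class structure here is hypothetical, not the actual TestLocalCache code.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Map;

import org.apache.jackrabbit.core.data.LocalCache;

// Hypothetical sketch; the real StoreWorker in TestLocalCache is not shown here.
class StoreWorker implements Runnable {

    private final LocalCache cache;
    private final Map<String, byte[]> byteMap;

    StoreWorker(LocalCache cache, Map<String, byte[]> byteMap) {
        this.cache = cache;
        this.byteMap = byteMap;
    }

    @Override
    public void run() {
        try {
            // Replay every generated entry against the re-initialized cache,
            // racing with the cache's own asynchronous rebuild.
            for (Map.Entry<String, byte[]> entry : byteMap.entrySet()) {
                cache.store(entry.getKey(), new ByteArrayInputStream(entry.getValue()));
            }
        } catch (IOException e) {
            // Assumes store can fail with IOException; propagate so the test fails.
            throw new RuntimeException(e);
        }
    }
}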
use of org.apache.jackrabbit.core.data.util.NamedThreadFactory in project jackrabbit by apache.
the class S3Backend method init.
public void init(CachingDataStore store, String homeDir, Properties prop) throws DataStoreException {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        startTime = new Date();
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        LOG.debug("init");
        setDataStore(store);
        s3ReqDecorator = new S3RequestDecorator(prop);
        s3service = Utils.openService(prop);
        if (bucket == null || "".equals(bucket.trim())) {
            bucket = prop.getProperty(S3Constants.S3_BUCKET);
        }
        // Resolve the S3 region: prefer the configured property, otherwise fall
        // back to the region of the EC2 instance this code is running on.
        String region = prop.getProperty(S3Constants.S3_REGION);
        Region s3Region = null;
        if (StringUtils.isNullOrEmpty(region)) {
            com.amazonaws.regions.Region ec2Region = Regions.getCurrentRegion();
            if (ec2Region != null) {
                s3Region = Region.fromValue(ec2Region.getName());
            } else {
                throw new AmazonClientException("parameter [" + S3Constants.S3_REGION + "] not configured and cannot be derived from environment");
            }
        } else {
            if (Utils.DEFAULT_AWS_BUCKET_REGION.equals(region)) {
                s3Region = Region.US_Standard;
            } else if (Region.EU_Ireland.toString().equals(region)) {
                s3Region = Region.EU_Ireland;
            } else {
                s3Region = Region.fromValue(region);
            }
        }
        if (!s3service.doesBucketExist(bucket)) {
            s3service.createBucket(bucket, s3Region);
            LOG.info("Created bucket [{}] in [{}] ", bucket, region);
        } else {
            LOG.info("Using bucket [{}] in [{}] ", bucket, region);
        }
        int writeThreads = 10;
        String writeThreadsStr = prop.getProperty(S3Constants.S3_WRITE_THREADS);
        if (writeThreadsStr != null) {
            writeThreads = Integer.parseInt(writeThreadsStr);
        }
        LOG.info("Using thread pool of [{}] threads in S3 transfer manager.", writeThreads);
        tmx = new TransferManager(s3service, (ThreadPoolExecutor) Executors.newFixedThreadPool(writeThreads, new NamedThreadFactory("s3-transfer-manager-worker")));
        // The async write pool gets whatever connections remain once the
        // transfer manager's write threads are accounted for.
        int asyncWritePoolSize = 10;
        String maxConnsStr = prop.getProperty(S3Constants.S3_MAX_CONNS);
        if (maxConnsStr != null) {
            asyncWritePoolSize = Integer.parseInt(maxConnsStr) - writeThreads;
        }
        setAsyncWritePoolSize(asyncWritePoolSize);
        String renameKeyProp = prop.getProperty(S3Constants.S3_RENAME_KEYS);
        boolean renameKeyBool = (renameKeyProp == null || "".equals(renameKeyProp)) ? false : Boolean.parseBoolean(renameKeyProp);
        LOG.info("Rename keys [{}]", renameKeyBool);
        if (renameKeyBool) {
            renameKeys();
        }
        LOG.debug("S3 Backend initialized in [{}] ms", System.currentTimeMillis() - startTime.getTime());
    } catch (Exception e) {
        LOG.debug("error", e);
        throw new DataStoreException("Could not initialize S3 from " + prop, e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
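Both snippets pass a NamedThreadFactory so that pool threads carry a recognizable name in thread dumps and logs. Below is a minimal sketch of such a factory, assuming only the behavior implied by its use above; the actual Jackrabbit implementation may differ in details such as daemon status or the exact name pattern.

import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch of a naming ThreadFactory; the real
// org.apache.jackrabbit.core.data.util.NamedThreadFactory may differ.
class NamedThreadFactorySketch implements ThreadFactory {

    private final AtomicInteger threadNumber = new AtomicInteger(1);
    private final String namePrefix;

    NamedThreadFactorySketch(String namePrefix) {
        this.namePrefix = namePrefix;
    }

    @Override
    public Thread newThread(Runnable r) {
        // Yields names like "s3-transfer-manager-worker-1", "-2", ...
        return new Thread(r, namePrefix + "-" + threadNumber.getAndIncrement());
    }
}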
use of org.apache.jackrabbit.core.data.util.NamedThreadFactory in project jackrabbit-oak by apache.
the class S3Backend method renameKeys.
/**
 * Renames object keys in S3 concurrently. The number of concurrent threads
 * is defined by the 'maxConnections' property in aws.properties. Since S3
 * has no native "move" operation, a move is simulated by copying the object
 * to the new key and then deleting the old key.
 */
private void renameKeys() throws DataStoreException {
    long startTime = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    long count = 0;
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectListing prevObjectListing = s3service.listObjects(bucket);
        List<DeleteObjectsRequest.KeyVersion> deleteList = new ArrayList<DeleteObjectsRequest.KeyVersion>();
        int nThreads = Integer.parseInt(properties.getProperty("maxConnections"));
        ExecutorService executor = Executors.newFixedThreadPool(nThreads, new NamedThreadFactory("s3-object-rename-worker"));
        boolean taskAdded = false;
        // Page through the bucket listing and schedule a rename task per object.
        while (true) {
            for (S3ObjectSummary s3ObjSumm : prevObjectListing.getObjectSummaries()) {
                executor.execute(new KeyRenameThread(s3ObjSumm.getKey()));
                taskAdded = true;
                count++;
                // Schedule the object for deletion if it follows the old key name format.
                if (s3ObjSumm.getKey().startsWith(KEY_PREFIX)) {
                    deleteList.add(new DeleteObjectsRequest.KeyVersion(s3ObjSumm.getKey()));
                }
            }
            if (!prevObjectListing.isTruncated()) {
                break;
            }
            prevObjectListing = s3service.listNextBatchOfObjects(prevObjectListing);
        }
        // Make the executor accept no new tasks and let the queued rename
        // tasks run to completion.
        executor.shutdown();
        try {
            // Wait until all rename tasks have finished.
            while (taskAdded && !executor.awaitTermination(10, TimeUnit.SECONDS)) {
                LOG.info("Rename S3 keys tasks timed out. Waiting again");
            }
        } catch (InterruptedException ie) {
            // Restore the interrupt status so callers can observe the interruption.
            Thread.currentThread().interrupt();
        }
        LOG.info("Renamed [{}] keys, time taken [{}]sec", count, ((System.currentTimeMillis() - startTime) / 1000));
        // Delete the old keys in batches of up to 500.
        if (deleteList.size() > 0) {
            DeleteObjectsRequest delObjsReq = new DeleteObjectsRequest(bucket);
            int batchSize = 500, startIndex = 0, size = deleteList.size();
            int endIndex = Math.min(batchSize, size);
            while (endIndex <= size) {
                delObjsReq.setKeys(Collections.unmodifiableList(deleteList.subList(startIndex, endIndex)));
                DeleteObjectsResult dobjs = s3service.deleteObjects(delObjsReq);
                LOG.info("Records[{}] deleted in datastore from index [{}] to [{}]", new Object[] { dobjs.getDeletedObjects().size(), startIndex, (endIndex - 1) });
                if (endIndex == size) {
                    break;
                } else {
                    startIndex = endIndex;
                    endIndex = Math.min(startIndex + batchSize, size);
                }
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
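The KeyRenameThread scheduled above is not shown in this snippet. Below is a hedged sketch of what each rename task might do, written as an inner class of S3Backend so it can use the enclosing s3service and bucket fields; getNewKey is a hypothetical stand-in for the actual old-to-new key mapping, which lives elsewhere in S3Backend.

// Hypothetical sketch; the real KeyRenameThread in S3Backend is not shown here.
private class KeyRenameThread implements Runnable {

    private final String oldKey;

    KeyRenameThread(String oldKey) {
        this.oldKey = oldKey;
    }

    @Override
    public void run() {
        // The copy is the first half of the simulated "move"; the old key is
        // deleted later in renameKeys() via the batched DeleteObjectsRequest.
        // getNewKey is a hypothetical helper, not the actual mapping logic.
        String newKey = getNewKey(oldKey);
        s3service.copyObject(bucket, oldKey, bucket, newKey);
    }
}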