Use of com.evolvedbinary.j8fu.Try in project exist by eXist-db.
The class BlobStoreImpl, method add:
@Override
public Tuple2<BlobId, Long> add(final Txn transaction, final InputStream is) throws IOException {
    if (state.get() != State.OPEN) {
        throw new IOException("Blob Store is not open!");
    }

    // stage the BLOB file
    final Tuple3<Path, Long, MessageDigest> staged = stage(is);
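    // NOTE: staged._1 is the path of the staged temporary file, staged._2 appears to be its
    // size in bytes (it is returned to the caller below), and staged._3 is the content digest
    // from which the BlobId is derived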
    final BlobVacuum.RequestDeleteStagedBlobFile requestDeleteStagedBlobFile =
            new BlobVacuum.RequestDeleteStagedBlobFile(stagingDir, staged._1.getFileName().toString());

    // register a callback to clean up the staged BLOB file ONLY after commit+checkpoint
    final JournalManager journalManager = database.getJournalManager().orElse(null);
    if (journalManager != null) {
        final DeleteStagedBlobFile cleanupStagedBlob = new DeleteStagedBlobFile(vacuumQueue, requestDeleteStagedBlobFile);
        journalManager.listen(cleanupStagedBlob);
        transaction.registerListener(cleanupStagedBlob);
    }

    final BlobId blobId = new BlobId(staged._3.getValue());

    // if the blob entry does not exist, we exclusively compute it as STAGED
    BlobReference blobReference = references.computeIfAbsent(blobId, k -> new BlobReference(STAGED));
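    // NOTE: STAGED, PROMOTING, UPDATING_COUNT and DELETING appear to be sentinel states held
    // in the same atomic count field as the real reference count (which is always >= 0,
    // see the "count >= 0" guard further below)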
    try {
        while (true) {
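            // spin on the small state machine held in blobReference.count: either promote the
            // freshly staged blob (STAGED -> PROMOTING -> 1), or increment the count of an
            // already active blob (count -> UPDATING_COUNT -> count + 1)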
            if (blobReference.count.compareAndSet(STAGED, PROMOTING)) {
                // write journal entries to the WAL
                if (journalManager != null) {
                    try {
                        journalManager.journal(new StoreBlobFileLoggable(transaction.getId(), blobId, staged._1.getFileName().toString()));
                        journalManager.journal(new UpdateBlobRefCountLoggable(transaction.getId(), blobId, 0, 1));

                        // force WAL entries to disk!
                        journalManager.flush(true, true);
                    } catch (final JournalException e) {
                        references.remove(blobId);
                        throw new IOException(e);
                    }
                }

                // promote the staged blob
                promote(staged);
                if (journalManager == null) {
                    // no journal (or recovery)... so go ahead and schedule cleanup of the staged blob file
                    enqueueVacuum(vacuumQueue, requestDeleteStagedBlobFile);
                }

                // schedule disk persist of the new value
                persistQueue.put(Tuple(blobId, blobReference, 1));

                // update memory with the new value
                blobReference.count.set(1);
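                // NOTE: the in-memory count is only set after the WAL has been flushed and the
                // persist task enqueued above, so threads spinning on PROMOTING are released
                // only once the new value is on its way to disk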
                // done!
                return Tuple(blobId, staged._2);
            }

            final int count = blobReference.count.get();

            // guard against a concurrent #add or #remove
            if (count == PROMOTING || count == UPDATING_COUNT) {
                // spin whilst another thread promotes the blob, or updates the reference count
                // sleep a small time to save CPU
                Thread.sleep(10);
                continue;
            }

            if (count == DELETING) {
                // i.e. wait for the deletion of the blob to complete, and then we can add the blob again
                blobReference = references.computeIfAbsent(blobId, k -> new BlobReference(STAGED));
                // loop again
                continue;
            }

            // only increment the blob reference if the blob is active!
            if (count >= 0 && blobReference.count.compareAndSet(count, UPDATING_COUNT)) {
                // NOTE: we are the only thread that can be in this branch for the blobId
                final int newCount = count + 1;

                // write journal entries to the WAL
                if (journalManager != null) {
                    try {
                        journalManager.journal(new UpdateBlobRefCountLoggable(transaction.getId(), blobId, count, newCount));

                        // force WAL entries to disk!
                        journalManager.flush(true, true);
                    } catch (final JournalException e) {
                        // restore the state of the blobReference first!
                        blobReference.count.set(count);
                        throw new IOException(e);
                    }
                }

                // persist the new value
                persistQueue.put(Tuple(blobId, blobReference, newCount));

                // update memory with the new value, and release other spinning threads
                blobReference.count.set(newCount);

                // done!
                return Tuple(blobId, staged._2);
            }
        }
    } catch (final InterruptedException e) {
        // thrown by persistQueue.put or Thread.sleep
        Thread.currentThread().interrupt();
        throw new IOException(e);
    }
}
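For context, a minimal usage sketch follows. It assumes eXist's transaction API (an auto-closeable Txn obtained from a TransactionManager) and a BlobStore reference obtained elsewhere; the storeBlob helper, the blobStore and transactionManager fields, and the omitted imports are illustrative and not part of the snippet above.

// minimal usage sketch (hypothetical helper; blobStore and transactionManager would come
// from the surrounding eXist component, e.g. the BrokerPool)
public BlobId storeBlob(final byte[] data) throws IOException, TransactionException {
    try (final Txn transaction = transactionManager.beginTransaction();
            final InputStream is = new ByteArrayInputStream(data)) {

        // add returns the content-addressed BlobId and the number of bytes stored
        final Tuple2<BlobId, Long> blobIdAndLen = blobStore.add(transaction, is);

        transaction.commit();
        return blobIdAndLen._1;
    }
}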