Use of io.pravega.segmentstore.storage.metadata.ChunkMetadata in project pravega by pravega.
The class WriteOperation, method updateMetadataForChunkAddition.
/**
* Updates the segment metadata for the newly added chunk.
*/
private ChunkMetadata updateMetadataForChunkAddition(MetadataTransaction txn, SegmentMetadata segmentMetadata, String newChunkName, boolean isFirstWriteAfterFailover, ChunkMetadata lastChunkMetadata) {
    ChunkMetadata newChunkMetadata = ChunkMetadata.builder().name(newChunkName).build();
    newChunkMetadata.setActive(true);
    segmentMetadata.setLastChunk(newChunkName);
    if (lastChunkMetadata == null) {
        segmentMetadata.setFirstChunk(newChunkName);
    } else {
        lastChunkMetadata.setNextChunk(newChunkName);
        txn.update(lastChunkMetadata);
    }
    segmentMetadata.setLastChunkStartOffset(segmentMetadata.getLength());
    // Reset ownershipChanged flag after first write is done.
    if (isFirstWriteAfterFailover) {
        segmentMetadata.setOwnerEpoch(chunkedSegmentStorage.getEpoch());
        segmentMetadata.setOwnershipChanged(false);
        log.debug("{} write - First write after failover - op={}, segment={}.", chunkedSegmentStorage.getLogPrefix(), System.identityHashCode(this), segmentMetadata.getName());
    }
    segmentMetadata.setChunkCount(segmentMetadata.getChunkCount() + 1);
    // Update the transaction.
    txn.create(newChunkMetadata);
    txn.update(segmentMetadata);
    return newChunkMetadata;
}
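The chunks of a segment form a singly linked list: each ChunkMetadata record points to the next chunk by name, and the segment metadata tracks the first and last chunk. Below is a minimal, self-contained sketch of that linking, using only the ChunkMetadata calls visible in the method above; the chunk names are made up for illustration.

    import io.pravega.segmentstore.storage.metadata.ChunkMetadata;

    public class ChunkLinkingSketch {
        public static void main(String[] args) {
            // Existing last chunk of the segment (hypothetical name).
            ChunkMetadata lastChunk = ChunkMetadata.builder().name("segment-chunk-0").build();
            lastChunk.setActive(true);

            // Newly added chunk, created the same way updateMetadataForChunkAddition does.
            ChunkMetadata newChunk = ChunkMetadata.builder().name("segment-chunk-1").build();
            newChunk.setActive(true);

            // Link the previous last chunk to the new one; in the real method this is
            // followed by txn.update(lastChunk) and txn.create(newChunk).
            lastChunk.setNextChunk(newChunk.getName());

            System.out.println(lastChunk.getName() + " -> " + lastChunk.getNextChunk());
        }
    }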
Use of io.pravega.segmentstore.storage.metadata.ChunkMetadata in project pravega by pravega.
The class GarbageCollector, method deleteSegment.
/**
* Perform delete segment related tasks.
*/
private CompletableFuture<Void> deleteSegment(TaskInfo taskInfo) {
    val streamSegmentName = taskInfo.getName();
    val txn = metadataStore.beginTransaction(true, streamSegmentName);
    return txn.get(streamSegmentName).thenComposeAsync(storageMetadata -> {
        val segmentMetadata = (SegmentMetadata) storageMetadata;
        if (null == segmentMetadata) {
            log.debug("{}: deleteGarbage - Segment metadata does not exist. segment={}.", traceObjectId, streamSegmentName);
            return CompletableFuture.completedFuture(null);
        } else if (segmentMetadata.isActive()) {
            log.debug("{}: deleteGarbage - Segment is not marked as deleted. segment={}.", traceObjectId, streamSegmentName);
            return CompletableFuture.completedFuture(null);
        } else {
            val chunksToDelete = Collections.synchronizedSet(new HashSet<String>());
            val currentBatch = Collections.synchronizedSet(new HashSet<ChunkMetadata>());
            val currentChunkName = new AtomicReference<String>(segmentMetadata.getFirstChunk());
            return Futures.loop(
                    () -> null != currentChunkName.get(),
                    () -> txn.get(currentChunkName.get()).thenComposeAsync(metadata -> {
                        val chunkMetadata = (ChunkMetadata) metadata;
                        CompletableFuture<Void> retFuture = CompletableFuture.completedFuture(null);
                        // Skip if metadata is possibly deleted in last attempt, we are done.
                        if (null == chunkMetadata) {
                            currentChunkName.set(null);
                            return retFuture;
                        }
                        // Add to list of chunks to delete
                        chunksToDelete.add(chunkMetadata.getName());
                        // Add to batch and commit batch if required.
                        currentBatch.add(chunkMetadata);
                        if (chunkMetadata.isActive()) {
                            if (currentBatch.size() > config.getGarbageCollectionTransactionBatchSize()) {
                                // Commit batch
                                retFuture = addTransactionForUpdateBatch(currentBatch, streamSegmentName);
                                // Clear batch
                                currentBatch.clear();
                            }
                        }
                        // Move next
                        currentChunkName.set(chunkMetadata.getNextChunk());
                        return retFuture;
                    }, storageExecutor),
                    storageExecutor)
                    .thenComposeAsync(v -> {
                        if (currentBatch.size() > 0) {
                            return addTransactionForUpdateBatch(currentBatch, streamSegmentName);
                        }
                        return CompletableFuture.completedFuture(null);
                    }, storageExecutor)
                    .thenComposeAsync(v -> this.addChunksToGarbage(txn.getVersion(), chunksToDelete), storageExecutor)
                    .thenComposeAsync(v -> deleteBlockIndexEntriesForSegment(streamSegmentName, segmentMetadata.getStartOffset(), segmentMetadata.getLength()))
                    .thenComposeAsync(v -> {
                        val innerTxn = metadataStore.beginTransaction(false, segmentMetadata.getName());
                        innerTxn.delete(segmentMetadata.getName());
                        return innerTxn.commit().whenCompleteAsync((vv, ex) -> innerTxn.close(), storageExecutor);
                    }, storageExecutor)
                    .handleAsync((v, e) -> {
                        txn.close();
                        if (null != e) {
                            log.error(String.format("%s deleteGarbage - Could not delete metadata for garbage segment=%s.", traceObjectId, streamSegmentName), e);
                            return true;
                        }
                        return false;
                    }, storageExecutor)
                    .thenComposeAsync(failed -> {
                        if (failed) {
                            if (taskInfo.getAttempts() < config.getGarbageCollectionMaxAttempts()) {
                                val attempts = taskInfo.attempts + 1;
                                SLTS_GC_SEGMENT_RETRY.inc();
                                return addSegmentToGarbage(taskInfo.toBuilder().attempts(attempts).build());
                            } else {
                                SLTS_GC_SEGMENT_FAILED.inc();
                                log.info("{}: deleteGarbage - could not delete after max attempts segment={}.", traceObjectId, taskInfo.getName());
                                return failTask(taskInfo);
                            }
                        } else {
                            SLTS_GC_SEGMENT_PROCESSED.inc();
                            return CompletableFuture.completedFuture(null);
                        }
                    }, storageExecutor);
        }
    }, storageExecutor);
}
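deleteSegment walks the segment's chunk chain starting at getFirstChunk(), collecting every chunk name so the chunks can later be handed to the garbage collector in batches. The following is a simplified, synchronous sketch of that traversal; the HashMap is only a stand-in for the asynchronous txn.get(...) lookups in the real code, and the chunk names are hypothetical.

    import io.pravega.segmentstore.storage.metadata.ChunkMetadata;

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ChunkChainWalkSketch {
        public static void main(String[] args) {
            // Stand-in for the metadata store: chunk name -> ChunkMetadata.
            Map<String, ChunkMetadata> store = new HashMap<>();

            ChunkMetadata c0 = ChunkMetadata.builder().name("chunk-0").build();
            ChunkMetadata c1 = ChunkMetadata.builder().name("chunk-1").build();
            c0.setActive(true);
            c1.setActive(true);
            c0.setNextChunk(c1.getName());
            store.put(c0.getName(), c0);
            store.put(c1.getName(), c1);

            // Walk the chain like the Futures.loop above, but synchronously.
            List<String> chunksToDelete = new ArrayList<>();
            String current = "chunk-0"; // would be segmentMetadata.getFirstChunk()
            while (current != null) {
                ChunkMetadata metadata = store.get(current);
                if (metadata == null) {
                    break; // possibly deleted in a previous attempt
                }
                chunksToDelete.add(metadata.getName());
                current = metadata.getNextChunk();
            }
            System.out.println(chunksToDelete); // [chunk-0, chunk-1]
        }
    }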
Use of io.pravega.segmentstore.storage.metadata.ChunkMetadata in project pravega by pravega.
The class GarbageCollector, method deleteChunk.
private CompletableFuture<Void> deleteChunk(TaskInfo infoToDelete) {
    val chunkToDelete = infoToDelete.name;
    val failed = new AtomicReference<Throwable>();
    val txn = metadataStore.beginTransaction(false, chunkToDelete);
    return txn.get(infoToDelete.name).thenComposeAsync(metadata -> {
        val chunkMetadata = (ChunkMetadata) metadata;
        // Delete if the chunk is not present at all in the metadata or is present but marked as inactive.
        val shouldDeleteChunk = null == chunkMetadata || !chunkMetadata.isActive();
        val shouldDeleteMetadata = new AtomicBoolean(null != metadata && !chunkMetadata.isActive());
        // Delete chunk from storage.
        if (shouldDeleteChunk) {
            return chunkStorage.delete(ChunkHandle.writeHandle(chunkToDelete)).handleAsync((v, e) -> {
                if (e != null) {
                    val ex = Exceptions.unwrap(e);
                    if (ex instanceof ChunkNotFoundException) {
                        // Ignore - nothing to do here.
                        log.debug("{}: deleteGarbage - Could not delete garbage chunk={}.", traceObjectId, chunkToDelete);
                    } else {
                        log.warn("{}: deleteGarbage - Could not delete garbage chunk={}.", traceObjectId, chunkToDelete);
                        shouldDeleteMetadata.set(false);
                        failed.set(e);
                    }
                } else {
                    SLTS_GC_CHUNK_DELETED.inc();
                    log.debug("{}: deleteGarbage - deleted chunk={}.", traceObjectId, chunkToDelete);
                }
                return v;
            }, storageExecutor).thenRunAsync(() -> {
                if (shouldDeleteMetadata.get()) {
                    txn.delete(chunkToDelete);
                    log.debug("{}: deleteGarbage - deleted metadata for chunk={}.", traceObjectId, chunkToDelete);
                }
            }, storageExecutor).thenComposeAsync(v -> txn.commit(), storageExecutor).handleAsync((v, e) -> {
                if (e != null) {
                    log.error(String.format("%s deleteGarbage - Could not delete metadata for garbage chunk=%s.", traceObjectId, chunkToDelete), e);
                    failed.set(e);
                }
                return v;
            }, storageExecutor);
        } else {
            log.debug("{}: deleteGarbage - Chunk is not marked as garbage chunk={}.", traceObjectId, chunkToDelete);
            return CompletableFuture.completedFuture(null);
        }
    }, storageExecutor).thenComposeAsync(v -> {
        if (failed.get() != null) {
            if (infoToDelete.getAttempts() < config.getGarbageCollectionMaxAttempts()) {
                log.debug("{}: deleteGarbage - adding back chunk={}.", traceObjectId, chunkToDelete);
                SLTS_GC_CHUNK_RETRY.inc();
                return addChunkToGarbage(txn.getVersion(), chunkToDelete, infoToDelete.getScheduledTime() + config.getGarbageCollectionDelay().toMillis(), infoToDelete.getAttempts() + 1);
            } else {
                SLTS_GC_CHUNK_FAILED.inc();
                log.info("{}: deleteGarbage - could not delete after max attempts chunk={}.", traceObjectId, chunkToDelete);
                return failTask(infoToDelete);
            }
        }
        return CompletableFuture.completedFuture(null);
    }, storageExecutor).whenCompleteAsync((v, ex) -> {
        if (ex != null) {
            log.error(String.format("%s deleteGarbage - Could not find garbage chunk=%s.", traceObjectId, chunkToDelete), ex);
        }
        txn.close();
    }, storageExecutor);
}
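The key decision in deleteChunk rests entirely on the ChunkMetadata lookup: the chunk is deleted from storage when its metadata is absent or marked inactive, and the metadata row itself is deleted only when it exists and is inactive. A small sketch of just that decision, with no storage calls; the helper names and the chunk name are illustrative only.

    import io.pravega.segmentstore.storage.metadata.ChunkMetadata;

    public class DeleteDecisionSketch {
        // Mirrors the two booleans computed at the top of deleteChunk.
        static boolean shouldDeleteChunk(ChunkMetadata metadata) {
            return metadata == null || !metadata.isActive();
        }

        static boolean shouldDeleteMetadata(ChunkMetadata metadata) {
            return metadata != null && !metadata.isActive();
        }

        public static void main(String[] args) {
            ChunkMetadata inactive = ChunkMetadata.builder().name("garbage-chunk").build();
            inactive.setActive(false);

            System.out.println(shouldDeleteChunk(null));        // true  - no metadata at all
            System.out.println(shouldDeleteChunk(inactive));    // true  - marked inactive
            System.out.println(shouldDeleteMetadata(null));     // false - nothing to delete
            System.out.println(shouldDeleteMetadata(inactive)); // true  - stale row to remove
        }
    }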
Use of io.pravega.segmentstore.storage.metadata.ChunkMetadata in project pravega by pravega.
The class ChunkedSegmentStorage, method claimOwnership.
/**
 * Checks ownership and adjusts the length of the segment if required.
 *
 * @param txn Active {@link MetadataTransaction}.
 * @param segmentMetadata {@link SegmentMetadata} for the segment to change ownership for.
 * @throws ChunkStorageException In case of any chunk storage related errors.
 * @throws StorageMetadataException In case of any chunk metadata store related errors.
 */
private CompletableFuture<Void> claimOwnership(MetadataTransaction txn, SegmentMetadata segmentMetadata) {
    // Get the last chunk.
    val lastChunkName = segmentMetadata.getLastChunk();
    final CompletableFuture<Boolean> f;
    if (shouldAppend() && null != lastChunkName) {
        f = txn.get(lastChunkName).thenComposeAsync(storageMetadata -> {
            val lastChunk = (ChunkMetadata) storageMetadata;
            Preconditions.checkState(null != lastChunk, "last chunk metadata must not be null.");
            Preconditions.checkState(null != lastChunk.getName(), "Name of last chunk must not be null.");
            log.debug("{} claimOwnership - current last chunk - segment={}, last chunk={}, Length={}.", logPrefix, segmentMetadata.getName(), lastChunk.getName(), lastChunk.getLength());
            return chunkStorage.getInfo(lastChunkName).thenApplyAsync(chunkInfo -> {
                Preconditions.checkState(chunkInfo != null, "chunkInfo for last chunk must not be null.");
                Preconditions.checkState(lastChunk != null, "last chunk metadata must not be null.");
                // Adjust its length.
                if (chunkInfo.getLength() != lastChunk.getLength()) {
                    Preconditions.checkState(chunkInfo.getLength() > lastChunk.getLength(), "Length of last chunk on LTS must be greater than what is in metadata. Chunk=%s length=%s", lastChunk, chunkInfo.getLength());
                    // Whatever length you see right now is the final "sealed" length of the last chunk.
                    val oldLength = segmentMetadata.getLength();
                    lastChunk.setLength(chunkInfo.getLength());
                    segmentMetadata.setLength(segmentMetadata.getLastChunkStartOffset() + lastChunk.getLength());
                    if (!segmentMetadata.isStorageSystemSegment()) {
                        addBlockIndexEntriesForChunk(txn, segmentMetadata.getName(), lastChunk.getName(), segmentMetadata.getLastChunkStartOffset(), oldLength, segmentMetadata.getLength());
                    }
                    txn.update(lastChunk);
                    log.debug("{} claimOwnership - Length of last chunk adjusted - segment={}, last chunk={}, Length={}.", logPrefix, segmentMetadata.getName(), lastChunk.getName(), chunkInfo.getLength());
                }
                return true;
            }, executor).exceptionally(e -> {
                val ex = Exceptions.unwrap(e);
                if (ex instanceof ChunkNotFoundException) {
                    // This probably means that this instance is fenced out and a newer instance truncated this segment.
                    // Try a commit of unmodified data to fail fast.
                    log.debug("{} claimOwnership - Last chunk was missing, failing fast - segment={}, last chunk={}.", logPrefix, segmentMetadata.getName(), lastChunk.getName());
                    txn.update(segmentMetadata);
                    return false;
                }
                throw new CompletionException(ex);
            });
        }, executor);
    } else {
        f = CompletableFuture.completedFuture(true);
    }
    return f.thenComposeAsync(shouldChange -> {
        // If this instance is no longer the owner, then the transaction commit will fail, so this is still safe.
        if (shouldChange) {
            segmentMetadata.setOwnerEpoch(this.epoch);
            segmentMetadata.setOwnershipChanged(true);
        }
        // Update and commit.
        // If this instance is fenced, this update will fail.
        txn.update(segmentMetadata);
        return txn.commit();
    }, executor);
}
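The core of claimOwnership is an arithmetic adjustment: if the last chunk is longer on long-term storage than the metadata records, the chunk length is raised to the observed value and the segment length is recomputed as lastChunkStartOffset plus the adjusted chunk length. A sketch of that adjustment follows, with plain long values standing in for ChunkInfo and SegmentMetadata; the numbers and the chunk name are hypothetical.

    import io.pravega.segmentstore.storage.metadata.ChunkMetadata;

    public class LengthAdjustmentSketch {
        public static void main(String[] args) {
            // Hypothetical values standing in for SegmentMetadata fields.
            long lastChunkStartOffset = 1_000;
            long segmentLength = 1_250;    // metadata thinks the last chunk holds 250 bytes
            long lengthOnStorage = 400;    // ChunkInfo reports 400 bytes were actually written

            ChunkMetadata lastChunk = ChunkMetadata.builder().name("last-chunk").build();
            lastChunk.setActive(true);
            lastChunk.setLength(segmentLength - lastChunkStartOffset);

            if (lengthOnStorage != lastChunk.getLength()) {
                // The Preconditions check above guarantees storage can only be ahead of metadata.
                lastChunk.setLength(lengthOnStorage);
                segmentLength = lastChunkStartOffset + lastChunk.getLength();
            }
            System.out.println("Adjusted segment length = " + segmentLength); // 1400
        }
    }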
Use of io.pravega.segmentstore.storage.metadata.ChunkMetadata in project pravega by pravega.
The class DefragmentOperation, method gatherChunks.
private CompletableFuture<Void> gatherChunks() {
    chunksToConcat = Collections.synchronizedList(new ArrayList<>());
    return txn.get(targetChunkName).thenComposeAsync(storageMetadata -> {
        target = (ChunkMetadata) storageMetadata;
        // Add target to the list of chunks
        targetSizeAfterConcat.set(target.getLength());
        chunksToConcat.add(new ChunkInfo(targetSizeAfterConcat.get(), targetChunkName));
        nextChunkName = target.getNextChunk();
        // Skip over when first chunk is smaller than min concat size or is greater than max concat size.
        if (!chunkedSegmentStorage.shouldAppend()) {
            if (target.getLength() <= chunkedSegmentStorage.getConfig().getMinSizeLimitForConcat() || target.getLength() > chunkedSegmentStorage.getConfig().getMaxSizeLimitForConcat()) {
                return CompletableFuture.completedFuture(null);
            }
        }
        val shouldContinueGathering = new AtomicBoolean(true);
        return Futures.loop(
                () -> shouldContinueGathering.get(),
                () -> txn.get(nextChunkName).thenAcceptAsync(storageMetadata2 -> {
                    next = (ChunkMetadata) storageMetadata2;
                    if (shouldContinue()) {
                        chunksToConcat.add(new ChunkInfo(next.getLength(), nextChunkName));
                        targetSizeAfterConcat.addAndGet(next.getLength());
                        nextChunkName = next.getNextChunk();
                    } else {
                        shouldContinueGathering.set(false);
                    }
                }, chunkedSegmentStorage.getExecutor()),
                chunkedSegmentStorage.getExecutor());
    }, chunkedSegmentStorage.getExecutor());
}
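When appends are disabled, gatherChunks only defragments a target chunk whose length falls strictly above the configured minimum concat size and at or below the maximum. A small sketch of that gate; the limit values here are made-up examples rather than values from ChunkedSegmentStorageConfig.

    import io.pravega.segmentstore.storage.metadata.ChunkMetadata;

    public class ConcatGateSketch {
        // Mirrors the "skip over" check above; the limits are example values only.
        static boolean eligibleForConcat(ChunkMetadata target, long minSizeLimit, long maxSizeLimit) {
            return target.getLength() > minSizeLimit && target.getLength() <= maxSizeLimit;
        }

        public static void main(String[] args) {
            ChunkMetadata target = ChunkMetadata.builder().name("target-chunk").build();
            target.setLength(4 * 1024);

            System.out.println(eligibleForConcat(target, 1024, 1024 * 1024));      // true
            System.out.println(eligibleForConcat(target, 8 * 1024, 1024 * 1024));  // false - too small
        }
    }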