Usage example of io.pravega.segmentstore.server.SegmentMetadata from the pravega/pravega project.
Source: class ContainerReadIndex, method cleanup.
/**
 * Cleans up the Read Indices for the given Segment Ids (or for all known Segments, if null is passed).
 * An index is only closed if its Segment no longer has metadata, is deleted, or is no longer active;
 * all other requested ids are left untouched and reported at debug level.
 *
 * @param segmentIds An Iterator of Segment Ids to clean up, or null to consider every Segment
 *                   currently having a Read Index.
 */
@Override
public void cleanup(Iterator<Long> segmentIds) {
    Exceptions.checkNotClosed(this.closed.get(), this);
    List<Long> removed = new ArrayList<>();
    List<Long> notRemoved = new ArrayList<>();
    synchronized (this.lock) {
        if (segmentIds == null) {
            // No ids supplied: sweep every index we currently hold. Snapshot the key set first
            // so that closeIndex() may modify the underlying map while we iterate.
            segmentIds = new ArrayList<>(this.readIndices.keySet()).iterator();
        }
        while (segmentIds.hasNext()) {
            long streamSegmentId = segmentIds.next();
            SegmentMetadata segmentMetadata = this.metadata.getStreamSegmentMetadata(streamSegmentId);
            boolean wasRemoved = false;
            if (segmentMetadata == null || segmentMetadata.isDeleted() || !segmentMetadata.isActive()) {
                // Only evict indices for Segments that are gone from the metadata, deleted, or inactive.
                wasRemoved = closeIndex(streamSegmentId, true);
            }
            if (wasRemoved) {
                removed.add(streamSegmentId);
            } else {
                notRemoved.add(streamSegmentId);
            }
        }
    }
    if (!notRemoved.isEmpty()) {
        log.debug("{}: Unable to clean up ReadIndex for Segments {} because no such index exists or the Segments are not deleted.", this.traceObjectId, notRemoved);
    }
    log.info("{}: Cleaned up ReadIndices for deleted Segments {}.", this.traceObjectId, removed);
}
Usage example of io.pravega.segmentstore.server.SegmentMetadata from the pravega/pravega project.
Source: class StreamSegmentContainerMetadataTests, method testDeleteStreamSegment.
/**
* Tests the ability to delete a StreamSegment from the metadata, as well as any dependent (Transaction) StreamSegments.
*/
@Test
@SuppressWarnings("checkstyle:CyclomaticComplexity")
public void testDeleteStreamSegment() {
final UpdateableContainerMetadata m = new MetadataBuilder(CONTAINER_ID).build();
// Every alreadyDeletedTransactionFrequency-th mapped id is pre-marked deleted (see loop below).
final int alreadyDeletedTransactionFrequency = 11;
ArrayList<Long> segmentIds = new ArrayList<>();
HashSet<Long> deletedStreamSegmentIds = new HashSet<>();
// Build the fixture: SEGMENT_COUNT parents, each followed immediately by
// TRANSACTIONS_PER_SEGMENT_COUNT Transactions. Segment ids equal their position in segmentIds.
for (long i = 0; i < SEGMENT_COUNT; i++) {
final long segmentId = segmentIds.size();
segmentIds.add(segmentId);
m.mapStreamSegmentId(getName(segmentId), segmentId);
for (long j = 0; j < TRANSACTIONS_PER_SEGMENT_COUNT; j++) {
final long transactionId = segmentIds.size();
segmentIds.add(transactionId);
val tm = m.mapStreamSegmentId(getName(transactionId), transactionId, segmentId);
if (segmentIds.size() % alreadyDeletedTransactionFrequency == 0) {
// Mark this transaction as already deleted in Storage.
tm.markDeleted();
deletedStreamSegmentIds.add(transactionId);
} else if (segmentIds.size() % alreadyDeletedTransactionFrequency == 1) {
// Decoy: this is merged, but not in Storage.
tm.markMerged();
}
}
}
// By construction, ids come in groups of (1 + TRANSACTIONS_PER_SEGMENT_COUNT): index i % groupSize == 0
// is a parent StreamSegment and the following indices in the group are its Transactions.
// (The "3n / 3n+1 / 3n+2" layout holds when TRANSACTIONS_PER_SEGMENT_COUNT == 2.)
// Let's delete a few parent StreamSegments and verify their Transactions are also deleted.
// Then delete only Transactions, and verify those are the only ones to be deleted.
final int groupSize = TRANSACTIONS_PER_SEGMENT_COUNT + 1;
ArrayList<Integer> streamSegmentsToDelete = new ArrayList<>();
ArrayList<Integer> transactionsToDelete = new ArrayList<>();
for (int i = 0; i < segmentIds.size(); i++) {
if (i < segmentIds.size() / 2) {
// In the first half, we only delete the parents (which will force the Transactions to be deleted too).
if (i % groupSize == 0) {
streamSegmentsToDelete.add(i);
}
} else {
// In the second half, we only delete the first Transaction of any segment.
if (i % groupSize == 1) {
transactionsToDelete.add(i);
}
}
}
// Delete stand-alone StreamSegments (and verify Transactions are also deleted).
for (int index : streamSegmentsToDelete) {
long segmentId = segmentIds.get(index);
String name = m.getStreamSegmentMetadata(segmentId).getName();
Collection<String> expectedDeletedSegmentNames = new ArrayList<>();
expectedDeletedSegmentNames.add(name);
deletedStreamSegmentIds.add(segmentId);
// Transactions were mapped immediately after their parent, so they occupy the next
// TRANSACTIONS_PER_SEGMENT_COUNT indices following 'index'.
for (int transIndex = 0; transIndex < TRANSACTIONS_PER_SEGMENT_COUNT; transIndex++) {
long transactionId = segmentIds.get(index + transIndex + 1);
if (deletedStreamSegmentIds.add(transactionId)) {
// We only expect a Transaction to be deleted if it hasn't already been deleted.
expectedDeletedSegmentNames.add(m.getStreamSegmentMetadata(transactionId).getName());
}
}
Collection<String> deletedSegmentNames = extract(m.deleteStreamSegment(name), SegmentMetadata::getName);
AssertExtensions.assertContainsSameElements("Unexpected StreamSegments were deleted.", expectedDeletedSegmentNames, deletedSegmentNames);
}
// Delete Transactions: each such deletion should affect only the Transaction itself.
for (int index : transactionsToDelete) {
long transactionId = segmentIds.get(index);
String name = m.getStreamSegmentMetadata(transactionId).getName();
Collection<String> expectedDeletedSegmentNames = new ArrayList<>();
deletedStreamSegmentIds.add(transactionId);
expectedDeletedSegmentNames.add(name);
Collection<String> deletedSegmentNames = extract(m.deleteStreamSegment(name), SegmentMetadata::getName);
AssertExtensions.assertContainsSameElements("Unexpected StreamSegments were deleted.", expectedDeletedSegmentNames, deletedSegmentNames);
}
// Verify deleted segments have not been actually removed from the metadata.
Collection<Long> metadataSegmentIds = m.getAllStreamSegmentIds();
AssertExtensions.assertContainsSameElements("Metadata does not contain the expected Segment Ids", segmentIds, metadataSegmentIds);
// Verify individual StreamSegmentMetadata: exactly the tracked ids must report isDeleted() == true.
for (long segmentId : segmentIds) {
boolean expectDeleted = deletedStreamSegmentIds.contains(segmentId);
Assert.assertEquals("Unexpected value for isDeleted.", expectDeleted, m.getStreamSegmentMetadata(segmentId).isDeleted());
}
}
Usage example of io.pravega.segmentstore.server.SegmentMetadata from the pravega/pravega project.
Source: class StreamSegmentContainerMetadataTests, method checkEvictedSegmentCandidates.
/**
 * Verifies that the given eviction candidates are all genuinely eligible for removal, that
 * candidacy has not actually removed them from the metadata, and that every non-candidate
 * Segment still in the metadata is indeed ineligible for removal.
 *
 * @param candidates      The eviction candidates reported by the metadata.
 * @param transactions    A Map relating Transactions to their parents, passed through to shouldExpectRemoval.
 * @param metadata        The Container Metadata under test.
 * @param expirationSeqNo The Sequence Number before which Segments are considered expired.
 * @param truncatedSeqNo  The Sequence Number up to which the log has been truncated.
 */
private void checkEvictedSegmentCandidates(Collection<SegmentMetadata> candidates, Map<Long, Long> transactions, UpdateableContainerMetadata metadata, long expirationSeqNo, long truncatedSeqNo) {
    long cutoffSeqNo = Math.min(expirationSeqNo, truncatedSeqNo);
    HashSet<Long> candidateIds = new HashSet<>();
    candidates.forEach(candidate -> {
        long id = candidate.getId();
        // Each reported candidate must actually qualify for removal.
        boolean isEligible = shouldExpectRemoval(id, metadata, transactions, cutoffSeqNo, truncatedSeqNo);
        Assert.assertTrue("Unexpected eviction candidate in segment " + id, isEligible);
        // Being a candidate must not remove the segment (or its name mapping) from the metadata.
        Assert.assertNotNull("ContainerMetadata no longer has metadata for eviction candidate segment " + id, metadata.getStreamSegmentMetadata(id));
        Assert.assertNotEquals("ContainerMetadata no longer has name mapping for eviction candidate segment " + id, ContainerMetadata.NO_STREAM_SEGMENT_ID, metadata.getStreamSegmentId(candidate.getName(), false));
        candidateIds.add(id);
    });
    // Every Segment left out of the candidate set must be ineligible for removal.
    for (long segmentId : metadata.getAllStreamSegmentIds()) {
        if (candidateIds.contains(segmentId)) {
            continue;
        }
        boolean expectedRemoved = shouldExpectRemoval(segmentId, metadata, transactions, cutoffSeqNo, truncatedSeqNo);
        Assert.assertFalse("Unexpected non-eviction for segment " + segmentId, expectedRemoved);
    }
}
Usage example of io.pravega.segmentstore.server.SegmentMetadata from the pravega/pravega project.
Source: class StreamSegmentMapper, method getStreamSegmentInfo.
// endregion
// region GetSegmentInfo
/**
 * Gets information about a StreamSegment. If the Segment is active, the answer is served directly
 * from the in-memory Metadata. Otherwise the information is fetched from Storage and returned as-is,
 * without activating the segment in the Metadata or otherwise touching the DurableLog.
 *
 * @param streamSegmentName The case-sensitive StreamSegment Name.
 * @param timeout           Timeout for the Operation.
 * @return A CompletableFuture that, when complete, will contain a SegmentProperties object with the
 * desired information. If failed, it will contain the exception that caused the failure.
 */
CompletableFuture<SegmentProperties> getStreamSegmentInfo(String streamSegmentName, Duration timeout) {
    long streamSegmentId = this.containerMetadata.getStreamSegmentId(streamSegmentName, true);
    if (isValidStreamSegmentId(streamSegmentId)) {
        // The Segment is active and present in our Metadata; answer from there.
        SegmentMetadata sm = this.containerMetadata.getStreamSegmentMetadata(streamSegmentId);
        return sm.isDeleted()
                ? Futures.failedFuture(new StreamSegmentNotExistsException(streamSegmentName))
                : CompletableFuture.completedFuture(sm.getSnapshot());
    }

    // The Segment is not yet active. If an assignment is already in flight, piggyback on it.
    QueuedCallback<SegmentProperties> queuedCallback = checkConcurrentAssignment(streamSegmentName,
            id -> CompletableFuture.completedFuture(this.containerMetadata.getStreamSegmentMetadata(id).getSnapshot()));
    if (queuedCallback != null) {
        return queuedCallback.result;
    }

    // Not in metadata and no concurrent assignments. Go to Storage and get what's needed.
    return getSegmentInfoFromStorage(streamSegmentName, timeout);
}
Usage example of io.pravega.segmentstore.server.SegmentMetadata from the pravega/pravega project.
Source: class ContainerMetadataUpdateTransaction, method preProcessMetadataOperation.
/**
 * Pre-processes a StreamSegmentMapOperation. For Transaction mappings, verifies that the Parent
 * Segment exists in the metadata; then validates any existing mapping for this Segment and assigns
 * it a unique Segment Id.
 *
 * @param operation The operation to pre-process.
 * @throws ContainerException     If the Parent Segment does not exist, or any other container-level
 *                                validation fails.
 * @throws StreamSegmentException If a Segment-level validation fails.
 */
private void preProcessMetadataOperation(StreamSegmentMapOperation operation) throws ContainerException, StreamSegmentException {
    if (operation.isTransaction() && getExistingMetadata(operation.getParentStreamSegmentId()) == null) {
        // A Transaction may only be mapped under an existing Parent Segment.
        throw new MetadataUpdateException(this.containerId, String.format("Operation %d wants to map a Segment to a Parent Segment Id that does " + "not exist. Parent SegmentId = %d, Transaction Name = %s.", operation.getSequenceNumber(), operation.getParentStreamSegmentId(), operation.getStreamSegmentName()));
    }

    // Verify that the segment is not already mapped. If it is mapped, then it needs to have the exact same
    // segment id as the one the operation is trying to set.
    checkExistingMapping(operation);
    assignUniqueSegmentId(operation);
}
End of aggregated usage examples.