Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class StreamSegmentMapperTests, method testGetOrAssignStreamSegmentId.
/**
 * Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, as well as
 * retrieving existing attributes.
 */
@Test
public void testGetOrAssignStreamSegmentId() {
    final long minSegmentLength = 1;
    final int segmentCount = 10;
    final int transactionsPerSegment = 5;
    final long noSegmentId = ContainerMetadata.NO_STREAM_SEGMENT_ID;
    AtomicLong currentSegmentId = new AtomicLong(Integer.MAX_VALUE);
    Supplier<Long> nextSegmentId = () -> currentSegmentId.decrementAndGet() % 2 == 0 ? noSegmentId : currentSegmentId.get();
    Function<String, Long> getSegmentLength = segmentName -> minSegmentLength + (long) MathHelpers.abs(segmentName.hashCode());
    Function<String, Long> getSegmentStartOffset = segmentName -> getSegmentLength.apply(segmentName) / 2;

    @Cleanup
    TestContext context = new TestContext();
    HashSet<String> storageSegments = new HashSet<>();
    for (int i = 0; i < segmentCount; i++) {
        String segmentName = getName(i);
        storageSegments.add(segmentName);
        setSavedState(segmentName, nextSegmentId.get(), getSegmentStartOffset.apply(segmentName), storageSegments.size() % ATTRIBUTE_COUNT, context);
        for (int j = 0; j < transactionsPerSegment; j++) {
            // There is a small chance of a name conflict here, but we don't care. As long as we get at least one
            // Transaction per segment, we should be fine.
            String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName, UUID.randomUUID());
            storageSegments.add(transactionName);
            setSavedState(transactionName, nextSegmentId.get(), getSegmentStartOffset.apply(transactionName), storageSegments.size() % ATTRIBUTE_COUNT, context);
        }
    }

    // We set up all the necessary handlers, except the one for create. We do not need to create new Segments here.
    setupOperationLog(context);
    Predicate<String> isSealed = segmentName -> segmentName.hashCode() % 2 == 0;
    setupStorageGetHandler(context, storageSegments, segmentName -> StreamSegmentInformation.builder()
            .name(segmentName)
            .length(getSegmentLength.apply(segmentName))
            .sealed(isSealed.test(segmentName))
            .build());

    // First, map all the parents (stand-alone segments).
    for (String name : storageSegments) {
        if (StreamSegmentNameUtils.getParentStreamSegmentName(name) == null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for StreamSegment " + name, ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for StreamSegment " + name, sm);
            long expectedLength = getSegmentLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for StreamSegment " + name, expectedLength, sm.getLength());
            Assert.assertEquals("Metadata does not have the expected value for isSealed for StreamSegment " + name, expectedSeal, sm.isSealed());
            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes("Unexpected attributes in metadata for StreamSegment " + name, expectedAttributes, sm);
            long expectedStartOffset = segmentState == null ? 0 : segmentState.getStartOffset();
            Assert.assertEquals("Unexpected StartOffset in metadata for " + name, expectedStartOffset, sm.getStartOffset());
        }
    }

    // Now, map all the Transactions.
    for (String name : storageSegments) {
        String parentName = StreamSegmentNameUtils.getParentStreamSegmentName(name);
        if (parentName != null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for Transaction " + name, ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for Transaction " + name, sm);
            long expectedLength = getSegmentLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for Transaction " + name, expectedLength, sm.getLength());
            Assert.assertEquals("Metadata does not have the expected value for isSealed for Transaction " + name, expectedSeal, sm.isSealed());
            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes("Unexpected attributes in metadata for Transaction " + name, expectedAttributes, sm);

            // For transactions we do not expect to see any non-zero start offsets.
            Assert.assertEquals("Unexpected StartOffset in metadata for " + name, 0, sm.getStartOffset());

            // Check parenthood.
            Assert.assertNotEquals("No parent defined in metadata for Transaction " + name, ContainerMetadata.NO_STREAM_SEGMENT_ID, sm.getParentId());
            long parentId = context.metadata.getStreamSegmentId(parentName, false);
            Assert.assertEquals("Unexpected parent defined in metadata for Transaction " + name, parentId, sm.getParentId());
        }
    }
}
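The test relies on helpers such as getName and setSavedState that are defined elsewhere in StreamSegmentMapperTests and are not part of this excerpt. As a rough, hypothetical sketch of the kind of naming helper being assumed (the exact name format used by the real test class may differ):

// Hypothetical sketch only: produces a deterministic, unique segment name per index.
private String getName(int segmentIndex) {
    return String.format("Segment_%d", segmentIndex);
}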
Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class MetadataCleaner, method runOnceInternal.
private CompletableFuture<Void> runOnceInternal() {
    long lastSeqNo = this.lastIterationSequenceNumber.getAndSet(this.metadata.getOperationSequenceNumber());
    long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "metadataCleanup", lastSeqNo);

    // Get candidates.
    Collection<SegmentMetadata> cleanupCandidates = this.metadata.getEvictionCandidates(lastSeqNo, this.config.getMaxConcurrentSegmentEvictionCount());

    // Serialize only those segments that are still alive (not deleted or merged - those will get removed anyway).
    val cleanupTasks = cleanupCandidates.stream()
            .filter(sm -> !sm.isDeleted() && !sm.isMerged())
            .map(sm -> this.stateStore.put(sm.getName(), new SegmentState(sm.getId(), sm), this.config.getSegmentMetadataExpiration()))
            .collect(Collectors.toList());

    return Futures.allOf(cleanupTasks).thenRunAsync(() -> {
        Collection<SegmentMetadata> evictedSegments = this.metadata.cleanup(cleanupCandidates, lastSeqNo);
        this.cleanupCallback.accept(evictedSegments);
        LoggerHelpers.traceLeave(log, this.traceObjectId, "metadataCleanup", traceId, evictedSegments.size());
    }, this.executor);
}
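Because runOnceInternal returns a CompletableFuture, a cleanup pass composes naturally with other asynchronous work. A minimal sketch of how a single pass might be triggered and its outcome logged from inside the class (this wrapper is illustrative and not the actual MetadataCleaner API):

// Sketch only: run one cleanup pass and record how it finished.
CompletableFuture<Void> pass = runOnceInternal();
pass.whenComplete((v, ex) -> {
    if (ex != null) {
        log.warn("Metadata cleanup pass failed.", ex);
    } else {
        log.debug("Metadata cleanup pass completed.");
    }
});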
Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class StreamSegmentContainer, method mergeTransaction.
@Override
public CompletableFuture<Void> mergeTransaction(String transactionName, Duration timeout) {
    ensureRunning();
    logRequest("mergeTransaction", transactionName);
    this.metrics.mergeTxn();
    TimeoutTimer timer = new TimeoutTimer(timeout);
    return this.segmentMapper
            .getOrAssignStreamSegmentId(transactionName, timer.getRemaining(), transactionId -> {
                SegmentMetadata transactionMetadata = this.metadata.getStreamSegmentMetadata(transactionId);
                if (transactionMetadata == null) {
                    throw new CompletionException(new StreamSegmentNotExistsException(transactionName));
                }
                Operation op = new MergeTransactionOperation(transactionMetadata.getParentId(), transactionMetadata.getId());
                return this.durableLog.add(op, timer.getRemaining());
            })
            .thenComposeAsync(v -> this.stateStore.remove(transactionName, timer.getRemaining()), this.executor);
}
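From a caller's perspective, mergeTransaction takes a transaction segment name and a timeout and returns a CompletableFuture that completes once the merge operation has been durably logged. A hedged usage sketch, where "container" stands for an already-started StreamSegmentContainer and the logger is assumed to exist in the caller's scope:

// Sketch only: merge a transaction segment into its parent and wait for the result.
String txnName = StreamSegmentNameUtils.getTransactionNameFromId("parentSegment", UUID.randomUUID());
container.mergeTransaction(txnName, Duration.ofSeconds(30))
         .exceptionally(ex -> {
             // A StreamSegmentNotExistsException indicates the transaction was never created or was already merged.
             log.error("Merge failed for {}.", txnName, ex);
             return null;
         })
         .join();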
Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class StreamSegmentMapper, method submitToOperationLog.
/**
 * Submits a StreamSegmentMapOperation to the OperationLog. Upon completion, this operation
 * will have mapped the given Segment to a new internal Segment Id if none was provided in the given SegmentInfo.
 * If the given SegmentInfo already has a SegmentId set, then all efforts will be made to map that Segment with the
 * requested Segment Id.
 *
 * @param segmentInfo           The SegmentInfo for the StreamSegment to generate and persist.
 * @param parentStreamSegmentId If different from ContainerMetadata.NO_STREAM_SEGMENT_ID, the given streamSegmentInfo
 *                              will be mapped as a transaction. Otherwise, this will be registered as a standalone StreamSegment.
 * @param timeout               Timeout for the operation.
 * @return A CompletableFuture that, when completed, will contain the internal SegmentId that was assigned (or the
 * one supplied via SegmentInfo, if any). If the operation failed, then this Future will complete with that exception.
 */
private CompletableFuture<Long> submitToOperationLog(SegmentInfo segmentInfo, long parentStreamSegmentId, Duration timeout) {
    SegmentProperties properties = segmentInfo.getProperties();
    if (properties.isDeleted()) {
        // Stream does not exist. Fail the request with the appropriate exception.
        failAssignment(properties.getName(), new StreamSegmentNotExistsException("StreamSegment does not exist."));
        return Futures.failedFuture(new StreamSegmentNotExistsException(properties.getName()));
    }

    long existingSegmentId = this.containerMetadata.getStreamSegmentId(properties.getName(), true);
    if (isValidStreamSegmentId(existingSegmentId)) {
        // Looks like someone else beat us to it.
        completeAssignment(properties.getName(), existingSegmentId);
        return CompletableFuture.completedFuture(existingSegmentId);
    } else {
        StreamSegmentMapOperation op;
        if (isValidStreamSegmentId(parentStreamSegmentId)) {
            // Transaction.
            SegmentMetadata parentMetadata = this.containerMetadata.getStreamSegmentMetadata(parentStreamSegmentId);
            assert parentMetadata != null : "parentMetadata is null";
            op = new StreamSegmentMapOperation(parentStreamSegmentId, properties);
        } else {
            // Standalone StreamSegment.
            op = new StreamSegmentMapOperation(properties);
        }

        if (segmentInfo.getSegmentId() != ContainerMetadata.NO_STREAM_SEGMENT_ID) {
            op.setStreamSegmentId(segmentInfo.getSegmentId());
        }

        return this.durableLog.add(op, timeout)
                .thenApply(seqNo -> completeAssignment(properties.getName(), op.getStreamSegmentId()));
    }
}
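The branching above hinges on isValidStreamSegmentId, a private helper of StreamSegmentMapper that is not part of this excerpt. In spirit it is a check against the reserved ContainerMetadata.NO_STREAM_SEGMENT_ID value; a minimal sketch of what such a helper could look like:

// Sketch of the assumed helper; the real implementation lives in StreamSegmentMapper.
private boolean isValidStreamSegmentId(long id) {
    return id != ContainerMetadata.NO_STREAM_SEGMENT_ID;
}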
Use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
Class ContainerMetadataUpdateTransaction, method copySegmentMetadata.
private void copySegmentMetadata(Collection<UpdateableSegmentMetadata> newSegments, Predicate<SegmentMetadata> filter, UpdateableContainerMetadata target) {
    for (SegmentMetadata newMetadata : newSegments) {
        if (!filter.test(newMetadata)) {
            continue;
        }

        UpdateableSegmentMetadata existingMetadata;
        if (newMetadata.isTransaction()) {
            existingMetadata = target.mapStreamSegmentId(newMetadata.getName(), newMetadata.getId(), newMetadata.getParentId());
        } else {
            existingMetadata = target.mapStreamSegmentId(newMetadata.getName(), newMetadata.getId());
        }

        // Update real metadata with all the information from the new metadata.
        existingMetadata.copyFrom(newMetadata);
    }
}
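copySegmentMetadata is driven with a filter that decides which segments from the update transaction should be pushed into the target metadata. A hypothetical invocation sketch (the field names below are illustrative only and not taken from ContainerMetadataUpdateTransaction):

// Sketch only: copy every non-deleted, non-merged segment recorded in this update
// transaction into the real container metadata.
copySegmentMetadata(this.newSegments.values(), sm -> !sm.isDeleted() && !sm.isMerged(), this.realMetadata);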