use of io.pravega.segmentstore.contracts.AttributeUpdate in project pravega by pravega.
the class ContainerRecoveryUtils method updateCoreAttributes.
/**
* Updates Core Attributes for all Segments for the given Containers.
* This method iterates through all the back-up copies of the Container Metadata Segments, interprets all entries as
* Segment-SegmentInfo mappings and extracts the Core Attributes for each. These Core Attributes are then applied
* to the same segments in the given Containers.
* @param backUpMetadataSegments A map of back-up copies of metadata segments along with their container Ids.
* @param containersMap A map of {@link DebugStreamSegmentContainer} instances with their container Ids.
* @param executorService A thread pool for execution.
* @param timeout Timeout for the operation.
* @throws InterruptedException If the operation was interrupted while waiting.
* @throws TimeoutException If the timeout expired before the attribute updates for all segments could be completed.
* @throws ExecutionException If an error occurred while updating the attributes of any segment.
*/
public static void updateCoreAttributes(Map<Integer, String> backUpMetadataSegments,
                                        Map<Integer, DebugStreamSegmentContainer> containersMap,
                                        ExecutorService executorService,
                                        Duration timeout) throws InterruptedException, ExecutionException, TimeoutException {
    Preconditions.checkState(backUpMetadataSegments.size() == containersMap.size(),
            "The number of back-up metadata segments = %s and the number of containers = %s should match.",
            backUpMetadataSegments.size(), containersMap.size());
    val args = IteratorArgs.builder().fetchTimeout(timeout).build();
    SegmentToContainerMapper segToConMapper = new SegmentToContainerMapper(containersMap.size(), true);
    // Iterate through all back-up metadata segments.
    for (val backUpMetadataSegmentEntry : backUpMetadataSegments.entrySet()) {
        // Get the name of the original metadata segment.
        val metadataSegment = NameUtils.getMetadataSegmentName(backUpMetadataSegmentEntry.getKey());
        // Get the name of the back-up metadata segment.
        val backUpMetadataSegment = backUpMetadataSegmentEntry.getValue();
        // Get the container for the back-up metadata segment.
        val containerForBackUpMetadataSegment = containersMap.get(segToConMapper.getContainerId(backUpMetadataSegment));
        log.info("Back up container metadata segment name: {} and its container id: {}",
                backUpMetadataSegment, containerForBackUpMetadataSegment.getId());
        // Get the container for the segments listed inside the back-up metadata segment.
        val container = containersMap.get(backUpMetadataSegmentEntry.getKey());
        // Make sure the backup segment is registered as a table segment.
        val bmsInfo = containerForBackUpMetadataSegment.getStreamSegmentInfo(backUpMetadataSegment, timeout)
                .get(timeout.toMillis(), TimeUnit.MILLISECONDS);
        if (bmsInfo.getAttributes().getOrDefault(TableAttributes.INDEX_OFFSET, Attributes.NULL_ATTRIBUTE_VALUE) == Attributes.NULL_ATTRIBUTE_VALUE) {
            log.info("Back up container metadata segment name: {} does not have INDEX_OFFSET set; setting to 0 (forcing reindexing).", backUpMetadataSegment);
            containerForBackUpMetadataSegment.forSegment(backUpMetadataSegment, timeout)
                    .thenCompose(s -> s.updateAttributes(
                            AttributeUpdateCollection.from(new AttributeUpdate(TableAttributes.INDEX_OFFSET, AttributeUpdateType.Replace, 0)),
                            timeout))
                    .get(timeout.toMillis(), TimeUnit.MILLISECONDS);
            refreshDerivedProperties(backUpMetadataSegment, containerForBackUpMetadataSegment);
        }
        // Get an iterator over all segments recorded in the back-up metadata segment.
        val tableExtension = containerForBackUpMetadataSegment.getExtension(ContainerTableExtension.class);
        val entryIterator = tableExtension.entryIterator(backUpMetadataSegment, args).get(timeout.toMillis(), TimeUnit.MILLISECONDS);
        val futures = new ArrayList<CompletableFuture<Void>>();
        // Iterate through all segments in the back-up metadata segment.
        entryIterator.forEachRemaining(item -> {
            for (val entry : item.getEntries()) {
                val segmentInfo = MetadataStore.SegmentInfo.deserialize(entry.getValue());
                val properties = segmentInfo.getProperties();
                // Skip if this is the original metadata segment.
                if (properties.getName().equals(metadataSegment)) {
                    continue;
                }
                // Get the attributes for the current segment.
                val attributeUpdates = properties.getAttributes().entrySet().stream()
                        .map(e -> new AttributeUpdate(e.getKey(), AttributeUpdateType.Replace, e.getValue()))
                        .collect(Collectors.toCollection(AttributeUpdateCollection::new));
                log.info("Segment Name: {} Attributes Updates: {}", properties.getName(), attributeUpdates);
                // Update the attributes for the current segment, ignoring segments that no longer exist.
                futures.add(Futures.exceptionallyExpecting(
                        container.updateAttributes(properties.getName(), attributeUpdates, timeout)
                                .thenRun(() -> refreshDerivedProperties(properties.getName(), container)),
                        ex -> ex instanceof StreamSegmentNotExistsException, null));
            }
        }, executorService).get(timeout.toMillis(), TimeUnit.MILLISECONDS);
        // Wait for the attribute updates of all segments in this back-up metadata segment.
        Futures.allOf(futures).get(timeout.toMillis(), TimeUnit.MILLISECONDS);
    }
}
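The core of the copy step above is turning each recovered segment's attribute map into unconditional Replace updates. A minimal, self-contained sketch of just that transformation (class name, attribute keys and values are illustrative; import paths are assumed from the io.pravega.segmentstore.contracts package named above):
import io.pravega.segmentstore.contracts.AttributeId;
import io.pravega.segmentstore.contracts.AttributeUpdate;
import io.pravega.segmentstore.contracts.AttributeUpdateCollection;
import io.pravega.segmentstore.contracts.AttributeUpdateType;
import io.pravega.segmentstore.contracts.Attributes;

import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;

public class CoreAttributeCopySketch {
    public static void main(String[] args) {
        // Stand-in for SegmentProperties#getAttributes() of a recovered segment (values are made up).
        Map<AttributeId, Long> recoveredAttributes = Map.of(
                Attributes.CREATION_EPOCH, 7L,
                AttributeId.fromUUID(UUID.randomUUID()), 42L);
        // Same shape as the loop body in updateCoreAttributes: every recovered value becomes an
        // unconditional Replace update to be applied to the live segment.
        AttributeUpdateCollection attributeUpdates = recoveredAttributes.entrySet().stream()
                .map(e -> new AttributeUpdate(e.getKey(), AttributeUpdateType.Replace, e.getValue()))
                .collect(Collectors.toCollection(AttributeUpdateCollection::new));
        System.out.println(attributeUpdates);
    }
}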
use of io.pravega.segmentstore.contracts.AttributeUpdate in project pravega by pravega.
the class MetadataStore method createTransientSegment.
/**
* Creates a new Transient Segment with the given name.
*
* @param segmentName The case-sensitive Segment Name.
* @param attributes The initial attributes for the StreamSegment, if any.
* @param timeout Timeout for the operation.
* @return A CompletableFuture that, when completed normally, will indicate the TransientSegment has been created.
*/
private CompletableFuture<Void> createTransientSegment(String segmentName, Collection<AttributeUpdate> attributes, Duration timeout) {
    AttributeUpdateCollection attrs = AttributeUpdateCollection.from(attributes);
    attrs.add(new AttributeUpdate(Attributes.CREATION_EPOCH, AttributeUpdateType.None, this.connector.containerMetadata.getContainerEpoch()));
    return Futures.toVoid(submitAssignmentWithRetry(newSegment(segmentName, SegmentType.TRANSIENT_SEGMENT, attrs), timeout));
}
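To show the shape of what this method receives and adds, here is a small sketch of the attribute assembly in isolation, with a made-up epoch value standing in for this.connector.containerMetadata.getContainerEpoch() and an illustrative caller-supplied update (class and variable names are not from the project; import paths are assumed):
import io.pravega.segmentstore.contracts.AttributeId;
import io.pravega.segmentstore.contracts.AttributeUpdate;
import io.pravega.segmentstore.contracts.AttributeUpdateCollection;
import io.pravega.segmentstore.contracts.AttributeUpdateType;
import io.pravega.segmentstore.contracts.Attributes;

import java.util.Collections;
import java.util.UUID;

public class TransientSegmentAttributeSketch {
    public static void main(String[] args) {
        long containerEpoch = 3L; // stand-in for connector.containerMetadata.getContainerEpoch()
        // An illustrative caller-supplied initial attribute, as might arrive via the 'attributes' parameter.
        AttributeUpdate callerSupplied = new AttributeUpdate(AttributeId.fromUUID(UUID.randomUUID()), AttributeUpdateType.Replace, 1L);
        // Wrap the caller's updates, then append the store-stamped CREATION_EPOCH, mirroring the method above.
        AttributeUpdateCollection attrs = AttributeUpdateCollection.from(Collections.singletonList(callerSupplied));
        attrs.add(new AttributeUpdate(Attributes.CREATION_EPOCH, AttributeUpdateType.None, containerEpoch));
        System.out.println(attrs);
    }
}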
use of io.pravega.segmentstore.contracts.AttributeUpdate in project pravega by pravega.
the class IndexWriter method generateBucketDelete.
/**
* Generates one or more {@link AttributeUpdate}s that will delete a {@link TableBucket}.
*
* @param bucket The {@link TableBucket} to delete.
* @param update A {@link UpdateInstructions} object to collect updates into.
*/
private void generateBucketDelete(TableBucket bucket, UpdateInstructions update) {
    if (bucket.exists()) {
        update.withAttribute(new AttributeUpdate(AttributeId.fromUUID(bucket.getHash()), AttributeUpdateType.Replace, Attributes.NULL_ATTRIBUTE_VALUE));
        update.bucketRemoved();
    }
}
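The delete itself is just an attribute write: the bucket's hash-keyed attribute is overwritten with Attributes.NULL_ATTRIBUTE_VALUE rather than removed outright. The tombstone update in isolation (the UUID stands in for bucket.getHash(); class name and import paths are assumed):
import io.pravega.segmentstore.contracts.AttributeId;
import io.pravega.segmentstore.contracts.AttributeUpdate;
import io.pravega.segmentstore.contracts.AttributeUpdateType;
import io.pravega.segmentstore.contracts.Attributes;

import java.util.UUID;

public class BucketTombstoneSketch {
    public static void main(String[] args) {
        UUID bucketHash = UUID.randomUUID(); // stand-in for bucket.getHash()
        // Replacing the hash-keyed attribute with NULL_ATTRIBUTE_VALUE is what marks the bucket as deleted.
        AttributeUpdate tombstone = new AttributeUpdate(
                AttributeId.fromUUID(bucketHash), AttributeUpdateType.Replace, Attributes.NULL_ATTRIBUTE_VALUE);
        System.out.println(tombstone);
    }
}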
use of io.pravega.segmentstore.contracts.AttributeUpdate in project pravega by pravega.
the class IndexWriter method generateTableAttributeUpdates.
/**
* Generates conditional {@link AttributeUpdate}s that update the values for Core Attributes representing the indexing
* state of the Table Segment.
*
* @param currentOffset The offset from which this indexing batch began. This will be checked against
* {@link TableAttributes#INDEX_OFFSET}.
* @param newOffset The new offset to set for {@link TableAttributes#INDEX_OFFSET}.
* @param processedCount The total number of Table Entry updates processed (including overwritten ones).
* @param update A {@link UpdateInstructions} object to collect updates into.
*/
private void generateTableAttributeUpdates(long currentOffset, long newOffset, int processedCount, UpdateInstructions update) {
    // Add an update for INDEX_OFFSET to indicate we have indexed everything up to this offset.
    Preconditions.checkArgument(currentOffset <= newOffset, "newOffset must be at least currentOffset");
    update.withAttribute(new AttributeUpdate(TableAttributes.INDEX_OFFSET, AttributeUpdateType.ReplaceIfEquals, newOffset, currentOffset));
    // Update the Bucket and Entry counts.
    if (update.getEntryCountDelta() != 0) {
        update.withAttribute(new AttributeUpdate(TableAttributes.ENTRY_COUNT, AttributeUpdateType.Accumulate, update.getEntryCountDelta()));
    }
    if (update.getBucketCountDelta() != 0) {
        update.withAttribute(new AttributeUpdate(TableAttributes.BUCKET_COUNT, AttributeUpdateType.Accumulate, update.getBucketCountDelta()));
    }
    if (processedCount > 0) {
        update.withAttribute(new AttributeUpdate(TableAttributes.TOTAL_ENTRY_COUNT, AttributeUpdateType.Accumulate, processedCount));
    }
}
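The INDEX_OFFSET update is what makes the batch conditional: ReplaceIfEquals carries both the new value and the value the attribute is expected to hold now, so a stale indexing batch is rejected (surfacing as a BadAttributeUpdateException, as the merge test below shows for the same update type) instead of silently overwriting newer state. A sketch of that single update with illustrative offsets (class name and import paths are assumed):
import io.pravega.segmentstore.contracts.AttributeUpdate;
import io.pravega.segmentstore.contracts.AttributeUpdateCollection;
import io.pravega.segmentstore.contracts.AttributeUpdateType;
import io.pravega.segmentstore.contracts.tables.TableAttributes;

public class IndexOffsetAdvanceSketch {
    public static void main(String[] args) {
        long currentOffset = 4096L; // offset at which this indexing batch started (illustrative)
        long newOffset = 8192L;     // offset up to which the batch has indexed (illustrative)
        // New value first, expected current value last: the update only applies if INDEX_OFFSET
        // still equals currentOffset when it is processed.
        AttributeUpdateCollection updates = AttributeUpdateCollection.from(
                new AttributeUpdate(TableAttributes.INDEX_OFFSET, AttributeUpdateType.ReplaceIfEquals, newOffset, currentOffset));
        System.out.println(updates);
    }
}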
use of io.pravega.segmentstore.contracts.AttributeUpdate in project pravega by pravega.
the class StreamSegmentContainerTests method testBasicConditionalMergeScenarios.
/**
* Tests in detail the basic situations that a conditional segment merge can face.
*/
@Test
public void testBasicConditionalMergeScenarios() throws Exception {
    @Cleanup
    TestContext context = createContext();
    context.container.startAsync().awaitRunning();
    final String parentSegment = "parentSegment";
    // This will be the attribute update to execute against the parent segment.
    Function<String, AttributeUpdateCollection> attributeUpdateForTxn = txnName -> AttributeUpdateCollection.from(
            new AttributeUpdate(AttributeId.fromUUID(UUID.nameUUIDFromBytes(txnName.getBytes())),
                    AttributeUpdateType.ReplaceIfEquals, txnName.hashCode() + 1, txnName.hashCode()));
    Function<String, Long> getAttributeValue = txnName -> {
        AttributeId attributeId = AttributeId.fromUUID(UUID.nameUUIDFromBytes(txnName.getBytes()));
        return context.container.getAttributes(parentSegment, Collections.singletonList(attributeId), true, TIMEOUT)
                .join().get(attributeId);
    };
    // Create a parent Segment.
    context.container.createStreamSegment(parentSegment, getSegmentType(parentSegment), null, TIMEOUT)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentType segmentType = getSegmentType(parentSegment);

    // Case 1: Create an empty transaction that fails to merge conditionally due to bad attributes.
    String txnName = NameUtils.getTransactionNameFromId(parentSegment, UUID.randomUUID());
    AttributeId txnAttributeId = AttributeId.fromUUID(UUID.nameUUIDFromBytes(txnName.getBytes()));
    context.container.createStreamSegment(txnName, segmentType, null, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    AttributeUpdateCollection attributeUpdates = attributeUpdateForTxn.apply(txnName);
    AssertExtensions.assertFutureThrows("Transaction was expected to fail on attribute update",
            context.container.mergeStreamSegment(parentSegment, txnName, attributeUpdates, TIMEOUT),
            ex -> ex instanceof BadAttributeUpdateException);
    Assert.assertEquals(Attributes.NULL_ATTRIBUTE_VALUE, (long) getAttributeValue.apply(txnName));

    // Case 2: Now, we prepare the attributes in the parent segment so the merge of the empty transaction succeeds.
    context.container.updateAttributes(parentSegment,
            AttributeUpdateCollection.from(new AttributeUpdate(txnAttributeId, AttributeUpdateType.Replace, txnName.hashCode())),
            TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // As the source segment is empty, the amount of merged data should be 0.
    Assert.assertEquals(0L, context.container.mergeStreamSegment(parentSegment, txnName, attributeUpdates, TIMEOUT)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).getMergedDataLength());
    // But the attribute related to that transaction merge on the parent segment should have been updated.
    Assert.assertEquals(txnName.hashCode() + 1L,
            (long) context.container.getAttributes(parentSegment, Collections.singletonList(txnAttributeId), true, TIMEOUT)
                    .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).get(txnAttributeId));

    // Case 3: Create a non-empty transaction that should fail due to a conditional attribute update failure.
    txnName = NameUtils.getTransactionNameFromId(parentSegment, UUID.randomUUID());
    txnAttributeId = AttributeId.fromUUID(UUID.nameUUIDFromBytes(txnName.getBytes()));
    attributeUpdates = attributeUpdateForTxn.apply(txnName);
    context.container.createStreamSegment(txnName, segmentType, null, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Add some appends to the transaction.
    RefCountByteArraySegment appendData = getAppendData(txnName, 1);
    context.container.append(txnName, appendData, null, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Attempt the conditional merge.
    AssertExtensions.assertFutureThrows("Transaction was expected to fail on attribute update",
            context.container.mergeStreamSegment(parentSegment, txnName, attributeUpdates, TIMEOUT),
            ex -> ex instanceof BadAttributeUpdateException);
    Assert.assertEquals(Attributes.NULL_ATTRIBUTE_VALUE, (long) getAttributeValue.apply(txnName));

    // Case 4: Now, we prepare the attributes in the parent segment so the merge of the non-empty transaction succeeds.
    context.container.updateAttributes(parentSegment,
            AttributeUpdateCollection.from(new AttributeUpdate(txnAttributeId, AttributeUpdateType.Replace, txnName.hashCode())),
            TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // As the source segment is non-empty, the amount of merged data should be greater than 0.
    Assert.assertTrue(context.container.mergeStreamSegment(parentSegment, txnName, attributeUpdates, TIMEOUT)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).getMergedDataLength() > 0);
    // The attribute related to that transaction merge on the parent segment should have been updated as well.
    Assert.assertEquals(txnName.hashCode() + 1L,
            (long) context.container.getAttributes(parentSegment, Collections.singletonList(txnAttributeId), true, TIMEOUT)
                    .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).get(txnAttributeId));
    context.container.stopAsync().awaitTerminated();
}
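Stripped of the container plumbing, the test exercises a seed-then-compare-and-set pattern on a single attribute. A sketch of just the two update collections involved (the transaction name and the sketch class are illustrative; import paths are assumed):
import io.pravega.segmentstore.contracts.AttributeId;
import io.pravega.segmentstore.contracts.AttributeUpdate;
import io.pravega.segmentstore.contracts.AttributeUpdateCollection;
import io.pravega.segmentstore.contracts.AttributeUpdateType;

import java.util.UUID;

public class ConditionalMergeAttributeSketch {
    public static void main(String[] args) {
        String txnName = "someTransactionSegmentName"; // illustrative; not the real NameUtils format
        AttributeId txnAttributeId = AttributeId.fromUUID(UUID.nameUUIDFromBytes(txnName.getBytes()));
        // Seed (Cases 2 and 4): unconditionally set the attribute on the parent segment.
        AttributeUpdateCollection seed = AttributeUpdateCollection.from(
                new AttributeUpdate(txnAttributeId, AttributeUpdateType.Replace, txnName.hashCode()));
        // Conditional bump carried by the merge: it succeeds only if the attribute still holds
        // txnName.hashCode(); otherwise the merge fails with BadAttributeUpdateException (Cases 1 and 3).
        AttributeUpdateCollection conditionalBump = AttributeUpdateCollection.from(
                new AttributeUpdate(txnAttributeId, AttributeUpdateType.ReplaceIfEquals, txnName.hashCode() + 1, txnName.hashCode()));
        System.out.println(seed + " -> " + conditionalBump);
    }
}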