Use of io.pravega.segmentstore.contracts.tables.TableEntry in project pravega by pravega.
The class TableEntryDeltaIterator, method parseEntries.
@SneakyThrows(IOException.class)
private List<Map.Entry<DeltaIteratorState, TableEntry>> parseEntries(BufferView data, long startOffset, int readLength) {
final long endOffset = startOffset + readLength;
long offset = startOffset;
BufferView.Reader reader = data.getBufferViewReader();
List<Map.Entry<DeltaIteratorState, TableEntry>> result = new ArrayList<>();
try {
while (offset < endOffset) {
val component = AsyncTableEntryReader.readEntryComponents(reader, offset, this.entrySerializer);
long totalLength = component.getHeader().getTotalLength();
// An entry whose end touches (or passes) the overall read limit is the last one in this delta.
boolean isLastEntry = offset + totalLength >= this.maxBytesToRead + startOffset;
// Deletions must be preserved so the caller can accurately reconstruct the delta.
BufferView value = component.getValue() == null ? BufferView.empty() : component.getValue();
offset += totalLength;
// Note: the state records the offset AFTER this entry (i.e., where the next read resumes).
val state = new DeltaIteratorState(offset, isLastEntry, this.shouldClear, component.getHeader().isDeletion());
result.add(new AbstractMap.SimpleEntry<>(state, TableEntry.versioned(component.getKey(), value, component.getVersion())));
}
} catch (BufferView.Reader.OutOfBoundsException ex) {
// Raised when endOffset falls inside (but not on the boundary of) a TableEntry, or when we
// reach the end of the TableSegment. Swallowing it is safe: we have already collected the
// maximal set of fully-readable TableEntries, so it is correct to return what we have.
}
this.currentBatchOffset = offset;
return result;
}
Use of io.pravega.segmentstore.contracts.tables.TableEntry in project pravega by pravega.
The class StreamSegmentContainerTests, method testForceFlush.
/**
 * Tests the {@link SegmentContainer#flushToStorage} method.
 *
 * Creates a set of Stream Segments and Table Segments, applies appends, attribute updates and
 * table entries, then invokes flushToStorage() and verifies everything reached Storage. The
 * DurableLog is then truncated and the container shut down; a fresh container instance is
 * started from the (nearly empty) log and all contents, lengths, attributes and table entries
 * are re-verified - proving recovery relies on the flushed Storage data, not on the log.
 */
@Test
public void testForceFlush() throws Exception {
final AttributeId attributeReplace = AttributeId.randomUUID();
final long expectedAttributeValue = APPENDS_PER_SEGMENT + ATTRIBUTE_UPDATES_PER_SEGMENT;
final int entriesPerSegment = 10;
@Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, NO_TRUNCATIONS_DURABLE_LOG_CONFIG, INFREQUENT_FLUSH_WRITER_CONFIG, null);
// Capture the OperationLog instance so we can truncate it directly in step 4.
val durableLog = new AtomicReference<OperationLog>();
val durableLogFactory = new WatchableOperationLogFactory(context.operationLogFactory, durableLog::set);
@Cleanup val container = new StreamSegmentContainer(CONTAINER_ID, DEFAULT_CONFIG, durableLogFactory, context.readIndexFactory, context.attributeIndexFactory, context.writerFactory, context.storageFactory, context.getDefaultExtensions(), executorService());
container.startAsync().awaitRunning();
Assert.assertNotNull(durableLog.get());
val tableStore = container.getExtension(ContainerTableExtension.class);
// 1. Create the StreamSegments and Table Segments.
ArrayList<String> segmentNames = new ArrayList<>();
ArrayList<String> tableSegmentNames = new ArrayList<>();
ArrayList<CompletableFuture<Void>> opFutures = new ArrayList<>();
for (int i = 0; i < SEGMENT_COUNT; i++) {
String segmentName = getSegmentName(i);
segmentNames.add(segmentName);
opFutures.add(container.createStreamSegment(segmentName, getSegmentType(segmentName), null, TIMEOUT));
}
for (int i = 0; i < SEGMENT_COUNT; i++) {
String segmentName = getSegmentName(i) + "_Table";
tableSegmentNames.add(segmentName);
val type = SegmentType.builder(getSegmentType(segmentName)).tableSegment().build();
opFutures.add(tableStore.createSegment(segmentName, type, TIMEOUT));
}
// 1.1 Wait for all segments to be created prior to using them.
Futures.allOf(opFutures).join();
opFutures.clear();
// 2. Add some appends and update some of the attributes.
HashMap<String, Long> lengths = new HashMap<>();
HashMap<String, ByteArrayOutputStream> segmentContents = new HashMap<>();
for (String segmentName : segmentNames) {
for (int i = 0; i < APPENDS_PER_SEGMENT; i++) {
val attributeUpdates = AttributeUpdateCollection.from(new AttributeUpdate(attributeReplace, AttributeUpdateType.Replace, i + 1));
val appendData = getAppendData(segmentName, i);
long expectedLength = lengths.getOrDefault(segmentName, 0L) + appendData.getLength();
// Alternate between unconditional and conditional (offset-based) appends to cover both paths.
val append = (i % 2 == 0) ? container.append(segmentName, appendData, attributeUpdates, TIMEOUT) : container.append(segmentName, lengths.get(segmentName), appendData, attributeUpdates, TIMEOUT);
opFutures.add(Futures.toVoid(append));
lengths.put(segmentName, expectedLength);
recordAppend(segmentName, appendData, segmentContents, null);
}
// The final attribute update leaves each segment at expectedAttributeValue (verified in 5.1).
for (int i = 0; i < ATTRIBUTE_UPDATES_PER_SEGMENT; i++) {
val attributeUpdates = AttributeUpdateCollection.from(new AttributeUpdate(attributeReplace, AttributeUpdateType.Replace, APPENDS_PER_SEGMENT + i + 1));
opFutures.add(container.updateAttributes(segmentName, attributeUpdates, TIMEOUT));
}
}
// 2.2 Add some entries to the table segments.
// Deterministic key/value generator so the same entries can be re-created for verification in 5.2.
final BiFunction<String, Integer, TableEntry> createTableEntry = (segmentName, entryId) -> TableEntry.unversioned(new ByteArraySegment(String.format("Key_%s_%s", segmentName, entryId).getBytes()), new ByteArraySegment(String.format("Value_%s_%s", segmentName, entryId).getBytes()));
for (String segmentName : tableSegmentNames) {
for (int i = 0; i < entriesPerSegment; i++) {
opFutures.add(Futures.toVoid(tableStore.put(segmentName, Collections.singletonList(createTableEntry.apply(segmentName, i)), TIMEOUT)));
}
}
Futures.allOf(opFutures).join();
// 3. Instead of waiting for the Writer to move data to Storage, we invoke the flushToStorage to verify that all
// operations have been applied to Storage.
val forceFlush = container.flushToStorage(TIMEOUT);
forceFlush.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
checkStorage(segmentContents, lengths, container, context.storage);
// 4. Truncate all the data in the DurableLog and immediately shut down the container.
val truncateSeqNo = container.metadata.getClosestValidTruncationPoint(container.metadata.getOperationSequenceNumber());
durableLog.get().truncate(truncateSeqNo, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
container.close();
// 5. Create a new container instance (from the nearly empty DurableLog) and with an empty cache.
@Cleanup val container2 = new StreamSegmentContainer(CONTAINER_ID, DEFAULT_CONFIG, durableLogFactory, context.readIndexFactory, context.attributeIndexFactory, context.writerFactory, context.storageFactory, context.getDefaultExtensions(), executorService());
container2.startAsync().awaitRunning();
// 5.1 Verify Segment Data.
for (val sc : segmentContents.entrySet()) {
// Contents.
byte[] expectedData = sc.getValue().toByteArray();
byte[] actualData = new byte[expectedData.length];
container2.read(sc.getKey(), 0, actualData.length, TIMEOUT).join().readRemaining(actualData, TIMEOUT);
Assert.assertArrayEquals("Unexpected contents for " + sc.getKey(), expectedData, actualData);
// Length.
val si = container2.getStreamSegmentInfo(sc.getKey(), TIMEOUT).join();
Assert.assertEquals("Unexpected length for " + sc.getKey(), expectedData.length, si.getLength());
// Attributes.
val attributes = container2.getAttributes(sc.getKey(), Collections.singleton(attributeReplace), false, TIMEOUT).join();
Assert.assertEquals("Unexpected attribute for " + sc.getKey(), expectedAttributeValue, (long) attributes.get(attributeReplace));
}
// 5.2 Verify table segment data.
val tableStore2 = container2.getExtension(ContainerTableExtension.class);
for (String segmentName : tableSegmentNames) {
for (int i = 0; i < entriesPerSegment; i++) {
val expected = createTableEntry.apply(segmentName, i);
val actual = tableStore2.get(segmentName, Collections.singletonList(expected.getKey().getKey()), TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).get(0);
Assert.assertTrue("Unexpected Table Entry for " + segmentName + " at position " + i, expected.getKey().getKey().equals(actual.getKey().getKey()) && expected.getValue().equals(actual.getValue()));
}
}
// Ending Note: if all the above tests passed, we have implicitly validated that the Container Metadata Segment has also
// been properly flushed.
}
Use of io.pravega.segmentstore.contracts.tables.TableEntry in project pravega by pravega.
The class TableSegmentLayoutTestBase, method checkIterators.
@SneakyThrows
protected void checkIterators(Map<BufferView, BufferView> expectedEntries, ContainerTableExtension ext) {
// First, verify that an invalid/empty serializer state is handled gracefully: the iterator
// created from it must produce no entries.
val emptyArgs = createEmptyIteratorArgs();
val emptyIterator = ext.entryIterator(SEGMENT_NAME, emptyArgs).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
Assert.assertEquals("Unexpected entries returned.", 0, collectIteratorItems(emptyIterator).size());
val args = IteratorArgs.builder().fetchTimeout(TIMEOUT).build();
// Collect every Table Entry via the iterator. Entry Iterators report versions, so we order by
// Version and verify that the versions match as well.
val iteratedEntries = collectIteratorItems(ext.entryIterator(SEGMENT_NAME, args).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
iteratedEntries.sort(Comparator.comparingLong(e -> e.getKey().getVersion()));
// Fetch the same keys directly; this gives us the authoritative Key Versions to compare against.
val directEntries = ext.get(SEGMENT_NAME, new ArrayList<>(expectedEntries.keySet()), TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
directEntries.sort(Comparator.comparingLong(e -> e.getKey().getVersion()));
AssertExtensions.assertListEquals("Unexpected Table Entries from entryIterator().", directEntries, iteratedEntries, TableEntry::equals);
// Now verify the Table Keys. Key Iterators do not return versions, so order by Key instead.
val comparator = BufferViewComparator.create();
val expectedKeys = directEntries.stream()
        .sorted((a, b) -> comparator.compare(a.getKey().getKey(), b.getKey().getKey()))
        .map(TableEntry::getKey)
        .collect(Collectors.toList());
val iteratedKeys = collectIteratorItems(ext.keyIterator(SEGMENT_NAME, args).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
iteratedKeys.sort((a, b) -> comparator.compare(a.getKey(), b.getKey()));
AssertExtensions.assertListEquals("Unexpected Table Keys from keyIterator().", expectedKeys, iteratedKeys, (k1, k2) -> k1.getKey().equals(k2.getKey()));
}
Use of io.pravega.segmentstore.contracts.tables.TableEntry in project pravega by pravega.
The class TableServiceTests, method generateUpdates.
private HashMap<String, ArrayList<TableEntry>> generateUpdates(HashMap<BufferView, EntryData> keyInfo, boolean conditional, Random rnd) {
val updatesBySegment = new HashMap<String, ArrayList<TableEntry>>();
keyInfo.forEach((key, data) -> {
val newValue = generateValue(rnd);
// Conditional updates carry the key's last known version; unconditional ones do not.
val entry = conditional
        ? TableEntry.versioned(key, newValue, data.getVersion())
        : TableEntry.unversioned(key, newValue);
updatesBySegment.computeIfAbsent(data.segmentName, ignored -> new ArrayList<>()).add(entry);
});
return updatesBySegment;
}
Use of io.pravega.segmentstore.contracts.tables.TableEntry in project pravega by pravega.
The class TableServiceTests, method processDeltaIteratorItems.
private List<TableEntry> processDeltaIteratorItems(List<IteratorItem<TableEntry>> entries) {
// Collapses a stream of delta records into the latest surviving entry per key.
Map<BufferView, TableEntry> latestByKey = new HashMap<>();
for (val item : entries) {
TableEntry entry = item.getEntries().iterator().next();
DeltaIteratorState state = DeltaIteratorState.deserialize(item.getState());
BufferView key = entry.getKey().getKey();
if (state.isDeletionRecord() && latestByKey.containsKey(key)) {
// A deletion record cancels any previously seen update for the same key.
latestByKey.remove(key);
} else {
// Keep whichever entry carries the higher version.
latestByKey.merge(key, entry, (older, newer) ->
        older.getKey().getVersion() < newer.getKey().getVersion() ? newer : older);
}
}
return new ArrayList<>(latestByKey.values());
}
Aggregations