Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
The class RollingStorage, method serializeHandle.
private void serializeHandle(RollingSegmentHandle handle) throws StreamSegmentException {
    ByteArraySegment handleData = HandleSerializer.serialize(handle);
    try {
        this.headerStorage.write(handle.getHeaderHandle(), 0, handleData.getReader(), handleData.getLength());
        handle.setHeaderLength(handleData.getLength());
        log.debug("Header for '{}' fully serialized to '{}'.", handle.getSegmentName(), handle.getHeaderHandle().getSegmentName());
    } catch (BadOffsetException ex) {
        // If we get BadOffsetException when writing the Handle, it means it was modified externally.
        throw new StorageNotPrimaryException(handle.getSegmentName(), ex);
    }
}
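The method above hands the serialized handle to headerStorage as a stream: getReader() exposes the wrapped byte[] as an InputStream and getLength() bounds the write. A minimal standalone sketch of that pattern, using only the ByteArraySegment calls shown above (the class and variable names here are illustrative, not Pravega code):

import io.pravega.common.util.ByteArraySegment;
import java.io.InputStream;

public class SegmentReaderSketch {
    public static void main(String[] args) throws Exception {
        byte[] serialized = {1, 2, 3, 4};              // stand-in for HandleSerializer.serialize(...)
        ByteArraySegment data = new ByteArraySegment(serialized);
        try (InputStream reader = data.getReader()) {  // stream view over the wrapped array, no copy
            byte[] sink = new byte[data.getLength()];
            int read = reader.read(sink, 0, data.getLength());
            System.out.println("read " + read + " of " + data.getLength() + " bytes");
        }
    }
}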
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
The class RollingStorage, method truncateHandle.
private void truncateHandle(RollingSegmentHandle handle) throws StreamSegmentException {
    handle.excludeInexistentChunks();
    ByteArraySegment handleData = HandleSerializer.serialize(handle);
    this.headerStorage.replace(handle.getHeaderHandle(), handleData);
    handle.setHeaderLength(handleData.getLength());
    log.debug("Header for '{}' fully serialized (replaced) to '{}'.", handle.getSegmentName(), handle.getHeaderHandle().getSegmentName());
}
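Note the contrast with serializeHandle above: truncateHandle swaps the entire header via replace(), so there is no offset to conflict on and no BadOffsetException path. A hypothetical interface sketch of the two write shapes (HeaderStorageSketch and its signatures are illustrative, not Pravega's actual API):

import io.pravega.common.util.ByteArraySegment;
import java.io.InputStream;

interface HeaderStorageSketch {
    // Positional write: fails with a bad-offset error if the header changed externally,
    // which is how serializeHandle detects that it is no longer the primary writer.
    void write(Object handle, long offset, InputStream data, int length) throws Exception;

    // Whole-content swap: the new header unconditionally supersedes the old one,
    // which is what truncateHandle needs after excluding inexistent chunks.
    void replace(Object handle, ByteArraySegment newContents) throws Exception;
}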
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
The class KeyValueTableTest, method testCreateListKeyValueTable.
/**
 * Smoke Test. Verify that the KeyValueTable can be created and listed.
 */
@Test
public void testCreateListKeyValueTable() {
    val kvt1 = newKeyValueTableName();
    boolean created = this.controller.createKeyValueTable(kvt1.getScope(), kvt1.getKeyValueTableName(), DEFAULT_CONFIG).join();
    Assert.assertTrue(created);
    val segments = this.controller.getCurrentSegmentsForKeyValueTable(kvt1.getScope(), kvt1.getKeyValueTableName()).join();
    Assert.assertEquals(DEFAULT_CONFIG.getPartitionCount(), segments.getSegments().size());
    for (val s : segments.getSegments()) {
        // We know there's nothing in these segments, but if the segments hadn't been created, this would throw
        // an exception.
        this.tableStore.get(s.getKVTScopedName(), Collections.singletonList(new ByteArraySegment(new byte[DEFAULT_CONFIG.getTotalKeyLength()])), TIMEOUT).join();
    }
    // Verify re-creation does not work.
    Assert.assertFalse(this.controller.createKeyValueTable(kvt1.getScope(), kvt1.getKeyValueTableName(), DEFAULT_CONFIG).join());
    // Creating a KVTable with 0 partitions should fail.
    val kvtZero = newKeyValueTableName();
    assertThrows(IllegalArgumentException.class, () -> this.controller.createKeyValueTable(kvtZero.getScope(), kvtZero.getKeyValueTableName(), KeyValueTableConfiguration.builder().partitionCount(0).build()).join());
    // Create 2 more KVTables.
    val kvt2 = newKeyValueTableName();
    created = this.controller.createKeyValueTable(kvt2.getScope(), kvt2.getKeyValueTableName(), DEFAULT_CONFIG).join();
    Assert.assertTrue(created);
    val kvt3 = newKeyValueTableName();
    created = this.controller.createKeyValueTable(kvt3.getScope(), kvt3.getKeyValueTableName(), DEFAULT_CONFIG).join();
    Assert.assertTrue(created);
    // Check list tables...
    AsyncIterator<KeyValueTableInfo> kvTablesIterator = this.controller.listKeyValueTables(SCOPE);
    Iterator<KeyValueTableInfo> iter = kvTablesIterator.asIterator();
    Map<String, Integer> countMap = new HashMap<>(3);
    while (iter.hasNext()) {
        KeyValueTableInfo kvtInfo = iter.next();
        if (kvtInfo.getScope().equals(SCOPE)) {
            if (countMap.containsKey(kvtInfo.getKeyValueTableName())) {
                countMap.put(kvtInfo.getKeyValueTableName(), countMap.get(kvtInfo.getKeyValueTableName()) + 1);
            } else {
                countMap.put(kvtInfo.getKeyValueTableName(), 1);
            }
        }
    }
    Assert.assertEquals(3, countMap.size());
    Assert.assertEquals(1, countMap.get(kvt1.getKeyValueTableName()).intValue());
    Assert.assertEquals(1, countMap.get(kvt2.getKeyValueTableName()).intValue());
    Assert.assertEquals(1, countMap.get(kvt3.getKeyValueTableName()).intValue());
}
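The probe read in the loop above builds its key as new ByteArraySegment(new byte[DEFAULT_CONFIG.getTotalKeyLength()]): Java zero-initializes arrays, so this is an all-zero key of exactly the configured serialized key length. A minimal sketch of that idiom (the 16-byte length is an arbitrary example, not the test's actual configuration):

import io.pravega.common.util.ByteArraySegment;

public class ProbeKeySketch {
    // Builds an all-zero key of the given fixed length, as the smoke test does.
    static ByteArraySegment zeroKey(int totalKeyLength) {
        return new ByteArraySegment(new byte[totalKeyLength]);
    }

    public static void main(String[] args) {
        ByteArraySegment key = zeroKey(16);
        System.out.println(key.getLength());    // 16
    }
}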
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
The class NoOpCacheTests, method testFunctionality.
@Test
public void testFunctionality() {
    @Cleanup val c = new NoOpCache();
    Assert.assertEquals(4096, c.getBlockAlignment());
    Assert.assertEquals(CacheLayout.MAX_ENTRY_SIZE, c.getMaxEntryLength());
    // All mutating operations are accepted but store nothing.
    val a = c.insert(new ByteArraySegment(new byte[1]));
    val b = c.replace(a, new ByteArraySegment(new byte[2]));
    c.append(a, 1, new ByteArraySegment(new byte[1]));
    Assert.assertEquals(c.getBlockAlignment() - 1, c.getAppendableLength(1));
    c.delete(b);
    // Nothing was stored, so reading back the inserted entry yields null.
    Assert.assertNull(c.get(a));
    val s = c.getState();
    // Every byte counter stays at zero; only the configured maximum is reported.
    Assert.assertEquals(0, s.getStoredBytes() + s.getUsedBytes() + s.getReservedBytes() + s.getAllocatedBytes());
    Assert.assertEquals(CacheLayout.MAX_TOTAL_SIZE, s.getMaxBytes());
}
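The getAppendableLength assertion is plain modular arithmetic: with a 4096-byte block alignment, an entry whose last block already holds 1 byte has 4095 bytes of room left. A sketch of that calculation for the non-boundary case (the formula is inferred from the assertion above, not taken from the CacheStorage contract):

public class AppendableLengthSketch {
    // Room remaining in the entry's last block, for currentLength not on a block boundary.
    static int appendableLength(int blockAlignment, int currentLength) {
        return blockAlignment - (currentLength % blockAlignment);
    }

    public static void main(String[] args) {
        System.out.println(appendableLength(4096, 1));    // 4095, matching the assertion
    }
}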
Use of io.pravega.common.util.ByteArraySegment in project pravega by pravega.
The class ContainerReadIndexTests, method testConcurrentReadTransactionStorageReadCacheFull.
/**
 * Tests the following scenario:
 * 1. Segment B has been merged into A.
 * 2. We are executing a read on Segment A over a portion where B was merged into A.
 * 3. Concurrently with 2, a read on Segment B that went to LTS (possibly from the same result as before) wants to
 * insert into the Cache, but the cache is full. The Cache Manager would want to clean up the cache.
 * <p>
 * We want to ensure that there is no deadlock in this scenario.
 */
@Test
public void testConcurrentReadTransactionStorageReadCacheFull() throws Exception {
    // Must equal the Cache Block size for easy eviction.
    val appendLength = 4 * 1024;
    val maxCacheSize = 2 * 1024 * 1024;
    // We set the policy's max size to a much higher value to avoid entering the "essential-only" state.
    CachePolicy cachePolicy = new CachePolicy(2 * maxCacheSize, Duration.ZERO, Duration.ofMillis(1));
    @Cleanup TestContext context = new TestContext(DEFAULT_CONFIG, cachePolicy, maxCacheSize);
    val rnd = new Random(0);

    // Create the parent segment and one transaction.
    long targetId = createSegment(0, context);
    long sourceId = createTransaction(1, context);
    val targetMetadata = context.metadata.getStreamSegmentMetadata(targetId);
    val sourceMetadata = context.metadata.getStreamSegmentMetadata(sourceId);
    createSegmentsInStorage(context);

    // Write something to the transaction, then immediately evict it.
    val append1 = new byte[appendLength];
    val append2 = new byte[appendLength];
    rnd.nextBytes(append1);
    rnd.nextBytes(append2);
    val allData = BufferView.builder().add(new ByteArraySegment(append1)).add(new ByteArraySegment(append2)).build();
    appendSingleWrite(sourceId, new ByteArraySegment(append1), context);
    sourceMetadata.setStorageLength(sourceMetadata.getLength());
    // Increment the generation.
    context.cacheManager.applyCachePolicy();

    // Write a second append to the transaction, and do not evict it.
    appendSingleWrite(sourceId, new ByteArraySegment(append2), context);
    context.storage.openWrite(sourceMetadata.getName())
            .thenCompose(handle -> context.storage.write(handle, 0, allData.getReader(), allData.getLength(), TIMEOUT)).join();

    // Seal & begin-merge the transaction (do not seal in storage).
    sourceMetadata.markSealed();
    targetMetadata.setLength(sourceMetadata.getLength());
    context.readIndex.beginMerge(targetId, 0L, sourceId);
    sourceMetadata.markMerged();
    sourceMetadata.markDeleted();

    // At this point, the first append in the transaction should be evicted, while the second one should still be there.
    @Cleanup val rr = context.readIndex.read(targetId, 0, (int) targetMetadata.getLength(), TIMEOUT);
    @Cleanup val cacheCleanup = new AutoCloseObject();
    @Cleanup("release") val insertingInCache = new ReusableLatch();
    @Cleanup("release") val finishInsertingInCache = new ReusableLatch();
    context.cacheStorage.beforeInsert = () -> {
        // Prevent a stack overflow.
        context.cacheStorage.beforeInsert = null;
        // Fill up the cache with garbage - this will cause an unrecoverable Cache Full event (which is what we want).
        int toFill = (int) (context.cacheStorage.getState().getMaxBytes() - context.cacheStorage.getState().getUsedBytes());
        int address = context.cacheStorage.insert(new ByteArraySegment(new byte[toFill]));
        cacheCleanup.onClose = () -> context.cacheStorage.delete(address);
        // Notify that we have inserted.
        insertingInCache.release();
        // Block (while holding locks) until notified.
        Exceptions.handleInterrupted(finishInsertingInCache::await);
    };

    // Begin the read process.
    // The first read must be a storage read.
    val storageRead = rr.next();
    Assert.assertEquals(ReadResultEntryType.Storage, storageRead.getType());
    storageRead.requestContent(TIMEOUT);
    // Copy the contents out; this is not affected by our cache-insert block.
    byte[] readData1 = storageRead.getContent().join().slice(0, appendLength).getCopy();

    // Wait for the insert callback to be blocked on our latch.
    insertingInCache.await();
    // Continue with the read. We are now expecting a Cache Read. Do it asynchronously (new thread).
    val cacheReadFuture = CompletableFuture.supplyAsync(rr::next, executorService());
    // Notify the cache insert that it's time to release now.
    finishInsertingInCache.release();
    // Wait for the async read to finish and grab its contents.
    val cacheRead = cacheReadFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals(ReadResultEntryType.Cache, cacheRead.getType());
    byte[] readData2 = cacheRead.getContent().join().slice(0, appendLength).getCopy();

    // Validate the data was read correctly.
    val readData = BufferView.builder().add(new ByteArraySegment(readData1)).add(new ByteArraySegment(readData2)).build();
    Assert.assertEquals("Unexpected data read.", allData, readData);
}
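Both the expected and actual data above are assembled with BufferView.builder(), which chains the ByteArraySegment chunks into one logical view without copying them. A minimal sketch of that composition, assuming only the calls used in the tests above (builder(), add(), build(), getLength(), getCopy()):

import io.pravega.common.util.BufferView;
import io.pravega.common.util.ByteArraySegment;

public class BufferViewComposeSketch {
    public static void main(String[] args) {
        byte[] first = {1, 2};
        byte[] second = {3, 4};
        BufferView all = BufferView.builder()
                .add(new ByteArraySegment(first))
                .add(new ByteArraySegment(second))
                .build();
        System.out.println(all.getLength());    // 4: the views are chained, not copied
        byte[] copy = all.getCopy();            // materializes {1, 2, 3, 4} only on demand
        System.out.println(copy.length);
    }
}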