Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.
The class LargeRecordHandlerITCase, method testRecordHandlerCompositeKey.
@Test
public void testRecordHandlerCompositeKey() {
    final int PAGE_SIZE = 4 * 1024;
    final int NUM_PAGES = 1000;
    final int NUM_RECORDS = 10;
    try (final IOManager ioMan = new IOManagerAsync()) {
        final MemoryManager memMan =
                MemoryManagerBuilder.newBuilder()
                        .setMemorySize(NUM_PAGES * PAGE_SIZE)
                        .setPageSize(PAGE_SIZE)
                        .build();
        final AbstractInvokable owner = new DummyInvokable();
        final List<MemorySegment> initialMemory = memMan.allocatePages(owner, 6);
        final List<MemorySegment> sortMemory = memMan.allocatePages(owner, NUM_PAGES - 6);
        final TypeInformation<?>[] types =
                new TypeInformation<?>[] {
                    BasicTypeInfo.LONG_TYPE_INFO,
                    new ValueTypeInfo<SomeVeryLongValue>(SomeVeryLongValue.class),
                    BasicTypeInfo.BYTE_TYPE_INFO
                };
        final TupleTypeInfo<Tuple3<Long, SomeVeryLongValue, Byte>> typeInfo =
                new TupleTypeInfo<Tuple3<Long, SomeVeryLongValue, Byte>>(types);
        final TypeSerializer<Tuple3<Long, SomeVeryLongValue, Byte>> serializer =
                typeInfo.createSerializer(new ExecutionConfig());
        final TypeComparator<Tuple3<Long, SomeVeryLongValue, Byte>> comparator =
                typeInfo.createComparator(
                        new int[] {2, 0}, new boolean[] {true, true}, 0, new ExecutionConfig());
        LargeRecordHandler<Tuple3<Long, SomeVeryLongValue, Byte>> handler =
                new LargeRecordHandler<Tuple3<Long, SomeVeryLongValue, Byte>>(
                        serializer,
                        comparator,
                        ioMan,
                        memMan,
                        initialMemory,
                        owner,
                        128,
                        owner.getExecutionConfig());
        assertFalse(handler.hasData());
        // add the test data
        Random rnd = new Random();
        for (int i = 0; i < NUM_RECORDS; i++) {
            long val = rnd.nextLong();
            handler.addRecord(
                    new Tuple3<Long, SomeVeryLongValue, Byte>(
                            val, new SomeVeryLongValue((int) val), (byte) val));
            assertTrue(handler.hasData());
        }
        MutableObjectIterator<Tuple3<Long, SomeVeryLongValue, Byte>> sorted =
                handler.finishWriteAndSortKeys(sortMemory);
        // once sorting has started, the handler must reject further records
        try {
            handler.addRecord(new Tuple3<Long, SomeVeryLongValue, Byte>(92L, null, (byte) 1));
            fail("should throw an exception");
        } catch (IllegalStateException e) {
            // expected
        }
        Tuple3<Long, SomeVeryLongValue, Byte> previous = null;
        Tuple3<Long, SomeVeryLongValue, Byte> next;
        while ((next = sorted.next(null)) != null) {
            // key and value must be equal
            assertTrue(next.f0.intValue() == next.f1.val());
            assertTrue(next.f0.byteValue() == next.f2);
            // order must be correct
            if (previous != null) {
                assertTrue(previous.f2 <= next.f2);
                assertTrue(
                        previous.f2.byteValue() != next.f2.byteValue() || previous.f0 <= next.f0);
            }
            previous = next;
        }
        handler.close();
        assertFalse(handler.hasData());
        handler.close();
        // a closed handler must reject new records as well
        try {
            handler.addRecord(new Tuple3<Long, SomeVeryLongValue, Byte>(92L, null, (byte) 1));
            fail("should throw an exception");
        } catch (IllegalStateException e) {
            // expected
        }
        assertTrue(memMan.verifyEmpty());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
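The closing verifyEmpty() check passes only because the LargeRecordHandler hands every page it borrowed back to the MemoryManager. A minimal sketch of that allocate/release/verify contract in isolation, assuming the same MemoryManagerBuilder setup and DummyInvokable owner as the test above (the method name is hypothetical):

@Test
public void pagesMustComeBack() throws Exception {
    // Same budget as the test above: NUM_PAGES * PAGE_SIZE.
    MemoryManager memMan =
            MemoryManagerBuilder.newBuilder()
                    .setMemorySize(1000 * 4 * 1024)
                    .setPageSize(4 * 1024)
                    .build();
    AbstractInvokable owner = new DummyInvokable();

    List<MemorySegment> pages = memMan.allocatePages(owner, 6);
    // ... use the pages ...
    memMan.release(pages); // hand every borrowed page back
    assertTrue(memMan.verifyEmpty()); // fails if any page is still allocated
}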
Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.
The class InPlaceMutableHashTableTest, method testHashTableGrowthWithInsertOrReplace.
/**
 * This test validates that records are not lost via "insertOrReplace()" as in bug [FLINK-2361].
 *
 * <p>This has to be duplicated in InPlaceMutableHashTableTest and CompactingHashTableTest
 * because of the different constructor calls.
 */
@Test
public void testHashTableGrowthWithInsertOrReplace() {
    try {
        final int numElements = 1000000;
        List<MemorySegment> memory = getMemory(1000, 32 * 1024);
        InPlaceMutableHashTable<Tuple2<Long, String>> table =
                new InPlaceMutableHashTable<Tuple2<Long, String>>(serializer, comparator, memory);
        table.open();
        for (long i = 0; i < numElements; i++) {
            table.insertOrReplaceRecord(Tuple2.of(i, String.valueOf(i)));
        }
        // make sure that all elements are contained via the entry iterator
        {
            BitSet bitSet = new BitSet(numElements);
            MutableObjectIterator<Tuple2<Long, String>> iter = table.getEntryIterator();
            Tuple2<Long, String> next;
            while ((next = iter.next()) != null) {
                assertNotNull(next.f0);
                assertNotNull(next.f1);
                assertEquals(next.f0.longValue(), Long.parseLong(next.f1));
                bitSet.set(next.f0.intValue());
            }
            assertEquals(numElements, bitSet.cardinality());
        }
        // make sure all entries are contained via the prober
        {
            InPlaceMutableHashTable<Tuple2<Long, String>>.HashTableProber<Long> prober =
                    table.getProber(probeComparator, pairComparator);
            Tuple2<Long, String> reuse = new Tuple2<>();
            for (long i = 0; i < numElements; i++) {
                assertNotNull(prober.getMatchFor(i, reuse));
                assertNull(prober.getMatchFor(i + numElements, reuse));
            }
        }
        table.close();
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
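The getMemory(numPages, pageSize) call at the top is a helper of the test class that this excerpt does not show. A plausible implementation (an assumption, not the verbatim Flink helper) simply allocates unpooled segments of the requested page size:

// Assumed shape of the getMemory helper used above: numPages standalone
// segments of pageSize bytes each, outside of any MemoryManager pooling.
private static List<MemorySegment> getMemory(int numPages, int pageSize) {
    List<MemorySegment> memory = new ArrayList<>(numPages);
    for (int i = 0; i < numPages; i++) {
        memory.add(MemorySegmentFactory.allocateUnpooledSegment(pageSize));
    }
    return memory;
}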
Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.
The class SkipListUtils, method helpGetNodeLatestVersion.
/**
 * Returns the newest version of the value for the node.
 *
 * @param node the node.
 * @param spaceAllocator the space allocator.
 * @return the latest version of the value of the node.
 */
static int helpGetNodeLatestVersion(long node, Allocator spaceAllocator) {
    Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(node));
    int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(node);
    MemorySegment segment = chunk.getMemorySegment(offsetInChunk);
    int offsetInByteBuffer = chunk.getOffsetInSegment(offsetInChunk);
    long valuePointer = getValuePointer(segment, offsetInByteBuffer);
    return helpGetValueVersion(valuePointer, spaceAllocator);
}
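The first four lines are the standard address-resolution dance of this class: a long node pointer packs a chunk id and an offset within that chunk, and the chunk then maps that offset to a concrete MemorySegment plus an offset inside it. Factored out as a sketch (SegmentAndOffset is a hypothetical holder type, not part of Flink's API):

// Hypothetical helper illustrating the pointer-resolution pattern used above.
static final class SegmentAndOffset {
    final MemorySegment segment;
    final int offsetInSegment;

    SegmentAndOffset(MemorySegment segment, int offsetInSegment) {
        this.segment = segment;
        this.offsetInSegment = offsetInSegment;
    }
}

static SegmentAndOffset resolve(long address, Allocator spaceAllocator) {
    Chunk chunk = spaceAllocator.getChunkById(SpaceUtils.getChunkIdByAddress(address));
    int offsetInChunk = SpaceUtils.getChunkOffsetByAddress(address);
    return new SegmentAndOffset(
            chunk.getMemorySegment(offsetInChunk), chunk.getOffsetInSegment(offsetInChunk));
}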
Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.
The class CopyOnWriteSkipListStateMapBasicOpTest, method testNamespaceNodeIteratorIllegalNextInvocation.
/**
 * Tests that the state map's namespace node iterator rejects a {@code next()} call
 * once it is exhausted.
 */
@Test
public void testNamespaceNodeIteratorIllegalNextInvocation() {
    SkipListKeySerializer<Integer, Long> skipListKeySerializer =
            new SkipListKeySerializer<>(IntSerializer.INSTANCE, LongSerializer.INSTANCE);
    byte[] namespaceBytes = skipListKeySerializer.serializeNamespace(namespace);
    MemorySegment namespaceSegment = MemorySegmentFactory.wrap(namespaceBytes);
    Iterator<Long> iterator =
            stateMap.new NamespaceNodeIterator(namespaceSegment, 0, namespaceBytes.length);
    while (iterator.hasNext()) {
        iterator.next();
    }
    try {
        iterator.next();
        fail("Should have thrown NoSuchElementException.");
    } catch (NoSuchElementException e) {
        // expected
    }
}
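The try/fail/catch idiom above is the classic JUnit 4 way of asserting an exception. On JUnit 4.13+ (or with JUnit 5's Assertions) the same check can be written more compactly; a sketch of the equivalent tail of the test:

// Equivalent ending with org.junit.Assert.assertThrows (JUnit 4.13+):
while (iterator.hasNext()) {
    iterator.next();
}
assertThrows(NoSuchElementException.class, iterator::next);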
Use of org.apache.flink.core.memory.MemorySegment in project flink by apache.
The class CopyOnWriteSkipListStateMapBasicOpTest, method testPutAndGetNodeWithNoneZeroOffset.
/**
 * This tests the internal capability of using a partial {@link ByteBuffer}, making sure the
 * internal methods work when putting/getting state with a key stored at a non-zero offset of
 * the underlying buffer.
 */
@Test
public void testPutAndGetNodeWithNoneZeroOffset() {
    final int key = 10;
    final long namespace = 0L;
    final String valueString = "test";
    SkipListKeySerializer<Integer, Long> skipListKeySerializer =
            new SkipListKeySerializer<>(IntSerializer.INSTANCE, LongSerializer.INSTANCE);
    SkipListValueSerializer<String> skipListValueSerializer =
            new SkipListValueSerializer<>(StringSerializer.INSTANCE);
    byte[] keyBytes = skipListKeySerializer.serialize(key, namespace);
    byte[] constructedKeyBytes = new byte[keyBytes.length + 1];
    System.arraycopy(keyBytes, 0, constructedKeyBytes, 1, keyBytes.length);
    MemorySegment keySegment = MemorySegmentFactory.wrap(constructedKeyBytes);
    int keyLen = keyBytes.length;
    byte[] value = skipListValueSerializer.serialize(valueString);
    stateMap.putValue(keySegment, 1, keyLen, value, false);
    String state = stateMap.getNode(keySegment, 1, keyLen);
    assertThat(state, is(valueString));
}
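The arraycopy into position 1 of a one-byte-larger array is exactly what forces the non-zero-offset code path. The same trick in isolation, using only MemorySegment's own read API (the JUnit-style assertion harness around it is assumed):

byte[] payload = {42, 43, 44};
byte[] shifted = new byte[payload.length + 1]; // one spare byte at index 0
System.arraycopy(payload, 0, shifted, 1, payload.length);

MemorySegment segment = MemorySegmentFactory.wrap(shifted);
byte[] readBack = new byte[payload.length];
segment.get(1, readBack); // read starting at offset 1, not 0
assertArrayEquals(payload, readBack);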