Usage example of com.hazelcast.jet.impl.util.AsyncSnapshotWriterImpl.SnapshotDataKey in the hazelcast/hazelcast project.
Source: class AsyncSnapshotWriterImplTest, method assertTargetMapEntry.
/**
 * Asserts — retrying for up to 3 seconds — that the snapshot map eventually holds a chunk
 * for the given entry key and chunk sequence whose stored byte length equals
 * {@code entryLength} plus the length of the value terminator appended by the writer.
 * NOTE(review): the snapshot id {@code 1} and vertex name {@code "vertex"} appear to be
 * fixture constants shared with the tests — confirm against the test setup.
 */
private void assertTargetMapEntry(String key, int sequence, int entryLength) {
    // Resolve the writer's partition key for the entry key's partition.
    SnapshotDataKey snapshotKey = new SnapshotDataKey(
            writer.partitionKey(partitionService.getPartitionId(key)), 1, "vertex", sequence);
    // The writer appends a terminator to each chunk, so account for it in the expected size.
    int expectedLength = entryLength + writer.valueTerminator.length;
    assertTrueEventually(() -> assertEquals(expectedLength, map.get(snapshotKey).length), 3);
}
Usage example of com.hazelcast.jet.impl.util.AsyncSnapshotWriterImpl.SnapshotDataKey in the hazelcast/hazelcast project.
Source: class AsyncSnapshotWriterImplTest, method when_singleLargeEntry_then_flushedImmediatelyAndDeserializesCorrectly.
/**
 * A single entry larger than one usable chunk must be flushed immediately as exactly one
 * chunk, and the bytes stored in the target map must deserialize back to the original
 * key, value, and the terminator marker.
 */
@Test
public void when_singleLargeEntry_then_flushedImmediatelyAndDeserializesCorrectly() throws IOException {
    // When: build a value of 128 'a' characters so the serialized entry exceeds one chunk
    String key = "k";
    String value = generate(() -> "a").limit(128).collect(joining());
    Entry<Data, Data> largeEntry = entry(serialize(key), serialize(value));
    int serializedSize = serializedLength(largeEntry);
    assertTrue("entry not longer than usable chunk size", serializedSize > writer.usableChunkCapacity);
    assertTrue(writer.offer(largeEntry));

    // Then: flushed immediately as a single chunk holding a single key
    assertTargetMapEntry(key, 0, serializedSize);
    assertEquals(1, writer.getTotalChunks());
    assertEquals(1, writer.getTotalKeys());

    // Then2: the stored chunk deserializes to key, value, and the terminator.
    // NOTE(review): the extra INT_SIZE_IN_BYTES in the payload accounting presumably
    // covers a length/header field written per chunk — confirm against the writer impl.
    int partitionKey = writer.partitionKey(partitionService.getPartitionId(key));
    byte[] storedBytes = map.get(new SnapshotDataKey(partitionKey, 1, "vertex", 0));
    assertEquals(storedBytes.length + Bits.INT_SIZE_IN_BYTES, writer.getTotalPayloadBytes());
    BufferObjectDataInput input = serializationService.createObjectDataInput(storedBytes);
    assertEquals(key, input.readObject());
    assertEquals(value, input.readObject());
    assertEquals(SnapshotDataValueTerminator.INSTANCE, input.readObject());
}
Aggregations