Usage example of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in the Apache Flink project:
class ChannelStateCheckpointWriterTest, method testSmallFilesNotWritten.
@Test
@SuppressWarnings("ConstantConditions")
public void testSmallFilesNotWritten() throws Exception {
    // A state payload smaller than the stream factory's file-size threshold must be
    // kept in memory, so neither checkpoint directory should end up containing files.
    int fileSizeThreshold = 100;
    File checkpointsDir = temporaryFolder.newFolder("checkpointsDir");
    File sharedStateDir = temporaryFolder.newFolder("sharedStateDir");
    FsCheckpointStreamFactory streamFactory =
            new FsCheckpointStreamFactory(
                    getSharedInstance(),
                    fromLocalFile(checkpointsDir),
                    fromLocalFile(sharedStateDir),
                    fileSizeThreshold,
                    fileSizeThreshold);
    ChannelStateWriteResult writeResult = new ChannelStateWriteResult();
    ChannelStateCheckpointWriter checkpointWriter =
            createWriter(writeResult, streamFactory.createCheckpointStateOutputStream(EXCLUSIVE));
    // Buffer of half the threshold size: definitely below the spill-to-file limit.
    NetworkBuffer smallBuffer =
            new NetworkBuffer(
                    MemorySegmentFactory.allocateUnpooledSegment(fileSizeThreshold / 2),
                    FreeingBufferRecycler.INSTANCE);
    checkpointWriter.writeInput(new InputChannelInfo(1, 2), smallBuffer);
    checkpointWriter.completeOutput();
    checkpointWriter.completeInput();
    assertTrue(writeResult.isDone());
    assertEquals(0, checkpointsDir.list().length);
    assertEquals(0, sharedStateDir.list().length);
}
Usage example of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in the Apache Flink project:
class ChannelStateCheckpointWriterTest, method write.
/**
 * Wraps the given bytes in a freshly allocated {@link NetworkBuffer} and passes it to the
 * writer as input data for the given channel.
 */
private void write(ChannelStateCheckpointWriter writer, InputChannelInfo channelInfo, byte[] data) throws Exception {
    MemorySegment wrapped = wrap(data);
    NetworkBuffer networkBuffer =
            new NetworkBuffer(
                    wrapped, FreeingBufferRecycler.INSTANCE, Buffer.DataType.DATA_BUFFER, wrapped.size());
    writer.writeInput(channelInfo, networkBuffer);
}
Usage example of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in the Apache Flink project:
class ChannelStateSerializerImplTest, method readAndCheck.
/**
 * Reads one serialized chunk from the stream and asserts that its declared length and
 * payload both match {@code data}.
 */
private void readAndCheck(byte[] data, ChannelStateSerializerImpl serializer, ByteArrayInputStream is) throws IOException {
    serializer.readHeader(is);
    int declaredLength = serializer.readLength(is);
    assertEquals(data.length, declaredLength);
    NetworkBuffer target =
            new NetworkBuffer(
                    MemorySegmentFactory.allocateUnpooledSegment(data.length),
                    FreeingBufferRecycler.INSTANCE);
    try {
        int bytesRead = serializer.readData(is, wrap(target), declaredLength);
        assertEquals(declaredLength, bytesRead);
        assertArrayEquals(data, readBytes(target));
    } finally {
        // The buffer owns an unpooled segment; release it even when an assertion fails.
        target.release();
    }
}
Usage example of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in the Apache Flink project:
class SortMergeResultPartition, method writeLargeRecord.
/**
 * Spills the large record into the target {@link PartitionedFile} as a separate data region.
 * The record is sliced into network-buffer-sized chunks; when the pool of write segments is
 * exhausted, the pending chunks are flushed to disk and the segments are reused.
 */
private void writeLargeRecord(ByteBuffer record, int targetSubpartition, DataType dataType, boolean isBroadcast) throws IOException {
    // For the hash-based data buffer implementation, a large record would be appended to
    // the data buffer directly and spilled to multiple data regions instead.
    checkState(!useHashBuffer, "No buffers available for writing.");
    fileWriter.startNewRegion(isBroadcast);
    List<BufferWithChannel> pendingWrites = new ArrayList<>();
    Queue<MemorySegment> freeSegments = new ArrayDeque<>(writeSegments);
    while (record.hasRemaining()) {
        if (freeSegments.isEmpty()) {
            // Every write segment is in use: flush the pending chunks, then reuse the segments.
            fileWriter.writeBuffers(pendingWrites);
            pendingWrites.clear();
            freeSegments = new ArrayDeque<>(writeSegments);
        }
        int chunkSize = Math.min(record.remaining(), networkBufferSize);
        MemorySegment segment = checkNotNull(freeSegments.poll());
        segment.put(0, record, chunkSize);
        // No-op recycler: the segments belong to writeSegments and are recycled by this loop.
        NetworkBuffer chunk = new NetworkBuffer(segment, (buf) -> {
        }, dataType, chunkSize);
        BufferWithChannel chunkWithChannel = new BufferWithChannel(chunk, targetSubpartition);
        updateStatistics(chunk, isBroadcast);
        pendingWrites.add(compressBufferIfPossible(chunkWithChannel));
    }
    fileWriter.writeBuffers(pendingWrites);
}
Usage example of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in the Apache Flink project:
class HashBasedDataBuffer, method writeEvent.
/**
 * Appends an event to the target channel's queue, first sealing any partially filled
 * buffer builder so the event retains its position relative to the preceding data.
 */
private void writeEvent(ByteBuffer source, int targetChannel, Buffer.DataType dataType) {
    BufferBuilder pendingBuilder = builders[targetChannel];
    if (pendingBuilder != null) {
        // Seal the open builder and enqueue its contents before the event.
        pendingBuilder.finish();
        buffers[targetChannel].add(pendingBuilder.createBufferConsumerFromBeginning());
        pendingBuilder.close();
        builders[targetChannel] = null;
    }
    // The event is copied into its own unpooled off-heap segment, sized exactly to the payload.
    MemorySegment eventSegment = MemorySegmentFactory.allocateUnpooledOffHeapMemory(source.remaining());
    eventSegment.put(0, source, eventSegment.size());
    buffers[targetChannel].add(
            new BufferConsumer(
                    new NetworkBuffer(eventSegment, FreeingBufferRecycler.INSTANCE, dataType),
                    eventSegment.size()));
}
Aggregations