Use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
From the class ControlRecordUtilsTest, the method testDeserializeRecord:
private void testDeserializeRecord(ControlRecordType controlRecordType) {
    final int leaderId = 1;
    final int voterId = 2;
    LeaderChangeMessage data = new LeaderChangeMessage()
        .setLeaderId(leaderId)
        .setVoters(Collections.singletonList(new Voter().setVoterId(voterId)));
    // Serialize the message into the value buffer through a ByteBufferAccessor, then flip it for reading.
    ByteBuffer valueBuffer = ByteBuffer.allocate(256);
    data.write(new ByteBufferAccessor(valueBuffer), new ObjectSerializationCache(), data.highestSupportedVersion());
    valueBuffer.flip();
    // The 4-byte control record key: a 2-byte version (0) followed by the 2-byte control record type.
    byte[] keyData = new byte[] { 0, 0, 0, (byte) controlRecordType.type };
    DefaultRecord record = new DefaultRecord(256, (byte) 0, 0, 0L, 0, ByteBuffer.wrap(keyData), valueBuffer, null);
    LeaderChangeMessage deserializedData = ControlRecordUtils.deserializeLeaderChangeMessage(record);
    assertEquals(leaderId, deserializedData.leaderId());
    assertEquals(Collections.singletonList(new Voter().setVoterId(voterId)), deserializedData.voters());
}
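For comparison, a minimal sketch (not taken from the Kafka sources) of the same write/read round trip done directly against the message, sizing the buffer from the message itself via ObjectSerializationCache instead of allocating a fixed 256 bytes; the helper name roundTripLeaderChange is illustrative.

// Illustrative helper, assuming the standard generated-message size/write/read API.
private static LeaderChangeMessage roundTripLeaderChange(LeaderChangeMessage data, short version) {
    ObjectSerializationCache cache = new ObjectSerializationCache();
    ByteBuffer buffer = ByteBuffer.allocate(data.size(cache, version));
    data.write(new ByteBufferAccessor(buffer), cache, version);
    buffer.flip(); // switch the buffer from writing to reading
    LeaderChangeMessage copy = new LeaderChangeMessage();
    copy.read(new ByteBufferAccessor(buffer), version);
    return copy;
}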
Use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
From the class SimpleExampleMessageTest, the method shouldRoundTripFieldThroughBufferWithNullable:
@Test
public void shouldRoundTripFieldThroughBufferWithNullable() {
    final Uuid uuid = Uuid.randomUuid();
    final ByteBuffer buf1 = ByteBuffer.wrap(new byte[] { 1, 2, 3 });
    final ByteBuffer buf2 = ByteBuffer.wrap(new byte[] { 4, 5, 6 });
    final SimpleExampleMessageData out = new SimpleExampleMessageData();
    out.setProcessId(uuid);
    out.setZeroCopyByteBuffer(buf1);
    out.setNullableZeroCopyByteBuffer(buf2);
    final ByteBuffer buffer = MessageUtil.toByteBuffer(out, (short) 1);
    final SimpleExampleMessageData in = new SimpleExampleMessageData();
    in.read(new ByteBufferAccessor(buffer), (short) 1);
    buf1.rewind();
    buf2.rewind();
    assertEquals(uuid, in.processId());
    assertEquals(buf1, in.zeroCopyByteBuffer());
    assertEquals(buf2, in.nullableZeroCopyByteBuffer());
}
Use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
From the class SimpleExampleMessageTest, the method deserialize:
private SimpleExampleMessageData deserialize(ByteBuffer buf, short version) {
    SimpleExampleMessageData message = new SimpleExampleMessageData();
    message.read(new ByteBufferAccessor(buf.duplicate()), version);
    return message;
}
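A hedged usage sketch for the helper above; the message contents and assertion are illustrative, not part of the Kafka test. Because the helper reads through buf.duplicate(), the caller's buffer position is left untouched, so the same buffer can be deserialized again.

// Illustrative usage of the deserialize(...) helper from within the same test class.
SimpleExampleMessageData original = new SimpleExampleMessageData().setProcessId(Uuid.randomUuid());
ByteBuffer serialized = MessageUtil.toByteBuffer(original, (short) 1);
SimpleExampleMessageData copy = deserialize(serialized, (short) 1);
assertEquals(original.processId(), copy.processId());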
Use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
From the class SimpleExampleMessageTest, the method shouldRoundTripFieldThroughBuffer:
@Test
public void shouldRoundTripFieldThroughBuffer() {
    final Uuid uuid = Uuid.randomUuid();
    final ByteBuffer buf = ByteBuffer.wrap(new byte[] { 1, 2, 3 });
    final SimpleExampleMessageData out = new SimpleExampleMessageData();
    out.setProcessId(uuid);
    out.setZeroCopyByteBuffer(buf);
    final ByteBuffer buffer = MessageUtil.toByteBuffer(out, (short) 1);
    final SimpleExampleMessageData in = new SimpleExampleMessageData();
    in.read(new ByteBufferAccessor(buffer), (short) 1);
    buf.rewind();
    assertEquals(uuid, in.processId());
    assertEquals(buf, in.zeroCopyByteBuffer());
    // The nullable field was never set, so it reads back as the empty buffer rather than null.
    assertEquals(ByteUtils.EMPTY_BUF, in.nullableZeroCopyByteBuffer());
}
Use of org.apache.kafka.common.protocol.ByteBufferAccessor in project kafka by apache.
From the class ConsumerProtocol, the method deserializeAssignment:
public static Assignment deserializeAssignment(final ByteBuffer buffer, short version) {
    version = checkAssignmentVersion(version);
    try {
        ConsumerProtocolAssignment data = new ConsumerProtocolAssignment(new ByteBufferAccessor(buffer), version);
        // Flatten the per-topic partition lists into TopicPartition instances.
        List<TopicPartition> assignedPartitions = new ArrayList<>();
        for (ConsumerProtocolAssignment.TopicPartition tp : data.assignedPartitions()) {
            for (Integer partition : tp.partitions()) {
                assignedPartitions.add(new TopicPartition(tp.topic(), partition));
            }
        }
        return new Assignment(assignedPartitions, data.userData() != null ? data.userData().duplicate() : null);
    } catch (BufferUnderflowException e) {
        throw new SchemaException("Buffer underflow while parsing consumer protocol's assignment", e);
    }
}
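A hedged round-trip sketch for the method above. It assumes the companion ConsumerProtocol.serializeAssignment overload that takes an explicit version and the generated HIGHEST_SUPPORTED_VERSION constant; the topic name and user data are made up for the example.

// Illustrative round trip: serialize an Assignment, then parse it back at the same version.
short version = ConsumerProtocolAssignment.HIGHEST_SUPPORTED_VERSION;
Assignment original = new Assignment(
    Collections.singletonList(new TopicPartition("topic", 0)),
    ByteBuffer.wrap(new byte[] { 42 }));
ByteBuffer buffer = ConsumerProtocol.serializeAssignment(original, version);
Assignment copy = ConsumerProtocol.deserializeAssignment(buffer, version);
assertEquals(original.partitions(), copy.partitions());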