Example usage of org.apache.kafka.common.protocol.types.Struct in the Apache Kafka project.
From the class ConnectProtocol, method serializeAssignment:
/**
 * Serializes an {@code Assignment} into a ByteBuffer: the Connect protocol
 * header (version 0) followed by the v0 assignment struct. The returned
 * buffer is flipped and ready for reading.
 */
public static ByteBuffer serializeAssignment(Assignment assignment) {
// Top-level assignment struct carries error code, leader id/url and config offset.
Struct assignmentStruct = new Struct(ASSIGNMENT_V0);
assignmentStruct.set(ERROR_KEY_NAME, assignment.error());
assignmentStruct.set(LEADER_KEY_NAME, assignment.leader());
assignmentStruct.set(LEADER_URL_KEY_NAME, assignment.leaderUrl());
assignmentStruct.set(CONFIG_OFFSET_KEY_NAME, assignment.offset());
// One nested struct per connector, holding the connector name and its task ids.
List<Struct> connectorStructs = new ArrayList<>();
for (Map.Entry<String, List<Integer>> entry : assignment.asMap().entrySet()) {
Struct connectorStruct = new Struct(CONNECTOR_ASSIGNMENT_V0);
connectorStruct.set(CONNECTOR_KEY_NAME, entry.getKey());
connectorStruct.set(TASKS_KEY_NAME, entry.getValue().toArray());
connectorStructs.add(connectorStruct);
}
assignmentStruct.set(ASSIGNMENT_KEY_NAME, connectorStructs.toArray());
// Size the buffer exactly for header + payload, write both, then flip for reading.
ByteBuffer buffer = ByteBuffer.allocate(CONNECT_PROTOCOL_HEADER_V0.sizeOf() + ASSIGNMENT_V0.sizeOf(assignmentStruct));
CONNECT_PROTOCOL_HEADER_V0.writeTo(buffer);
ASSIGNMENT_V0.write(buffer, assignmentStruct);
buffer.flip();
return buffer;
}
Example usage of org.apache.kafka.common.protocol.types.Struct in the Apache Kafka project.
From the class RequestResponseTest, method checkRequest:
/**
 * Round-trips a request: serialize, deserialize, then serialize again.
 * Equality and hashCode are deliberately not asserted because any request
 * containing a HashMap is likely to fail such comparisons.
 */
private void checkRequest(AbstractRequest req) throws Exception {
Struct serialized = req.toStruct();
AbstractRequest roundTripped = (AbstractRequest) deserialize(req, serialized, req.version());
// Re-serializing verifies the deserialized form is itself serializable.
roundTripped.toStruct();
}
Example usage of org.apache.kafka.common.protocol.types.Struct in the Apache Kafka project.
From the class RequestResponseTest, method testRequestHeaderWithNullClientId:
@Test
public void testRequestHeaderWithNullClientId() {
// A header built with a null client id should still round-trip through
// serialization, with the client id defaulted to the empty string.
RequestHeader original = new RequestHeader((short) 10, (short) 1, null, 10);
Struct originalStruct = original.toStruct();
ByteBuffer serialized = toBuffer(originalStruct);
RequestHeader parsed = RequestHeader.parse(serialized);
assertEquals(original.apiKey(), parsed.apiKey());
assertEquals(original.apiVersion(), parsed.apiVersion());
assertEquals(original.correlationId(), parsed.correlationId());
// null is defaulted to ""
assertEquals("", parsed.clientId());
}
Example usage of org.apache.kafka.common.protocol.types.Struct in the Apache Kafka project.
From the class ConsumerProtocolTest, method deserializeNewAssignmentVersion:
@Test
public void deserializeNewAssignmentVersion() {
// A future schema version that appends an unknown field ("foo") must still
// be parseable by the current deserializer.
short newVersion = 100;
Schema futureSchema = new Schema(new Field(ConsumerProtocol.TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(ConsumerProtocol.TOPIC_ASSIGNMENT_V0)), new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES), new Field("foo", Type.STRING));
// Single topic assignment: topic "foo", partition 1.
Struct topicAssignment = new Struct(ConsumerProtocol.TOPIC_ASSIGNMENT_V0).set(ConsumerProtocol.TOPIC_KEY_NAME, "foo").set(ConsumerProtocol.PARTITIONS_KEY_NAME, new Object[] { 1 });
Struct futureAssignment = new Struct(futureSchema);
futureAssignment.set(ConsumerProtocol.TOPIC_PARTITIONS_KEY_NAME, new Object[] { topicAssignment });
futureAssignment.set(ConsumerProtocol.USER_DATA_KEY_NAME, ByteBuffer.wrap(new byte[0]));
futureAssignment.set("foo", "bar");
Struct header = new Struct(ConsumerProtocol.CONSUMER_PROTOCOL_HEADER_SCHEMA);
header.set(ConsumerProtocol.VERSION_KEY_NAME, newVersion);
// Header followed by payload, then flip so the deserializer can read it.
ByteBuffer buffer = ByteBuffer.allocate(futureAssignment.sizeOf() + header.sizeOf());
header.writeTo(buffer);
futureAssignment.writeTo(buffer);
buffer.flip();
PartitionAssignor.Assignment assignment = ConsumerProtocol.deserializeAssignment(buffer);
assertEquals(toSet(Arrays.asList(new TopicPartition("foo", 1))), toSet(assignment.partitions()));
}
Example usage of org.apache.kafka.common.protocol.types.Struct in the Apache Kafka project.
From the class AbstractRequest, method getRequest:
/**
 * Factory method for getting a request object based on ApiKey ID and a buffer.
 * <p>
 * The buffer is parsed into a {@code Struct} using the schema for the given
 * API key and version, and the struct is then wrapped in the concrete
 * request type for that key.
 *
 * @param requestId numeric id of the API, resolved via {@code ApiKeys.forId}
 * @param version the API version the buffer was encoded with
 * @param buffer the serialized request payload
 * @return the parsed request together with its serialized size in bytes
 * @throws AssertionError if the API key has no case in the switch below
 */
public static RequestAndSize getRequest(int requestId, short version, ByteBuffer buffer) {
ApiKeys apiKey = ApiKeys.forId(requestId);
Struct struct = apiKey.parseRequest(version, buffer);
AbstractRequest request;
// One case per supported API key; each concrete request type wraps the
// already-parsed struct. New API keys must be added here explicitly.
switch(apiKey) {
case PRODUCE:
request = new ProduceRequest(struct, version);
break;
case FETCH:
request = new FetchRequest(struct, version);
break;
case LIST_OFFSETS:
request = new ListOffsetRequest(struct, version);
break;
case METADATA:
request = new MetadataRequest(struct, version);
break;
case OFFSET_COMMIT:
request = new OffsetCommitRequest(struct, version);
break;
case OFFSET_FETCH:
request = new OffsetFetchRequest(struct, version);
break;
case GROUP_COORDINATOR:
request = new GroupCoordinatorRequest(struct, version);
break;
case JOIN_GROUP:
request = new JoinGroupRequest(struct, version);
break;
case HEARTBEAT:
request = new HeartbeatRequest(struct, version);
break;
case LEAVE_GROUP:
request = new LeaveGroupRequest(struct, version);
break;
case SYNC_GROUP:
request = new SyncGroupRequest(struct, version);
break;
case STOP_REPLICA:
request = new StopReplicaRequest(struct, version);
break;
case CONTROLLED_SHUTDOWN_KEY:
request = new ControlledShutdownRequest(struct, version);
break;
case UPDATE_METADATA_KEY:
request = new UpdateMetadataRequest(struct, version);
break;
case LEADER_AND_ISR:
request = new LeaderAndIsrRequest(struct, version);
break;
case DESCRIBE_GROUPS:
request = new DescribeGroupsRequest(struct, version);
break;
case LIST_GROUPS:
request = new ListGroupsRequest(struct, version);
break;
case SASL_HANDSHAKE:
request = new SaslHandshakeRequest(struct, version);
break;
case API_VERSIONS:
request = new ApiVersionsRequest(struct, version);
break;
case CREATE_TOPICS:
request = new CreateTopicsRequest(struct, version);
break;
case DELETE_TOPICS:
request = new DeleteTopicsRequest(struct, version);
break;
default:
// Fail loudly rather than silently dropping an unhandled API key.
throw new AssertionError(String.format("ApiKey %s is not currently handled in `getRequest`, the " + "code should be updated to do so.", apiKey));
}
return new RequestAndSize(request, struct.sizeOf());
}
Aggregations