Use of org.apache.kafka.common.protocol.types.Schema in the Apache Kafka project.
From the class Protocol, method schemaToBnfHtml.
/**
 * Appends a BNF-style HTML rendering of {@code schema} to {@code b}.
 * The top-level field names are written on one line (array fields in
 * square brackets), then each field's type is expanded on its own
 * indented line, recursing into nested schemas.
 *
 * @param schema     the schema to render
 * @param b          the output buffer to append to
 * @param indentSize number of spaces used to indent expanded types
 */
private static void schemaToBnfHtml(Schema schema, StringBuilder b, int indentSize) {
    final String indent = indentString(indentSize);
    // Insertion-ordered so expanded types appear in declaration order.
    final Map<String, Type> expansions = new LinkedHashMap<>();

    // First pass: emit the field-name line and remember each field's
    // type (for arrays, the element type) for expansion below. The
    // first occurrence of a name wins, matching the original lookup.
    for (Field field : schema.fields()) {
        final boolean isArray = field.type instanceof ArrayOf;
        if (isArray) {
            b.append("[").append(field.name).append("] ");
        } else {
            b.append(field.name).append(" ");
        }
        final Type expanded = isArray ? ((ArrayOf) field.type).type() : field.type;
        expansions.putIfAbsent(field.name, expanded);
    }
    b.append("\n");

    // Second pass: expand each collected type. Nested schemas recurse
    // with a deeper indent; primitive types are printed directly.
    for (Map.Entry<String, Type> entry : expansions.entrySet()) {
        b.append(indent).append(entry.getKey()).append(" => ");
        if (entry.getValue() instanceof Schema) {
            schemaToBnfHtml((Schema) entry.getValue(), b, indentSize + 2);
        } else {
            b.append(entry.getValue()).append("\n");
        }
    }
}
Use of org.apache.kafka.common.protocol.types.Schema in the Apache Kafka project.
From the class ConsumerProtocolTest, method deserializeNewSubscriptionVersion.
@Test
public void deserializeNewSubscriptionVersion() {
    // A subscription serialized with a future schema version (which
    // appends an extra field) must remain parseable by the current code.
    short futureVersion = 100;
    Schema futureSubscriptionSchema = new Schema(
            new Field(ConsumerProtocol.TOPICS_KEY_NAME, new ArrayOf(Type.STRING)),
            new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES),
            new Field("foo", Type.STRING));

    // Build the future-version payload, including the unknown field.
    Struct futureSubscription = new Struct(futureSubscriptionSchema);
    futureSubscription.set(ConsumerProtocol.TOPICS_KEY_NAME, new Object[] { "topic" });
    futureSubscription.set(ConsumerProtocol.USER_DATA_KEY_NAME, ByteBuffer.wrap(new byte[0]));
    futureSubscription.set("foo", "bar");

    // Prefix with a header that advertises the future version number.
    Struct header = new Struct(ConsumerProtocol.CONSUMER_PROTOCOL_HEADER_SCHEMA);
    header.set(ConsumerProtocol.VERSION_KEY_NAME, futureVersion);

    ByteBuffer buffer = ByteBuffer.allocate(futureSubscription.sizeOf() + header.sizeOf());
    header.writeTo(buffer);
    futureSubscription.writeTo(buffer);
    buffer.flip();

    // The known fields must still deserialize; the extra one is ignored.
    Subscription subscription = ConsumerProtocol.deserializeSubscription(buffer);
    assertEquals(Arrays.asList("topic"), subscription.topics());
}
Use of org.apache.kafka.common.protocol.types.Schema in the Apache Kafka project.
From the class ConsumerProtocolTest, method deserializeNewAssignmentVersion.
@Test
public void deserializeNewAssignmentVersion() {
    // An assignment serialized with a future schema version (which
    // appends an extra field) must remain parseable by the current code.
    short futureVersion = 100;
    Schema futureAssignmentSchema = new Schema(
            new Field(ConsumerProtocol.TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(ConsumerProtocol.TOPIC_ASSIGNMENT_V0)),
            new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES),
            new Field("foo", Type.STRING));

    // Build the future-version payload, including the unknown field.
    Struct futureAssignment = new Struct(futureAssignmentSchema);
    Struct topicAssignment = new Struct(ConsumerProtocol.TOPIC_ASSIGNMENT_V0)
            .set(ConsumerProtocol.TOPIC_KEY_NAME, "foo")
            .set(ConsumerProtocol.PARTITIONS_KEY_NAME, new Object[] { 1 });
    futureAssignment.set(ConsumerProtocol.TOPIC_PARTITIONS_KEY_NAME, new Object[] { topicAssignment });
    futureAssignment.set(ConsumerProtocol.USER_DATA_KEY_NAME, ByteBuffer.wrap(new byte[0]));
    futureAssignment.set("foo", "bar");

    // Prefix with a header that advertises the future version number.
    Struct header = new Struct(ConsumerProtocol.CONSUMER_PROTOCOL_HEADER_SCHEMA);
    header.set(ConsumerProtocol.VERSION_KEY_NAME, futureVersion);

    ByteBuffer buffer = ByteBuffer.allocate(futureAssignment.sizeOf() + header.sizeOf());
    header.writeTo(buffer);
    futureAssignment.writeTo(buffer);
    buffer.flip();

    // The known fields must still deserialize; the extra one is ignored.
    PartitionAssignor.Assignment assignment = ConsumerProtocol.deserializeAssignment(buffer);
    assertEquals(toSet(Arrays.asList(new TopicPartition("foo", 1))), toSet(assignment.partitions()));
}
Use of org.apache.kafka.common.protocol.types.Schema in the Apache Kafka project.
From the class Protocol, method toHtml.
/**
 * Generates the HTML protocol documentation: the request/response header
 * grammars followed by, for every API key, the BNF grammar and field
 * table of each defined request and response schema version.
 *
 * @return the generated HTML fragment
 */
public static String toHtml() {
    final StringBuilder b = new StringBuilder();
    b.append("<h5>Headers:</h5>\n");
    b.append("<pre>");
    b.append("Request Header => ");
    schemaToBnfHtml(REQUEST_HEADER, b, 2);
    b.append("</pre>\n");
    schemaToFieldTableHtml(REQUEST_HEADER, b);
    b.append("<pre>");
    b.append("Response Header => ");
    schemaToBnfHtml(RESPONSE_HEADER, b, 2);
    b.append("</pre>\n");
    schemaToFieldTableHtml(RESPONSE_HEADER, b);
    for (ApiKeys key : ApiKeys.values()) {
        // Anchor + heading for this API key
        b.append("<h5>");
        b.append("<a name=\"The_Messages_" + key.name + "\">");
        b.append(key.name);
        b.append(" API (Key: ");
        b.append(key.id);
        b.append("):</a></h5>\n\n");
        // Requests and responses share the same rendering logic.
        appendVersionedSchemasHtml(b, key.name, "Request", REQUESTS[key.id]);
        appendVersionedSchemasHtml(b, key.name, "Response", RESPONSES[key.id]);
    }
    return b.toString();
}

/**
 * Appends the BNF grammar and field table for each non-null version of
 * {@code schemas}. {@code kind} is either "Request" or "Response".
 */
private static void appendVersionedSchemasHtml(StringBuilder b, String apiName, String kind, Schema[] schemas) {
    b.append("<b>").append(kind).append("s:</b><br>\n");
    for (int version = 0; version < schemas.length; version++) {
        Schema schema = schemas[version];
        // Skip undefined versions entirely. Previously a stray "</p>"
        // was emitted here even though no "<p>" had been opened,
        // producing unbalanced HTML; the close tag now only appears
        // when the paragraph is actually opened.
        if (schema == null)
            continue;
        b.append("<p>");
        // Version header
        b.append("<pre>");
        b.append(apiName);
        b.append(" ").append(kind).append(" (Version: ");
        b.append(version);
        b.append(") => ");
        schemaToBnfHtml(schema, b, 2);
        b.append("</pre>");
        schemaToFieldTableHtml(schema, b);
        b.append("</p>\n");
    }
}
Aggregations of code examples using org.apache.kafka.common.protocol.types.Schema.