Use of org.apache.kafka.common.protocol.types.ArrayOf in project kafka by apache.
The class Protocol, method schemaToBnfHtml.
private static void schemaToBnfHtml(Schema schema, StringBuilder b, int indentSize) {
    final String indentStr = indentString(indentSize);
    final Map<String, Type> subTypes = new LinkedHashMap<>();
    // Top level fields
    for (Field field : schema.fields()) {
        if (field.type instanceof ArrayOf) {
            b.append("[");
            b.append(field.name);
            b.append("] ");
            Type innerType = ((ArrayOf) field.type).type();
            if (!subTypes.containsKey(field.name))
                subTypes.put(field.name, innerType);
        } else {
            b.append(field.name);
            b.append(" ");
            if (!subTypes.containsKey(field.name))
                subTypes.put(field.name, field.type);
        }
    }
    b.append("\n");
    // Sub Types/Schemas
    for (Map.Entry<String, Type> entry : subTypes.entrySet()) {
        if (entry.getValue() instanceof Schema) {
            // Complex Schema Type
            b.append(indentStr);
            b.append(entry.getKey());
            b.append(" => ");
            schemaToBnfHtml((Schema) entry.getValue(), b, indentSize + 2);
        } else {
            // Standard Field Type
            b.append(indentStr);
            b.append(entry.getKey());
            b.append(" => ");
            b.append(entry.getValue());
            b.append("\n");
        }
    }
}
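The method above only inspects ArrayOf in order to render a schema in BNF-like form. As a complement, the snippet below is a minimal, hypothetical sketch (not taken from the Kafka sources) of how an ArrayOf-typed field round-trips through a Struct, assuming the same org.apache.kafka.common.protocol.types API version used in the snippets on this page; the class name, schema, and field name are made up for illustration.

import java.nio.ByteBuffer;
import java.util.Arrays;

import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;

public class ArrayOfRoundTrip {
    public static void main(String[] args) {
        // Hypothetical schema: a single array-of-string field.
        Schema schema = new Schema(new Field("topics", new ArrayOf(Type.STRING)));

        // Array-typed fields are set as Object[]; ArrayOf delegates element
        // serialization to its inner type (here Type.STRING).
        Struct struct = new Struct(schema);
        struct.set("topics", new Object[] { "orders", "payments" });

        ByteBuffer buffer = ByteBuffer.allocate(struct.sizeOf());
        struct.writeTo(buffer);
        buffer.flip();

        // Reading back with the same schema; getArray returns the Object[].
        Struct parsed = (Struct) schema.read(buffer);
        System.out.println(Arrays.toString(parsed.getArray("topics")));
    }
}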
Use of org.apache.kafka.common.protocol.types.ArrayOf in project kafka by apache.
The class ConsumerProtocolTest, method deserializeNewSubscriptionVersion.
@Test
public void deserializeNewSubscriptionVersion() {
    // verify that a new version which adds a field is still parseable
    short version = 100;
    Schema subscriptionSchemaV100 = new Schema(
            new Field(ConsumerProtocol.TOPICS_KEY_NAME, new ArrayOf(Type.STRING)),
            new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES),
            new Field("foo", Type.STRING));
    Struct subscriptionV100 = new Struct(subscriptionSchemaV100);
    subscriptionV100.set(ConsumerProtocol.TOPICS_KEY_NAME, new Object[] { "topic" });
    subscriptionV100.set(ConsumerProtocol.USER_DATA_KEY_NAME, ByteBuffer.wrap(new byte[0]));
    subscriptionV100.set("foo", "bar");
    Struct headerV100 = new Struct(ConsumerProtocol.CONSUMER_PROTOCOL_HEADER_SCHEMA);
    headerV100.set(ConsumerProtocol.VERSION_KEY_NAME, version);
    ByteBuffer buffer = ByteBuffer.allocate(subscriptionV100.sizeOf() + headerV100.sizeOf());
    headerV100.writeTo(buffer);
    subscriptionV100.writeTo(buffer);
    buffer.flip();
    Subscription subscription = ConsumerProtocol.deserializeSubscription(buffer);
    assertEquals(Arrays.asList("topic"), subscription.topics());
}
Use of org.apache.kafka.common.protocol.types.ArrayOf in project kafka by apache.
The class ConsumerProtocolTest, method deserializeNewAssignmentVersion.
@Test
public void deserializeNewAssignmentVersion() {
    // verify that a new version which adds a field is still parseable
    short version = 100;
    Schema assignmentSchemaV100 = new Schema(
            new Field(ConsumerProtocol.TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(ConsumerProtocol.TOPIC_ASSIGNMENT_V0)),
            new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES),
            new Field("foo", Type.STRING));
    Struct assignmentV100 = new Struct(assignmentSchemaV100);
    assignmentV100.set(ConsumerProtocol.TOPIC_PARTITIONS_KEY_NAME,
            new Object[] { new Struct(ConsumerProtocol.TOPIC_ASSIGNMENT_V0)
                    .set(ConsumerProtocol.TOPIC_KEY_NAME, "foo")
                    .set(ConsumerProtocol.PARTITIONS_KEY_NAME, new Object[] { 1 }) });
    assignmentV100.set(ConsumerProtocol.USER_DATA_KEY_NAME, ByteBuffer.wrap(new byte[0]));
    assignmentV100.set("foo", "bar");
    Struct headerV100 = new Struct(ConsumerProtocol.CONSUMER_PROTOCOL_HEADER_SCHEMA);
    headerV100.set(ConsumerProtocol.VERSION_KEY_NAME, version);
    ByteBuffer buffer = ByteBuffer.allocate(assignmentV100.sizeOf() + headerV100.sizeOf());
    headerV100.writeTo(buffer);
    assignmentV100.writeTo(buffer);
    buffer.flip();
    PartitionAssignor.Assignment assignment = ConsumerProtocol.deserializeAssignment(buffer);
    assertEquals(toSet(Arrays.asList(new TopicPartition("foo", 1))), toSet(assignment.partitions()));
}
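Both tests rely on the same property: a Schema consumes only the fields it knows about, so a reader built against an older schema tolerates fields that a newer version appends at the end of the payload. The sketch below illustrates that property in isolation; it is hypothetical (the class, schema, and field names are made up) and assumes the same protocol-types API used in the tests above.

import java.nio.ByteBuffer;

import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;

public class ForwardCompatSketch {
    public static void main(String[] args) {
        // Hypothetical "old" schema and a "new" schema that appends one field.
        Schema v0 = new Schema(new Field("topics", new ArrayOf(Type.STRING)));
        Schema v1 = new Schema(new Field("topics", new ArrayOf(Type.STRING)),
                               new Field("extra", Type.STRING));

        // Write a struct using the newer schema.
        Struct written = new Struct(v1);
        written.set("topics", new Object[] { "topic" });
        written.set("extra", "bar");

        ByteBuffer buffer = ByteBuffer.allocate(written.sizeOf());
        written.writeTo(buffer);
        buffer.flip();

        // Reading with the old schema consumes only the known fields and leaves
        // the appended field's bytes unread -- the behavior the tests above
        // depend on when deserializing a higher protocol version.
        Struct parsed = (Struct) v0.read(buffer);
        System.out.println(parsed.getArray("topics").length); // prints 1
    }
}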