Use of org.apache.kafka.connect.data.Struct in project kafka by apache.
From the class SchemaSourceTask, method poll().
@Override
public List<SourceRecord> poll() throws InterruptedException {
    if (count < maxNumMsgs) {
        long sendStartMs = System.currentTimeMillis();
        if (throttler.shouldThrottle(seqno - startingSeqno, sendStartMs)) {
            throttler.throttle();
        }
        Map<String, Long> ccOffset = Collections.singletonMap(SEQNO_FIELD, seqno);
        int partitionVal = (int) (seqno % partitionCount);
        final Struct data;
        final SourceRecord srcRecord;
        if (!multipleSchema || count % 2 == 0) {
            data = new Struct(valueSchema)
                    .put("boolean", true).put("int", 12).put("long", 12L)
                    .put("float", 12.2f).put("double", 12.2)
                    .put("partitioning", partitionVal).put("id", id).put("seqno", seqno);
            srcRecord = new SourceRecord(partition, ccOffset, topic, id, Schema.STRING_SCHEMA, "key", valueSchema, data);
        } else {
            data = new Struct(valueSchema2)
                    .put("boolean", true).put("int", 12).put("long", 12L)
                    .put("float", 12.2f).put("double", 12.2)
                    .put("partitioning", partitionVal).put("string", "def").put("id", id).put("seqno", seqno);
            srcRecord = new SourceRecord(partition, ccOffset, topic, id, Schema.STRING_SCHEMA, "key", valueSchema2, data);
        }
        System.out.println("{\"task\": " + id + ", \"seqno\": " + seqno + "}");
        List<SourceRecord> result = Arrays.asList(srcRecord);
        seqno++;
        count++;
        return result;
    } else {
        synchronized (this) {
            this.wait();
        }
        return new ArrayList<>();
    }
}
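For reference, here is a minimal, self-contained sketch (not taken from the Kafka source) of how a Struct like the one built in poll() is defined and type-checked against its schema. The schema below is a simplified, hypothetical stand-in for the task's valueSchema.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class StructSketch {
    public static void main(String[] args) {
        // Hypothetical, simplified stand-in for the task's valueSchema
        Schema valueSchema = SchemaBuilder.struct().name("schema.source.value")
                .field("boolean", Schema.BOOLEAN_SCHEMA)
                .field("int", Schema.INT32_SCHEMA)
                .field("long", Schema.INT64_SCHEMA)
                .field("float", Schema.FLOAT32_SCHEMA)
                .field("double", Schema.FLOAT64_SCHEMA)
                .field("partitioning", Schema.INT32_SCHEMA)
                .field("id", Schema.INT32_SCHEMA)
                .field("seqno", Schema.INT64_SCHEMA)
                .build();

        Struct data = new Struct(valueSchema)
                .put("boolean", true).put("int", 12).put("long", 12L)
                .put("float", 12.2f).put("double", 12.2)
                .put("partitioning", 0).put("id", 1).put("seqno", 42L);

        // Throws DataException if a required field is missing or has the wrong type
        data.validate();
        System.out.println(data);
    }
}

Because every put() is checked against the field's declared type, a schema-backed Struct like this can safely be handed to a Converter or wrapped in a SourceRecord as in the snippet above.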
Use of org.apache.kafka.connect.data.Struct in project kafka by apache.
From the class KafkaConfigBackingStore, method putTargetState().
@Override
public void putTargetState(String connector, TargetState state) {
    Struct connectTargetState = new Struct(TARGET_STATE_V0);
    connectTargetState.put("state", state.name());
    byte[] serializedTargetState = converter.fromConnectData(topic, TARGET_STATE_V0, connectTargetState);
    log.debug("Writing target state {} for connector {}", state, connector);
    configLog.send(TARGET_STATE_KEY(connector), serializedTargetState);
}
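To make the serialization step concrete, here is a hedged, self-contained sketch of pushing a single-field target-state struct through a JsonConverter. The TARGET_STATE_V0 constant below is a locally defined stand-in for the store's schema, and the topic name is made up.

import java.util.Collections;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.json.JsonConverter;

public class TargetStateSketch {
    // Assumed stand-in for KafkaConfigBackingStore.TARGET_STATE_V0: a single string field
    static final Schema TARGET_STATE_V0 = SchemaBuilder.struct()
            .field("state", Schema.STRING_SCHEMA)
            .build();

    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // Disable the schema envelope so the serialized payload stays compact
        converter.configure(Collections.singletonMap("schemas.enable", "false"), false);

        Struct connectTargetState = new Struct(TARGET_STATE_V0).put("state", "PAUSED");
        byte[] serialized = converter.fromConnectData("connect-configs", TARGET_STATE_V0, connectTargetState);
        System.out.println(new String(serialized)); // {"state":"PAUSED"}
    }
}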
Use of org.apache.kafka.connect.data.Struct in project kafka by apache.
From the class KafkaConfigBackingStore, method putTaskConfigs().
/**
 * Write these task configurations and associated commit messages, unless an inconsistency is found that indicates
 * that we would be leaving one of the referenced connectors with an inconsistent state.
 *
 * @param connector the connector to write task configurations for
 * @param configs list of task configurations for the connector
 * @throws ConnectException if the task configurations do not resolve inconsistencies found in the existing root
 *                          and task configurations.
 */
@Override
public void putTaskConfigs(String connector, List<Map<String, String>> configs) {
    // Make sure we're at the end of the log. We should be the only writer, but we want to make sure we don't have
    // any outstanding lagging data to consume.
    try {
        configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        log.error("Failed to write root configuration to Kafka: ", e);
        throw new ConnectException("Error writing root configuration to Kafka", e);
    }
    int taskCount = configs.size();
    // Start sending all the individual updates
    int index = 0;
    for (Map<String, String> taskConfig : configs) {
        Struct connectConfig = new Struct(TASK_CONFIGURATION_V0);
        connectConfig.put("properties", taskConfig);
        byte[] serializedConfig = converter.fromConnectData(topic, TASK_CONFIGURATION_V0, connectConfig);
        log.debug("Writing configuration for task " + index + " configuration: " + taskConfig);
        ConnectorTaskId connectorTaskId = new ConnectorTaskId(connector, index);
        configLog.send(TASK_KEY(connectorTaskId), serializedConfig);
        index++;
    }
    // Finally, send the commit to update the number of tasks and apply the new configs, then wait until we read to
    // the end of the log
    try {
        // Read to end to ensure all the task configs have been written
        if (taskCount > 0) {
            configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        }
        // Write the commit message
        Struct connectConfig = new Struct(CONNECTOR_TASKS_COMMIT_V0);
        connectConfig.put("tasks", taskCount);
        byte[] serializedConfig = converter.fromConnectData(topic, CONNECTOR_TASKS_COMMIT_V0, connectConfig);
        log.debug("Writing commit for connector " + connector + " with " + taskCount + " tasks.");
        configLog.send(COMMIT_TASKS_KEY(connector), serializedConfig);
        // Read to end to ensure all the commit messages have been written
        configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        log.error("Failed to write root configuration to Kafka: ", e);
        throw new ConnectException("Error writing root configuration to Kafka", e);
    }
}
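Each per-task write above wraps one task's property map in a Struct whose single field holds a map of strings. Below is a hedged sketch of that shape, with TASK_CONFIGURATION_V0 redefined locally as an assumed stand-in for the real schema constant; the property values are illustrative.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class TaskConfigStructSketch {
    // Assumed stand-in for TASK_CONFIGURATION_V0: one map<string, string> field named "properties"
    static final Schema TASK_CONFIGURATION_V0 = SchemaBuilder.struct()
            .field("properties", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA).build())
            .build();

    public static void main(String[] args) {
        Map<String, String> taskConfig = new HashMap<>();
        taskConfig.put("connector.class", "org.apache.kafka.connect.tools.SchemaSourceConnector");
        taskConfig.put("example.setting", "example-value"); // illustrative entry

        // Same shape as the loop above: the whole property map goes into a single struct field
        Struct connectConfig = new Struct(TASK_CONFIGURATION_V0).put("properties", taskConfig);
        connectConfig.validate();
        System.out.println(connectConfig);
    }
}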
Use of org.apache.kafka.connect.data.Struct in project kafka by apache.
From the class KafkaConfigBackingStore, method putConnectorConfig().
/**
 * Write this connector configuration to persistent storage and wait until it has been acknowledged and read back by
 * tailing the Kafka log with a consumer.
 *
 * @param connector name of the connector to write data for
 * @param properties the configuration to write
 */
@Override
public void putConnectorConfig(String connector, Map<String, String> properties) {
    log.debug("Writing connector configuration {} for connector {} configuration", properties, connector);
    Struct connectConfig = new Struct(CONNECTOR_CONFIGURATION_V0);
    connectConfig.put("properties", properties);
    byte[] serializedConfig = converter.fromConnectData(topic, CONNECTOR_CONFIGURATION_V0, connectConfig);
    updateConnectorConfig(connector, serializedConfig);
}
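The javadoc notes that the write is acknowledged by reading it back from the Kafka log. The following self-contained sketch illustrates just the serialize/deserialize half of that round trip with a JsonConverter; CONNECTOR_CONFIGURATION_V0 here is an assumed stand-in for the store's schema constant and the topic name is made up.

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.json.JsonConverter;

public class ConnectorConfigRoundTrip {
    // Assumed stand-in for CONNECTOR_CONFIGURATION_V0
    static final Schema CONNECTOR_CONFIGURATION_V0 = SchemaBuilder.struct()
            .field("properties", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA).build())
            .build();

    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // Keep schemas enabled so the bytes carry enough type information to rebuild the Struct
        converter.configure(Collections.singletonMap("schemas.enable", "true"), false);

        Map<String, String> properties =
                Collections.singletonMap("connector.class", "org.apache.kafka.connect.tools.SchemaSourceConnector");
        Struct connectConfig = new Struct(CONNECTOR_CONFIGURATION_V0).put("properties", properties);

        // Write side: the bytes that would be handed to the config log
        byte[] serialized = converter.fromConnectData("connect-configs", CONNECTOR_CONFIGURATION_V0, connectConfig);

        // Read side: what a consumer tailing the log turns those bytes back into
        SchemaAndValue readBack = converter.toConnectData("connect-configs", serialized);
        System.out.println(readBack.value());
    }
}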
Use of org.apache.kafka.connect.data.Struct in project kafka by apache.
From the class JsonConverterTest, method structToJson().
@Test
public void structToJson() {
    Schema schema = SchemaBuilder.struct()
            .field("field1", Schema.BOOLEAN_SCHEMA)
            .field("field2", Schema.STRING_SCHEMA)
            .field("field3", Schema.STRING_SCHEMA)
            .field("field4", Schema.BOOLEAN_SCHEMA)
            .build();
    Struct input = new Struct(schema).put("field1", true).put("field2", "string2").put("field3", "string3").put("field4", false);
    JsonNode converted = parse(converter.fromConnectData(TOPIC, schema, input));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"struct\", \"optional\": false, \"fields\": [{ \"field\": \"field1\", \"type\": \"boolean\", \"optional\": false }, { \"field\": \"field2\", \"type\": \"string\", \"optional\": false }, { \"field\": \"field3\", \"type\": \"string\", \"optional\": false }, { \"field\": \"field4\", \"type\": \"boolean\", \"optional\": false }] }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals(JsonNodeFactory.instance.objectNode().put("field1", true).put("field2", "string2").put("field3", "string3").put("field4", false), converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME));
}
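As a companion to the test above, here is a hedged, self-contained sketch of the reverse direction: the same kind of Struct is serialized to the JSON envelope and decoded back with toConnectData(). The converter setup below is an assumption, not copied from the test class.

import java.util.Collections;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.json.JsonConverter;

public class StructRoundTripSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // Schemas stay enabled so the envelope produced by fromConnectData can be decoded back into a Struct
        converter.configure(Collections.singletonMap("schemas.enable", "true"), false);

        Schema schema = SchemaBuilder.struct()
                .field("field1", Schema.BOOLEAN_SCHEMA)
                .field("field2", Schema.STRING_SCHEMA)
                .build();
        Struct input = new Struct(schema).put("field1", true).put("field2", "string2");

        byte[] json = converter.fromConnectData("test-topic", schema, input);
        SchemaAndValue output = converter.toConnectData("test-topic", json);

        Struct decoded = (Struct) output.value();
        System.out.println(decoded.getBoolean("field1") + " / " + decoded.getString("field2"));
    }
}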