Use of org.apache.kafka.connect.source.SourceRecord in project kafka by apache: class FileStreamSourceTaskTest, method testBatchSize.
@Test
public void testBatchSize() throws IOException, InterruptedException {
    expectOffsetLookupReturnNone();
    replay();

    // Cap each poll() at 5000 records.
    config.put(FileStreamSourceConnector.TASK_BATCH_SIZE_CONFIG, "5000");
    task.start(config);

    OutputStream os = Files.newOutputStream(tempFile.toPath());
    writeTimesAndFlush(os, 10_000,
            "Neque porro quisquam est qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit...\n".getBytes());

    assertEquals(2, task.bufferSize());
    // 10,000 lines at a batch size of 5000 should drain in exactly two polls.
    List<SourceRecord> records = task.poll();
    assertEquals(5000, records.size());
    assertEquals(128, task.bufferSize());

    records = task.poll();
    assertEquals(5000, records.size());
    assertEquals(128, task.bufferSize());

    os.close();
    task.stop();
}
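For orientation, a minimal sketch of the per-line SourceRecord construction a file-reading task like this performs. The sourcePartition/sourceOffset keys ("filename", "position") mirror the ones FileStreamSourceTask uses; the helper class and method are hypothetical.

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class LineRecordSketch {
    // Hypothetical helper: wrap one file line into a SourceRecord. Connect
    // persists the sourceOffset map; it is what the offset lookup mocked by
    // expectOffsetLookupReturnNone() above would hand back after a restart.
    static SourceRecord lineToRecord(String filename, long position, String line, String topic) {
        Map<String, String> sourcePartition = Collections.singletonMap("filename", filename);
        Map<String, Long> sourceOffset = Collections.singletonMap("position", position);
        // Null topic partition: let the producer's partitioner choose.
        return new SourceRecord(sourcePartition, sourceOffset, topic, null,
                Schema.STRING_SCHEMA, line);
    }
}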
Use of org.apache.kafka.connect.source.SourceRecord in project kafka by apache: class MirrorHeartbeatTask, method poll.
@Override
public List<SourceRecord> poll() throws InterruptedException {
    // pause to throttle, unless we've stopped
    if (stopped.await(interval.toMillis(), TimeUnit.MILLISECONDS)) {
        // SourceWorkerTask expects non-zero batches or null
        return null;
    }
    long timestamp = System.currentTimeMillis();
    Heartbeat heartbeat = new Heartbeat(sourceClusterAlias, targetClusterAlias, timestamp);
    SourceRecord record = new SourceRecord(
            heartbeat.connectPartition(), MirrorUtils.wrapOffset(0),
            heartbeatsTopic, 0,
            Schema.BYTES_SCHEMA, heartbeat.recordKey(),
            Schema.BYTES_SCHEMA, heartbeat.recordValue(),
            timestamp);
    return Collections.singletonList(record);
}
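The await() call above doubles as throttle and shutdown check: stop() counts the latch down, await() then returns true, and poll() returns null. A minimal standalone sketch of the same pattern, with a placeholder topic, a fixed one-second interval, and an int64 payload instead of MirrorMaker's heartbeat bytes:

import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

public abstract class PeriodicEmitTaskSketch extends SourceTask {
    private final CountDownLatch stopped = new CountDownLatch(1);

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        // true only if stop() fired while we were waiting, i.e. shutdown.
        if (stopped.await(1, TimeUnit.SECONDS))
            return null; // the framework treats null as "nothing to send"
        long now = System.currentTimeMillis();
        SourceRecord record = new SourceRecord(null, null, "tick-topic", null,
                Schema.INT64_SCHEMA, now);
        return Collections.singletonList(record);
    }

    @Override
    public void stop() {
        stopped.countDown(); // wakes any poll() blocked in await()
    }
}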
Use of org.apache.kafka.connect.source.SourceRecord in project kafka by apache: class WorkerSourceTaskTest, method testSendRecordsPropagatesTimestamp.
@Test
public void testSendRecordsPropagatesTimestamp() throws Exception {
    final Long timestamp = System.currentTimeMillis();
    createWorkerTask();

    List<SourceRecord> records = Collections.singletonList(
            new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp));

    Capture<ProducerRecord<byte[], byte[]>> sent = expectSendRecordAnyTimes();
    expectTopicCreation(TOPIC);

    PowerMock.replayAll();

    Whitebox.setInternalState(workerTask, "toSend", records);
    Whitebox.invokeMethod(workerTask, "sendRecords");
    // The timestamp set on the SourceRecord must survive onto the captured ProducerRecord.
    assertEquals(timestamp, sent.getValue().timestamp());

    PowerMock.verifyAll();
}
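The test pins down that WorkerSourceTask copies the SourceRecord timestamp onto the outgoing ProducerRecord. The nine-argument constructor it exercises is the timestamp-carrying one; a small standalone illustration (topic and payload are placeholders):

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;

public class TimestampSketch {
    public static void main(String[] args) {
        // Same nine-argument constructor as the test: the trailing Long is the
        // timestamp the framework propagates to the producer record.
        SourceRecord record = new SourceRecord(
                null, null,             // no source partition/offset to track
                "events", null,         // topic; null partition -> producer decides
                Schema.STRING_SCHEMA, "key",
                Schema.STRING_SCHEMA, "value",
                System.currentTimeMillis());
        System.out.println(record.timestamp());
    }
}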
Use of org.apache.kafka.connect.source.SourceRecord in project kafka by apache: class WorkerSourceTaskTest, method testTopicCreateWhenTopicExists.
@Test
public void testTopicCreateWhenTopicExists() throws Exception {
    if (!enableTopicCreation)
        // should only test with topic creation enabled
        return;

    createWorkerTask();

    SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, TOPIC, 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
    SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, TOPIC, 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);

    expectPreliminaryCalls();
    // describeTopics() reports the topic as present, so no create call is expected.
    TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList());
    TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo));
    EasyMock.expect(admin.describeTopics(TOPIC)).andReturn(Collections.singletonMap(TOPIC, topicDesc));

    expectSendRecordTaskCommitRecordSucceed(false);
    expectSendRecordTaskCommitRecordSucceed(false);

    PowerMock.replayAll();

    Whitebox.setInternalState(workerTask, "toSend", Arrays.asList(record1, record2));
    Whitebox.invokeMethod(workerTask, "sendRecords");
}
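The mocked describeTopics() answer is what convinces the worker the topic already exists, so no create call follows. A rough sketch of that exists-check using the public Admin client; the worker itself goes through an internal TopicAdmin wrapper, and ensureTopic plus the single-partition, replication-factor-1 settings here are illustrative:

import java.util.Collections;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;

public class EnsureTopicSketch {
    // Create the topic only if describe says it is missing.
    static void ensureTopic(Admin admin, String topic) throws Exception {
        try {
            admin.describeTopics(Collections.singleton(topic)).all().get();
            // Topic exists: nothing to do, matching the test's expectation.
        } catch (ExecutionException e) {
            if (e.getCause() instanceof UnknownTopicOrPartitionException) {
                admin.createTopics(Collections.singleton(
                        new NewTopic(topic, 1, (short) 1))).all().get();
            } else {
                throw e;
            }
        }
    }
}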
Use of org.apache.kafka.connect.source.SourceRecord in project kafka by apache: class CastTest, method castNullKeyRecordSchemaless.
@Test
public void castNullKeyRecordSchemaless() {
    xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int64"));
    SourceRecord original = new SourceRecord(null, null, "topic", 0,
            null, null, Schema.STRING_SCHEMA, "value");
    SourceRecord transformed = xformKey.apply(original);
    // A null key has nothing to cast, so the record passes through unchanged.
    assertEquals(original, transformed);
}
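For contrast with the null-key pass-through above, a sketch of the Value counterpart of the same SMT applied to a schemaless map value; the "count" field name and its integer literal are illustrative:

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.transforms.Cast;

public class CastValueSketch {
    public static void main(String[] args) {
        Cast.Value<SourceRecord> xformValue = new Cast.Value<>();
        // Same "spec" config key the test uses, aimed at a value field instead.
        xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "count:int64"));

        SourceRecord original = new SourceRecord(null, null, "topic", 0,
                null, Collections.singletonMap("count", 42));
        SourceRecord transformed = xformValue.apply(original);

        // The schemaless Integer field comes back as a Long (int64).
        Map<?, ?> value = (Map<?, ?>) transformed.value();
        System.out.println(value.get("count").getClass());
        xformValue.close();
    }
}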