
Example 1 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project connect-utils by jcustenborder.

The class SourceRecordConcurrentLinkedDeque, method drain.

/**
 * Method is used to drain the records from the deque in order and add them to the supplied list.
 *
 * @param records list to add the records to.
 * @param timeout amount of time to sleep if no records are added.
 * @return true if records were added to the list, false if not.
 * @throws InterruptedException     Thrown if the thread is interrupted while sleeping.
 * @throws IllegalArgumentException Thrown if timeout is less than 0.
 */
public boolean drain(List<SourceRecord> records, int timeout) throws InterruptedException {
    Preconditions.checkNotNull(records, "records cannot be null");
    Preconditions.checkArgument(timeout >= 0, "timeout should be greater than or equal to 0.");
    if (log.isDebugEnabled()) {
        log.debug("determining size for this run. batchSize={}, records.size()={}", this.batchSize, records.size());
    }
    int count = Math.min(this.batchSize, this.size());
    if (log.isDebugEnabled()) {
        log.debug("Draining {} record(s).", count);
    }
    for (int i = 0; i < count; i++) {
        SourceRecord record = this.poll();
        if (null != record) {
            records.add(record);
        } else {
            if (log.isDebugEnabled()) {
                log.debug("Poll returned null. exiting");
            }
            // Stop draining once the deque is exhausted.
            break;
        }
    }
    if (records.isEmpty() && timeout > 0) {
        if (log.isDebugEnabled()) {
            log.debug("Found no records, sleeping {} ms.", timeout);
        }
        Thread.sleep(timeout);
    }
    return !records.isEmpty();
}
Also used: SourceRecord (org.apache.kafka.connect.source.SourceRecord)
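
For context, a source task typically calls drain() from its poll() loop. The sketch below is a minimal illustration, not code from connect-utils: the task class, the deque's package and no-arg constructor, and the 1000 ms timeout are all assumptions.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;
// Package assumed for the deque shown in the example above.
import com.github.jcustenborder.kafka.connect.utils.data.SourceRecordConcurrentLinkedDeque;

public class MySourceTask extends SourceTask {
    private final SourceRecordConcurrentLinkedDeque sourceRecords =
            new SourceRecordConcurrentLinkedDeque();

    @Override
    public List<SourceRecord> poll() throws InterruptedException {
        List<SourceRecord> records = new ArrayList<>(256);
        // Wait up to one second if the deque is empty. The boolean result can
        // be ignored here because an empty list is a valid poll() return.
        this.sourceRecords.drain(records, 1000);
        return records;
    }

    @Override
    public void start(Map<String, String> settings) {
        // A background thread would enqueue records into this.sourceRecords.
    }

    @Override
    public void stop() {
    }

    @Override
    public String version() {
        return "0.0.1";
    }
}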

Example 2 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project connect-utils by jcustenborder.

The class SourceRecordDequeImpl, method drain.

@Override
public boolean drain(List<SourceRecord> records, int emptyWaitMs) {
    Preconditions.checkNotNull(records, "records cannot be null");
    Preconditions.checkArgument(emptyWaitMs >= 0, "emptyWaitMs should be greater than or equal to 0.");
    log.trace("drain() - Determining size for this run. batchSize={}, records.size()={}", this.batchSize, records.size());
    final int count = Math.min(this.batchSize, this.size());
    log.trace("drain() - Attempting to draining {} record(s).", count);
    for (int i = 0; i < count; i++) {
        SourceRecord record = this.poll();
        if (null != record) {
            records.add(record);
        } else {
            log.trace("drain() - Poll returned null. exiting");
            break;
        }
    }
    if (records.isEmpty() && emptyWaitMs > 0) {
        log.trace("drain() - Found no records, sleeping {} ms.", emptyWaitMs);
        this.time.sleep(emptyWaitMs);
    }
    return !records.isEmpty();
}
Also used: SourceRecord (org.apache.kafka.connect.source.SourceRecord)
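
The notable difference from Example 1 is that the sleep goes through an injected Time instance instead of Thread.sleep(), which makes the empty-deque wait testable. Kafka's test utilities include a MockTime whose sleep() advances a virtual clock without blocking; a minimal sketch, assuming the kafka-clients test artifact is on the classpath:

import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;

public class TimeInjectionDemo {
    public static void main(String[] args) {
        Time time = new MockTime();
        long before = time.milliseconds();
        // Returns immediately; only the virtual clock advances.
        time.sleep(5000);
        // Prints 5000 even though no real time has passed.
        System.out.println(time.milliseconds() - before);
    }
}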

Example 3 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project connect-utils by jcustenborder.

The class SourceRecordConcurrentLinkedDequeTest, method drain.

@Test
public void drain() throws InterruptedException {
    List<SourceRecord> records = new ArrayList<>(256);
    assertFalse(this.sourceRecords.drain(records), "drain should return false");
    assertTrue(records.isEmpty(), "records should be empty");
    final int EXPECTED_COUNT = 5;
    for (int i = 0; i < EXPECTED_COUNT; i++) {
        SourceRecord record = new SourceRecord(null, null, null, null, null);
        this.sourceRecords.add(record);
    }
    assertEquals(EXPECTED_COUNT, this.sourceRecords.size(), "sourceRecords.size() should match.");
    assertTrue(this.sourceRecords.drain(records), "drain should return true");
    assertTrue(this.sourceRecords.isEmpty(), "drain should have emptied the deque.");
    assertEquals(EXPECTED_COUNT, records.size(), "records.size()");
}
Also used: ArrayList (java.util.ArrayList), SourceRecord (org.apache.kafka.connect.source.SourceRecord), Test (org.junit.jupiter.api.Test)
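
The test refers to this.sourceRecords without showing its setup, and calls a single-argument drain(records), presumably an overload that applies a default timeout. The fixture would look roughly like the following; both details are assumptions about the surrounding test class.

// Presumed fixture for the test above (not shown in the excerpt).
private SourceRecordConcurrentLinkedDeque sourceRecords;

@BeforeEach
public void before() {
    this.sourceRecords = new SourceRecordConcurrentLinkedDeque();
}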

Example 4 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project ignite by apache.

The class IgniteSourceTask, method poll.

/**
 * {@inheritDoc}
 */
@Override
public List<SourceRecord> poll() throws InterruptedException {
    ArrayList<SourceRecord> records = new ArrayList<>(evtBatchSize);
    ArrayList<CacheEvent> evts = new ArrayList<>(evtBatchSize);
    if (stopped)
        return records;
    try {
        if (evtBuf.drainTo(evts, evtBatchSize) > 0) {
            for (CacheEvent evt : evts) {
                // No key is set and the value schema is null; the raw event
                // is published to every configured topic.
                for (String topic : topics) records.add(new SourceRecord(srcPartition, offset, topic, null, evt));
            }
            return records;
        }
    } catch (IgniteException e) {
        log.error("Error when polling event queue!", e);
    }
    // Returning null signals that no records are available (also the path
    // taken during shutdown).
    return null;
}
Also used: IgniteException (org.apache.ignite.IgniteException), ArrayList (java.util.ArrayList), CacheEvent (org.apache.ignite.events.CacheEvent), SourceRecord (org.apache.kafka.connect.source.SourceRecord)
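
Two details are worth noting here: evtBuf is evidently a BlockingQueue<CacheEvent> filled by a separate Ignite event listener, and returning null from poll() is the Connect API's signal that no records are available. A hypothetical sketch of the producing side follows; the class, field name, capacity, and overflow handling are illustrative, not Ignite's actual code.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.ignite.events.CacheEvent;

public class EventBuffer {
    // Bounded so a burst of cache events cannot exhaust the task's memory.
    private final BlockingQueue<CacheEvent> evtBuf = new LinkedBlockingQueue<>(10_000);

    public boolean onEvent(CacheEvent evt) {
        // offer() drops the event when the buffer is full rather than blocking
        // the Ignite event thread; a real connector should record the overflow.
        return evtBuf.offer(evt);
    }
}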

Example 5 with SourceRecord

Use of org.apache.kafka.connect.source.SourceRecord in project apache-kafka-on-k8s by banzaicloud.

The class WorkerSourceTaskTest, method testSendRecordsRetries.

@Test
public void testSendRecordsRetries() throws Exception {
    createWorkerTask();
    // Differentiate only by Kafka partition so we can reuse conversion expectations
    SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, "topic", 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
    SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, "topic", 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
    SourceRecord record3 = new SourceRecord(PARTITION, OFFSET, "topic", 3, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
    // First round
    expectSendRecordOnce(false);
    // Any Producer retriable exception should work here
    expectSendRecordSyncFailure(new org.apache.kafka.common.errors.TimeoutException("retriable sync failure"));
    // Second round
    expectSendRecordOnce(true);
    expectSendRecordOnce(false);
    PowerMock.replayAll();
    // Try to send 3 records: the first send succeeds, the second fails, so
    // the last two should be saved for retry.
    Whitebox.setInternalState(workerTask, "toSend", Arrays.asList(record1, record2, record3));
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(true, Whitebox.getInternalState(workerTask, "lastSendFailed"));
    assertEquals(Arrays.asList(record2, record3), Whitebox.getInternalState(workerTask, "toSend"));
    // Next they all succeed
    Whitebox.invokeMethod(workerTask, "sendRecords");
    assertEquals(false, Whitebox.getInternalState(workerTask, "lastSendFailed"));
    assertNull(Whitebox.getInternalState(workerTask, "toSend"));
    PowerMock.verifyAll();
}
Also used: SourceRecord (org.apache.kafka.connect.source.SourceRecord), ThreadedTest (org.apache.kafka.connect.util.ThreadedTest), Test (org.junit.Test)
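
The test uses TimeoutException because it extends RetriableException in org.apache.kafka.common.errors, which is what routes WorkerSourceTask into its retry path instead of failing the task. A quick standalone check:

import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.errors.TimeoutException;

public class RetriableCheck {
    public static void main(String[] args) {
        Throwable t = new TimeoutException("retriable sync failure");
        // Prints true: the producer treats TimeoutException as retriable.
        System.out.println(t instanceof RetriableException);
    }
}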

Aggregations

SourceRecord (org.apache.kafka.connect.source.SourceRecord): 308
Test (org.junit.Test): 148
Test (org.junit.jupiter.api.Test): 98
Struct (org.apache.kafka.connect.data.Struct): 68
HashMap (java.util.HashMap): 60
Schema (org.apache.kafka.connect.data.Schema): 45
ThreadedTest (org.apache.kafka.connect.util.ThreadedTest): 27
ParameterizedTest (org.apache.kafka.connect.util.ParameterizedTest): 23
ArrayList (java.util.ArrayList): 22
RetryWithToleranceOperatorTest (org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest): 21
Map (java.util.Map): 15
SchemaBuilder (org.apache.kafka.connect.data.SchemaBuilder): 13
ConnectException (org.apache.kafka.connect.errors.ConnectException): 13
Document (org.bson.Document): 13
FixFor (io.debezium.doc.FixFor): 12
List (java.util.List): 12
RecordsForCollection (io.debezium.connector.mongodb.RecordMakers.RecordsForCollection): 11
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 11
ConnectHeaders (org.apache.kafka.connect.header.ConnectHeaders): 11
BsonTimestamp (org.bson.BsonTimestamp): 11