Use of com.datastax.oss.sink.pulsar.GenericRecordImpl in project pulsar-sink by datastax.
The class WriteTimestampAndTtlCCMIT, method should_insert_record_with_timestamp_provided_via_mapping_and_validate_timestamp_of_table.
/**
 * Test for KAF-46.
 */
@ParameterizedTest(name = "[{index}] schema={0}, timestampValue={1}, expectedTimestampValue={2}")
@MethodSource("timestampColProvider")
void should_insert_record_with_timestamp_provided_via_mapping_and_validate_timestamp_of_table(
    Schema schema, Number timestampValue, Number expectedTimestampValue) {
  taskConfigs.add(
      makeConnectorProperties(
          "bigintcol=value.bigint, doublecol=value.double, __timestamp = value.timestampcol",
          ImmutableMap.of(
              String.format("topic.mytopic.%s.%s.timestampTimeUnit", keyspaceName, "types"),
              "MILLISECONDS")));

  GenericRecordImpl value =
      new GenericRecordImpl()
          .put("bigint", 1234567L)
          .put("double", 42.0)
          .put("timestampcol", timestampValue);
  PulsarRecordImpl record =
      new PulsarRecordImpl("persistent://tenant/namespace/mytopic", null, value, schema);
  runTaskWithRecords(record);

  // Verify that the record was inserted properly in the database.
  List<Row> results =
      session.execute("SELECT bigintcol, doublecol, writetime(doublecol) FROM types").all();
  assertThat(results.size()).isEqualTo(1);
  Row row = results.get(0);
  assertThat(row.getLong("bigintcol")).isEqualTo(1234567L);
  assertThat(row.getDouble("doublecol")).isEqualTo(42.0);
  assertThat(row.getLong(2)).isEqualTo(expectedTimestampValue.longValue());
}
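The timestampColProvider MethodSource referenced above is not part of this excerpt. A minimal sketch of what such a provider could look like, assuming JUnit 5's Arguments and java.util.stream.Stream are imported and reusing the schema-building style from the timestamp test below; the concrete values are illustrative only:

// Hypothetical sketch only: the project's real timestampColProvider is not shown in this
// excerpt. It pairs a generic record schema with a timestamp value in milliseconds and the
// microsecond value that writetime(doublecol) is expected to report.
private static Stream<Arguments> timestampColProvider() {
  RecordSchemaBuilder builder =
      org.apache.pulsar.client.api.schema.SchemaBuilder.record("MyBean");
  builder.field("bigint").type(SchemaType.INT64);
  builder.field("double").type(SchemaType.DOUBLE);
  builder.field("timestampcol").type(SchemaType.INT64);
  Schema<GenericRecord> schema =
      org.apache.pulsar.client.api.Schema.generic(builder.build(SchemaType.AVRO));
  // 1000 ms -> 1_000_000 us, matching timestampTimeUnit=MILLISECONDS configured in the test.
  return Stream.of(Arguments.of(schema, 1000L, 1_000_000L));
}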
Use of com.datastax.oss.sink.pulsar.GenericRecordImpl in project pulsar-sink by datastax.
The class WriteTimestampAndTtlCCMIT, method timestamp.
@Test
void timestamp() {
  taskConfigs.add(makeConnectorProperties("bigintcol=value.bigint, doublecol=value.double"));

  RecordSchemaBuilder builder =
      org.apache.pulsar.client.api.schema.SchemaBuilder.record("MyBean");
  builder.field("bigint").type(SchemaType.INT64);
  builder.field("double").type(SchemaType.DOUBLE);
  Schema schema = org.apache.pulsar.client.api.Schema.generic(builder.build(SchemaType.AVRO));

  GenericRecordImpl value =
      new GenericRecordImpl().put("bigint", 1234567L).put("double", 42.0);
  PulsarRecordImpl record =
      new PulsarRecordImpl(
          "persistent://tenant/namespace/mytopic", null, value, schema, 153000987L);
  runTaskWithRecords(record);

  // Verify that the record was inserted properly in the database.
  List<Row> results =
      session.execute("SELECT bigintcol, doublecol, writetime(doublecol) FROM types").all();
  assertThat(results.size()).isEqualTo(1);
  Row row = results.get(0);
  assertThat(row.getLong("bigintcol")).isEqualTo(1234567L);
  assertThat(row.getDouble("doublecol")).isEqualTo(42.0);
  assertThat(row.getLong(2)).isEqualTo(153000987000L);
}
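No __timestamp mapping is configured here, so the assertion relies on the record's event time (the 153000987L constructor argument) being used as the write timestamp. The expected value simply reflects the unit difference between Pulsar event time (milliseconds) and Cassandra's writetime() (microseconds); a quick way to see the conversion, assuming java.util.concurrent.TimeUnit is imported:

// Illustrative arithmetic only: Pulsar event time is in milliseconds, while Cassandra's
// writetime() reports microseconds.
long eventTimeMillis = 153000987L;
long expectedWritetimeMicros = TimeUnit.MILLISECONDS.toMicros(eventTimeMillis); // 153000987000L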
Use of com.datastax.oss.sink.pulsar.GenericRecordImpl in project pulsar-sink by datastax.
The class WriteTimestampAndTtlCCMIT, method should_insert_record_with_ttl_provided_via_mapping_and_validate_ttl_of_table.
/**
 * Test for KAF-107.
 */
@ParameterizedTest(name = "[{index}] schema={0}, ttlValue={1}, expectedTtlValue={2}")
@MethodSource("ttlColProvider")
void should_insert_record_with_ttl_provided_via_mapping_and_validate_ttl_of_table(
    Schema schema, Number ttlValue, Number expectedTtlValue) {
  taskConfigs.add(
      makeConnectorProperties(
          "bigintcol=value.bigint, doublecol=value.double, __ttl = value.ttlcol",
          ImmutableMap.of(
              String.format("topic.mytopic.%s.%s.ttlTimeUnit", keyspaceName, "types"),
              "MILLISECONDS")));

  GenericRecordImpl value =
      new GenericRecordImpl()
          .put("bigint", 1234567L)
          .put("double", 42.0)
          .put("ttlcol", ttlValue);
  PulsarRecordImpl record =
      new PulsarRecordImpl(
          "persistent://tenant/namespace/mytopic", null, value, schema, 153000987L);
  runTaskWithRecords(record);

  // Verify that the record was inserted properly in the database.
  List<Row> results =
      session.execute("SELECT bigintcol, doublecol, ttl(doublecol) FROM types").all();
  assertThat(results.size()).isEqualTo(1);
  Row row = results.get(0);
  assertThat(row.getLong("bigintcol")).isEqualTo(1234567L);
  assertThat(row.getDouble("doublecol")).isEqualTo(42.0);
  assertTtl(row.getInt(2), expectedTtlValue);
}
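As with the timestamp test, the ttlColProvider MethodSource is not included in this excerpt. Since ttlTimeUnit is set to MILLISECONDS and Cassandra TTLs are expressed in whole seconds, the provider's argument pairs presumably encode that conversion; an illustrative example, assuming java.util.concurrent.TimeUnit is imported:

// Illustrative arithmetic only: with ttlTimeUnit=MILLISECONDS, a mapped value in milliseconds
// is expected to show up as whole seconds, because ttl(doublecol) reports seconds.
long ttlValueMillis = 1_000_000L;
long expectedTtlSeconds = TimeUnit.MILLISECONDS.toSeconds(ttlValueMillis); // 1000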
Use of com.datastax.oss.sink.pulsar.GenericRecordImpl in project pulsar-sink by datastax.
The class WriteTimestampAndTtlCCMIT, method should_use_ttl_from_config_and_use_as_ttl.
@Test
void should_use_ttl_from_config_and_use_as_ttl() {
  // given
  taskConfigs.add(
      makeConnectorProperties(
          "bigintcol=value.bigint, doublecol=value.double",
          ImmutableMap.of(
              String.format("topic.mytopic.%s.%s.ttl", keyspaceName, "types"), "100")));

  // when
  GenericRecordImpl value =
      new GenericRecordImpl().put("bigint", 1234567L).put("double", 1000.0);
  PulsarRecordImpl record =
      new PulsarRecordImpl("persistent://tenant/namespace/mytopic", null, value, recordType);
  runTaskWithRecords(record);

  // then
  List<Row> results =
      session.execute("SELECT bigintcol, doublecol, ttl(doublecol) FROM types").all();
  assertThat(results.size()).isEqualTo(1);
  Row row = results.get(0);
  assertThat(row.getLong("bigintcol")).isEqualTo(1234567L);
  assertThat(row.getDouble("doublecol")).isEqualTo(1000.0);
  assertTtl(row.getInt(2), 100);
}
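The assertTtl helper used by both TTL tests is not shown in this excerpt. A plausible minimal sketch, assuming it only needs to tolerate the seconds that elapse between the insert and the read-back, could look like:

// Hypothetical sketch of the assertTtl helper referenced above; the project's actual
// implementation is not shown here. A live TTL can never exceed the value that was written
// and must still be positive when the row is read back.
private static void assertTtl(int actualTtlSeconds, Number expectedTtlSeconds) {
  assertThat(actualTtlSeconds)
      .isGreaterThan(0)
      .isLessThanOrEqualTo(expectedTtlSeconds.intValue());
}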
Use of com.datastax.oss.sink.pulsar.GenericRecordImpl in project pulsar-sink by datastax.
The class SimpleEndToEndSimulacronIT, method makeRecord.
private Record<GenericRecord> makeRecord(
    int partition, Object key, String value, long timestamp, long offset) {
  PulsarRecordImpl res =
      new PulsarRecordImpl(
          "persistent://tenant/namespace/mytopic",
          key != null ? key.toString() : null,
          new GenericRecordImpl().put("field1", value),
          recordType,
          timestamp);
  // The partition id and record sequence do not matter to the sink; they are set only to
  // make the Record complete.
  res.setPartitionId(partition + "");
  res.setRecordSequence(offset);
  return res;
}
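A hedged usage sketch of this factory, assuming runTaskWithRecords accepts a single record as in the other snippets above; the key, value, timestamp, and offset arguments are illustrative:

// Illustrative usage only: build two records for the same partition and feed them to the
// task under test one at a time.
Record<GenericRecord> first = makeRecord(0, "key-1", "value-1", 1234567L, 0L);
Record<GenericRecord> second = makeRecord(0, "key-2", "value-2", 1234568L, 1L);
runTaskWithRecords(first);
runTaskWithRecords(second);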