use of com.google.bigtable.v2.Mutation in project beam by apache.
the class BigtableServiceImplTest method testWrite.
/**
 * This test ensures that protobuf creation and interactions with {@link BulkMutation} work as
 * expected: a written record is converted into exactly one {@code MutateRowsRequest.Entry},
 * and closing the writer flushes the bulk mutation.
 *
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testWrite() throws IOException, InterruptedException {
    BigtableService.Writer writer = new BigtableServiceImpl.BigtableWriterImpl(mockSession, TABLE_NAME);
    ByteString rowKey = ByteString.copyFromUtf8("key");
    Mutation setCellMutation =
        Mutation.newBuilder()
            .setSetCell(SetCell.newBuilder().setFamilyName("Family").build())
            .build();
    // Stub the bulk mutation to hand back a pending future for any added entry.
    SettableFuture<MutateRowResponse> pendingResponse = SettableFuture.create();
    when(mockBulkMutation.add(any(MutateRowsRequest.Entry.class))).thenReturn(pendingResponse);
    writer.writeRecord(KV.of(rowKey, ImmutableList.of(setCellMutation)));
    // The writer must have translated the KV into exactly this protobuf entry.
    Entry expectedEntry =
        MutateRowsRequest.Entry.newBuilder()
            .setRowKey(rowKey)
            .addMutations(setCellMutation)
            .build();
    verify(mockBulkMutation, times(1)).add(expectedEntry);
    writer.close();
    // Closing the writer is expected to flush the underlying bulk mutation once.
    verify(mockBulkMutation, times(1)).flush();
}
use of com.airbnb.jitney.event.spinaltap.v1.Mutation (thrift) in project SpinalTap by airbnb.
the class UpdateMutationMapper method map.
/**
 * Maps a mysql UPDATE mutation onto its thrift representation, carrying both the
 * new row image (entity) and the previous row image (previous entity).
 *
 * @param mutation the mysql update mutation to convert
 * @return the equivalent thrift {@link Mutation}
 */
public Mutation map(MysqlUpdateMutation mutation) {
    final MysqlMutationMetadata metadata = mutation.getMetadata();
    final Mutation result =
        new Mutation(
            MutationType.UPDATE,
            metadata.getTimestamp(),
            sourceId,
            metadata.getDataSource().getThriftDataSource(),
            createBinlogHeader(metadata, mutation.getType().getCode()),
            metadata.getTable().getThriftTable(),
            transformToEntity(mutation.getRow()));
    // UPDATE is the only mutation type that also carries the pre-image of the row.
    result.setPreviousEntity(transformToEntity(mutation.getPreviousRow()));
    return result;
}
use of com.airbnb.jitney.event.spinaltap.v1.Mutation (thrift) in project SpinalTap by airbnb.
the class KafkaDestinationTest method KafkaDestination.
/**
 * Round-trip test: publishes INSERT/UPDATE/DELETE mutations through {@link KafkaDestination},
 * then consumes them back from the topic and checks each deserializes to the original mutation.
 *
 * @throws Exception on publish, consume, or deserialization failure
 */
@Test
public void KafkaDestination() throws Exception {
    createKafkaTopic(TOPIC);
    KafkaProducerConfiguration configs = new KafkaProducerConfiguration(this.bootstrapServers());
    KafkaDestination kafkaDestination = new KafkaDestination(null, configs, null, null, 0L);
    List<Mutation> messages = new ArrayList<>();
    messages.add(createMutation(MutationType.INSERT));
    messages.add(createMutation(MutationType.UPDATE));
    messages.add(createMutation(MutationType.DELETE));
    kafkaDestination.publish(messages);
    Properties props = new Properties();
    props.setProperty("bootstrap.servers", this.bootstrapServers());
    props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<>(props);
    try {
        kafkaConsumer.assign(Collections.singletonList(new TopicPartition(TOPIC, 0)));
        kafkaConsumer.seekToBeginning(new TopicPartition(TOPIC, 0));
        // Poll until all three messages arrive or the 10s deadline passes. Count actual
        // records rather than poll invocations: a poll may legitimately return an empty
        // batch, and the previous poll-counting loop could "pass" on three empty batches
        // without ever checking a single message.
        List<ConsumerRecord<byte[], byte[]>> received = new ArrayList<>();
        long startMs = current();
        while (current() - startMs <= 10000L && received.size() < 3) {
            for (ConsumerRecord<byte[], byte[]> consumerRecord : kafkaConsumer.poll(1000L)) {
                received.add(consumerRecord);
            }
        }
        // JUnit convention: expected value first, actual second.
        Assert.assertEquals(3, received.size());
        for (ConsumerRecord<byte[], byte[]> consumerRecord : received) {
            com.airbnb.jitney.event.spinaltap.v1.Mutation mutation = getMutation(consumerRecord.value());
            switch (mutation.getType()) {
                case INSERT:
                    Assert.assertEquals(mutation, createMutation(MutationType.INSERT));
                    break;
                case UPDATE:
                    Assert.assertEquals(mutation, createMutation(MutationType.UPDATE));
                    break;
                case DELETE:
                    Assert.assertEquals(mutation, createMutation(MutationType.DELETE));
                    break;
            }
        }
    } finally {
        // Release both resources even when an assertion above fails.
        kafkaDestination.close();
        kafkaConsumer.close();
    }
}
use of com.airbnb.jitney.event.spinaltap.v1.Mutation (thrift) in project SpinalTap by airbnb.
the class KafkaDestinationTest method createMutation.
/**
 * Builds a thrift {@link Mutation} of the given type over a canonical single-column ("id")
 * test table, routing the mysql mutation through the thrift mutation mapper.
 *
 * @param type the mutation type to build; must be INSERT, UPDATE, or DELETE
 * @return the thrift mutation produced by the mapper
 * @throws IllegalArgumentException if {@code type} is not one of the three handled types
 */
private Mutation createMutation(MutationType type) {
    Mapper<com.airbnb.spinaltap.Mutation<?>, ? extends TBase<?, ?>> thriftMutationMapper =
        ThriftMutationMapper.create("spinaltap");
    Table table = new Table(
        0L,
        TABLE,
        DATABASE,
        ImmutableList.of(new ColumnMetadata("id", ColumnDataType.LONGLONG, true, 0)),
        ImmutableList.of("id"));
    MysqlMutationMetadata metadata = new MysqlMutationMetadata(
        new DataSource(HOSTNAME, 0, "service"),
        new BinlogFilePos(),
        table,
        0L, 0L, 0L,
        null, null,
        0L, 0);
    Row row = new Row(
        table,
        ImmutableMap.of("id", new Column(new ColumnMetadata("id", ColumnDataType.LONGLONG, true, 0), 1L)));
    MysqlMutation mutation;
    switch (type) {
        case INSERT:
            mutation = new MysqlInsertMutation(metadata, row);
            break;
        case UPDATE:
            mutation = new MysqlUpdateMutation(metadata, row, row);
            break;
        case DELETE:
            mutation = new MysqlDeleteMutation(metadata, row);
            break;
        default:
            // Previously fell through with mutation = null, which surfaced as an obscure
            // NPE inside the mapper; fail fast with a clear message instead.
            throw new IllegalArgumentException("Unsupported mutation type: " + type);
    }
    return (Mutation) (thriftMutationMapper.map(mutation));
}
use of com.airbnb.jitney.event.spinaltap.v1.Mutation (thrift) in project SpinalTap by airbnb.
the class KafkaDestinationTest method getMutation.
/**
 * Deserializes a thrift-serialized payload back into a {@link Mutation}.
 *
 * @param payload the serialized mutation bytes
 * @return the deserialized mutation
 * @throws Exception if thrift deserialization fails
 */
private Mutation getMutation(byte[] payload) throws Exception {
    final Mutation result = new Mutation();
    deserializer.get().deserialize(result, payload);
    return result;
}
Aggregations