Use of org.apache.accumulo.core.replication.thrift.WalEdits in project accumulo by apache.
The class AccumuloReplicaSystem, method getWalEdits.
protected WalReplication getWalEdits(ReplicationTarget target, DataInputStream wal, Path p, Status status, long sizeLimit, Set<Integer> desiredTids) throws IOException {
  WalEdits edits = new WalEdits();
  edits.edits = new ArrayList<>();
  long size = 0L;
  long entriesConsumed = 0L;
  long numUpdates = 0L;
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();
  while (size < sizeLimit) {
    try {
      key.readFields(wal);
      value.readFields(wal);
    } catch (EOFException e) {
      log.debug("Caught EOFException reading {}", p);
      if (status.getInfiniteEnd() && status.getClosed()) {
        log.debug("{} is closed and has unknown length, assuming entire file has been consumed", p);
        entriesConsumed = Long.MAX_VALUE;
      }
      break;
    }
    entriesConsumed++;
    switch (key.event) {
      case DEFINE_TABLET:
        // For new DEFINE_TABLETs, we also need to record the new tids we see
        if (target.getSourceTableId().equals(key.tablet.getTableId())) {
          desiredTids.add(key.tid);
        }
        break;
      case MUTATION:
      case MANY_MUTATIONS:
        // Only write out mutations for tids that are for the desired tablet
        if (desiredTids.contains(key.tid)) {
          ByteArrayOutputStream baos = new ByteArrayOutputStream();
          DataOutputStream out = new DataOutputStream(baos);
          key.write(out);
          // Only write out the mutations that don't have the given ReplicationTarget
          // as a replication source (this prevents infinite replication loops: a->b, b->a, repeat)
          numUpdates += writeValueAvoidingReplicationCycles(out, value, target);
          out.flush();
          byte[] data = baos.toByteArray();
          size += data.length;
          edits.addToEdits(ByteBuffer.wrap(data));
        }
        break;
      default:
        log.trace("Ignoring WAL entry which doesn't contain mutations; should not have received such entries");
        break;
    }
  }
  return new WalReplication(edits, size, entriesConsumed, numUpdates);
}
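Each buffer added to edits above begins with the serialized LogFileKey; the int mutation count and per-mutation layout that follow are inferred from the replayer tests later on this page, since writeValueAvoidingReplicationCycles itself is not shown here. As a rough, hedged sketch of that layout, the illustrative helper below decodes one such buffer; the class and method names are ours, the import paths are assumed from the tserver module, and the use of Mutation's no-arg Writable constructor is an assumption.

// Illustrative decoder for a single WalEdits entry (assumed layout: LogFileKey,
// then an int mutation count, then each Mutation as a Writable). Not part of Accumulo.
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.tserver.logger.LogFileKey;

public class WalEditDecoder {
  public static List<Mutation> decodeEdit(ByteBuffer edit) throws IOException {
    // Copy the buffer so we can stream over it without disturbing its position
    byte[] bytes = new byte[edit.remaining()];
    edit.duplicate().get(bytes);
    try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes))) {
      LogFileKey key = new LogFileKey();
      key.readFields(in); // the LogFileKey written first by getWalEdits
      int numMutations = in.readInt(); // count written ahead of the mutations
      List<Mutation> mutations = new ArrayList<>();
      for (int i = 0; i < numMutations; i++) {
        Mutation m = new Mutation(); // assumes Mutation's Writable no-arg constructor
        m.readFields(in);
        mutations.add(m);
      }
      return mutations;
    }
  }
}

Applied to each element of WalEdits.getEdits() (the Thrift-generated getter for the edits list), a helper like this makes it easy to inspect exactly which mutations would be shipped to the peer.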
Use of org.apache.accumulo.core.replication.thrift.WalEdits in project accumulo by apache.
The class AccumuloReplicaSystemTest, method dontSendEmptyDataToPeer.
@Test
public void dontSendEmptyDataToPeer() throws Exception {
  Client replClient = createMock(Client.class);
  AccumuloReplicaSystem ars = createMock(AccumuloReplicaSystem.class);
  WalEdits edits = new WalEdits(Collections.emptyList());
  WalReplication walReplication = new WalReplication(edits, 0, 0, 0);
  ReplicationTarget target = new ReplicationTarget("peer", "2", Table.ID.of("1"));
  DataInputStream input = null;
  Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
  Status status = null;
  long sizeLimit = Long.MAX_VALUE;
  String remoteTableId = target.getRemoteIdentifier();
  TCredentials tcreds = null;
  Set<Integer> tids = new HashSet<>();
  WalClientExecReturn walClientExec = ars.new WalClientExecReturn(target, input, p, status, sizeLimit, remoteTableId, tcreds, tids);
  expect(ars.getWalEdits(target, input, p, status, sizeLimit, tids)).andReturn(walReplication);
  replay(replClient, ars);
  ReplicationStats stats = walClientExec.execute(replClient);
  verify(replClient, ars);
  Assert.assertEquals(new ReplicationStats(0L, 0L, 0L), stats);
}
Use of org.apache.accumulo.core.replication.thrift.WalEdits in project accumulo by apache.
The class AccumuloReplicaSystemTest, method consumedButNotSentDataShouldBeRecorded.
@Test
public void consumedButNotSentDataShouldBeRecorded() throws Exception {
  Client replClient = createMock(Client.class);
  AccumuloReplicaSystem ars = createMock(AccumuloReplicaSystem.class);
  WalEdits edits = new WalEdits(Collections.emptyList());
  WalReplication walReplication = new WalReplication(edits, 0, 5, 0);
  ReplicationTarget target = new ReplicationTarget("peer", "2", Table.ID.of("1"));
  DataInputStream input = null;
  Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
  Status status = null;
  long sizeLimit = Long.MAX_VALUE;
  String remoteTableId = target.getRemoteIdentifier();
  TCredentials tcreds = null;
  Set<Integer> tids = new HashSet<>();
  WalClientExecReturn walClientExec = ars.new WalClientExecReturn(target, input, p, status, sizeLimit, remoteTableId, tcreds, tids);
  expect(ars.getWalEdits(target, input, p, status, sizeLimit, tids)).andReturn(walReplication);
  replay(replClient, ars);
  ReplicationStats stats = walClientExec.execute(replClient);
  verify(replClient, ars);
  Assert.assertEquals(new ReplicationStats(0L, 0L, 5L), stats);
}
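Taken together, the two tests above pin down WalClientExecReturn's contract for empty data: when getWalEdits produces no edit buffers, nothing is sent to the peer, but the number of WAL entries consumed is still reported so replication progress can advance. The sketch below, written as if it were a helper inside AccumuloReplicaSystem, is only an illustration of that guard; it is not the real WalClientExecReturn.execute body, and the field names walEdits and entriesConsumed are assumptions inferred from the constructors used in these tests.

// Hedged illustration of the behaviour asserted above, not Accumulo's implementation.
// Assumes WalReplication exposes walEdits and ReplicationStats exposes entriesConsumed.
protected ReplicationStats skipPeerIfNothingToSend(WalReplication repl) {
  if (repl.walEdits.getEditsSize() == 0) {
    // Nothing to ship: avoid the replicateLog RPC entirely, but still surface how
    // much of the WAL was consumed locally (0 and 5 in the two tests above).
    return new ReplicationStats(0L, 0L, repl.entriesConsumed);
  }
  // The sending path (shipping repl.walEdits to the peer and reporting the bytes
  // and records actually sent) is omitted from this sketch.
  throw new UnsupportedOperationException("sending path omitted in this sketch");
}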
Use of org.apache.accumulo.core.replication.thrift.WalEdits in project accumulo by apache.
The class BatchWriterReplicationReplayerTest, method systemTimestampsAreSetOnUpdates.
@Test
public void systemTimestampsAreSetOnUpdates() throws Exception {
  final BatchWriterReplicationReplayer replayer = new BatchWriterReplicationReplayer();
  final String tableName = "foo";
  final long systemTimestamp = 1000;
  final BatchWriterConfig bwCfg = new BatchWriterConfig();
  bwCfg.setMaxMemory(1L);
  LogFileKey key = new LogFileKey();
  key.event = LogEvents.MANY_MUTATIONS;
  key.seq = 1;
  key.tid = 1;
  WalEdits edits = new WalEdits();
  // Make a mutation without timestamps
  Mutation m = new Mutation("row");
  m.put("cf", "cq1", "value");
  m.put("cf", "cq2", "value");
  m.put("cf", "cq3", "value");
  m.put("cf", "cq4", "value");
  m.put("cf", "cq5", "value");
  // Make it a TMutation
  TMutation tMutation = m.toThrift();
  // And then make a ServerMutation from the TMutation, adding in our systemTimestamp
  ServerMutation sMutation = new ServerMutation(tMutation);
  sMutation.setSystemTimestamp(systemTimestamp);
  // Serialize the ServerMutation (what AccumuloReplicaSystem will be doing)
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataOutputStream out = new DataOutputStream(baos);
  key.write(out);
  out.writeInt(1);
  sMutation.write(out);
  out.close();
  // Add it to our "input" to BatchWriterReplicationReplayer
  edits.addToEdits(ByteBuffer.wrap(baos.toByteArray()));
  Mutation expectedMutation = new Mutation("row");
  expectedMutation.put("cf", "cq1", sMutation.getSystemTimestamp(), "value");
  expectedMutation.put("cf", "cq2", sMutation.getSystemTimestamp(), "value");
  expectedMutation.put("cf", "cq3", sMutation.getSystemTimestamp(), "value");
  expectedMutation.put("cf", "cq4", sMutation.getSystemTimestamp(), "value");
  expectedMutation.put("cf", "cq5", sMutation.getSystemTimestamp(), "value");
  expect(conf.getAsBytes(Property.TSERV_REPLICATION_BW_REPLAYER_MEMORY)).andReturn(bwCfg.getMaxMemory());
  expect(conn.createBatchWriter(tableName, bwCfg)).andReturn(bw);
  bw.addMutations(Lists.newArrayList(expectedMutation));
  expectLastCall().once();
  bw.close();
  expectLastCall().once();
  replay(conn, conf, bw);
  replayer.replicateLog(context, tableName, edits);
}
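The serialization block above (key.write, out.writeInt(1), sMutation.write, wrapped into a ByteBuffer) is repeated verbatim in the next test. A small helper along the lines of the hedged sketch below could factor it out; the class and method names are ours, and the import paths are assumed from the tserver and server-base modules.

// Illustrative helper, not part of Accumulo: packages a LogFileKey and its
// mutations into a single WalEdits entry, mirroring the hand-written block above.
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
import org.apache.accumulo.server.data.ServerMutation;
import org.apache.accumulo.tserver.logger.LogFileKey;

public class WalEditBuilder {
  public static ByteBuffer serializeEdit(LogFileKey key, List<ServerMutation> mutations) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(baos)) {
      key.write(out); // the LogFileKey header
      out.writeInt(mutations.size()); // mutation count, as written by the tests above
      for (ServerMutation m : mutations) {
        m.write(out); // each mutation serialized as a Writable
      }
    }
    return ByteBuffer.wrap(baos.toByteArray());
  }
}

With it, the test body could be reduced to edits.addToEdits(WalEditBuilder.serializeEdit(key, Collections.singletonList(sMutation))).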
Use of org.apache.accumulo.core.replication.thrift.WalEdits in project accumulo by apache.
The class BatchWriterReplicationReplayerTest, method replicationSourcesArePreserved.
@Test
public void replicationSourcesArePreserved() throws Exception {
  final BatchWriterReplicationReplayer replayer = new BatchWriterReplicationReplayer();
  final String tableName = "foo";
  final long systemTimestamp = 1000;
  final String peerName = "peer";
  final BatchWriterConfig bwCfg = new BatchWriterConfig();
  bwCfg.setMaxMemory(1L);
  LogFileKey key = new LogFileKey();
  key.event = LogEvents.MANY_MUTATIONS;
  key.seq = 1;
  key.tid = 1;
  WalEdits edits = new WalEdits();
  // Make a mutation without timestamps
  Mutation m = new Mutation("row");
  m.put("cf", "cq1", "value");
  m.put("cf", "cq2", "value");
  m.put("cf", "cq3", "value");
  m.put("cf", "cq4", "value");
  m.put("cf", "cq5", "value");
  // This Mutation "came" from a system called "peer"
  m.addReplicationSource(peerName);
  // Make it a TMutation
  TMutation tMutation = m.toThrift();
  // And then make a ServerMutation from the TMutation, adding in our systemTimestamp
  ServerMutation sMutation = new ServerMutation(tMutation);
  sMutation.setSystemTimestamp(systemTimestamp);
  // Serialize the ServerMutation (what AccumuloReplicaSystem will be doing)
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataOutputStream out = new DataOutputStream(baos);
  key.write(out);
  out.writeInt(1);
  sMutation.write(out);
  out.close();
  // Add it to our "input" to BatchWriterReplicationReplayer
  edits.addToEdits(ByteBuffer.wrap(baos.toByteArray()));
  Mutation expectedMutation = new Mutation("row");
  expectedMutation.put("cf", "cq1", sMutation.getSystemTimestamp(), "value");
  expectedMutation.put("cf", "cq2", sMutation.getSystemTimestamp(), "value");
  expectedMutation.put("cf", "cq3", sMutation.getSystemTimestamp(), "value");
  expectedMutation.put("cf", "cq4", sMutation.getSystemTimestamp(), "value");
  expectedMutation.put("cf", "cq5", sMutation.getSystemTimestamp(), "value");
  // We expect our peer name to be preserved in the mutation that gets written
  expectedMutation.addReplicationSource(peerName);
  expect(conf.getAsBytes(Property.TSERV_REPLICATION_BW_REPLAYER_MEMORY)).andReturn(bwCfg.getMaxMemory());
  expect(conn.createBatchWriter(tableName, bwCfg)).andReturn(bw);
  bw.addMutations(Lists.newArrayList(expectedMutation));
  expectLastCall().once();
  bw.close();
  expectLastCall().once();
  replay(conn, conf, bw);
  replayer.replicateLog(context, tableName, edits);
}
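Preserving the source matters because writeValueAvoidingReplicationCycles in getWalEdits above skips mutations whose replication sources already include the target peer; if the replayer dropped the source, the a->b, b->a loop that check prevents would reopen. As a quick, hedged illustration, the snippet below round-trips a mutation through toThrift and ServerMutation, the same conversion the test performs, and checks that the source survives; it assumes Mutation exposes a getReplicationSources() accessor, which is not shown on this page, and the import paths are assumed from the core and server-base modules.

// Hedged round-trip check: a replication source set on a Mutation survives the
// toThrift/ServerMutation conversion used above. getReplicationSources() is an
// assumed accessor, not confirmed by the code on this page.
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.server.data.ServerMutation;

public class ReplicationSourceRoundTrip {
  public static void main(String[] args) {
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");
    m.addReplicationSource("peer"); // mark where this mutation originally came from
    ServerMutation s = new ServerMutation(m.toThrift());
    // Expected to print true: the source is still attached after the conversion
    System.out.println(s.getReplicationSources().contains("peer"));
  }
}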