Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
From the class AccumuloReplicaSystemTest, the method consumedButNotSentDataShouldBeRecorded:
@Test
public void consumedButNotSentDataShouldBeRecorded() throws Exception {
  Client replClient = createMock(Client.class);
  AccumuloReplicaSystem ars = createMock(AccumuloReplicaSystem.class);
  WalEdits edits = new WalEdits(Collections.emptyList());
  // Five WAL entries were consumed, but none of them produced data to send
  WalReplication walReplication = new WalReplication(edits, 0, 5, 0);
  ReplicationTarget target = new ReplicationTarget("peer", "2", Table.ID.of("1"));
  DataInputStream input = null;
  Path p = new Path("/accumulo/wals/tserver+port/" + UUID.randomUUID().toString());
  Status status = null;
  long sizeLimit = Long.MAX_VALUE;
  String remoteTableId = target.getRemoteIdentifier();
  TCredentials tcreds = null;
  Set<Integer> tids = new HashSet<>();
  WalClientExecReturn walClientExec = ars.new WalClientExecReturn(target, input, p, status, sizeLimit, remoteTableId, tcreds, tids);
  expect(ars.getWalEdits(target, input, p, status, sizeLimit, tids)).andReturn(walReplication);
  replay(replClient, ars);
  ReplicationStats stats = walClientExec.execute(replClient);
  verify(replClient, ars);
  // Even though nothing was sent, the consumed entries must still be recorded
  Assert.assertEquals(new ReplicationStats(0L, 0L, 5L), stats);
}
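The ReplicationTarget built above is just a (peerName, remoteIdentifier, sourceTableId) triple that gets serialized into the replication table. A minimal sketch of that round trip, assuming ReplicationTarget#toText() is the serializer paired with the ReplicationTarget.from(Text) call used by createWork later on this page:

  // Hedged sketch: toText() is assumed; from(Text) appears in createWork below
  ReplicationTarget target = new ReplicationTarget("peer", "2", Table.ID.of("1"));
  Text serialized = target.toText();
  ReplicationTarget copy = ReplicationTarget.from(serialized);
  assertEquals(target, copy);
  assertEquals("peer", copy.getPeerName());
  assertEquals("2", copy.getRemoteIdentifier());
  assertEquals(Table.ID.of("1"), copy.getSourceTableId());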
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
From the class AccumuloReplicaSystemTest, the method testUserKeytab:
@Test
public void testUserKeytab() throws Exception {
  AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
  ReplicationTarget target = new ReplicationTarget("peer", "peer_table", Table.ID.of("1"));
  String user = "user", keytab = "/etc/security/keytabs/replication.keytab";
  Map<String, String> confMap = new HashMap<>();
  // The peer name is appended to the property prefix to form a distinct key per peer
  confMap.put(Property.REPLICATION_PEER_USER.getKey() + target.getPeerName(), user);
  confMap.put(Property.REPLICATION_PEER_KEYTAB.getKey() + target.getPeerName(), keytab);
  AccumuloConfiguration conf = new ConfigurationCopy(confMap);
  assertEquals(user, ars.getPrincipal(conf, target));
  assertEquals(keytab, ars.getKeytab(conf, target));
}
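The same prefix-plus-peer-name pattern covers password-based peers as well. A hedged sketch of that variant (Property.REPLICATION_PEER_PASSWORD and ars.getPassword(...) are assumptions here; only the key-composition pattern comes from the test above):

  // Hedged sketch: per-peer password configuration, mirroring the keytab test
  Map<String, String> confMap = new HashMap<>();
  confMap.put(Property.REPLICATION_PEER_USER.getKey() + target.getPeerName(), "user");
  confMap.put(Property.REPLICATION_PEER_PASSWORD.getKey() + target.getPeerName(), "password"); // assumed property
  AccumuloConfiguration conf = new ConfigurationCopy(confMap);
  assertEquals("user", ars.getPrincipal(conf, target));
  assertEquals("password", ars.getPassword(conf, target)); // assumed accessor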
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
From the class AccumuloReplicaSystemTest, the method endOfFileExceptionOnOpenWalImpliesMoreReplication:
@Test
public void endOfFileExceptionOnOpenWalImpliesMoreReplication() throws Exception {
  Map<String, String> confMap = new HashMap<>();
  confMap.put(Property.REPLICATION_NAME.getKey(), "source");
  AccumuloConfiguration conf = new ConfigurationCopy(confMap);
  AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
  ars.setConf(conf);
  // The file is still open (closed=false) with an infinite end, so hitting end-of-file
  // only means no data is available yet; more could still be appended and need processing
  Status status = Status.newBuilder().setBegin(100).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
  DataInputStream dis = new DataInputStream(new ByteArrayInputStream(new byte[0]));
  WalReplication repl = ars.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), dis,
      new Path("/accumulo/wals/tserver+port/wal"), status, Long.MAX_VALUE, new HashSet<>());
  // We stopped because we got to the end of the file
  Assert.assertEquals(0, repl.entriesConsumed);
  Assert.assertEquals(0, repl.walEdits.getEditsSize());
  Assert.assertEquals(0, repl.sizeInRecords);
  Assert.assertEquals(0, repl.sizeInBytes);
}
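The Status fields drive all of this bookkeeping: begin is the offset already replicated, end the last known offset, infiniteEnd marks a file whose final length is unknown, and closed whether any more data can ever be appended. A minimal sketch of the distinction this test family exercises (moreDataMayArrive is an illustrative helper of ours, not an Accumulo API):

  // Open WAL: infinite end and not closed, so end-of-file only means "no data yet"
  Status open = Status.newBuilder().setBegin(100).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
  // Closed WAL: nothing more will ever be appended
  Status closed = Status.newBuilder(open).setClosed(true).build();

  // Illustrative helper (ours): does hitting EOF imply more replication later?
  static boolean moreDataMayArrive(Status status) {
    return !status.getClosed();
  }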
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
From the class AccumuloReplicaSystemTest, the method restartInFileKnowsAboutPreviousTableDefines:
@Test
public void restartInFileKnowsAboutPreviousTableDefines() throws Exception {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  DataOutputStream dos = new DataOutputStream(baos);
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();
  // What is seq used for?
  key.seq = 1L;
  /*
   * Disclaimer: the following series of LogFileKey and LogFileValue pairs has *no* bearing whatsoever in reality regarding what these entries would
   * actually look like in a WAL. They are solely for testing that each LogEvents value is handled; order is not important.
   */
  // Define the tablet for table 1, assigning it tid 1
  key.event = LogEvents.DEFINE_TABLET;
  key.tablet = new KeyExtent(Table.ID.of("1"), null, null);
  key.tid = 1;
  key.write(dos);
  value.write(dos);
  // First mutation (tid 1 carries over from the define)
  key.tablet = null;
  key.event = LogEvents.MUTATION;
  key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
  key.write(dos);
  value.write(dos);
  // Second mutation, also for tid 1
  key.tablet = null;
  key.event = LogEvents.MUTATION;
  key.tid = 1;
  key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
  value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
  key.write(dos);
  value.write(dos);
  dos.close();
  Map<String, String> confMap = new HashMap<>();
  confMap.put(Property.REPLICATION_NAME.getKey(), "source");
  AccumuloConfiguration conf = new ConfigurationCopy(confMap);
  AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
  ars.setConf(conf);
  Status status = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
  DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
  HashSet<Integer> tids = new HashSet<>();
  // With a one-byte size limit, only consume the first mutation, not the second
  WalReplication repl = ars.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), dis,
      new Path("/accumulo/wals/tserver+port/wal"), status, 1L, tids);
  // We stopped because we hit the size limit: the tablet define and the first mutation were consumed
  Assert.assertEquals(2, repl.entriesConsumed);
  Assert.assertEquals(1, repl.walEdits.getEditsSize());
  Assert.assertEquals(1, repl.sizeInRecords);
  Assert.assertNotEquals(0, repl.sizeInBytes);
  // Restart two entries into the file; tids still remembers the earlier DEFINE_TABLET
  status = Status.newBuilder(status).setBegin(2).build();
  // Consume the rest of the mutations
  repl = ars.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), dis,
      new Path("/accumulo/wals/tserver+port/wal"), status, 1L, tids);
  // We stopped because we got to the end of the file
  Assert.assertEquals(1, repl.entriesConsumed);
  Assert.assertEquals(1, repl.walEdits.getEditsSize());
  Assert.assertEquals(1, repl.sizeInRecords);
  Assert.assertNotEquals(0, repl.sizeInBytes);
}
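Because LogFileKey and LogFileValue are Hadoop Writables, the fake WAL built above can be replayed with readFields. A minimal sketch of that replay, tracking tablet defines the way the shared tids set does (this framing is ours, not the actual loop inside getWalEdits):

  DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
  Set<Integer> seenTids = new HashSet<>();
  LogFileKey k = new LogFileKey();
  LogFileValue v = new LogFileValue();
  for (int i = 0; i < 3; i++) { // three key/value pairs were written above
    k.readFields(in);
    v.readFields(in);
    if (k.event == LogEvents.DEFINE_TABLET) {
      // Remember the tid so later MUTATION entries can be matched to the table
      seenTids.add(k.tid);
    } else if (k.event == LogEvents.MUTATION && seenTids.contains(k.tid)) {
      System.out.println("would replicate " + v.mutations.size() + " mutation(s)");
    }
  }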
Use of org.apache.accumulo.core.replication.ReplicationTarget in project accumulo by apache.
From the class DistributedWorkQueueWorkAssigner, the method createWork:
/**
 * Scan over the {@link WorkSection} of the replication table adding work for entries that have data to replicate and have not already been queued.
 */
protected void createWork() {
  // Create a scanner over the replication table's order entries
  Scanner s;
  try {
    s = ReplicationTable.getScanner(conn);
  } catch (ReplicationTableOfflineException e) {
    // no work to do; replication is off
    return;
  }
  OrderSection.limit(s);
  Text buffer = new Text();
  for (Entry<Key, Value> orderEntry : s) {
    // If the queue is already full, don't keep adding more work entries
    if (getQueueSize() > maxQueueSize) {
      log.warn("Queued replication work exceeds configured maximum ({}), sleeping to allow work to occur", maxQueueSize);
      return;
    }
    String file = OrderSection.getFile(orderEntry.getKey(), buffer);
    OrderSection.getTableId(orderEntry.getKey(), buffer);
    String sourceTableId = buffer.toString();
    log.info("Determining if {} from {} needs to be replicated", file, sourceTableId);
    Scanner workScanner;
    try {
      workScanner = ReplicationTable.getScanner(conn);
    } catch (ReplicationTableOfflineException e) {
      log.warn("Replication table is offline. Will retry...");
      sleepUninterruptibly(5, TimeUnit.SECONDS);
      return;
    }
    WorkSection.limit(workScanner);
    workScanner.setRange(Range.exact(file));
    int newReplicationTasksSubmitted = 0, workEntriesRead = 0;
    // For a file, we can concurrently replicate it to multiple targets
    for (Entry<Key, Value> workEntry : workScanner) {
      workEntriesRead++;
      Status status;
      try {
        status = StatusUtil.fromValue(workEntry.getValue());
      } catch (InvalidProtocolBufferException e) {
        log.warn("Could not deserialize protobuf from work entry for {} to {}, will retry", file, ReplicationTarget.from(workEntry.getKey().getColumnQualifier()), e);
        continue;
      }
      // Get the ReplicationTarget for this work record
      ReplicationTarget target = WorkSection.getTarget(workEntry.getKey(), buffer);
      // Get the queue keys (if any) currently being replicated to the given peer for the given source table
      Collection<String> keysBeingReplicated = getQueuedWork(target);
      Path p = new Path(file);
      String filename = p.getName();
      String key = DistributedWorkQueueWorkAssignerHelper.getQueueKey(filename, target);
      if (!shouldQueueWork(target)) {
        if (!isWorkRequired(status) && keysBeingReplicated.contains(key)) {
          log.debug("Removing {} from replication state to {} because replication is complete", key, target.getPeerName());
          this.removeQueuedWork(target, key);
        }
        continue;
      }
      // If there is work to do, queue it
      if (isWorkRequired(status)) {
        if (queueWork(p, target)) {
          newReplicationTasksSubmitted++;
        }
      } else {
        log.debug("Not queueing work for {} to {} because {} doesn't need replication", file, target, ProtobufUtil.toString(status));
        if (keysBeingReplicated.contains(key)) {
          log.debug("Removing {} from replication state to {} because replication is complete", key, target.getPeerName());
          this.removeQueuedWork(target, key);
        }
      }
    }
    log.debug("Read {} replication entries from the WorkSection of the replication table", workEntriesRead);
    log.info("Assigned {} replication work entries for {}", newReplicationTasksSubmitted, file);
  }
}
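Condensed from the method above, the essential WorkSection read path for one WAL file is short: limit the scanner to the work section, parse the protobuf Status, and recover the ReplicationTarget from the column qualifier. Every call below appears in createWork; only the wrapping method is our framing:

  // Sketch: print each queued replication target and status for a single file
  void printWorkEntries(Connector conn, String file) throws Exception {
    Scanner workScanner = ReplicationTable.getScanner(conn);
    WorkSection.limit(workScanner);
    workScanner.setRange(Range.exact(file));
    Text buffer = new Text();
    for (Entry<Key, Value> workEntry : workScanner) {
      // Status is a protobuf; fromValue may throw InvalidProtocolBufferException
      Status status = StatusUtil.fromValue(workEntry.getValue());
      // The target is serialized into the column qualifier of the work entry
      ReplicationTarget target = WorkSection.getTarget(workEntry.getKey(), buffer);
      // Unique key naming "this file to this target" in the distributed work queue
      String queueKey = DistributedWorkQueueWorkAssignerHelper.getQueueKey(new Path(file).getName(), target);
      System.out.println(queueKey + " -> " + ProtobufUtil.toString(status));
    }
  }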