Use of org.apache.accumulo.minicluster.impl.ProcessReference in project accumulo by apache.
The example below is taken from the class KerberosReplicationIT, method dataReplicatedToCorrectTable.
@Test
public void dataReplicatedToCorrectTable() throws Exception {
  // Login as the root user
  final UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().toURI().toString());
  ugi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      log.info("testing {}", ugi);
      final KerberosToken token = new KerberosToken();
      final Connector primaryConn = primary.getConnector(rootUser.getPrincipal(), token);
      final Connector peerConn = peer.getConnector(rootUser.getPrincipal(), token);
      ClusterUser replicationUser = kdc.getClientPrincipal(0);
      // Create user for replication to the peer
      peerConn.securityOperations().createLocalUser(replicationUser.getPrincipal(), null);
      primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + PEER_NAME, replicationUser.getPrincipal());
      primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_KEYTAB.getKey() + PEER_NAME, replicationUser.getKeytab().getAbsolutePath());
      // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
      primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + PEER_NAME,
          ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
              AccumuloReplicaSystem.buildConfiguration(peerConn.getInstance().getInstanceName(), peerConn.getInstance().getZooKeepers())));
      String primaryTable1 = "primary", peerTable1 = "peer";
      // Create tables
      primaryConn.tableOperations().create(primaryTable1);
      String masterTableId1 = primaryConn.tableOperations().tableIdMap().get(primaryTable1);
      Assert.assertNotNull(masterTableId1);
      peerConn.tableOperations().create(peerTable1);
      String peerTableId1 = peerConn.tableOperations().tableIdMap().get(peerTable1);
      Assert.assertNotNull(peerTableId1);
      // Grant write permission
      peerConn.securityOperations().grantTablePermission(replicationUser.getPrincipal(), peerTable1, TablePermission.WRITE);
      // Replicate this table to the peerClusterName in a table with the peerTableId table id
      primaryConn.tableOperations().setProperty(primaryTable1, Property.TABLE_REPLICATION.getKey(), "true");
      primaryConn.tableOperations().setProperty(primaryTable1, Property.TABLE_REPLICATION_TARGET.getKey() + PEER_NAME, peerTableId1);
      // Write some data to table1
      BatchWriter bw = primaryConn.createBatchWriter(primaryTable1, new BatchWriterConfig());
      long masterTable1Records = 0L;
      for (int rows = 0; rows < 2500; rows++) {
        Mutation m = new Mutation(primaryTable1 + rows);
        for (int cols = 0; cols < 100; cols++) {
          String value = Integer.toString(cols);
          m.put(value, "", value);
          masterTable1Records++;
        }
        bw.addMutation(m);
      }
      bw.close();
      log.info("Wrote all data to primary cluster");
      Set<String> filesFor1 = primaryConn.replicationOperations().referencedFiles(primaryTable1);
      // Restart the tserver to force a close on the WAL
      for (ProcessReference proc : primary.getProcesses().get(ServerType.TABLET_SERVER)) {
        primary.killProcess(ServerType.TABLET_SERVER, proc);
      }
      primary.exec(TabletServer.class);
      log.info("Restarted the tserver");
      // Read the data -- the tserver is back up and running and tablets are assigned
      Iterators.size(primaryConn.createScanner(primaryTable1, Authorizations.EMPTY).iterator());
      // Wait for both tables to be replicated
      log.info("Waiting for {} for {}", filesFor1, primaryTable1);
      primaryConn.replicationOperations().drain(primaryTable1, filesFor1);
      long countTable = 0L;
      for (Entry<Key, Value> entry : peerConn.createScanner(peerTable1, Authorizations.EMPTY)) {
        countTable++;
        Assert.assertTrue("Found unexpected key-value " + entry.getKey().toStringNoTruncate() + " " + entry.getValue(),
            entry.getKey().getRow().toString().startsWith(primaryTable1));
      }
      log.info("Found {} records in {}", countTable, peerTable1);
      Assert.assertEquals(masterTable1Records, countTable);
      return null;
    }
  });
}
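Since this page is about ProcessReference, the relevant portion of the test above can be reduced to a small helper that kills every tablet server tracked by the mini cluster and then launches a replacement. The following is a minimal sketch based on the calls used in the test; the helper name restartTabletServers and the parameter name cluster are illustrative, and it assumes a running MiniAccumuloClusterImpl.

import org.apache.accumulo.minicluster.ServerType;
import org.apache.accumulo.minicluster.impl.MiniAccumuloClusterImpl;
import org.apache.accumulo.minicluster.impl.ProcessReference;
import org.apache.accumulo.tserver.TabletServer;

public class TabletServerRestartSketch {
  // Hypothetical helper: stop all tablet server processes, then start a fresh one.
  static void restartTabletServers(MiniAccumuloClusterImpl cluster) throws Exception {
    // Each ProcessReference wraps a server process managed by the mini cluster.
    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
      cluster.killProcess(ServerType.TABLET_SERVER, proc);
    }
    // Start a replacement tablet server process.
    cluster.exec(TabletServer.class);
  }
}

In the test, this restart is what forces the write-ahead log to close so that its files become eligible for replication to the peer.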