Usage example of org.apache.accumulo.miniclusterImpl.ZooKeeperBindException in the Apache Accumulo project:
the setUp method of the ConfigurableMacBase class.
@Before
public void setUp() throws Exception {
    // How many times we attempt cluster.start() before giving up. ZooKeeper
    // occasionally fails to bind its (randomly chosen) port; rebuilding the
    // mini cluster picks a new port, so retrying usually succeeds.
    final int maxAttempts = 3;

    createMiniAccumulo();
    Exception lastException = null;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
        try {
            cluster.start();
            return;
        } catch (ZooKeeperBindException e) {
            lastException = e;
            log.warn("Failed to start MiniAccumuloCluster, assumably due to ZooKeeper issues", lastException);
            // Only back off and rebuild when another attempt remains; doing it
            // after the final failure just wastes 3s and leaks a cluster that
            // is never started.
            if (attempt + 1 < maxAttempts) {
                Thread.sleep(3000);
                createMiniAccumulo();
            }
        }
    }
    // All attempts exhausted; surface the last bind failure as the cause.
    throw new RuntimeException("Failed to start MiniAccumuloCluster after three attempts", lastException);
}
Usage example of org.apache.accumulo.miniclusterImpl.ZooKeeperBindException in the Apache Accumulo project:
the dataIsNotOverReplicated method of the CyclicReplicationIT class.
/**
 * Sets up two MiniAccumulo clusters that replicate to each other (a replication
 * cycle), writes a single entry on manager1, and verifies — via a summing
 * combiner on both tables — that the entry arrives on each side exactly once,
 * i.e. the cyclic configuration does not cause the mutation to be re-replicated.
 */
@Test
public void dataIsNotOverReplicated() throws Exception {
    File manager1Dir = createTestDir("manager1"), manager2Dir = createTestDir("manager2");
    String password = "password";

    // Start the first cluster, retrying with a fresh config/cluster whenever
    // ZooKeeper fails to bind its randomly assigned port.
    MiniAccumuloConfigImpl manager1Cfg;
    MiniAccumuloClusterImpl manager1Cluster;
    while (true) {
        manager1Cfg = new MiniAccumuloConfigImpl(manager1Dir, password);
        manager1Cfg.setNumTservers(1);
        manager1Cfg.setInstanceName("manager1");
        // Set up SSL if needed
        ConfigurableMacBase.configureForEnvironment(manager1Cfg, ConfigurableMacBase.getSslDir(manager1Dir));
        manager1Cfg.setProperty(Property.REPLICATION_NAME, manager1Cfg.getInstanceName());
        manager1Cfg.setProperty(Property.TSERV_WAL_MAX_SIZE, "5M");
        manager1Cfg.setProperty(Property.REPLICATION_THREADCHECK, "5m");
        manager1Cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
        manager1Cfg.setProperty(Property.MANAGER_REPLICATION_SCAN_INTERVAL, "1s");
        manager1Cluster = new MiniAccumuloClusterImpl(manager1Cfg);
        setCoreSite(manager1Cluster);
        try {
            manager1Cluster.start();
            break;
        } catch (ZooKeeperBindException e) {
            log.warn("Failed to start ZooKeeper on {}, will retry", manager1Cfg.getZooKeeperPort());
        }
    }

    // Start the second cluster the same way.
    MiniAccumuloConfigImpl manager2Cfg;
    MiniAccumuloClusterImpl manager2Cluster;
    while (true) {
        manager2Cfg = new MiniAccumuloConfigImpl(manager2Dir, password);
        manager2Cfg.setNumTservers(1);
        manager2Cfg.setInstanceName("manager2");
        // Set up SSL if needed. Need to share the same SSL truststore as manager1
        this.updatePeerConfigFromPrimary(manager1Cfg, manager2Cfg);
        manager2Cfg.setProperty(Property.REPLICATION_NAME, manager2Cfg.getInstanceName());
        manager2Cfg.setProperty(Property.TSERV_WAL_MAX_SIZE, "5M");
        manager2Cfg.setProperty(Property.REPLICATION_THREADCHECK, "5m");
        manager2Cfg.setProperty(Property.REPLICATION_WORK_ASSIGNMENT_SLEEP, "1s");
        manager2Cfg.setProperty(Property.MANAGER_REPLICATION_SCAN_INTERVAL, "1s");
        manager2Cluster = new MiniAccumuloClusterImpl(manager2Cfg);
        setCoreSite(manager2Cluster);
        try {
            manager2Cluster.start();
            break;
        } catch (ZooKeeperBindException e) {
            log.warn("Failed to start ZooKeeper on {}, will retry", manager2Cfg.getZooKeeperPort());
        }
    }

    try {
        // AccumuloClient is AutoCloseable: use try-with-resources so both
        // clients' threads and connections are released even on assertion
        // failure (the originals were never closed).
        try (AccumuloClient clientManager1 = manager1Cluster.createAccumuloClient("root", new PasswordToken(password));
                AccumuloClient clientManager2 = manager2Cluster.createAccumuloClient("root", new PasswordToken(password))) {
            String manager1UserName = "manager1", manager1Password = "foo";
            String manager2UserName = "manager2", manager2Password = "bar";
            String manager1Table = manager1Cluster.getInstanceName(), manager2Table = manager2Cluster.getInstanceName();
            clientManager1.securityOperations().createLocalUser(manager1UserName, new PasswordToken(manager1Password));
            clientManager2.securityOperations().createLocalUser(manager2UserName, new PasswordToken(manager2Password));

            // Configure the credentials we should use to authenticate ourselves to the peer for
            // replication
            clientManager1.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + manager2Cluster.getInstanceName(), manager2UserName);
            clientManager1.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + manager2Cluster.getInstanceName(), manager2Password);
            clientManager2.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + manager1Cluster.getInstanceName(), manager1UserName);
            clientManager2.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + manager1Cluster.getInstanceName(), manager1Password);

            // Point each cluster at the other as a replication peer.
            clientManager1.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + manager2Cluster.getInstanceName(), ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration(manager2Cluster.getInstanceName(), manager2Cluster.getZooKeepers())));
            clientManager2.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + manager1Cluster.getInstanceName(), ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class, AccumuloReplicaSystem.buildConfiguration(manager1Cluster.getInstanceName(), manager1Cluster.getZooKeepers())));

            clientManager1.tableOperations().create(manager1Table, new NewTableConfiguration().withoutDefaultIterators());
            String manager1TableId = clientManager1.tableOperations().tableIdMap().get(manager1Table);
            assertNotNull(manager1TableId);
            clientManager2.tableOperations().create(manager2Table, new NewTableConfiguration().withoutDefaultIterators());
            String manager2TableId = clientManager2.tableOperations().tableIdMap().get(manager2Table);
            assertNotNull(manager2TableId);

            // Replicate manager1 in the manager1 cluster to manager2 in the manager2 cluster
            clientManager1.tableOperations().setProperty(manager1Table, Property.TABLE_REPLICATION.getKey(), "true");
            clientManager1.tableOperations().setProperty(manager1Table, Property.TABLE_REPLICATION_TARGET.getKey() + manager2Cluster.getInstanceName(), manager2TableId);
            // Replicate manager2 in the manager2 cluster to manager1 in the manager1 cluster
            clientManager2.tableOperations().setProperty(manager2Table, Property.TABLE_REPLICATION.getKey(), "true");
            clientManager2.tableOperations().setProperty(manager2Table, Property.TABLE_REPLICATION_TARGET.getKey() + manager1Cluster.getInstanceName(), manager1TableId);

            // Give our replication user the ability to write to the respective table
            clientManager1.securityOperations().grantTablePermission(manager1UserName, manager1Table, TablePermission.WRITE);
            clientManager2.securityOperations().grantTablePermission(manager2UserName, manager2Table, TablePermission.WRITE);

            IteratorSetting summingCombiner = new IteratorSetting(50, SummingCombiner.class);
            SummingCombiner.setEncodingType(summingCombiner, Type.STRING);
            SummingCombiner.setCombineAllColumns(summingCombiner, true);
            // Set a combiner on both instances that will sum multiple values
            // We can use this to verify that the mutation was not sent multiple times
            clientManager1.tableOperations().attachIterator(manager1Table, summingCombiner);
            clientManager2.tableOperations().attachIterator(manager2Table, summingCombiner);

            // Write a single entry
            try (BatchWriter bw = clientManager1.createBatchWriter(manager1Table)) {
                Mutation m = new Mutation("row");
                m.put("count", "", "1");
                bw.addMutation(m);
            }

            Set<String> files = clientManager1.replicationOperations().referencedFiles(manager1Table);
            log.info("Found {} that need replication from manager1", files);

            // Kill and restart the tserver to close the WAL on manager1
            for (ProcessReference proc : manager1Cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
                manager1Cluster.killProcess(ServerType.TABLET_SERVER, proc);
            }
            manager1Cluster.exec(TabletServer.class);
            log.info("Restarted tserver on manager1");
            // Try to avoid ACCUMULO-2964
            Thread.sleep(1000);

            // Sanity check that the element is there on manager1
            Entry<Key, Value> entry;
            try (Scanner s = clientManager1.createScanner(manager1Table, Authorizations.EMPTY)) {
                entry = Iterables.getOnlyElement(s);
                assertEquals("1", entry.getValue().toString());
                // Wait for this table to replicate
                clientManager1.replicationOperations().drain(manager1Table, files);
                Thread.sleep(5000);
            }

            // Check that the element made it to manager2 only once
            try (Scanner s = clientManager2.createScanner(manager2Table, Authorizations.EMPTY)) {
                entry = Iterables.getOnlyElement(s);
                assertEquals("1", entry.getValue().toString());
                // Wait for manager2 to finish replicating it back
                files = clientManager2.replicationOperations().referencedFiles(manager2Table);
                // Kill and restart the tserver to close the WAL on manager2
                for (ProcessReference proc : manager2Cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
                    manager2Cluster.killProcess(ServerType.TABLET_SERVER, proc);
                }
                manager2Cluster.exec(TabletServer.class);
                // Try to avoid ACCUMULO-2964
                Thread.sleep(1000);
            }

            // Re-check manager2 after its WAL was closed: still exactly one
            // entry with value "1", then drain its replication back to manager1
            try (Scanner s = clientManager2.createScanner(manager2Table, Authorizations.EMPTY)) {
                entry = Iterables.getOnlyElement(s);
                assertEquals("1", entry.getValue().toString());
                clientManager2.replicationOperations().drain(manager2Table, files);
                Thread.sleep(5000);
            }

            // Verify that the entry wasn't sent back to manager1 (the combiner
            // would have summed a re-replicated copy to a value > 1)
            try (Scanner s = clientManager1.createScanner(manager1Table, Authorizations.EMPTY)) {
                entry = Iterables.getOnlyElement(s);
                assertEquals("1", entry.getValue().toString());
            }
        }
    } finally {
        manager1Cluster.stop();
        manager2Cluster.stop();
    }
}
Aggregations