Use of org.apache.accumulo.minicluster.impl.ProcessReference in project accumulo by apache.
From the class UnorderedWorkAssignerReplicationIT, method dataWasReplicatedToThePeer:
@Test
public void dataWasReplicatedToThePeer() throws Exception {
  MiniAccumuloConfigImpl peerCfg = new MiniAccumuloConfigImpl(
      createTestDir(this.getClass().getName() + "_" + this.testName.getMethodName() + "_peer"), ROOT_PASSWORD);
  peerCfg.setNumTservers(1);
  peerCfg.setInstanceName("peer");
  updatePeerConfigFromPrimary(getCluster().getConfig(), peerCfg);
  peerCfg.setProperty(Property.REPLICATION_NAME, "peer");
  MiniAccumuloClusterImpl peerCluster = new MiniAccumuloClusterImpl(peerCfg);
  peerCluster.start();
  try {
    final Connector connMaster = getConnector();
    final Connector connPeer = peerCluster.getConnector("root", new PasswordToken(ROOT_PASSWORD));
    ReplicationTable.setOnline(connMaster);
    String peerUserName = "peer", peerPassword = "foo";
    String peerClusterName = "peer";
    connPeer.securityOperations().createLocalUser(peerUserName, new PasswordToken(peerPassword));
    connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + peerClusterName, peerUserName);
    connMaster.instanceOperations().setProperty(Property.REPLICATION_PEER_PASSWORD.getKey() + peerClusterName, peerPassword);
    // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
    connMaster.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + peerClusterName,
        ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
            AccumuloReplicaSystem.buildConfiguration(peerCluster.getInstanceName(), peerCluster.getZooKeepers())));
    final String masterTable = "master", peerTable = "peer";
    connMaster.tableOperations().create(masterTable);
    String masterTableId = connMaster.tableOperations().tableIdMap().get(masterTable);
    Assert.assertNotNull(masterTableId);
    connPeer.tableOperations().create(peerTable);
    String peerTableId = connPeer.tableOperations().tableIdMap().get(peerTable);
    Assert.assertNotNull(peerTableId);
    connPeer.securityOperations().grantTablePermission(peerUserName, peerTable, TablePermission.WRITE);
    // Replicate this table to the peerClusterName in a table with the peerTableId table id
    connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION.getKey(), "true");
    connMaster.tableOperations().setProperty(masterTable, Property.TABLE_REPLICATION_TARGET.getKey() + peerClusterName, peerTableId);
    // Wait for zookeeper updates (configuration) to propagate
    sleepUninterruptibly(3, TimeUnit.SECONDS);
    // Write some data to the master table
    BatchWriter bw = connMaster.createBatchWriter(masterTable, new BatchWriterConfig());
    for (int rows = 0; rows < 5000; rows++) {
      Mutation m = new Mutation(Integer.toString(rows));
      for (int cols = 0; cols < 100; cols++) {
        String value = Integer.toString(cols);
        m.put(value, "", value);
      }
      bw.addMutation(m);
    }
    bw.close();
    log.info("Wrote all data to master cluster");
    final Set<String> filesNeedingReplication = connMaster.replicationOperations().referencedFiles(masterTable);
    // Kill and restart the tablet servers so the open write-ahead logs are closed
    // and become candidates for replication
    for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
      cluster.killProcess(ServerType.TABLET_SERVER, proc);
    }
    cluster.exec(TabletServer.class);
    log.info("TabletServer restarted");
    // A successful scan of the replication table means a tablet server is serving again
    Iterators.size(ReplicationTable.getScanner(connMaster).iterator());
    log.info("TabletServer is online");
    log.info("");
    log.info("Fetching metadata records:");
    for (Entry<Key, Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
        log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
      } else {
        log.info("{} {}", kv.getKey().toStringNoTruncate(), kv.getValue());
      }
    }
    log.info("");
    log.info("Fetching replication records:");
    for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
      log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
    }
    Future<Boolean> future = executor.submit(new Callable<Boolean>() {

      @Override
      public Boolean call() throws Exception {
        connMaster.replicationOperations().drain(masterTable, filesNeedingReplication);
        log.info("Drain completed");
        return true;
      }
    });
    long timeoutSeconds = timeoutFactor * 30;
    try {
      future.get(timeoutSeconds, TimeUnit.SECONDS);
    } catch (TimeoutException e) {
      future.cancel(true);
      Assert.fail("Drain did not finish within " + timeoutSeconds + " seconds");
    }
    log.info("drain completed");
    log.info("");
    log.info("Fetching metadata records:");
    for (Entry<Key, Value> kv : connMaster.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
      if (ReplicationSection.COLF.equals(kv.getKey().getColumnFamily())) {
        log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
      } else {
        log.info("{} {}", kv.getKey().toStringNoTruncate(), kv.getValue());
      }
    }
    log.info("");
    log.info("Fetching replication records:");
    for (Entry<Key, Value> kv : ReplicationTable.getScanner(connMaster)) {
      log.info("{} {}", kv.getKey().toStringNoTruncate(), ProtobufUtil.toString(Status.parseFrom(kv.getValue().get())));
    }
    try (Scanner master = connMaster.createScanner(masterTable, Authorizations.EMPTY);
        Scanner peer = connPeer.createScanner(peerTable, Authorizations.EMPTY)) {
      Iterator<Entry<Key, Value>> masterIter = master.iterator(), peerIter = peer.iterator();
      Entry<Key, Value> masterEntry = null, peerEntry = null;
      while (masterIter.hasNext() && peerIter.hasNext()) {
        masterEntry = masterIter.next();
        peerEntry = peerIter.next();
        Assert.assertEquals(masterEntry.getKey() + " was not equal to " + peerEntry.getKey(), 0,
            masterEntry.getKey().compareTo(peerEntry.getKey(), PartialKey.ROW_COLFAM_COLQUAL_COLVIS));
        Assert.assertEquals(masterEntry.getValue(), peerEntry.getValue());
      }
      log.info("Last master entry: {}", masterEntry);
      log.info("Last peer entry: {}", peerEntry);
      Assert.assertFalse("Had more data to read from the master", masterIter.hasNext());
      Assert.assertFalse("Had more data to read from the peer", peerIter.hasNext());
    }
  } finally {
    peerCluster.stop();
  }
}
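The ProcessReference usage above follows an idiom that recurs throughout these tests: enumerate the tracked processes for a server type, kill each one, then start a replacement. A minimal sketch of just that idiom, assuming the same cluster field and imports as the test above (the helper name is illustrative):

// Illustrative helper, not part of the test: kill every tablet server the mini
// cluster is tracking, then launch a fresh one in-process.
private void restartTabletServers(MiniAccumuloClusterImpl cluster) throws Exception {
  for (ProcessReference proc : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
    cluster.killProcess(ServerType.TABLET_SERVER, proc);
  }
  cluster.exec(TabletServer.class);
}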
Use of org.apache.accumulo.minicluster.impl.ProcessReference in project accumulo by apache.
From the class ZookeeperRestartIT, method test:
@Test
public void test() throws Exception {
  Connector c = getConnector();
  c.tableOperations().create("test_ingest");
  BatchWriter bw = c.createBatchWriter("test_ingest", null);
  Mutation m = new Mutation("row");
  m.put("cf", "cq", "value");
  bw.addMutation(m);
  bw.close();
  // kill zookeeper
  for (ProcessReference proc : cluster.getProcesses().get(ServerType.ZOOKEEPER)) {
    cluster.killProcess(ServerType.ZOOKEEPER, proc);
  }
  // give the servers time to react
  sleepUninterruptibly(1, TimeUnit.SECONDS);
  // start zookeeper back up
  cluster.start();
  // use the tservers
  try (Scanner s = c.createScanner("test_ingest", Authorizations.EMPTY)) {
    Iterator<Entry<Key, Value>> i = s.iterator();
    assertTrue(i.hasNext());
    assertEquals("row", i.next().getKey().getRow().toString());
    assertFalse(i.hasNext());
    // use the master
    c.tableOperations().delete("test_ingest");
  }
}
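The same kill-then-restart shape works for any ServerType the mini cluster tracks; the test above relies on cluster.start() bringing the killed ZooKeeper back up. A hedged sketch of a generic bounce helper, assuming the same cluster field (the helper name and the one-second pause are illustrative):

// Illustrative helper: kill all processes of one type, give the survivors a
// moment to notice, then restart whatever the mini cluster manages.
private void bounce(MiniAccumuloClusterImpl cluster, ServerType type) throws Exception {
  for (ProcessReference proc : cluster.getProcesses().get(type)) {
    cluster.killProcess(type, proc);
  }
  sleepUninterruptibly(1, TimeUnit.SECONDS);
  cluster.start();
}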
Use of org.apache.accumulo.minicluster.impl.ProcessReference in project accumulo by apache.
From the class ExistingMacIT, method testExistingInstance:
@Test
public void testExistingInstance() throws Exception {
  Connector conn = getCluster().getConnector("root", new PasswordToken(ROOT_PASSWORD));
  conn.tableOperations().create("table1");
  BatchWriter bw = conn.createBatchWriter("table1", new BatchWriterConfig());
  Mutation m1 = new Mutation("00081");
  m1.put("math", "sqroot", "9");
  m1.put("math", "sq", "6560");
  bw.addMutation(m1);
  bw.close();
  conn.tableOperations().flush("table1", null, null, true);
  // TODO use constants
  conn.tableOperations().flush(MetadataTable.NAME, null, null, true);
  conn.tableOperations().flush(RootTable.NAME, null, null, true);
  // Kill every process except ZooKeeper, so a second cluster can attach to the instance below
  Set<Entry<ServerType, Collection<ProcessReference>>> procs = getCluster().getProcesses().entrySet();
  for (Entry<ServerType, Collection<ProcessReference>> entry : procs) {
    if (entry.getKey() == ServerType.ZOOKEEPER)
      continue;
    for (ProcessReference pr : entry.getValue()) {
      getCluster().killProcess(entry.getKey(), pr);
    }
  }
  final DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();
  final long zkTimeout = ConfigurationTypeHelper.getTimeInMillis(getCluster().getConfig().getSiteConfig().get(Property.INSTANCE_ZK_TIMEOUT.getKey()));
  IZooReaderWriter zrw = new ZooReaderWriterFactory().getZooReaderWriter(getCluster().getZooKeepers(), (int) zkTimeout, defaultConfig.get(Property.INSTANCE_SECRET));
  final String zInstanceRoot = Constants.ZROOT + "/" + conn.getInstance().getInstanceID();
  while (!AccumuloStatus.isAccumuloOffline(zrw, zInstanceRoot)) {
    log.debug("Accumulo services still have their ZK locks held");
    Thread.sleep(1000);
  }
  File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf");
  FileUtils.deleteQuietly(hadoopConfDir);
  assertTrue(hadoopConfDir.mkdirs());
  createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
  createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
  File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_2");
  FileUtils.deleteQuietly(testDir2);
  MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
  macConfig2.useExistingInstance(new File(getCluster().getConfig().getConfDir(), "accumulo-site.xml"), hadoopConfDir);
  MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
  accumulo2.start();
  conn = accumulo2.getConnector("root", new PasswordToken(ROOT_PASSWORD));
  try (Scanner scanner = conn.createScanner("table1", Authorizations.EMPTY)) {
    int sum = 0;
    for (Entry<Key, Value> entry : scanner) {
      sum += Integer.parseInt(entry.getValue().toString());
    }
    Assert.assertEquals(6569, sum);
  }
  accumulo2.stop();
}
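The kill loop in the middle of this test is worth isolating: it stops every Accumulo server while deliberately leaving ZooKeeper alive, which is what lets the second MiniAccumuloClusterImpl attach to the existing instance. A minimal sketch as a standalone helper, assuming the same imports as the test above (the helper name is illustrative):

// Illustrative helper: shut down all Accumulo processes but keep ZooKeeper
// running so the instance state stays reachable.
private void killAllButZooKeeper(MiniAccumuloClusterImpl cluster) throws Exception {
  for (Entry<ServerType, Collection<ProcessReference>> entry : cluster.getProcesses().entrySet()) {
    if (entry.getKey() == ServerType.ZOOKEEPER)
      continue;
    for (ProcessReference pr : entry.getValue()) {
      cluster.killProcess(entry.getKey(), pr);
    }
  }
}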
Use of org.apache.accumulo.minicluster.impl.ProcessReference in project accumulo by apache.
From the class GarbageCollectorIT, method dontGCRootLog:
@Test
public void dontGCRootLog() throws Exception {
  killMacGc();
  // dirty metadata
  Connector c = getConnector();
  String table = getUniqueNames(1)[0];
  c.tableOperations().create(table);
  // let gc run for a bit
  cluster.start();
  sleepUninterruptibly(20, TimeUnit.SECONDS);
  killMacGc();
  // kill tservers
  for (ProcessReference ref : cluster.getProcesses().get(ServerType.TABLET_SERVER)) {
    cluster.killProcess(ServerType.TABLET_SERVER, ref);
  }
  // run recovery
  cluster.start();
  // did it recover?
  try (Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
    Iterators.size(scanner.iterator());
  }
}
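killMacGc() is defined elsewhere in GarbageCollectorIT; its job is to stop the garbage collector process so the test controls when GC runs. A hedged sketch of what such a helper can look like with ProcessReference (an illustration, not the project's exact code):

// Illustrative sketch of a killMacGc-style helper: terminate every garbage
// collector process the mini cluster is tracking.
private void killMacGc(MiniAccumuloClusterImpl cluster) throws Exception {
  for (ProcessReference ref : cluster.getProcesses().get(ServerType.GARBAGE_COLLECTOR)) {
    cluster.killProcess(ServerType.GARBAGE_COLLECTOR, ref);
  }
}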
Use of org.apache.accumulo.minicluster.impl.ProcessReference in project accumulo by apache.
From the class ThriftServerBindsBeforeZooKeeperLockIT, method testMonitorService:
@Test
public void testMonitorService() throws Exception {
  final MiniAccumuloClusterImpl cluster = (MiniAccumuloClusterImpl) getCluster();
  Collection<ProcessReference> monitors = cluster.getProcesses().get(ServerType.MONITOR);
  // Need to start one monitor and let it become active.
  if (null == monitors || 0 == monitors.size()) {
    getClusterControl().start(ServerType.MONITOR, "localhost");
  }
  final ZooKeeperInstance inst = new ZooKeeperInstance(cluster.getClientConfig());
  while (true) {
    try {
      MonitorUtil.getLocation(inst);
      break;
    } catch (Exception e) {
      LOG.debug("Failed to find active monitor location, retrying", e);
      Thread.sleep(1000);
    }
  }
  LOG.debug("Found active monitor");
  while (true) {
    int freePort = PortUtils.getRandomFreePort();
    String monitorUrl = "http://localhost:" + freePort;
    Process monitor = null;
    try {
      LOG.debug("Starting standby monitor on {}", freePort);
      monitor = startProcess(cluster, ServerType.MONITOR, freePort);
      while (true) {
        URL url = new URL(monitorUrl);
        try {
          HttpURLConnection cnxn = (HttpURLConnection) url.openConnection();
          final int responseCode = cnxn.getResponseCode();
          String errorText;
          // This is our "assertion", but we want to re-check it if it's not what we expect
          if (HttpURLConnection.HTTP_OK == responseCode) {
            return;
          } else {
            errorText = FunctionalTestUtils.readAll(cnxn.getErrorStream());
          }
          LOG.debug("Unexpected responseCode and/or error text, will retry: '{}' '{}'", responseCode, errorText);
        } catch (Exception e) {
          LOG.debug("Caught exception trying to fetch monitor info", e);
        }
        // Wait before trying again
        Thread.sleep(1000);
        // The monitor may have died trying to bind the port. Pick a new port and restart it in that case.
        if (!monitor.isAlive()) {
          freePort = PortUtils.getRandomFreePort();
          monitorUrl = "http://localhost:" + freePort;
          LOG.debug("Monitor died, restarting it listening on {}", freePort);
          monitor = startProcess(cluster, ServerType.MONITOR, freePort);
        }
      }
    } finally {
      if (null != monitor) {
        monitor.destroyForcibly();
      }
    }
  }
}
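The inner HTTP check above can be distilled into a small probe; a minimal sketch, assuming a monitor URL string like the one built in the test (the method name is illustrative):

// Illustrative probe: true once the monitor answers HTTP 200, false on any
// other status or on a connection failure.
private boolean monitorResponds(String monitorUrl) {
  try {
    HttpURLConnection cnxn = (HttpURLConnection) new URL(monitorUrl).openConnection();
    return HttpURLConnection.HTTP_OK == cnxn.getResponseCode();
  } catch (IOException e) {
    return false;
  }
}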