Use of org.apache.accumulo.miniclusterImpl.ProcessReference in project accumulo by apache.
The class SuspendedTabletsIT, method crashAndResumeTserver.
@Test
public void crashAndResumeTserver() throws Exception {
  // Run the test body. When we get to the point where we need a tserver to go away, get rid of it
  // via crashing
  suspensionTestBody((ctx, locs, count) -> {
    // Exclude the tablet server hosting the metadata table from the list and only
    // kill tablet servers that are not hosting the metadata table.
    List<ProcessReference> procs = getCluster().getProcesses().get(ServerType.TABLET_SERVER)
        .stream().filter(p -> !metadataTserverProcess.equals(p)).collect(Collectors.toList());
    Collections.shuffle(procs, random);
    assertEquals("Not enough tservers exist", TSERVERS - 1, procs.size());
    assertTrue("Attempting to kill more tservers (" + count + ") than exist in the cluster ("
        + procs.size() + ")", procs.size() >= count);
    for (int i = 0; i < count; ++i) {
      ProcessReference pr = procs.get(i);
      log.info("Crashing {}", pr.getProcess());
      getCluster().killProcess(ServerType.TABLET_SERVER, pr);
    }
  });
}
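The lambda above supplies the kill strategy to suspensionTestBody, so the callback type must accept the client context, the current tablet locations, and the number of servers to take down. A minimal sketch of what that functional interface could look like follows; the interface name TServerKiller and the method name are assumptions inferred from the lambda's shape, not confirmed by this page:

// Hypothetical sketch of the callback consumed by suspensionTestBody. The
// names are assumptions; ClientContext and TabletLocations are the types
// used elsewhere in SuspendedTabletsIT.
@FunctionalInterface
public interface TServerKiller {
  // ctx: client context for the cluster; locs: current tablet locations;
  // count: how many tablet servers the strategy should eliminate
  void eliminateTabletServers(ClientContext ctx, TabletLocations locs, int count)
      throws Exception;
}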
Use of org.apache.accumulo.miniclusterImpl.ProcessReference in project accumulo by apache.
The class SuspendedTabletsIT, method setUp.
@Override
@Before
public void setUp() throws Exception {
  super.setUp();
  try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
    // Wait for all tablet servers to come online and then choose the first server in the list.
    // Update the balancer configuration to assign all metadata tablets to that server (and
    // everything else to other servers).
    InstanceOperations iops = client.instanceOperations();
    List<String> tservers = iops.getTabletServers();
    while (tservers == null || tservers.size() < 1) {
      Thread.sleep(1000L);
      tservers = client.instanceOperations().getTabletServers();
    }
    HostAndPort metadataServer = HostAndPort.fromString(tservers.get(0));
    log.info("Configuring balancer to assign all metadata tablets to {}", metadataServer);
    iops.setProperty(HostRegexTableLoadBalancer.HOST_BALANCER_PREFIX + MetadataTable.NAME,
        metadataServer.toString());
    // Wait for the balancer to assign all metadata tablets to the chosen server.
    ClientContext ctx = (ClientContext) client;
    TabletLocations tl = TabletLocations.retrieve(ctx, MetadataTable.NAME, RootTable.NAME);
    while (tl.hosted.keySet().size() != 1 || !tl.hosted.containsKey(metadataServer)) {
      log.info("Metadata tablets are not hosted on the correct server. Waiting for balancer...");
      Thread.sleep(1000L);
      tl = TabletLocations.retrieve(ctx, MetadataTable.NAME, RootTable.NAME);
    }
    log.info("Metadata tablets are now hosted on {}", metadataServer);
  }
  // Since we started only a single tablet server, we know it's the one hosting the
  // metadata table. Save its process reference off so we can exclude it later when
  // killing tablet servers.
  Collection<ProcessReference> procs = getCluster().getProcesses().get(ServerType.TABLET_SERVER);
  assertEquals("Expected a single tserver process", 1, procs.size());
  metadataTserverProcess = procs.iterator().next();
  // Update the number of tservers and start the new tservers.
  getCluster().getConfig().setNumTservers(TSERVERS);
  getCluster().start();
}
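For context, the setProperty call above writes a per-table custom property that HostRegexTableLoadBalancer reads to map a table's tablets onto matching hosts. Assuming HOST_BALANCER_PREFIX resolves to "table.custom.balancer.host.regex." (an assumption about the balancer's property naming, not shown on this page), the call amounts to:

// Hedged illustration of the property written by setUp(); the literal prefix
// is an assumption about HostRegexTableLoadBalancer.HOST_BALANCER_PREFIX.
// MetadataTable.NAME is "accumulo.metadata", so the effective entry would be
// something like: table.custom.balancer.host.regex.accumulo.metadata=localhost:9997
iops.setProperty("table.custom.balancer.host.regex." + MetadataTable.NAME,
    metadataServer.toString());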
Use of org.apache.accumulo.miniclusterImpl.ProcessReference in project accumulo by apache.
The class ZookeeperRestartIT, method test.
@Test
public void test() throws Exception {
  try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
    c.tableOperations().create("test_ingest");
    try (BatchWriter bw = c.createBatchWriter("test_ingest")) {
      Mutation m = new Mutation("row");
      m.put("cf", "cq", "value");
      bw.addMutation(m);
    }
    // kill zookeeper
    for (ProcessReference proc : cluster.getProcesses().get(ServerType.ZOOKEEPER)) {
      cluster.killProcess(ServerType.ZOOKEEPER, proc);
    }
    // give the servers time to react
    sleepUninterruptibly(1, TimeUnit.SECONDS);
    // start zookeeper back up
    cluster.start();
    // use the tservers
    try (Scanner s = c.createScanner("test_ingest", Authorizations.EMPTY)) {
      Iterator<Entry<Key,Value>> i = s.iterator();
      assertTrue(i.hasNext());
      assertEquals("row", i.next().getKey().getRow().toString());
      assertFalse(i.hasNext());
      // use the manager
      c.tableOperations().delete("test_ingest");
    }
  }
}
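The sleepUninterruptibly call above is a statically imported helper; whether it comes from Guava or an Accumulo utility class is not visible on this page. A minimal sketch of the technique it implements, modeled on Guava's Uninterruptibles.sleepUninterruptibly, is:

// Sketch of a sleep that survives interruption: keep sleeping for the
// remaining time if interrupted, then restore the thread's interrupt flag.
static void sleepUninterruptibly(long duration, TimeUnit unit) {
  boolean interrupted = false;
  try {
    long remainingNanos = unit.toNanos(duration);
    long end = System.nanoTime() + remainingNanos;
    while (remainingNanos > 0) {
      try {
        TimeUnit.NANOSECONDS.sleep(remainingNanos);
        remainingNanos = 0;
      } catch (InterruptedException e) {
        interrupted = true;
        remainingNanos = end - System.nanoTime();
      }
    }
  } finally {
    if (interrupted) {
      Thread.currentThread().interrupt(); // restore the interrupt flag
    }
  }
}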
Use of org.apache.accumulo.miniclusterImpl.ProcessReference in project accumulo by apache.
The class ExistingMacIT, method testExistingInstance.
@Test
public void testExistingInstance() throws Exception {
  AccumuloClient client =
      getCluster().createAccumuloClient("root", new PasswordToken(ROOT_PASSWORD));
  client.tableOperations().create("table1");
  try (BatchWriter bw = client.createBatchWriter("table1")) {
    Mutation m1 = new Mutation("00081");
    m1.put("math", "sqroot", "9");
    m1.put("math", "sq", "6560");
    bw.addMutation(m1);
  }
  client.tableOperations().flush("table1", null, null, true);
  // TODO use constants
  client.tableOperations().flush(MetadataTable.NAME, null, null, true);
  client.tableOperations().flush(RootTable.NAME, null, null, true);
  Set<Entry<ServerType,Collection<ProcessReference>>> procs =
      getCluster().getProcesses().entrySet();
  for (Entry<ServerType,Collection<ProcessReference>> entry : procs) {
    if (entry.getKey() == ServerType.ZOOKEEPER)
      continue;
    for (ProcessReference pr : entry.getValue()) {
      getCluster().killProcess(entry.getKey(), pr);
    }
  }
  final DefaultConfiguration defaultConfig = DefaultConfiguration.getInstance();
  final long zkTimeout = ConfigurationTypeHelper.getTimeInMillis(
      getCluster().getConfig().getSiteConfig().get(Property.INSTANCE_ZK_TIMEOUT.getKey()));
  ZooReaderWriter zrw = new ZooReaderWriter(getCluster().getZooKeepers(), (int) zkTimeout,
      defaultConfig.get(Property.INSTANCE_SECRET));
  final String zInstanceRoot =
      Constants.ZROOT + "/" + client.instanceOperations().getInstanceId();
  while (!AccumuloStatus.isAccumuloOffline(zrw, zInstanceRoot)) {
    log.debug("Accumulo services still have their ZK locks held");
    Thread.sleep(1000);
  }
  File hadoopConfDir = createTestDir(ExistingMacIT.class.getSimpleName() + "_hadoop_conf");
  FileUtils.deleteQuietly(hadoopConfDir);
  assertTrue(hadoopConfDir.mkdirs());
  createEmptyConfig(new File(hadoopConfDir, "core-site.xml"));
  createEmptyConfig(new File(hadoopConfDir, "hdfs-site.xml"));
  File testDir2 = createTestDir(ExistingMacIT.class.getSimpleName() + "_2");
  FileUtils.deleteQuietly(testDir2);
  MiniAccumuloConfigImpl macConfig2 = new MiniAccumuloConfigImpl(testDir2, "notused");
  macConfig2.useExistingInstance(
      new File(getCluster().getConfig().getConfDir(), "accumulo.properties"), hadoopConfDir);
  MiniAccumuloClusterImpl accumulo2 = new MiniAccumuloClusterImpl(macConfig2);
  accumulo2.start();
  client = accumulo2.createAccumuloClient("root", new PasswordToken(ROOT_PASSWORD));
  try (Scanner scanner = client.createScanner("table1", Authorizations.EMPTY)) {
    int sum = 0;
    for (Entry<Key,Value> entry : scanner) {
      sum += Integer.parseInt(entry.getValue().toString());
    }
    assertEquals(6569, sum);
  }
  accumulo2.stop();
}
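The createEmptyConfig helper called above is part of the test class but not shown on this page. A plausible sketch, assuming it serializes an empty Hadoop Configuration so the second cluster sees valid but empty core-site.xml and hdfs-site.xml files:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;

// Plausible sketch of createEmptyConfig (the real implementation is not
// shown here): write an empty Hadoop XML configuration to the given file.
private static void createEmptyConfig(File confFile) throws IOException {
  Configuration conf = new Configuration(false); // skip default resources
  try (OutputStream out = new FileOutputStream(confFile)) {
    conf.writeXml(out);
  }
}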
Use of org.apache.accumulo.miniclusterImpl.ProcessReference in project accumulo by apache.
The class VerifySerialRecoveryIT, method testSerializedRecovery.
@Test
public void testSerializedRecovery() throws Exception {
  // make a table with many splits
  String tableName = getUniqueNames(1)[0];
  try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
    // create splits
    SortedSet<Text> splits = new TreeSet<>();
    for (int i = 0; i < 200; i++) {
      splits.add(new Text(randomHex(8)));
    }
    // create table with config
    NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
    c.tableOperations().create(tableName, ntc);
    // load data to give the recovery something to do
    try (BatchWriter bw = c.createBatchWriter(tableName)) {
      for (int i = 0; i < 50000; i++) {
        Mutation m = new Mutation(randomHex(8));
        m.put("", "", "");
        bw.addMutation(m);
      }
    }
    // kill the tserver
    for (ProcessReference ref : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
      getCluster().killProcess(ServerType.TABLET_SERVER, ref);
    }
    final ProcessInfo ts = cluster.exec(TabletServer.class);
    // wait for recovery
    Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator());
    assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
    ts.getProcess().waitFor();
    String result = ts.readStdOut();
    for (String line : result.split("\n")) {
      System.out.println(line);
    }
    // walk through the output, verifying that only a single normal recovery was running at one
    // time
    boolean started = false;
    int recoveries = 0;
    var pattern =
        Pattern.compile(".*recovered \\d+ mutations creating \\d+ entries from \\d+ walogs.*");
    for (String line : result.split("\n")) {
      // ignore metadata tables
      if (line.contains("!0") || line.contains("+r"))
        continue;
      if (line.contains("recovering data from walogs")) {
        assertFalse(started);
        started = true;
        recoveries++;
      }
      if (pattern.matcher(line).matches()) {
        assertTrue(started);
        started = false;
      }
    }
    assertFalse(started);
    assertTrue(recoveries > 0);
  }
}
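The randomHex helper used for split points and row ids above is not shown on this page. A hypothetical sketch under the assumption that it returns a fixed-length hex string, which spreads keys uniformly across the key space:

import java.security.SecureRandom;

// Hypothetical sketch of randomHex (the real helper is not shown here):
// build a lowercase hex string of exactly len digits.
private static final SecureRandom RND = new SecureRandom();

private static String randomHex(int len) {
  StringBuilder sb = new StringBuilder(len);
  while (sb.length() < len) {
    sb.append(Integer.toHexString(RND.nextInt(16))); // one hex digit per pass
  }
  return sb.toString();
}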