Use of org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl.ProcessInfo in project accumulo by apache.
The class VerifySerialRecoveryIT, method testSerializedRecovery.
@Test
public void testSerializedRecovery() throws Exception {
  // make a table with many splits
  String tableName = getUniqueNames(1)[0];
  try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
    // create splits
    SortedSet<Text> splits = new TreeSet<>();
    for (int i = 0; i < 200; i++) {
      splits.add(new Text(randomHex(8)));
    }
    // create a table with the splits pre-applied
    NewTableConfiguration ntc = new NewTableConfiguration().withSplits(splits);
    c.tableOperations().create(tableName, ntc);
    // load data to give the recovery something to do
    try (BatchWriter bw = c.createBatchWriter(tableName)) {
      for (int i = 0; i < 50000; i++) {
        Mutation m = new Mutation(randomHex(8));
        m.put("", "", "");
        bw.addMutation(m);
      }
    }
    // kill the tservers
    for (ProcessReference ref : getCluster().getProcesses().get(ServerType.TABLET_SERVER)) {
      getCluster().killProcess(ServerType.TABLET_SERVER, ref);
    }
    final ProcessInfo ts = cluster.exec(TabletServer.class);
    // wait for recovery
    Iterators.size(c.createScanner(tableName, Authorizations.EMPTY).iterator());
    assertEquals(0, cluster.exec(Admin.class, "stopAll").getProcess().waitFor());
    ts.getProcess().waitFor();
    String result = ts.readStdOut();
    for (String line : result.split("\n")) {
      System.out.println(line);
    }
    // walk through the output, verifying that only a single normal recovery was running at
    // one time
    boolean started = false;
    int recoveries = 0;
    var pattern =
        Pattern.compile(".*recovered \\d+ mutations creating \\d+ entries from \\d+ walogs.*");
    for (String line : result.split("\n")) {
      // ignore the metadata ("!0") and root ("+r") tables
      if (line.contains("!0") || line.contains("+r"))
        continue;
      if (line.contains("recovering data from walogs")) {
        assertFalse(started);
        started = true;
        recoveries++;
      }
      if (pattern.matcher(line).matches()) {
        assertTrue(started);
        started = false;
      }
    }
    assertFalse(started);
    assertTrue(recoveries > 0);
  }
}
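The test calls a randomHex helper that is defined elsewhere in VerifySerialRecoveryIT and is not shown above. A minimal sketch of such a helper, assuming it only needs to produce a random hex string of the given length (the project's actual implementation may differ), could look like:

// Hypothetical sketch of the randomHex helper used above, not the project's
// actual implementation; uses java.security.SecureRandom.
private static final SecureRandom random = new SecureRandom();

static String randomHex(int n) {
  StringBuilder sb = new StringBuilder(n);
  for (int i = 0; i < n; i++) {
    // append one random hex digit, "0" through "f"
    sb.append(Integer.toHexString(random.nextInt(16)));
  }
  return sb.toString();
}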
Use of org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl.ProcessInfo in project accumulo by apache.
The class MiniAccumuloClusterControl, method execWithStdout.
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
    justification = "code runs in same security context as user who provided input file name")
@Override
public Entry<Integer,String> execWithStdout(Class<?> clz, String[] args) throws IOException {
  ProcessInfo pi = cluster.exec(clz, args);
  int exitCode;
  try {
    exitCode = pi.getProcess().waitFor();
  } catch (InterruptedException e) {
    log.warn("Interrupted waiting for process to exit", e);
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
  return Maps.immutableEntry(exitCode, pi.readStdOut());
}
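Given that signature, a call site is straightforward. The following sketch assumes a MiniAccumuloClusterControl instance named control and reuses the Admin "stopAll" invocation seen in the first snippet:

// Run Admin stopAll in a subprocess and capture its exit code and stdout.
Entry<Integer,String> result = control.execWithStdout(Admin.class, new String[] {"stopAll"});
if (result.getKey() != 0) {
  log.warn("Admin stopAll exited with code {}: {}", result.getKey(), result.getValue());
}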
Use of org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl.ProcessInfo in project accumulo by apache.
The class ExternalCompaction_1_IT, method testCompactionAndCompactorDies.
@Test
public void testCompactionAndCompactorDies() throws Exception {
  String table1 = this.getUniqueNames(1)[0];
  try (AccumuloClient client =
      Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
    // Stop the TabletServers so that they do not commit the compaction
    getCluster().getProcesses().get(TABLET_SERVER).forEach(p -> {
      try {
        getCluster().killProcess(TABLET_SERVER, p);
      } catch (Exception e) {
        fail("Failed to shutdown tablet server");
      }
    });
    // Start our TServer that will not commit the compaction
    ProcessInfo tserverProcess = getCluster().exec(ExternalCompactionTServer.class);
    createTable(client, table1, "cs3", 2);
    writeData(client, table1);
    getCluster().getClusterControl().startCompactors(ExternalDoNothingCompactor.class, 1, QUEUE3);
    getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
    compact(client, table1, 2, QUEUE3, false);
    TableId tid = getCluster().getServerContext().getTableId(table1);
    // Wait for the compaction to start by waiting for 1 external compaction column
    ExternalCompactionTestUtils.waitForCompactionStartAndReturnEcids(getCluster().getServerContext(), tid);
    // Kill the compactor
    getCluster().getClusterControl().stop(ServerType.COMPACTOR);
    // The DeadCompactionDetector in the CompactionCoordinator should fail the compaction.
    long count = 0;
    while (count == 0) {
      count = getFinalStatesForTable(getCluster(), tid)
          .filter(state -> state.getFinalState().equals(FinalState.FAILED)).count();
      UtilWaitThread.sleep(250);
    }
    // We need to cancel the compaction or delete the table here because we initiated a user
    // compaction above in the test; otherwise FaTE will continue to queue up a compaction
    client.tableOperations().cancelCompaction(table1);
    getCluster().stopProcessWithTimeout(tserverProcess.getProcess(), 30, TimeUnit.SECONDS);
    getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
  } finally {
    // We stopped the TServers and started our own; restart the original TabletServers
    getCluster().getClusterControl().start(ServerType.TABLET_SERVER);
  }
}
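This test and testExternalCompactionDeadTServer below open with the same loop that kills every running tablet server. A hypothetical helper extracted from that loop (the names mirror the snippets; nothing here is new API) could remove the duplication:

// Sketch of a shared helper: kill every TabletServer process in the
// MiniAccumuloCluster so a custom TServer can be started in its place.
private void killAllTabletServers() {
  getCluster().getProcesses().get(TABLET_SERVER).forEach(p -> {
    try {
      getCluster().killProcess(TABLET_SERVER, p);
    } catch (Exception e) {
      fail("Failed to shutdown tablet server");
    }
  });
}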
Use of org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl.ProcessInfo in project accumulo by apache.
The class ExternalCompaction_1_IT, method testExternalCompactionDeadTServer.
@Test
public void testExternalCompactionDeadTServer() throws Exception {
  // Shut down the normal TServers
  getCluster().getProcesses().get(TABLET_SERVER).forEach(p -> {
    try {
      getCluster().killProcess(TABLET_SERVER, p);
    } catch (Exception e) {
      fail("Failed to shutdown tablet server");
    }
  });
  // Start our TServer that will not commit the compaction
  ProcessInfo tserverProcess = getCluster().exec(ExternalCompactionTServer.class);
  final String table3 = this.getUniqueNames(1)[0];
  try (final AccumuloClient client =
      Accumulo.newClient().from(getCluster().getClientProperties()).build()) {
    createTable(client, table3, "cs7");
    writeData(client, table3);
    getCluster().getClusterControl().startCompactors(Compactor.class, 1, QUEUE7);
    getCluster().getClusterControl().startCoordinator(CompactionCoordinator.class);
    compact(client, table3, 2, QUEUE7, false);
    // ExternalCompactionTServer will not commit the compaction. Wait for the
    // metadata table entries to show up.
    LOG.info("Waiting for external compaction to complete.");
    TableId tid = getCluster().getServerContext().getTableId(table3);
    Stream<ExternalCompactionFinalState> fs = getFinalStatesForTable(getCluster(), tid);
    while (fs.count() == 0) {
      LOG.info("Waiting for compaction completed marker to appear");
      UtilWaitThread.sleep(250);
      fs = getFinalStatesForTable(getCluster(), tid);
    }
    LOG.info("Validating metadata table contents.");
    TabletsMetadata tm = getCluster().getServerContext().getAmple().readTablets().forTable(tid)
        .fetch(ColumnType.ECOMP).build();
    List<TabletMetadata> md = new ArrayList<>();
    tm.forEach(t -> md.add(t));
    assertEquals(1, md.size());
    TabletMetadata m = md.get(0);
    Map<ExternalCompactionId,ExternalCompactionMetadata> em = m.getExternalCompactions();
    assertEquals(1, em.size());
    List<ExternalCompactionFinalState> finished = new ArrayList<>();
    getFinalStatesForTable(getCluster(), tid).forEach(f -> finished.add(f));
    assertEquals(1, finished.size());
    assertEquals(em.entrySet().iterator().next().getKey(),
        finished.get(0).getExternalCompactionId());
    tm.close();
    // Force a flush on the metadata table before killing our tserver
    client.tableOperations().flush("accumulo.metadata");
    // Stop our TabletServer. Need to perform a normal shutdown so that the WAL is closed
    // normally.
    LOG.info("Stopping our tablet server");
    getCluster().stopProcessWithTimeout(tserverProcess.getProcess(), 30, TimeUnit.SECONDS);
    getCluster().getClusterControl().stop(ServerType.TABLET_SERVER);
    // Start a TabletServer to commit the compaction.
    LOG.info("Starting normal tablet server");
    getCluster().getClusterControl().start(ServerType.TABLET_SERVER);
    // Wait for the compaction to be committed.
    LOG.info("Waiting for compaction completed marker to disappear");
    Stream<ExternalCompactionFinalState> fs2 = getFinalStatesForTable(getCluster(), tid);
    while (fs2.count() != 0) {
      LOG.info("Waiting for compaction completed marker to disappear");
      UtilWaitThread.sleep(500);
      fs2 = getFinalStatesForTable(getCluster(), tid);
    }
    verify(client, table3, 2);
    // We need to cancel the compaction or delete the table here because we initiated a user
    // compaction above in the test; otherwise FaTE will continue to queue up a compaction
    client.tableOperations().cancelCompaction(table3);
  }
}
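Both wait loops in this test rebuild the Stream on each pass because count() is a terminal operation that consumes the stream. A hypothetical polling helper, assuming a Supplier that yields a fresh stream per call, captures the pattern:

// Sketch: poll a freshly-created stream until its element count satisfies
// the given predicate, sleeping between attempts.
static void waitForFinalStateCount(Supplier<Stream<ExternalCompactionFinalState>> source,
    LongPredicate done, long sleepMillis) {
  while (!done.test(source.get().count())) {
    UtilWaitThread.sleep(sleepMillis);
  }
}

// e.g. wait for the completed marker to appear, then to disappear:
// waitForFinalStateCount(() -> getFinalStatesForTable(getCluster(), tid), c -> c > 0, 250);
// waitForFinalStateCount(() -> getFinalStatesForTable(getCluster(), tid), c -> c == 0, 500);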
Use of org.apache.accumulo.miniclusterImpl.MiniAccumuloClusterImpl.ProcessInfo in project accumulo by apache.
The class GarbageCollectorIT, method gcLotsOfCandidatesIT.
@Test
public void gcLotsOfCandidatesIT() throws Exception {
  killMacGc();
  log.info("Filling metadata table with bogus delete flags");
  try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
    addEntries(c);
    cluster.getConfig().setDefaultMemory(32, MemoryUnit.MEGABYTE);
    ProcessInfo gc = cluster.exec(SimpleGarbageCollector.class);
    sleepUninterruptibly(20, TimeUnit.SECONDS);
    String output = "";
    while (!output.contains("has exceeded the threshold")) {
      try {
        output = gc.readStdOut();
      } catch (UncheckedIOException ex) {
        log.error("IO error reading the IT's accumulo-gc STDOUT", ex);
        break;
      }
    }
    gc.getProcess().destroy();
    assertTrue(output.contains("has exceeded the threshold"));
  }
}
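One caveat in the loop above: it calls readStdOut() in a tight spin until the threshold message shows up, and only the UncheckedIOException branch can break out early. A variant with a pause and a deadline, sketched under the assumption that a five-minute cap is acceptable for this IT, would be gentler:

String output = "";
// hypothetical deadline so the loop cannot spin forever if the message never appears
long deadline = System.nanoTime() + TimeUnit.MINUTES.toNanos(5);
while (!output.contains("has exceeded the threshold") && System.nanoTime() < deadline) {
  sleepUninterruptibly(1, TimeUnit.SECONDS); // avoid a busy spin
  output = gc.readStdOut();
}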