Use of org.apache.accumulo.core.manager.thrift.ManagerMonitorInfo in project accumulo by apache.
The class BalanceInPresenceOfOfflineTableIT, method test.
@Test
public void test() throws Exception {
  log.info("Test that balancing is not stopped by an offline table with outstanding migrations.");
  log.debug("starting test ingestion");
  VerifyParams params = new VerifyParams(getClientProps(), TEST_TABLE, 200_000);
  TestIngest.ingest(accumuloClient, params);
  accumuloClient.tableOperations().flush(TEST_TABLE, null, null, true);
  VerifyIngest.verifyIngest(accumuloClient, params);

  log.debug("waiting for balancing, up to ~5 minutes to allow for migration cleanup.");
  final long startTime = System.currentTimeMillis();
  long currentWait = 10_000;
  boolean balancingWorked = false;

  Credentials creds = new Credentials(getAdminPrincipal(), getAdminToken());
  while (!balancingWorked && (System.currentTimeMillis() - startTime) < ((5 * 60 + 15) * 1000)) {
    Thread.sleep(currentWait);
    currentWait *= 2;

    log.debug("fetch the list of tablets assigned to each tserver.");
    ManagerClientService.Iface client = null;
    ManagerMonitorInfo stats;
    while (true) {
      try {
        client = ManagerClient.getConnectionWithRetry((ClientContext) accumuloClient);
        stats = client.getManagerStats(TraceUtil.traceInfo(),
            creds.toThrift(accumuloClient.instanceOperations().getInstanceId()));
        break;
      } catch (ThriftSecurityException exception) {
        throw new AccumuloSecurityException(exception);
      } catch (ThriftNotActiveServiceException e) {
        // Let it loop, fetching a new location
        log.debug("Contacted a Manager which is no longer active, retrying");
        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
      } catch (TException exception) {
        throw new AccumuloException(exception);
      } finally {
        if (client != null) {
          ManagerClient.close(client, (ClientContext) accumuloClient);
        }
      }
    }

    if (stats.getTServerInfoSize() < 2) {
      log.debug("we need >= 2 servers. sleeping for {}ms", currentWait);
      continue;
    }
    if (stats.getUnassignedTablets() != 0) {
      log.debug("We shouldn't have unassigned tablets. sleeping for {}ms", currentWait);
      continue;
    }

    long[] tabletsPerServer = new long[stats.getTServerInfoSize()];
    Arrays.fill(tabletsPerServer, 0L);
    for (int i = 0; i < stats.getTServerInfoSize(); i++) {
      for (Map.Entry<String, TableInfo> entry : stats.getTServerInfo().get(i).getTableMap().entrySet()) {
        tabletsPerServer[i] += entry.getValue().getTablets();
      }
    }

    if (tabletsPerServer[0] <= 10) {
      log.debug("We should have > 10 tablets. sleeping for {}ms", currentWait);
      continue;
    }
    long min = NumberUtils.min(tabletsPerServer), max = NumberUtils.max(tabletsPerServer);
    log.debug("Min={}, Max={}", min, max);
    if ((min / ((double) max)) < 0.5) {
      log.debug("ratio of min to max tablets per server should be roughly even. sleeping for {}ms", currentWait);
      continue;
    }
    balancingWorked = true;
  }

  assertTrue("did not properly balance", balancingWorked);
}
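As an aside, the connect/fetch/close pattern in the inner loop above can be folded into one small helper. The sketch below is illustrative only: the helper name fetchManagerStats is hypothetical, and it simply reuses the ManagerClient, TraceUtil, and Credentials calls already shown in the test.

// Hypothetical helper wrapping the fetch pattern from the test above:
// connect to the active Manager, request its ManagerMonitorInfo, retry if the
// contacted Manager is no longer active, and always release the connection.
private static ManagerMonitorInfo fetchManagerStats(AccumuloClient accumuloClient, Credentials creds)
    throws AccumuloException, AccumuloSecurityException {
  while (true) {
    ManagerClientService.Iface client = null;
    try {
      client = ManagerClient.getConnectionWithRetry((ClientContext) accumuloClient);
      return client.getManagerStats(TraceUtil.traceInfo(),
          creds.toThrift(accumuloClient.instanceOperations().getInstanceId()));
    } catch (ThriftSecurityException e) {
      throw new AccumuloSecurityException(e);
    } catch (ThriftNotActiveServiceException e) {
      // the Manager we reached is no longer active; loop and locate the current one
      sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    } catch (TException e) {
      throw new AccumuloException(e);
    } finally {
      if (client != null) {
        ManagerClient.close(client, (ClientContext) accumuloClient);
      }
    }
  }
}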
Use of org.apache.accumulo.core.manager.thrift.ManagerMonitorInfo in project accumulo by apache.
The class MiniAccumuloClusterImplTest, method saneMonitorInfo.
@Test(timeout = 60000)
public void saneMonitorInfo() throws Exception {
  ManagerMonitorInfo stats;
  while (true) {
    stats = accumulo.getManagerMonitorInfo();
    if (stats.tableMap.size() <= 2) {
      continue;
    }
    if (null != stats.tServerInfo && stats.tServerInfo.size() == NUM_TSERVERS) {
      break;
    }
  }
  List<ManagerState> validStates = Arrays.asList(ManagerState.values());
  List<ManagerGoalState> validGoals = Arrays.asList(ManagerGoalState.values());
  assertTrue("manager state should be valid.", validStates.contains(stats.state));
  assertTrue("manager goal state should be in " + validGoals + ". is " + stats.goalState,
      validGoals.contains(stats.goalState));
  assertNotNull("should have a table map.", stats.tableMap);
  assertTrue("root table should exist in " + stats.tableMap.keySet(),
      stats.tableMap.containsKey(RootTable.ID.canonical()));
  assertTrue("meta table should exist in " + stats.tableMap.keySet(),
      stats.tableMap.containsKey(MetadataTable.ID.canonical()));
  assertTrue("our test table should exist in " + stats.tableMap.keySet(),
      stats.tableMap.containsKey(testTableID));
  assertNotNull("there should be tservers.", stats.tServerInfo);
  assertEquals(NUM_TSERVERS, stats.tServerInfo.size());
}
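The wait loop above spins as fast as getManagerMonitorInfo() returns and relies on the 60-second test timeout to bound it. A throttled variant of the same wait, with an explicit deadline, might look like the sketch below; the deadline and sleep values are illustrative, it reuses the test's accumulo handle and NUM_TSERVERS constant, and it reads only the ManagerMonitorInfo fields already used above.

// Illustrative variant of the wait: same conditions on ManagerMonitorInfo,
// but with an explicit deadline and a short sleep between polls.
long deadline = System.currentTimeMillis() + 60_000;
ManagerMonitorInfo stats = accumulo.getManagerMonitorInfo();
while (System.currentTimeMillis() < deadline
    && (stats.tableMap.size() <= 2
        || stats.tServerInfo == null
        || stats.tServerInfo.size() != NUM_TSERVERS)) {
  Thread.sleep(250);
  stats = accumulo.getManagerMonitorInfo();
}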
Use of org.apache.accumulo.core.manager.thrift.ManagerMonitorInfo in project accumulo by apache.
The class CompactionsResource, method getActiveCompactions.
/**
 * Generates a new JSON object with compaction information
 *
 * @return JSON object
 */
@GET
public Compactions getActiveCompactions() {
  Compactions compactions = new Compactions();
  ManagerMonitorInfo mmi = monitor.getMmi();
  if (mmi == null) {
    return compactions;
  }
  Map<HostAndPort, Monitor.CompactionStats> entry = monitor.getCompactions();
  for (TabletServerStatus tserverInfo : mmi.getTServerInfo()) {
    var stats = entry.get(HostAndPort.fromString(tserverInfo.name));
    if (stats != null) {
      compactions.addCompaction(new CompactionInfo(tserverInfo, stats));
    }
  }
  return compactions;
}
Use of org.apache.accumulo.core.manager.thrift.ManagerMonitorInfo in project accumulo by apache.
The class XMLResource, method getInformation.
/**
 * Generates a summary of the Monitor
 *
 * @return SummaryInformation object
 */
public SummaryInformation getInformation() {
  ManagerMonitorInfo mmi = monitor.getMmi();
  if (mmi == null) {
    throw new WebApplicationException(Status.INTERNAL_SERVER_ERROR);
  }
  // Add Monitor information
  SummaryInformation xml = new SummaryInformation(mmi.tServerInfo.size(),
      ManagerResource.getTables(monitor), TablesResource.getTables(monitor));
  // Add tserver information
  for (TabletServerStatus status : mmi.tServerInfo) {
    xml.addTabletServer(new TabletServer(monitor, status));
  }
  return xml;
}
Use of org.apache.accumulo.core.manager.thrift.ManagerMonitorInfo in project accumulo by apache.
The class BulkImportMonitoringIT, method test.
// suppress importDirectory deprecated since this tests legacy monitoring
@SuppressWarnings("deprecation")
@Test
public void test() throws Exception {
  getCluster().getClusterControl().start(ServerType.MONITOR);
  try (AccumuloClient c = Accumulo.newClient().from(getClientProperties()).build()) {
    // creating table name
    final String tableName = getUniqueNames(1)[0];
    // creating splits
    SortedSet<Text> splits = new TreeSet<>();
    for (int i = 1; i < 0xf; i++) {
      splits.add(new Text(Integer.toHexString(i)));
    }
    // creating properties
    HashMap<String, String> props = new HashMap<>();
    props.put(Property.TABLE_MAJC_RATIO.getKey(), "1");
    // creating table with configuration
    var ntc = new NewTableConfiguration().setProperties(props).withSplits(splits);
    c.tableOperations().create(tableName, ntc);

    ManagerMonitorInfo stats = getCluster().getManagerMonitorInfo();
    assertEquals(1, stats.tServerInfo.size());
    assertEquals(0, stats.bulkImports.size());
    assertEquals(0, stats.tServerInfo.get(0).bulkImports.size());

    log.info("Creating lots of bulk import files");
    final FileSystem fs = getCluster().getFileSystem();
    final Path basePath = getCluster().getTemporaryPath();
    final Path base = new Path(basePath, "testBulkLoad" + tableName);
    fs.delete(base, true);
    fs.mkdirs(base);

    ExecutorService es = Executors.newFixedThreadPool(5);
    List<Future<Pair<String, String>>> futures = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
      final int which = i;
      futures.add(es.submit(() -> {
        Path bulkFailures = new Path(base, "failures" + which);
        Path files = new Path(base, "files" + which);
        fs.mkdirs(bulkFailures);
        fs.mkdirs(files);
        for (int i1 = 0; i1 < 10; i1++) {
          FileSKVWriter writer = FileOperations.getInstance().newWriterBuilder()
              .forFile(files + "/bulk_" + i1 + "." + RFile.EXTENSION, fs, fs.getConf(),
                  CryptoServiceFactory.newDefaultInstance())
              .withTableConfiguration(DefaultConfiguration.getInstance()).build();
          writer.startDefaultLocalityGroup();
          for (int j = 0x100; j < 0xfff; j += 3) {
            writer.append(new Key(Integer.toHexString(j)), new Value());
          }
          writer.close();
        }
        return new Pair<>(files.toString(), bulkFailures.toString());
      }));
    }
    List<Pair<String, String>> dirs = new ArrayList<>();
    for (Future<Pair<String, String>> f : futures) {
      dirs.add(f.get());
    }

    log.info("Importing");
    long now = System.currentTimeMillis();
    List<Future<Object>> errs = new ArrayList<>();
    for (Pair<String, String> entry : dirs) {
      final String dir = entry.getFirst();
      final String err = entry.getSecond();
      errs.add(es.submit(() -> {
        c.tableOperations().importDirectory(tableName, dir, err, false);
        return null;
      }));
    }
    es.shutdown();

    while (!es.isTerminated()
        && stats.bulkImports.size() + stats.tServerInfo.get(0).bulkImports.size() == 0) {
      es.awaitTermination(10, TimeUnit.MILLISECONDS);
      stats = getCluster().getManagerMonitorInfo();
    }
    log.info(stats.bulkImports.toString());
    assertTrue(!stats.bulkImports.isEmpty());

    // look for exception
    for (Future<Object> err : errs) {
      err.get();
    }
    es.awaitTermination(2, TimeUnit.MINUTES);
    assertTrue(es.isTerminated());
    log.info(String.format("Completed in %.2f seconds", (System.currentTimeMillis() - now) / 1000.));
  }
}
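The polling loop above watches two places for in-flight bulk imports: the manager-level list (stats.bulkImports) and each tablet server's list (tServerInfo.get(i).bulkImports). A small helper that totals both could make that check reusable; the sketch below is hypothetical and reads only the ManagerMonitorInfo and TabletServerStatus fields already used in these snippets.

// Hypothetical helper summing in-flight bulk imports reported by the Manager
// and by every tablet server in a ManagerMonitorInfo snapshot.
static int totalBulkImports(ManagerMonitorInfo stats) {
  int total = stats.bulkImports.size();
  for (TabletServerStatus tserver : stats.tServerInfo) {
    total += tserver.bulkImports.size();
  }
  return total;
}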