Example use of org.apache.accumulo.core.client.impl.Table in the Apache Accumulo project: class FileArchiveIT, method testUnusuedFilesAndDeletedTable.
/**
 * Verifies file archiving in two stages: first, a file made unreferenced by a second
 * compaction must appear under the file-archive directory; second, after the table is
 * deleted, its remaining data file must be archived there as well.
 */
@Test
public void testUnusuedFilesAndDeletedTable() throws Exception {
    final Connector conn = getConnector();
    final String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
    Assert.assertNotNull("Could not get table ID", tableId);
    // try-with-resources ensures the writer is closed even if addMutation throws
    try (BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig())) {
        Mutation m = new Mutation("row");
        m.put("", "", "value");
        bw.addMutation(m);
    }
    // Compact memory to disk so the tablet has exactly one data file
    conn.tableOperations().compact(tableName, null, null, true, true);
    Entry<Key, Value> entry;
    Path fileArchiveDir;
    FileSystem fs;
    int i = 0;
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        entry = Iterables.getOnlyElement(s);
        final String file = entry.getKey().getColumnQualifier().toString();
        final Path p = new Path(file);
        // Then force another compaction to make the first file unreferenced
        conn.tableOperations().compact(tableName, null, null, true, true);
        log.info("File for table: {}", file);
        fs = getCluster().getFileSystem();
        // Wait for the GC to remove the now-unreferenced file from its original location
        while (fs.exists(p)) {
            i++;
            Thread.sleep(1000);
            if (0 == i % 10) {
                log.info("Waited {} iterations, file still exists", i);
            }
        }
        log.info("File was removed");
        String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
        log.info("File relative to accumulo dir: {}", filePath);
        fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
        Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
        // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
        Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
        Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile, fs.exists(archivedFile));
        // Offline the table so we can be sure there is a single file
        conn.tableOperations().offline(tableName, true);
    }
    // Find the file currently referenced in the metadata table, then delete the table
    // and verify that file also ends up in the archive.
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        entry = Iterables.getOnlyElement(s);
        final String finalFile = entry.getKey().getColumnQualifier().toString();
        final Path finalPath = new Path(finalFile);
        conn.tableOperations().delete(tableName);
        log.info("File for table: {}", finalPath);
        i = 0;
        while (fs.exists(finalPath)) {
            i++;
            Thread.sleep(1000);
            if (0 == i % 10) {
                log.info("Waited {} iterations, file still exists", i);
            }
        }
        log.info("File was removed");
        String finalFilePath = finalPath.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
        log.info("File relative to accumulo dir: {}", finalFilePath);
        Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
        // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
        Path finalArchivedFile = new Path(fileArchiveDir, finalFilePath.substring(1));
        Assert.assertTrue("File doesn't exists in archive directory: " + finalArchivedFile, fs.exists(finalArchivedFile));
    }
}
Example use of org.apache.accumulo.core.client.impl.Table in the Apache Accumulo project: class MissingWalHeaderCompletesRecoveryIT, method testEmptyWalRecoveryCompletes.
/**
 * Verifies that log recovery completes when a referenced WAL is a zero-length file
 * (no header at all): the tablet must still come online and be readable.
 */
@Test
public void testEmptyWalRecoveryCompletes() throws Exception {
    Connector conn = getConnector();
    MiniAccumuloClusterImpl cluster = getCluster();
    FileSystem fs = cluster.getFileSystem();
    // Fake out something that looks like host:port, it's irrelevant
    String fakeServer = "127.0.0.1:12345";
    File walogs = new File(cluster.getConfig().getAccumuloDir(), ServerConstants.WAL_DIR);
    File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
    File emptyWalog = new File(walogServerDir, UUID.randomUUID().toString());
    log.info("Created empty WAL at {}", emptyWalog.toURI());
    // Create a zero-length WAL file; recovery must treat it as empty and finish
    fs.create(new Path(emptyWalog.toURI())).close();
    Assert.assertTrue("root user did not have write permission to metadata table", conn.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE));
    String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
    Assert.assertNotNull("Table ID was null", tableId);
    // Reuse fakeServer instead of repeating the literal so the server name and
    // the on-disk WAL directory cannot drift apart.
    LogEntry logEntry = new LogEntry(new KeyExtent(tableId, null, null), 0, fakeServer, emptyWalog.toURI().toString());
    log.info("Taking {} offline", tableName);
    conn.tableOperations().offline(tableName, true);
    log.info("{} is offline", tableName);
    // Manually reference the fake WAL from the table's metadata entry
    Text row = MetadataSchema.TabletsSection.getRow(tableId, null);
    Mutation m = new Mutation(row);
    m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    // try-with-resources ensures the writer is closed even if addMutation throws
    try (BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig())) {
        bw.addMutation(m);
    }
    log.info("Bringing {} online", tableName);
    conn.tableOperations().online(tableName, true);
    log.info("{} is online", tableName);
    // If recovery had not completed, the tablet would never come online and we
    // would not be able to read it.
    try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
        Assert.assertEquals(0, Iterables.size(s));
    }
}
Example use of org.apache.accumulo.core.client.impl.Table in the Apache Accumulo project: class MissingWalHeaderCompletesRecoveryIT, method testPartialHeaderWalRecoveryCompletes.
/**
 * Verifies that log recovery completes when a referenced WAL contains only a truncated
 * (half-written) header: the tablet must still come online and be readable.
 */
@Test
public void testPartialHeaderWalRecoveryCompletes() throws Exception {
    Connector conn = getConnector();
    MiniAccumuloClusterImpl cluster = getCluster();
    // Use the captured cluster reference consistently instead of calling getCluster() again
    FileSystem fs = cluster.getFileSystem();
    // Fake out something that looks like host:port, it's irrelevant
    String fakeServer = "127.0.0.1:12345";
    File walogs = new File(cluster.getConfig().getAccumuloDir(), ServerConstants.WAL_DIR);
    File walogServerDir = new File(walogs, fakeServer.replace(':', '+'));
    File partialHeaderWalog = new File(walogServerDir, UUID.randomUUID().toString());
    log.info("Created WAL with malformed header at {}", partialHeaderWalog.toURI());
    // Write only the first half of the header bytes to simulate a crash mid-write
    try (FSDataOutputStream wal = fs.create(new Path(partialHeaderWalog.toURI()))) {
        wal.write(DfsLogger.LOG_FILE_HEADER_V3.getBytes(UTF_8), 0, DfsLogger.LOG_FILE_HEADER_V3.length() / 2);
    }
    Assert.assertTrue("root user did not have write permission to metadata table", conn.securityOperations().hasTablePermission("root", MetadataTable.NAME, TablePermission.WRITE));
    String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
    Assert.assertNotNull("Table ID was null", tableId);
    // Reuse fakeServer instead of repeating the literal so the server name and
    // the on-disk WAL directory cannot drift apart.
    LogEntry logEntry = new LogEntry(null, 0, fakeServer, partialHeaderWalog.toURI().toString());
    log.info("Taking {} offline", tableName);
    conn.tableOperations().offline(tableName, true);
    log.info("{} is offline", tableName);
    // Manually reference the fake WAL from the table's metadata entry
    Text row = MetadataSchema.TabletsSection.getRow(tableId, null);
    Mutation m = new Mutation(row);
    m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    // try-with-resources ensures the writer is closed even if addMutation throws
    try (BatchWriter bw = conn.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig())) {
        bw.addMutation(m);
    }
    log.info("Bringing {} online", tableName);
    conn.tableOperations().online(tableName, true);
    log.info("{} is online", tableName);
    // If recovery had not completed, the tablet would never come online and we
    // would not be able to read it.
    try (Scanner s = conn.createScanner(tableName, Authorizations.EMPTY)) {
        Assert.assertEquals(0, Iterables.size(s));
    }
}
Example use of org.apache.accumulo.core.client.impl.Table in the Apache Accumulo project: class HostRegexTableLoadBalancer, method balance.
/**
 * Balances tablets per table, constrained to the host pools defined by the configured
 * regexes. First performs a periodic out-of-bounds (OOB) check, migrating any tablet
 * found on a server outside its table's pool; if any such migrations are produced, or
 * too many migrations are already outstanding, normal balancing is skipped this pass.
 *
 * @param current current tserver status map
 * @param migrations tablets with outstanding migrations
 * @param migrationsOut output list to which this pass's migrations are added
 * @return minimum time (ms) before balance should be called again
 */
@Override
public long balance(SortedMap<TServerInstance, TabletServerStatus> current, Set<KeyExtent> migrations, List<TabletMigration> migrationsOut) {
    long minBalanceTime = 20 * 1000;
    // Iterate over the tables and balance each of them
    TableOperations t = getTableOperations();
    if (t == null)
        return minBalanceTime;
    Map<String, String> tableIdMap = t.tableIdMap();
    long now = System.currentTimeMillis();
    Map<String, SortedMap<TServerInstance, TabletServerStatus>> currentGrouped = splitCurrentByRegex(current);
    if ((now - this.lastOOBCheck) > this.oobCheckMillis) {
        // Single RNG for the whole OOB pass; no need to construct one per server entry
        Random random = new Random();
        try {
            // Check to see if a tablet is assigned outside the bounds of the pool. If so, migrate it.
            for (String table : t.list()) {
                LOG.debug("Checking for out of bounds tablets for table {}", table);
                String tablePoolName = getPoolNameForTable(table);
                for (Entry<TServerInstance, TabletServerStatus> e : current.entrySet()) {
                    // pool names are the same as table names, except in the DEFAULT case.
                    // If this table is assigned to a pool for this host, then move on.
                    List<String> hostPools = getPoolNamesForHost(e.getKey().host());
                    if (hostPools.contains(tablePoolName)) {
                        continue;
                    }
                    String tid = tableIdMap.get(table);
                    if (null == tid) {
                        LOG.warn("Unable to check for out of bounds tablets for table {}, it may have been deleted or renamed.", table);
                        continue;
                    }
                    try {
                        List<TabletStats> outOfBoundsTablets = getOnlineTabletsForTable(e.getKey(), Table.ID.of(tid));
                        if (null == outOfBoundsTablets) {
                            continue;
                        }
                        for (TabletStats ts : outOfBoundsTablets) {
                            KeyExtent ke = new KeyExtent(ts.getExtent());
                            if (migrations.contains(ke)) {
                                LOG.debug("Migration for out of bounds tablet {} has already been requested", ke);
                                continue;
                            }
                            String poolName = getPoolNameForTable(table);
                            SortedMap<TServerInstance, TabletServerStatus> currentView = currentGrouped.get(poolName);
                            if (null != currentView) {
                                // Pick a random in-pool destination server
                                int skip = random.nextInt(currentView.size());
                                Iterator<TServerInstance> iter = currentView.keySet().iterator();
                                for (int i = 0; i < skip; i++) {
                                    iter.next();
                                }
                                TServerInstance nextTS = iter.next();
                                LOG.info("Tablet {} is currently outside the bounds of the regex, migrating from {} to {}", ke, e.getKey(), nextTS);
                                migrationsOut.add(new TabletMigration(ke, e.getKey(), nextTS));
                                if (migrationsOut.size() >= this.maxTServerMigrations) {
                                    break;
                                }
                            } else {
                                LOG.warn("No tablet servers online for pool {}, unable to migrate out of bounds tablets", poolName);
                            }
                        }
                    } catch (TException e1) {
                        // Pass the caught exception (e1), not the map entry (e), so the stack trace is logged
                        LOG.error("Error in OOB check getting tablets for table {} from server {}", tid, e.getKey().host(), e1);
                    }
                }
            }
        } finally {
            // this could have taken a while...get a new time
            this.lastOOBCheck = System.currentTimeMillis();
        }
    }
    if (migrationsOut.size() > 0) {
        LOG.warn("Not balancing tables due to moving {} out of bounds tablets", migrationsOut.size());
        LOG.info("Migrating out of bounds tablets: {}", migrationsOut);
        return minBalanceTime;
    }
    if (migrations != null && migrations.size() > 0) {
        if (migrations.size() >= maxOutstandingMigrations) {
            LOG.warn("Not balancing tables due to {} outstanding migrations", migrations.size());
            if (LOG.isTraceEnabled()) {
                LOG.trace("Sample up to 10 outstanding migrations: {}", Iterables.limit(migrations, 10));
            }
            return minBalanceTime;
        }
        LOG.debug("Current outstanding migrations of {} being applied", migrations.size());
        if (LOG.isTraceEnabled()) {
            LOG.trace("Sample up to 10 outstanding migrations: {}", Iterables.limit(migrations, 10));
        }
        migrationsFromLastPass.keySet().retainAll(migrations);
        // Simulate the effect of last pass's migrations on a copy of the current state so
        // per-table balancers see tablet counts as they will be once migrations complete.
        SortedMap<TServerInstance, TabletServerStatus> currentCopy = new TreeMap<>(current);
        Multimap<TServerInstance, String> serverTableIdCopied = HashMultimap.create();
        for (TabletMigration migration : migrationsFromLastPass.values()) {
            TableInfo fromInfo = getTableInfo(currentCopy, serverTableIdCopied, migration.tablet.getTableId().toString(), migration.oldServer);
            if (fromInfo != null) {
                fromInfo.setOnlineTablets(fromInfo.getOnlineTablets() - 1);
            }
            TableInfo toInfo = getTableInfo(currentCopy, serverTableIdCopied, migration.tablet.getTableId().toString(), migration.newServer);
            if (toInfo != null) {
                toInfo.setOnlineTablets(toInfo.getOnlineTablets() + 1);
            }
        }
        migrations = EMPTY_MIGRATIONS;
    } else {
        migrationsFromLastPass.clear();
    }
    // Delegate balancing of each table to its configured balancer, restricted to the
    // tserver pool its regex maps to.
    for (String s : tableIdMap.values()) {
        Table.ID tableId = Table.ID.of(s);
        String tableName = tableIdToTableName.get(tableId);
        String regexTableName = getPoolNameForTable(tableName);
        SortedMap<TServerInstance, TabletServerStatus> currentView = currentGrouped.get(regexTableName);
        if (null == currentView) {
            LOG.warn("Skipping balance for table {} as no tablet servers are online.", tableName);
            continue;
        }
        ArrayList<TabletMigration> newMigrations = new ArrayList<>();
        getBalancerForTable(tableId).balance(currentView, migrations, newMigrations);
        if (newMigrations.isEmpty()) {
            tableToTimeSinceNoMigrations.remove(s);
        } else if (tableToTimeSinceNoMigrations.containsKey(s)) {
            // Warn if a table has been continuously producing migrations for over an hour
            if ((now - tableToTimeSinceNoMigrations.get(s)) > ONE_HOUR) {
                LOG.warn("We have been consistently producing migrations for {}: {}", tableName, Iterables.limit(newMigrations, 10));
            }
        } else {
            tableToTimeSinceNoMigrations.put(s, now);
        }
        migrationsOut.addAll(newMigrations);
        if (migrationsOut.size() >= this.maxTServerMigrations) {
            break;
        }
    }
    // Remember this pass's migrations so the next pass can account for their effect
    for (TabletMigration migration : migrationsOut) {
        migrationsFromLastPass.put(migration.tablet, migration);
    }
    LOG.info("Migrating tablets for balance: {}", migrationsOut);
    return minBalanceTime;
}
Example use of org.apache.accumulo.core.client.impl.Table in the Apache Accumulo project: class Master, method displayUnassigned.
/**
 * Returns the number of unassigned tablets that should be assigned, for display on the
 * monitor page. What gets counted depends on the master's state: in NORMAL, all offline
 * tablets of online tables; in SAFE_MODE and the UNLOAD_* states, only offline tablets
 * of the metadata table.
 */
int displayUnassigned() {
    int result = 0;
    switch (getMasterState()) {
        case NORMAL:
            // Count offline tablets for online tables
            for (TabletGroupWatcher watcher : watchers) {
                TableManager manager = TableManager.getInstance();
                for (Entry<Table.ID, TableCounts> entry : watcher.getStats().entrySet()) {
                    Table.ID tableId = entry.getKey();
                    TableCounts counts = entry.getValue();
                    TableState tableState = manager.getTableState(tableId);
                    // enum identity comparison is null-safe, so no separate null check needed
                    if (tableState == TableState.ONLINE) {
                        result += counts.unassigned() + counts.assignedToDeadServers() + counts.assigned() + counts.suspended();
                    }
                }
            }
            break;
        case SAFE_MODE:
        case UNLOAD_METADATA_TABLETS:
        case UNLOAD_ROOT_TABLET:
            // These three states count the same thing — offline tablets of the metadata
            // table — so the previously duplicated bodies are merged via fall-through.
            for (TabletGroupWatcher watcher : watchers) {
                TableCounts counts = watcher.getStats(MetadataTable.ID);
                result += counts.unassigned() + counts.suspended();
            }
            break;
        default:
            break;
    }
    return result;
}
Aggregations