use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.
the class SetGoalState method main.
/**
* Utility program that will change the goal state for the master from the command line.
*/
public static void main(String[] args) throws Exception {
  // MasterGoalState.valueOf never returns null; it throws IllegalArgumentException for an
  // unrecognized state, so validate explicitly to make sure the usage message is printed.
  boolean validState = args.length == 1;
  if (validState) {
    try {
      MasterGoalState.valueOf(args[0]);
    } catch (IllegalArgumentException e) {
      validState = false;
    }
  }
  if (!validState) {
    System.err.println("Usage: accumulo " + SetGoalState.class.getName() + " [NORMAL|SAFE_MODE|CLEAN_STOP]");
    System.exit(-1);
  }
  SecurityUtil.serverLogin(SiteConfiguration.getInstance());
  VolumeManager fs = VolumeManagerImpl.get();
  Accumulo.waitForZookeeperAndHdfs(fs);
  ZooReaderWriter.getInstance().putPersistentData(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZMASTER_GOAL_STATE, args[0].getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
}
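The pattern above recurs across Accumulo 1.x server entry points: perform a Kerberos server login, obtain a VolumeManager, wait for ZooKeeper and HDFS, then touch a node under the instance root in ZooKeeper. As a companion sketch, the snippet below reads the goal-state node back instead of writing it. GetGoalState is a hypothetical class name (not part of the Accumulo source), it assumes the same imports as SetGoalState above, and it assumes the getData(String, Stat) accessor on the ZooReaderWriter singleton.
// Hypothetical companion to SetGoalState: print the current master goal state.
// Assumes the same internal 1.x APIs used above; not part of the Accumulo source.
public class GetGoalState {
  public static void main(String[] args) throws Exception {
    String zPath = ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZMASTER_GOAL_STATE;
    // null Stat: we only want the node's data, not its metadata
    byte[] data = ZooReaderWriter.getInstance().getData(zPath, null);
    System.out.println(new String(data, UTF_8));
  }
}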
use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.
the class Monitor method main.
public static void main(String[] args) throws Exception {
  final String app = "monitor";
  ServerOpts opts = new ServerOpts();
  opts.parseArgs(app, args);
  String hostname = opts.getAddress();
  SecurityUtil.serverLogin(SiteConfiguration.getInstance());
  VolumeManager fs = VolumeManagerImpl.get();
  instance = HdfsZooInstance.getInstance();
  config = new ServerConfigurationFactory(instance);
  context = new AccumuloServerContext(instance, config);
  log.info("Version " + Constants.VERSION);
  log.info("Instance " + instance.getInstanceID());
  MetricsSystemHelper.configure(Monitor.class.getSimpleName());
  Accumulo.init(fs, instance, config, app);
  Monitor monitor = new Monitor();
  // Servlets need access to limit requests when the monitor is not active, but Servlets are
  // instantiated via reflection. Expose the service this way instead.
  Monitor.HA_SERVICE_INSTANCE = monitor;
  DistributedTrace.enable(hostname, app, config.getSystemConfiguration());
  try {
    monitor.run(hostname);
  } finally {
    DistributedTrace.disable();
  }
}
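Monitor.main follows the standard 1.x server bootstrap: parse ServerOpts, server login, obtain a VolumeManager, call Accumulo.init, then wrap the service loop so DistributedTrace is always torn down. A minimal sketch of that skeleton for a hypothetical service follows; the app name "myservice" and the empty run loop are placeholders, while every other call is the same internal API used above.
// Sketch of the generic 1.x server-startup skeleton, assuming the internal APIs shown above.
public static void main(String[] args) throws Exception {
  final String app = "myservice"; // hypothetical app name
  ServerOpts opts = new ServerOpts();
  opts.parseArgs(app, args);
  SecurityUtil.serverLogin(SiteConfiguration.getInstance());
  VolumeManager fs = VolumeManagerImpl.get();
  Instance instance = HdfsZooInstance.getInstance();
  ServerConfigurationFactory config = new ServerConfigurationFactory(instance);
  Accumulo.init(fs, instance, config, app);
  DistributedTrace.enable(opts.getAddress(), app, config.getSystemConfiguration());
  try {
    // hypothetical service loop goes here
  } finally {
    // tear tracing down even if the service loop throws
    DistributedTrace.disable();
  }
}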
use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.
the class CleanUp method call.
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  master.clearMigrations(tableId);
  int refCount = 0;
  try {
    // look for other tables that reference this table's files
    Connector conn = master.getConnector();
    try (BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8)) {
      Range allTables = MetadataSchema.TabletsSection.getRange();
      Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
      Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false);
      Range afterTable = new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);
      bs.setRanges(Arrays.asList(beforeTable, afterTable));
      bs.fetchColumnFamily(DataFileColumnFamily.NAME);
      IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
      GrepIterator.setTerm(cfg, "/" + tableId + "/");
      bs.addScanIterator(cfg);
      for (Entry<Key, Value> entry : bs) {
        if (entry.getKey().getColumnQualifier().toString().contains("/" + tableId + "/")) {
          refCount++;
        }
      }
    }
  } catch (Exception e) {
    refCount = -1;
    log.error("Failed to scan " + MetadataTable.NAME + " looking for references to deleted table " + tableId, e);
  }
  // remove metadata table entries
  try {
    // Intentionally do not pass the master lock. If the master loses its lock, this operation
    // may complete before the master can kill itself. If the master lock were passed to
    // deleteTable, the delete mutations could be dropped; if they were dropped and the
    // operation completed anyway, the deletes would never be repeated.
    MetadataTableUtil.deleteTable(tableId, refCount != 0, master, null);
  } catch (Exception e) {
    log.error("error deleting " + tableId + " from metadata table", e);
  }
  // remove any problem reports the table may have
  try {
    ProblemReports.getInstance(master).deleteProblemReports(tableId);
  } catch (Exception e) {
    log.error("Failed to delete problem reports for table " + tableId, e);
  }
  if (refCount == 0) {
    final AccumuloConfiguration conf = master.getConfiguration();
    boolean archiveFiles = conf.getBoolean(Property.GC_FILE_ARCHIVE);
    // delete the map files
    try {
      VolumeManager fs = master.getFileSystem();
      for (String dir : ServerConstants.getTablesDirs()) {
        if (archiveFiles) {
          archiveFile(fs, dir, tableId);
        } else {
          fs.deleteRecursively(new Path(dir, tableId.canonicalID()));
        }
      }
    } catch (IOException e) {
      log.error("Unable to remove deleted table directory", e);
    } catch (IllegalArgumentException exception) {
      if (exception.getCause() instanceof UnknownHostException) {
        /* Thrown if HDFS encounters a DNS problem in some edge cases */
        log.error("Unable to remove deleted table directory", exception);
      } else {
        throw exception;
      }
    }
  }
  // remove table from zookeeper
  try {
    TableManager.getInstance().removeTable(tableId);
    Tables.clearCache(master.getInstance());
  } catch (Exception e) {
    log.error("Failed to find table id in zookeeper", e);
  }
  // remove any permissions associated with this table
  try {
    AuditedSecurityOperation.getInstance(master).deleteTable(master.rpcCreds(), tableId, namespaceId);
  } catch (ThriftSecurityException e) {
    log.error("{}", e.getMessage(), e);
  }
  Utils.unreserveTable(tableId, tid, true);
  Utils.unreserveNamespace(namespaceId, tid, false);
  log.debug("Deleted table " + tableId);
  return null;
}
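The subtle step above is the reference count: before deleting any files, CleanUp greps the metadata table outside the deleted table's own tablet range for paths containing /tableId/, because cloned or bulk-imported files can be shared with other tables and must survive the delete. Factored out as a standalone helper, the scan looks roughly like this; countReferences is a hypothetical name, while the classes and calls are exactly the ones used above.
// Hedged sketch: the shared-file reference scan from above as a reusable helper.
static int countReferences(Connector conn, Table.ID tableId) throws Exception {
  int refCount = 0;
  try (BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8)) {
    Range allTables = MetadataSchema.TabletsSection.getRange();
    Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
    // scan everything in the tablets section except the range of the table being deleted
    bs.setRanges(Arrays.asList(
        new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false),
        new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true)));
    bs.fetchColumnFamily(DataFileColumnFamily.NAME);
    IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
    GrepIterator.setTerm(cfg, "/" + tableId + "/");
    bs.addScanIterator(cfg);
    for (Entry<Key, Value> entry : bs) {
      if (entry.getKey().getColumnQualifier().toString().contains("/" + tableId + "/")) {
        refCount++;
      }
    }
  }
  return refCount;
}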
use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.
the class Metrics2ReplicationMetricsTest method testAddReplicationQueueTimeMetrics.
@Test
public void testAddReplicationQueueTimeMetrics() throws Exception {
  Master master = EasyMock.createMock(Master.class);
  MetricsSystem system = EasyMock.createMock(MetricsSystem.class);
  VolumeManager fileSystem = EasyMock.createMock(VolumeManager.class);
  ReplicationUtil util = EasyMock.createMock(ReplicationUtil.class);
  MutableStat stat = EasyMock.createMock(MutableStat.class);
  MutableQuantiles quantiles = EasyMock.createMock(MutableQuantiles.class);
  Path path1 = new Path("hdfs://localhost:9000/accumulo/wal/file1");
  Path path2 = new Path("hdfs://localhost:9000/accumulo/wal/file2");
  // First call will initialize the map of paths to modification time
  EasyMock.expect(util.getPendingReplicationPaths()).andReturn(ImmutableSet.of(path1, path2));
  EasyMock.expect(master.getFileSystem()).andReturn(fileSystem);
  EasyMock.expect(fileSystem.getFileStatus(path1)).andReturn(createStatus(100));
  EasyMock.expect(master.getFileSystem()).andReturn(fileSystem);
  EasyMock.expect(fileSystem.getFileStatus(path2)).andReturn(createStatus(200));
  // Second call will recognize the missing path1 and add the latency stat
  EasyMock.expect(util.getPendingReplicationPaths()).andReturn(ImmutableSet.of(path2));
  // Expect a call to reset the min/max
  stat.resetMinMax();
  EasyMock.expectLastCall();
  // Expect the calls of adding the stats
  quantiles.add(currentTime - 100);
  EasyMock.expectLastCall();
  stat.add(currentTime - 100);
  EasyMock.expectLastCall();
  EasyMock.replay(master, system, fileSystem, util, stat, quantiles);
  Metrics2ReplicationMetrics metrics = new TestMetrics2ReplicationMetrics(master, system);
  // Inject our mock objects
  replaceField(metrics, "replicationUtil", util);
  replaceField(metrics, "replicationQueueTimeQuantiles", quantiles);
  replaceField(metrics, "replicationQueueTimeStat", stat);
  // Two calls to this will initialize the map and then add metrics
  metrics.addReplicationQueueTimeMetrics();
  metrics.addReplicationQueueTimeMetrics();
  EasyMock.verify(master, system, fileSystem, util, stat, quantiles);
}
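The test depends on three members not shown in the excerpt: a createStatus helper, a reflective replaceField helper, and a fixed currentTime supplied by the TestMetrics2ReplicationMetrics subclass. Plausible shapes for the two helpers, offered as reconstructions rather than the actual test source:
// Reconstruction, not the actual test source: only the modification time matters to the
// metric, so every other field of the Hadoop FileStatus is a dummy value.
private FileStatus createStatus(long modtime) {
  return new FileStatus(0, false, 0, 0, modtime, new Path("/"));
}

// Reconstruction, not the actual test source: inject a mock into a private field
// declared on the Metrics2ReplicationMetrics superclass.
private void replaceField(Object instance, String fieldName, Object value) throws Exception {
  java.lang.reflect.Field field = instance.getClass().getSuperclass().getDeclaredField(fieldName);
  field.setAccessible(true);
  field.set(instance, value);
}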
use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.
the class MapImportFileNames method call.
@Override
public Repo<Master> call(long tid, Master environment) throws Exception {
  Path path = new Path(tableInfo.importDir, "mappings.txt");
  BufferedWriter mappingsWriter = null;
  try {
    VolumeManager fs = environment.getFileSystem();
    fs.mkdirs(new Path(tableInfo.importDir));
    FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
    mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
    for (FileStatus fileStatus : files) {
      String fileName = fileStatus.getPath().getName();
      log.info("filename " + fileStatus.getPath().toString());
      String[] sa = fileName.split("\\.");
      String extension = "";
      if (sa.length > 1) {
        extension = sa[sa.length - 1];
        if (!FileOperations.getValidExtensions().contains(extension)) {
          continue;
        }
      } else {
        // assume it is a map file
        extension = Constants.MAPFILE_EXTENSION;
      }
      String newName = "I" + namer.getNextName() + "." + extension;
      mappingsWriter.append(fileName);
      mappingsWriter.append(':');
      mappingsWriter.append(newName);
      mappingsWriter.newLine();
    }
    mappingsWriter.close();
    mappingsWriter = null;
    return new PopulateMetadataTable(tableInfo);
  } catch (IOException ioe) {
    log.warn("{}", ioe.getMessage(), ioe);
    throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error writing mapping file " + path + " " + ioe.getMessage());
  } finally {
    if (mappingsWriter != null) {
      try {
        mappingsWriter.close();
      } catch (IOException ioe) {
        log.warn("Failed to close " + path, ioe);
      }
    }
  }
}
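The mappings.txt file written above pairs each original bulk-import file name with a freshly allocated one, one oldName:newName pair per line, and is consumed by the PopulateMetadataTable step returned at the end. A hedged sketch of the reading side follows; readMappingFile is a hypothetical name, and it assumes VolumeManager.open returns a readable stream, the counterpart of the create call used above.
// Hedged sketch of the consumer: parse mappings.txt back into oldName -> newName.
static Map<String, String> readMappingFile(VolumeManager fs, Path path) throws IOException {
  Map<String, String> map = new HashMap<>();
  try (BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(path), UTF_8))) {
    for (String line = in.readLine(); line != null; line = in.readLine()) {
      // split on the first ':' only, matching the writer above
      String[] sa = line.split(":", 2);
      map.put(sa[0], sa[1]);
    }
  }
  return map;
}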