Example 16 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

The class SetGoalState, method main.

/**
 * Utility program that will change the goal state for the master from the command line.
 */
public static void main(String[] args) throws Exception {
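    // Note: MasterGoalState.valueOf never returns null; it throws
    // IllegalArgumentException for an unrecognized name, so a bad state
    // argument surfaces as an exception rather than this usage message.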
    if (args.length != 1 || MasterGoalState.valueOf(args[0]) == null) {
        System.err.println("Usage: accumulo " + SetGoalState.class.getName() + " [NORMAL|SAFE_MODE|CLEAN_STOP]");
        System.exit(-1);
    }
    SecurityUtil.serverLogin(SiteConfiguration.getInstance());
    VolumeManager fs = VolumeManagerImpl.get();
    Accumulo.waitForZookeeperAndHdfs(fs);
    ZooReaderWriter.getInstance().putPersistentData(ZooUtil.getRoot(HdfsZooInstance.getInstance()) + Constants.ZMASTER_GOAL_STATE, args[0].getBytes(UTF_8), NodeExistsPolicy.OVERWRITE);
}
Also used: VolumeManager(org.apache.accumulo.server.fs.VolumeManager)
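
Because Enum.valueOf throws rather than returning null, the null comparison above can never route a bad state name to the usage message. A minimal sketch of a helper that does, assuming the same MasterGoalState enum (the isValidGoalState name is hypothetical):

static boolean isValidGoalState(String name) {
    try {
        // Enum.valueOf throws IllegalArgumentException for unknown constant names
        MasterGoalState.valueOf(name);
        return true;
    } catch (IllegalArgumentException e) {
        return false;
    }
}

With this helper, the guard becomes args.length != 1 || !isValidGoalState(args[0]), and an invalid state prints the usage line instead of a stack trace.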

Example 17 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

The class Monitor, method main.

public static void main(String[] args) throws Exception {
    final String app = "monitor";
    ServerOpts opts = new ServerOpts();
    opts.parseArgs(app, args);
    String hostname = opts.getAddress();
    SecurityUtil.serverLogin(SiteConfiguration.getInstance());
    VolumeManager fs = VolumeManagerImpl.get();
    instance = HdfsZooInstance.getInstance();
    config = new ServerConfigurationFactory(instance);
    context = new AccumuloServerContext(instance, config);
    log.info("Version " + Constants.VERSION);
    log.info("Instance " + instance.getInstanceID());
    MetricsSystemHelper.configure(Monitor.class.getSimpleName());
    Accumulo.init(fs, instance, config, app);
    Monitor monitor = new Monitor();
    // Servlets need access to limit requests when the monitor is not active, but Servlets are instantiated
    // via reflection. Expose the service this way instead.
    Monitor.HA_SERVICE_INSTANCE = monitor;
    DistributedTrace.enable(hostname, app, config.getSystemConfiguration());
    try {
        monitor.run(hostname);
    } finally {
        DistributedTrace.disable();
    }
}
Also used: VolumeManager(org.apache.accumulo.server.fs.VolumeManager) AccumuloServerContext(org.apache.accumulo.server.AccumuloServerContext) ServerOpts(org.apache.accumulo.server.ServerOpts) ServerConfigurationFactory(org.apache.accumulo.server.conf.ServerConfigurationFactory)
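
The server entry points on this page share the startup idiom shown above: SecurityUtil.serverLogin, then VolumeManagerImpl.get(), then initialization against ZooKeeper and HDFS. A minimal standalone sketch of obtaining a VolumeManager and exercising the same filesystem calls these examples use, assuming the Accumulo site configuration is available on the classpath (the /accumulo/demo path is hypothetical):

import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.fs.VolumeManagerImpl;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

public class VolumeManagerSketch {
    public static void main(String[] args) throws Exception {
        // Resolves the configured volumes and wraps the underlying Hadoop FileSystems.
        VolumeManager fs = VolumeManagerImpl.get();
        Path dir = new Path("/accumulo/demo"); // hypothetical directory
        fs.mkdirs(dir);
        // listStatus mirrors the Hadoop FileSystem call used in Example 20.
        for (FileStatus status : fs.listStatus(dir)) {
            System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
        }
    }
}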

Example 18 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

The class CleanUp, method call.

@Override
public Repo<Master> call(long tid, Master master) throws Exception {
    master.clearMigrations(tableId);
    int refCount = 0;
    try {
        // look for other tables that reference this table's files
        Connector conn = master.getConnector();
        try (BatchScanner bs = conn.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 8)) {
            Range allTables = MetadataSchema.TabletsSection.getRange();
            Range tableRange = MetadataSchema.TabletsSection.getRange(tableId);
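            // Scan the tablets section on both sides of this table's own range,
            // so only other tables' file entries are checked for references.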
            Range beforeTable = new Range(allTables.getStartKey(), true, tableRange.getStartKey(), false);
            Range afterTable = new Range(tableRange.getEndKey(), false, allTables.getEndKey(), true);
            bs.setRanges(Arrays.asList(beforeTable, afterTable));
            bs.fetchColumnFamily(DataFileColumnFamily.NAME);
            IteratorSetting cfg = new IteratorSetting(40, "grep", GrepIterator.class);
            GrepIterator.setTerm(cfg, "/" + tableId + "/");
            bs.addScanIterator(cfg);
            for (Entry<Key, Value> entry : bs) {
                if (entry.getKey().getColumnQualifier().toString().contains("/" + tableId + "/")) {
                    refCount++;
                }
            }
        }
    } catch (Exception e) {
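        // A failed scan leaves the reference count unknown; -1 keeps the
        // refCount == 0 file-delete branch below from running.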
        refCount = -1;
        log.error("Failed to scan " + MetadataTable.NAME + " looking for references to deleted table " + tableId, e);
    }
    // remove metadata table entries
    try {
        // Intentionally do not pass master lock. If master loses lock, this operation may complete before master can kill itself.
    // If the master lock were passed to deleteTable, it is possible that the delete mutations would be dropped. If the delete operations
        // are dropped and the operation completes, then the deletes will not be repeated.
        MetadataTableUtil.deleteTable(tableId, refCount != 0, master, null);
    } catch (Exception e) {
        log.error("error deleting " + tableId + " from metadata table", e);
    }
    // remove any problem reports the table may have
    try {
        ProblemReports.getInstance(master).deleteProblemReports(tableId);
    } catch (Exception e) {
        log.error("Failed to delete problem reports for table " + tableId, e);
    }
    if (refCount == 0) {
        final AccumuloConfiguration conf = master.getConfiguration();
        boolean archiveFiles = conf.getBoolean(Property.GC_FILE_ARCHIVE);
        // delete the map files
        try {
            VolumeManager fs = master.getFileSystem();
            for (String dir : ServerConstants.getTablesDirs()) {
                if (archiveFiles) {
                    archiveFile(fs, dir, tableId);
                } else {
                    fs.deleteRecursively(new Path(dir, tableId.canonicalID()));
                }
            }
        } catch (IOException e) {
            log.error("Unable to remove deleted table directory", e);
        } catch (IllegalArgumentException exception) {
            if (exception.getCause() instanceof UnknownHostException) {
                /* Thrown if HDFS encounters a DNS problem in some edge cases */
                log.error("Unable to remove deleted table directory", exception);
            } else {
                throw exception;
            }
        }
    }
    // remove table from zookeeper
    try {
        TableManager.getInstance().removeTable(tableId);
        Tables.clearCache(master.getInstance());
    } catch (Exception e) {
        log.error("Failed to find table id in zookeeper", e);
    }
    // remove any permissions associated with this table
    try {
        AuditedSecurityOperation.getInstance(master).deleteTable(master.rpcCreds(), tableId, namespaceId);
    } catch (ThriftSecurityException e) {
        log.error("{}", e.getMessage(), e);
    }
    Utils.unreserveTable(tableId, tid, true);
    Utils.unreserveNamespace(namespaceId, tid, false);
    LoggerFactory.getLogger(CleanUp.class).debug("Deleted table " + tableId);
    return null;
}
Also used: Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) UnknownHostException(java.net.UnknownHostException) BatchScanner(org.apache.accumulo.core.client.BatchScanner) IOException(java.io.IOException) Range(org.apache.accumulo.core.data.Range) ThriftSecurityException(org.apache.accumulo.core.client.impl.thrift.ThriftSecurityException) IteratorSetting(org.apache.accumulo.core.client.IteratorSetting) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration)

Example 19 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

The class Metrics2ReplicationMetricsTest, method testAddReplicationQueueTimeMetrics.

@Test
public void testAddReplicationQueueTimeMetrics() throws Exception {
    Master master = EasyMock.createMock(Master.class);
    MetricsSystem system = EasyMock.createMock(MetricsSystem.class);
    VolumeManager fileSystem = EasyMock.createMock(VolumeManager.class);
    ReplicationUtil util = EasyMock.createMock(ReplicationUtil.class);
    MutableStat stat = EasyMock.createMock(MutableStat.class);
    MutableQuantiles quantiles = EasyMock.createMock(MutableQuantiles.class);
    Path path1 = new Path("hdfs://localhost:9000/accumulo/wal/file1");
    Path path2 = new Path("hdfs://localhost:9000/accumulo/wal/file2");
    // First call will initialize the map of paths to modification time
    EasyMock.expect(util.getPendingReplicationPaths()).andReturn(ImmutableSet.of(path1, path2));
    EasyMock.expect(master.getFileSystem()).andReturn(fileSystem);
    EasyMock.expect(fileSystem.getFileStatus(path1)).andReturn(createStatus(100));
    EasyMock.expect(master.getFileSystem()).andReturn(fileSystem);
    EasyMock.expect(fileSystem.getFileStatus(path2)).andReturn(createStatus(200));
    // Second call will recognize the missing path1 and add the latency stat
    EasyMock.expect(util.getPendingReplicationPaths()).andReturn(ImmutableSet.of(path2));
    // Expect a call to reset the min/max
    stat.resetMinMax();
    EasyMock.expectLastCall();
    // Expect the calls of adding the stats
    quantiles.add(currentTime - 100);
    EasyMock.expectLastCall();
    stat.add(currentTime - 100);
    EasyMock.expectLastCall();
    EasyMock.replay(master, system, fileSystem, util, stat, quantiles);
    Metrics2ReplicationMetrics metrics = new TestMetrics2ReplicationMetrics(master, system);
    // Inject our mock objects
    replaceField(metrics, "replicationUtil", util);
    replaceField(metrics, "replicationQueueTimeQuantiles", quantiles);
    replaceField(metrics, "replicationQueueTimeStat", stat);
    // Two calls to this will initialize the map and then add metrics
    metrics.addReplicationQueueTimeMetrics();
    metrics.addReplicationQueueTimeMetrics();
    EasyMock.verify(master, system, fileSystem, util, stat, quantiles);
}
Also used: Master(org.apache.accumulo.master.Master) Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) MutableQuantiles(org.apache.hadoop.metrics2.lib.MutableQuantiles) ReplicationUtil(org.apache.accumulo.server.replication.ReplicationUtil) MutableStat(org.apache.hadoop.metrics2.lib.MutableStat) MetricsSystem(org.apache.hadoop.metrics2.MetricsSystem) Test(org.junit.Test)
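
The replaceField helper used above is not shown on this page; a reflection-based version typically looks like the sketch below (an assumption for illustration, since the test's actual helper may differ, for example by targeting the superclass directly):

private static void replaceField(Object instance, String fieldName, Object newValue) throws Exception {
    java.lang.reflect.Field field = null;
    // The injected fields are declared on Metrics2ReplicationMetrics, which may be
    // a superclass of the test subclass, so walk up the hierarchy.
    for (Class<?> clazz = instance.getClass(); clazz != null && field == null; clazz = clazz.getSuperclass()) {
        try {
            field = clazz.getDeclaredField(fieldName);
        } catch (NoSuchFieldException e) {
            // not declared here; keep looking
        }
    }
    field.setAccessible(true);
    field.set(instance, newValue);
}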

Example 20 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

The class MapImportFileNames, method call.

@Override
public Repo<Master> call(long tid, Master environment) throws Exception {
    Path path = new Path(tableInfo.importDir, "mappings.txt");
    BufferedWriter mappingsWriter = null;
    try {
        VolumeManager fs = environment.getFileSystem();
        fs.mkdirs(new Path(tableInfo.importDir));
        FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
        UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
        mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
        for (FileStatus fileStatus : files) {
            String fileName = fileStatus.getPath().getName();
            log.info("filename " + fileStatus.getPath().toString());
            String[] sa = fileName.split("\\.");
            String extension = "";
            if (sa.length > 1) {
                extension = sa[sa.length - 1];
                if (!FileOperations.getValidExtensions().contains(extension)) {
                    continue;
                }
            } else {
                // assume it is a map file
                extension = Constants.MAPFILE_EXTENSION;
            }
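            // "I" is the file-name prefix Accumulo uses for bulk-imported files;
            // UniqueNameAllocator supplies a name that is unique across the instance.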
            String newName = "I" + namer.getNextName() + "." + extension;
            mappingsWriter.append(fileName);
            mappingsWriter.append(':');
            mappingsWriter.append(newName);
            mappingsWriter.newLine();
        }
        mappingsWriter.close();
        mappingsWriter = null;
        return new PopulateMetadataTable(tableInfo);
    } catch (IOException ioe) {
        log.warn("{}", ioe.getMessage(), ioe);
        throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error writing mapping file " + path + " " + ioe.getMessage());
    } finally {
        if (mappingsWriter != null)
            try {
                mappingsWriter.close();
            } catch (IOException ioe) {
                log.warn("Failed to close " + path, ioe);
            }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) VolumeManager(org.apache.accumulo.server.fs.VolumeManager) FileStatus(org.apache.hadoop.fs.FileStatus) UniqueNameAllocator(org.apache.accumulo.server.tablets.UniqueNameAllocator) OutputStreamWriter(java.io.OutputStreamWriter) IOException(java.io.IOException) BufferedWriter(java.io.BufferedWriter) AcceptableThriftTableOperationException(org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException)
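
The mappings file written above is plain text, one oldName:newName pair per line. A minimal sketch of reading it back, assuming VolumeManager's open method (the actual consumer, PopulateMetadataTable, is not shown on this page):

// Requires java.io.BufferedReader, java.io.InputStreamReader, java.util.HashMap,
// java.util.Map, and static java.nio.charset.StandardCharsets.UTF_8.
static Map<String,String> readMappings(VolumeManager fs, Path path) throws IOException {
    Map<String,String> renames = new HashMap<>();
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(fs.open(path), UTF_8))) {
        String line;
        while ((line = reader.readLine()) != null) {
            int sep = line.indexOf(':');
            renames.put(line.substring(0, sep), line.substring(sep + 1));
        }
    }
    return renames;
}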

Aggregations

VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 57
Path (org.apache.hadoop.fs.Path): 30
IOException (java.io.IOException): 17
Test (org.junit.Test): 17
Key (org.apache.accumulo.core.data.Key): 14
HashMap (java.util.HashMap): 13
Value (org.apache.accumulo.core.data.Value): 13
Scanner (org.apache.accumulo.core.client.Scanner): 12
ArrayList (java.util.ArrayList): 11
FileRef (org.apache.accumulo.server.fs.FileRef): 10
Connector (org.apache.accumulo.core.client.Connector): 9
Instance (org.apache.accumulo.core.client.Instance): 9
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 7
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 7
AccumuloServerContext (org.apache.accumulo.server.AccumuloServerContext): 7
ServerConfigurationFactory (org.apache.accumulo.server.conf.ServerConfigurationFactory): 7
File (java.io.File): 6
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 6
FileStatus (org.apache.hadoop.fs.FileStatus): 6
Text (org.apache.hadoop.io.Text): 6