Example 41 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

From the class CollectTabletStats, method main:

public static void main(String[] args) throws Exception {
    final CollectOptions opts = new CollectOptions();
    final ScannerOpts scanOpts = new ScannerOpts();
    opts.parseArgs(CollectTabletStats.class.getName(), args, scanOpts);
    String[] columnsTmp = new String[] {};
    if (opts.columns != null)
        columnsTmp = opts.columns.split(",");
    final String[] columns = columnsTmp;
    final VolumeManager fs = VolumeManagerImpl.get();
    Instance instance = opts.getInstance();
    final ServerConfigurationFactory sconf = new ServerConfigurationFactory(instance);
    Credentials creds = new Credentials(opts.getPrincipal(), opts.getToken());
    ClientContext context = new ClientContext(instance, creds, sconf.getSystemConfiguration());
    Table.ID tableId = Tables.getTableId(instance, opts.getTableName());
    if (tableId == null) {
        log.error("Unable to find table named {}", opts.getTableName());
        System.exit(-1);
    }
    TreeMap<KeyExtent, String> tabletLocations = new TreeMap<>();
    List<KeyExtent> candidates = findTablets(context, !opts.selectFarTablets, opts.getTableName(), tabletLocations);
    if (candidates.size() < opts.numThreads) {
        System.err.println("ERROR : Unable to find " + opts.numThreads + " " + (opts.selectFarTablets ? "far" : "local") + " tablets");
        System.exit(-1);
    }
    List<KeyExtent> tabletsToTest = selectRandomTablets(opts.numThreads, candidates);
    Map<KeyExtent, List<FileRef>> tabletFiles = new HashMap<>();
    for (KeyExtent ke : tabletsToTest) {
        List<FileRef> files = getTabletFiles(context, ke);
        tabletFiles.put(ke, files);
    }
    System.out.println();
    System.out.println("run location      : " + InetAddress.getLocalHost().getHostName() + "/" + InetAddress.getLocalHost().getHostAddress());
    System.out.println("num threads       : " + opts.numThreads);
    System.out.println("table             : " + opts.getTableName());
    System.out.println("table id          : " + tableId);
    for (KeyExtent ke : tabletsToTest) {
        System.out.println("\t *** Information about tablet " + ke.getUUID() + " *** ");
        System.out.println("\t\t# files in tablet : " + tabletFiles.get(ke).size());
        System.out.println("\t\ttablet location   : " + tabletLocations.get(ke));
        reportHdfsBlockLocations(tabletFiles.get(ke));
    }
    System.out.println("%n*** RUNNING TEST ***%n");
    ExecutorService threadPool = Executors.newFixedThreadPool(opts.numThreads);
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<FileRef> files = tabletFiles.get(ke);
            Test test = new Test(ke) {

                @Override
                public int runTest() throws Exception {
                    return readFiles(fs, sconf.getSystemConfiguration(), files, ke, columns);
                }
            };
            tests.add(test);
        }
        runTest("read files", tests, opts.numThreads, threadPool);
    }
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<FileRef> files = tabletFiles.get(ke);
            Test test = new Test(ke) {

                @Override
                public int runTest() throws Exception {
                    return readFilesUsingIterStack(fs, sconf, files, opts.auths, ke, columns, false);
                }
            };
            tests.add(test);
        }
        runTest("read tablet files w/ system iter stack", tests, opts.numThreads, threadPool);
    }
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<FileRef> files = tabletFiles.get(ke);
            Test test = new Test(ke) {

                @Override
                public int runTest() throws Exception {
                    return readFilesUsingIterStack(fs, sconf, files, opts.auths, ke, columns, true);
                }
            };
            tests.add(test);
        }
        runTest("read tablet files w/ table iter stack", tests, opts.numThreads, threadPool);
    }
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        final Connector conn = opts.getConnector();
        for (final KeyExtent ke : tabletsToTest) {
            Test test = new Test(ke) {

                @Override
                public int runTest() throws Exception {
                    return scanTablet(conn, opts.getTableName(), opts.auths, scanOpts.scanBatchSize, ke.getPrevEndRow(), ke.getEndRow(), columns);
                }
            };
            tests.add(test);
        }
        runTest("read tablet data through accumulo", tests, opts.numThreads, threadPool);
    }
    for (final KeyExtent ke : tabletsToTest) {
        final Connector conn = opts.getConnector();
        threadPool.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    calcTabletStats(conn, opts.getTableName(), opts.auths, scanOpts.scanBatchSize, ke, columns);
                } catch (Exception e) {
                    log.error("Failed to calculate tablet stats.", e);
                }
            }
        });
    }
    threadPool.shutdown();
}
Also used: VolumeManager(org.apache.accumulo.server.fs.VolumeManager), Connector(org.apache.accumulo.core.client.Connector), Instance(org.apache.accumulo.core.client.Instance), HashMap(java.util.HashMap), ArrayList(java.util.ArrayList), ServerConfigurationFactory(org.apache.accumulo.server.conf.ServerConfigurationFactory), KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent), FileRef(org.apache.accumulo.server.fs.FileRef), List(java.util.List), Table(org.apache.accumulo.core.client.impl.Table), ClientOnRequiredTable(org.apache.accumulo.server.cli.ClientOnRequiredTable), ClientContext(org.apache.accumulo.core.client.impl.ClientContext), TreeMap(java.util.TreeMap), IOException(java.io.IOException), ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts), ExecutorService(java.util.concurrent.ExecutorService), Credentials(org.apache.accumulo.core.client.impl.Credentials)
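
Each timing pass above builds a list of Test tasks and hands the whole batch to one shared fixed-size pool. The harness below is a minimal, self-contained sketch of that fan-out-and-wait idiom using only java.util.concurrent; the task body is a placeholder, not anything from CollectTabletStats.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FanOutSketch {

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Callable<Integer>> tasks = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            final int id = i;
            // Placeholder work; CollectTabletStats reads tablet files here instead.
            tasks.add(() -> id * id);
        }
        long start = System.currentTimeMillis();
        // invokeAll blocks until every task in the batch completes, like one timing pass.
        List<Future<Integer>> results = pool.invokeAll(tasks);
        int total = 0;
        for (Future<Integer> f : results)
            total += f.get();
        System.out.println("pass took " + (System.currentTimeMillis() - start) + " ms, total = " + total);
        pool.shutdown();
    }
}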

Example 42 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

From the class MetadataTableUtil, method getBulkFilesLoaded:

public static Map<Long, ? extends Collection<FileRef>> getBulkFilesLoaded(ClientContext context, KeyExtent extent) throws IOException {
    Text metadataRow = extent.getMetadataEntry();
    Map<Long, List<FileRef>> result = new HashMap<>();
    VolumeManager fs = VolumeManagerImpl.get();
    try (Scanner scanner = new ScannerImpl(context, extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY)) {
        scanner.setRange(new Range(metadataRow));
        scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
        for (Entry<Key, Value> entry : scanner) {
            Long tid = Long.parseLong(entry.getValue().toString());
            List<FileRef> lst = result.get(tid);
            if (lst == null) {
                result.put(tid, lst = new ArrayList<>());
            }
            lst.add(new FileRef(fs, entry.getKey()));
        }
    }
    return result;
}
Also used: VolumeManager(org.apache.accumulo.server.fs.VolumeManager), IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner), Scanner(org.apache.accumulo.core.client.Scanner), HashMap(java.util.HashMap), ArrayList(java.util.ArrayList), Text(org.apache.hadoop.io.Text), Range(org.apache.accumulo.core.data.Range), ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl), FileRef(org.apache.accumulo.server.fs.FileRef), Value(org.apache.accumulo.core.data.Value), DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue), List(java.util.List), Key(org.apache.accumulo.core.data.Key), PartialKey(org.apache.accumulo.core.data.PartialKey)
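
The get()/null-check/put() grouping in getBulkFilesLoaded predates Java 8; on Java 8 and later the same grouping is usually written with Map.computeIfAbsent. A standalone sketch of the idiom (the tid and file values are made up for illustration):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupBySketch {

    public static void main(String[] args) {
        Map<Long, List<String>> filesByTid = new HashMap<>();
        long[] tids = { 1L, 2L, 1L };
        String[] files = { "f1.rf", "f2.rf", "f3.rf" };
        for (int i = 0; i < tids.length; i++) {
            // computeIfAbsent creates the list the first time a tid is seen,
            // replacing the explicit get()/null-check/put() sequence above.
            filesByTid.computeIfAbsent(tids[i], k -> new ArrayList<>()).add(files[i]);
        }
        System.out.println(filesByTid); // {1=[f1.rf, f3.rf], 2=[f2.rf]}
    }
}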

Example 43 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

From the class MetadataTableUtil, method getFileAndLogEntries:

public static Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent) throws KeeperException, InterruptedException, IOException {
    ArrayList<LogEntry> result = new ArrayList<>();
    TreeMap<FileRef, DataFileValue> sizes = new TreeMap<>();
    VolumeManager fs = VolumeManagerImpl.get();
    if (extent.isRootTablet()) {
        getRootLogEntries(result);
        Path rootDir = new Path(getRootTabletDir());
        FileStatus[] files = fs.listStatus(rootDir);
        for (FileStatus fileStatus : files) {
            if (fileStatus.getPath().toString().endsWith("_tmp")) {
                continue;
            }
            DataFileValue dfv = new DataFileValue(0, 0);
            sizes.put(new FileRef(fileStatus.getPath().toString(), fileStatus.getPath()), dfv);
        }
    } else {
        Table.ID systemTableToCheck = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
        try (Scanner scanner = new ScannerImpl(context, systemTableToCheck, Authorizations.EMPTY)) {
            scanner.fetchColumnFamily(LogColumnFamily.NAME);
            scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
            scanner.setRange(extent.toMetadataRange());
            for (Entry<Key, Value> entry : scanner) {
                if (!entry.getKey().getRow().equals(extent.getMetadataEntry())) {
                    throw new RuntimeException("Unexpected row " + entry.getKey().getRow() + " expected " + extent.getMetadataEntry());
                }
                if (entry.getKey().getColumnFamily().equals(LogColumnFamily.NAME)) {
                    result.add(LogEntry.fromKeyValue(entry.getKey(), entry.getValue()));
                } else if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                    DataFileValue dfv = new DataFileValue(entry.getValue().get());
                    sizes.put(new FileRef(fs, entry.getKey()), dfv);
                } else {
                    throw new RuntimeException("Unexpected col fam " + entry.getKey().getColumnFamily());
                }
            }
        }
    }
    return new Pair<>(result, sizes);
}
Also used: Path(org.apache.hadoop.fs.Path), VolumeManager(org.apache.accumulo.server.fs.VolumeManager), IsolatedScanner(org.apache.accumulo.core.client.IsolatedScanner), Scanner(org.apache.accumulo.core.client.Scanner), DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue), FileStatus(org.apache.hadoop.fs.FileStatus), MetadataTable(org.apache.accumulo.core.metadata.MetadataTable), RootTable(org.apache.accumulo.core.metadata.RootTable), Table(org.apache.accumulo.core.client.impl.Table), ReplicationTable(org.apache.accumulo.core.replication.ReplicationTable), ArrayList(java.util.ArrayList), TreeMap(java.util.TreeMap), ScannerImpl(org.apache.accumulo.core.client.impl.ScannerImpl), FileRef(org.apache.accumulo.server.fs.FileRef), Value(org.apache.accumulo.core.data.Value), LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry), Key(org.apache.accumulo.core.data.Key), PartialKey(org.apache.accumulo.core.data.PartialKey), Pair(org.apache.accumulo.core.util.Pair)
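
For the root tablet, getFileAndLogEntries cannot consult a higher-level metadata table, so it lists the tablet directory straight off the volume and skips files still being written (the _tmp suffix). Below is a minimal sketch of that list-and-filter step against a plain Hadoop FileSystem; the directory path is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListTabletDirSketch {

    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path rootDir = new Path("/accumulo/tables/+r/root_tablet"); // hypothetical location
        for (FileStatus status : fs.listStatus(rootDir)) {
            // In-progress files carry a _tmp suffix; skip them, as the method above does.
            if (status.getPath().getName().endsWith("_tmp"))
                continue;
            System.out.println(status.getPath() + " len=" + status.getLen());
        }
    }
}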

Example 44 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

From the class RandomizeVolumes, method randomize:

public static int randomize(Connector c, String tableName) throws IOException, AccumuloSecurityException, AccumuloException, TableNotFoundException {
    final VolumeManager vm = VolumeManagerImpl.get();
    if (vm.getVolumes().size() < 2) {
        log.error("There are not enough volumes configured");
        return 1;
    }
    String tblStr = c.tableOperations().tableIdMap().get(tableName);
    if (null == tblStr) {
        log.error("Could not determine the table ID for table {}", tableName);
        return 2;
    }
    Table.ID tableId = Table.ID.of(tblStr);
    TableState tableState = TableManager.getInstance().getTableState(tableId);
    if (TableState.OFFLINE != tableState) {
        log.info("Taking {} offline", tableName);
        c.tableOperations().offline(tableName, true);
        log.info("{} offline", tableName);
    }
    SimpleThreadPool pool = new SimpleThreadPool(50, "directory maker");
    log.info("Rewriting entries for {}", tableName);
    Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    DIRECTORY_COLUMN.fetch(scanner);
    scanner.setRange(TabletsSection.getRange(tableId));
    BatchWriter writer = c.createBatchWriter(MetadataTable.NAME, null);
    int count = 0;
    for (Entry<Key, Value> entry : scanner) {
        String oldLocation = entry.getValue().toString();
        String directory;
        if (oldLocation.contains(":")) {
            String[] parts = oldLocation.split(Path.SEPARATOR);
            Table.ID tableIdEntry = Table.ID.of(parts[parts.length - 2]);
            if (!tableIdEntry.equals(tableId)) {
                log.error("Unexpected table id found: {}, expected {}; skipping", tableIdEntry, tableId);
                continue;
            }
            directory = parts[parts.length - 1];
        } else {
            directory = oldLocation.substring(Path.SEPARATOR.length());
        }
        Key key = entry.getKey();
        Mutation m = new Mutation(key.getRow());
        VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId);
        final String newLocation = vm.choose(chooserEnv, ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR + directory;
        m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value(newLocation.getBytes(UTF_8)));
        if (log.isTraceEnabled()) {
            log.trace("Replacing {} with {}", oldLocation, newLocation);
        }
        writer.addMutation(m);
        pool.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    vm.mkdirs(new Path(newLocation));
                } catch (IOException ex) {
                    // nevermind: directory creation here is best-effort
                }
            }
        });
        count++;
    }
    writer.close();
    pool.shutdown();
    while (!pool.isTerminated()) {
        log.trace("Waiting for mkdir() calls to finish");
        try {
            pool.awaitTermination(5, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
        }
    }
    log.info("Updated {} entries for table {}", count, tableName);
    if (TableState.OFFLINE != tableState) {
        c.tableOperations().online(tableName, true);
        log.info("table {} back online", tableName);
    }
    return 0;
}
Also used: Path(org.apache.hadoop.fs.Path), VolumeManager(org.apache.accumulo.server.fs.VolumeManager), Scanner(org.apache.accumulo.core.client.Scanner), MetadataTable(org.apache.accumulo.core.metadata.MetadataTable), Table(org.apache.accumulo.core.client.impl.Table), ClientOnRequiredTable(org.apache.accumulo.core.cli.ClientOnRequiredTable), IOException(java.io.IOException), VolumeChooserEnvironment(org.apache.accumulo.server.fs.VolumeChooserEnvironment), Value(org.apache.accumulo.core.data.Value), BatchWriter(org.apache.accumulo.core.client.BatchWriter), Mutation(org.apache.accumulo.core.data.Mutation), SimpleThreadPool(org.apache.accumulo.core.util.SimpleThreadPool), Key(org.apache.accumulo.core.data.Key), TableState(org.apache.accumulo.core.master.state.tables.TableState)
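
The tail of randomize drains the directory-maker pool with the standard shutdown-then-poll idiom, re-asserting the interrupt flag if the wait is interrupted. A compact, reusable version of just that idiom, sketched against a plain ExecutorService:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class DrainPoolSketch {

    // Blocks until all previously submitted tasks finish, preserving interrupt status.
    static void drain(ExecutorService pool) {
        pool.shutdown(); // no new tasks accepted; queued ones still run
        while (!pool.isTerminated()) {
            try {
                pool.awaitTermination(5, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // re-assert and stop waiting
                break;
            }
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("mkdirs() would run here"));
        drain(pool);
    }
}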

Example 45 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

From the class ChangeSecret, method main:

public static void main(String[] args) throws Exception {
    VolumeManager fs = VolumeManagerImpl.get();
    verifyHdfsWritePermission(fs);
    Opts opts = new Opts();
    List<String> argsList = new ArrayList<>(args.length + 2);
    argsList.add("--old");
    argsList.add("--new");
    argsList.addAll(Arrays.asList(args));
    opts.parseArgs(ChangeSecret.class.getName(), argsList.toArray(new String[0]));
    Instance inst = opts.getInstance();
    verifyAccumuloIsDown(inst, opts.oldPass);
    final String newInstanceId = UUID.randomUUID().toString();
    updateHdfs(fs, inst, newInstanceId);
    rewriteZooKeeperInstance(inst, newInstanceId, opts.oldPass, opts.newPass);
    if (opts.oldPass != null) {
        deleteInstance(inst, opts.oldPass);
    }
    System.out.println("New instance id is " + newInstanceId);
    System.out.println("Be sure to put your new secret in accumulo-site.xml");
}
Also used: VolumeManager(org.apache.accumulo.server.fs.VolumeManager), ClientOpts(org.apache.accumulo.server.cli.ClientOpts), Instance(org.apache.accumulo.core.client.Instance), ArrayList(java.util.ArrayList)
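
Note the argsList trick in main: bare --old and --new flags are prepended to the user's arguments. Accumulo's option classes are built on JCommander, which prompts on the console for parameters declared with password = true, so both secrets are entered interactively rather than passed on the command line. A hypothetical minimal options class showing that parameter shape (the names and descriptions here are illustrative, not ChangeSecret's actual fields):

import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;

public class PasswordOptsSketch {

    // password = true tells JCommander to prompt on the console for the value,
    // so the secret never appears in argv or shell history.
    @Parameter(names = "--old", password = true, description = "old instance secret")
    String oldPass;

    @Parameter(names = "--new", password = true, description = "new instance secret")
    String newPass;

    public static void main(String[] args) {
        PasswordOptsSketch opts = new PasswordOptsSketch();
        JCommander.newBuilder().addObject(opts).build().parse(args);
        System.out.println("secrets captured: " + (opts.oldPass != null && opts.newPass != null));
    }
}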

Aggregations

VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 57 usages
Path (org.apache.hadoop.fs.Path): 30 usages
IOException (java.io.IOException): 17 usages
Test (org.junit.Test): 17 usages
Key (org.apache.accumulo.core.data.Key): 14 usages
HashMap (java.util.HashMap): 13 usages
Value (org.apache.accumulo.core.data.Value): 13 usages
Scanner (org.apache.accumulo.core.client.Scanner): 12 usages
ArrayList (java.util.ArrayList): 11 usages
FileRef (org.apache.accumulo.server.fs.FileRef): 10 usages
Connector (org.apache.accumulo.core.client.Connector): 9 usages
Instance (org.apache.accumulo.core.client.Instance): 9 usages
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 7 usages
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 7 usages
AccumuloServerContext (org.apache.accumulo.server.AccumuloServerContext): 7 usages
ServerConfigurationFactory (org.apache.accumulo.server.conf.ServerConfigurationFactory): 7 usages
File (java.io.File): 6 usages
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 6 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 6 usages
Text (org.apache.hadoop.io.Text): 6 usages