Example 21 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

From class BulkFailedCopyProcessor, method process.

@Override
public void process(String workID, byte[] data) {
    // The work unit encodes "sourcePath,destPath" as a UTF-8 string.
    String[] paths = new String(data, UTF_8).split(",");
    Path orig = new Path(paths[0]);
    Path dest = new Path(paths[1]);
    // Copy to a temporary name first, then rename, so the destination
    // file never appears partially written.
    Path tmp = new Path(dest.getParent(), dest.getName() + ".tmp");
    try {
        VolumeManager vm = VolumeManagerImpl.get(SiteConfiguration.getInstance());
        // Source and destination may live on different volumes, so resolve
        // a FileSystem for each.
        FileSystem origFs = vm.getVolumeByPath(orig).getFileSystem();
        FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
        FileUtil.copy(origFs, orig, destFs, tmp, false, true, CachedConfiguration.getInstance());
        destFs.rename(tmp, dest);
        log.debug("copied {} to {}", orig, dest);
    } catch (IOException ex) {
        try {
            // On failure, create an empty file at the destination to mark
            // the copy as failed rather than retrying forever.
            VolumeManager vm = VolumeManagerImpl.get(SiteConfiguration.getInstance());
            FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
            destFs.create(dest).close();
            log.warn("marked " + dest + " failed", ex);
        } catch (IOException e) {
            log.error("Unable to create failure flag file " + dest, e);
        }
    }
}
Also used : Path (org.apache.hadoop.fs.Path), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException)
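
The copy-then-rename idiom above is the standard way to make a file appear atomically on a Hadoop filesystem: readers never observe the destination half-written, because the rename either fully succeeds or does nothing. A minimal standalone sketch of the same pattern using only plain Hadoop APIs (the class and method names here are illustrative, not part of Accumulo):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

// Generic copy-then-rename, independent of Accumulo's VolumeManager.
public final class AtomicCopy {
    public static void copyAtomically(Path orig, Path dest, Configuration conf) throws IOException {
        FileSystem origFs = orig.getFileSystem(conf);
        FileSystem destFs = dest.getFileSystem(conf);
        // Write under a temporary name so readers never see a partial file.
        Path tmp = new Path(dest.getParent(), dest.getName() + ".tmp");
        FileUtil.copy(origFs, orig, destFs, tmp, false /* deleteSource */, true /* overwrite */, conf);
        // A rename within one HDFS namespace is atomic: dest appears complete or not at all.
        if (!destFs.rename(tmp, dest)) {
            throw new IOException("rename " + tmp + " -> " + dest + " failed");
        }
    }
}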

Example 22 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

From class LocalityCheck, method run.

public int run(String[] args) throws Exception {
    ClientOpts opts = new ClientOpts();
    opts.parseArgs(LocalityCheck.class.getName(), args);
    VolumeManager fs = VolumeManagerImpl.get();
    Connector connector = opts.getConnector();
    // Scan the metadata table for each tablet's data files and current location.
    Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    scanner.setRange(MetadataSchema.TabletsSection.getRange());
    Map<String, Long> totalBlocks = new HashMap<>();
    Map<String, Long> localBlocks = new HashMap<>();
    ArrayList<String> files = new ArrayList<>();
    for (Entry<Key, Value> entry : scanner) {
        Key key = entry.getKey();
        if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
            // Within a tablet's row, "file" entries sort before the "loc" entry,
            // so by the time the location is seen all of its files are collected.
            String location = entry.getValue().toString();
            String[] parts = location.split(":");
            String host = parts[0];
            addBlocks(fs, host, files, totalBlocks, localBlocks);
            files.clear();
        } else if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
            files.add(fs.getFullPath(key).toString());
        }
    }
    System.out.println(" Server         %local  total blocks");
    for (Entry<String, Long> entry : totalBlocks.entrySet()) {
        final String host = entry.getKey();
        final Long blocksForHost = entry.getValue();
        // Percentage of HDFS blocks with a replica on the same host as the tablet server.
        System.out.println(String.format("%15s %5.1f %8d", host, (localBlocks.get(host) * 100.) / blocksForHost, blocksForHost));
    }
    return 0;
}
Also used : VolumeManager (org.apache.accumulo.server.fs.VolumeManager), Connector (org.apache.accumulo.core.client.Connector), Scanner (org.apache.accumulo.core.client.Scanner), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), ClientOpts (org.apache.accumulo.server.cli.ClientOpts), Value (org.apache.accumulo.core.data.Value), Key (org.apache.accumulo.core.data.Key)
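
The addBlocks helper is not shown in this snippet. Below is a hedged sketch of what it plausibly does, built on the standard Hadoop block-location API; this reconstruction is an assumption, not the actual LocalityCheck code:

import java.util.List;
import java.util.Map;

import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical reconstruction: for each file, count its HDFS blocks and the
// subset that have a replica on the tablet server's host.
static void addBlocks(VolumeManager vm, String host, List<String> files,
        Map<String, Long> totalBlocks, Map<String, Long> localBlocks) throws Exception {
    long all = 0;
    long local = 0;
    for (String file : files) {
        Path path = new Path(file);
        FileSystem fs = vm.getVolumeByPath(path).getFileSystem();
        FileStatus status = fs.getFileStatus(path);
        for (BlockLocation loc : fs.getFileBlockLocations(status, 0, status.getLen())) {
            all++;
            for (String blockHost : loc.getHosts()) {
                if (blockHost.equals(host)) {
                    local++;
                    break;
                }
            }
        }
    }
    // Accumulate per host so multiple tablets on the same server sum up;
    // updating both maps together also keeps the later percentage math safe.
    totalBlocks.merge(host, all, Long::sum);
    localBlocks.merge(host, local, Long::sum);
}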

Example 23 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

From class MasterMetadataUtil, method fixSplit.

private static KeyExtent fixSplit(ClientContext context, Table.ID tableId, Text metadataEntry, Text metadataPrevEndRow, Value oper, double splitRatio, TServerInstance tserver, String time, long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
    if (metadataPrevEndRow == null)
        // something is wrong: a split tablet should always have a prev end row
        throw new AccumuloException("Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);
    // check to see if prev tablet exist in metadata tablet
    Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(tableId, metadataPrevEndRow)));
    try (ScannerImpl scanner2 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
        VolumeManager fs = VolumeManagerImpl.get();
        // If the prev tablet's metadata row does not exist, the split never got
        // past its first phase; roll it back. Otherwise finish the split.
        if (!scanner2.iterator().hasNext()) {
            log.info("Rolling back incomplete split {} {}", metadataEntry, metadataPrevEndRow);
            MetadataTableUtil.rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), context, lock);
            return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
        } else {
            log.info("Finishing incomplete split {} {}", metadataEntry, metadataPrevEndRow);
            List<FileRef> highDatafilesToRemove = new ArrayList<>();
            SortedMap<FileRef, DataFileValue> origDatafileSizes = new TreeMap<>();
            SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<>();
            SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<>();
            try (Scanner scanner3 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
                Key rowKey = new Key(metadataEntry);
                scanner3.fetchColumnFamily(DataFileColumnFamily.NAME);
                scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
                for (Entry<Key, Value> entry : scanner3) {
                    if (entry.getKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
                        origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
                    }
                }
            }
            MetadataTableUtil.splitDatafiles(metadataPrevEndRow, splitRatio, new HashMap<>(), origDatafileSizes, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
            MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, context, lock);
            return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
        }
    }
}
Also used : AccumuloException (org.apache.accumulo.core.client.AccumuloException), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), Scanner (org.apache.accumulo.core.client.Scanner), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), ArrayList (java.util.ArrayList), Text (org.apache.hadoop.io.Text), Range (org.apache.accumulo.core.data.Range), TreeMap (java.util.TreeMap), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl), FileRef (org.apache.accumulo.server.fs.FileRef), Value (org.apache.accumulo.core.data.Value), Key (org.apache.accumulo.core.data.Key), PartialKey (org.apache.accumulo.core.data.PartialKey)
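
The size bookkeeping done by MetadataTableUtil.splitDatafiles can be illustrated with a deliberately reduced sketch: each file's size and entry-count estimate is apportioned between the low and high tablets by splitRatio. This is a simplified model only; the real method also uses per-file row ranges to assign some files wholly to one side and to populate highDatafilesToRemove:

import java.util.Map;
import java.util.SortedMap;

import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.server.fs.FileRef;

// Simplified model of splitting per-file estimates by splitRatio.
static void splitEstimates(double splitRatio, SortedMap<FileRef, DataFileValue> orig,
        SortedMap<FileRef, DataFileValue> low, SortedMap<FileRef, DataFileValue> high) {
    for (Map.Entry<FileRef, DataFileValue> e : orig.entrySet()) {
        DataFileValue dfv = e.getValue();
        long lowSize = (long) Math.floor(dfv.getSize() * splitRatio);
        long lowEntries = (long) Math.floor(dfv.getNumEntries() * splitRatio);
        low.put(e.getKey(), new DataFileValue(lowSize, lowEntries));
        // The high tablet gets the remainder so totals stay consistent.
        high.put(e.getKey(), new DataFileValue(dfv.getSize() - lowSize, dfv.getNumEntries() - lowEntries));
    }
}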

Example 24 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

From class MetadataTableUtil, method getBulkFilesLoaded.

public static List<FileRef> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid) throws IOException {
    List<FileRef> result = new ArrayList<>();
    try (Scanner mscanner = new IsolatedScanner(conn.createScanner(extent.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY))) {
        VolumeManager fs = VolumeManagerImpl.get();
        mscanner.setRange(extent.toMetadataRange());
        mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
        for (Entry<Key, Value> entry : mscanner) {
            // The value of a bulk-file entry is the transaction id of the load that created it.
            if (Long.parseLong(entry.getValue().toString()) == tid) {
                result.add(new FileRef(fs, entry.getKey()));
            }
        }
        return result;
    } catch (TableNotFoundException ex) {
        // should be impossible: the metadata table always exists
        throw new RuntimeException("Oh no! The metadata table has vanished!!", ex);
    }
}
Also used : IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner), Scanner (org.apache.accumulo.core.client.Scanner), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), FileRef (org.apache.accumulo.server.fs.FileRef), ArrayList (java.util.ArrayList), Value (org.apache.accumulo.core.data.Value), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), Key (org.apache.accumulo.core.data.Key), PartialKey (org.apache.accumulo.core.data.PartialKey)
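
A minimal usage sketch for getBulkFilesLoaded; the caller name is a placeholder, and it assumes a live Connector and a known bulk transaction id:

import java.io.IOException;
import java.util.List;

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.server.fs.FileRef;
import org.apache.accumulo.server.util.MetadataTableUtil;

// Hypothetical caller: print the files a given bulk-load transaction brought in
// for one tablet, e.g. when verifying that a load completed.
static void printBulkFiles(Connector conn, KeyExtent extent, long tid) throws IOException {
    List<FileRef> files = MetadataTableUtil.getBulkFilesLoaded(conn, extent, tid);
    for (FileRef ref : files) {
        System.out.println("tid " + tid + " loaded " + ref.path());
    }
}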

Example 25 with VolumeManager

Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.

From class MetadataTableUtil, method getDataFileSizes.

public static SortedMap<FileRef, DataFileValue> getDataFileSizes(KeyExtent extent, ClientContext context) throws IOException {
    TreeMap<FileRef, DataFileValue> sizes = new TreeMap<>();
    try (Scanner mdScanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
        mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        Text row = extent.getMetadataEntry();
        VolumeManager fs = VolumeManagerImpl.get();
        // Restrict the scan to this tablet's row and the data-file column family.
        Key endKey = new Key(row, DataFileColumnFamily.NAME, new Text(""));
        endKey = endKey.followingKey(PartialKey.ROW_COLFAM);
        mdScanner.setRange(new Range(new Key(row), endKey));
        for (Entry<Key, Value> entry : mdScanner) {
            if (!entry.getKey().getRow().equals(row))
                break;
            DataFileValue dfv = new DataFileValue(entry.getValue().get());
            sizes.put(new FileRef(fs, entry.getKey()), dfv);
        }
        return sizes;
    }
}
Also used : IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner), Scanner (org.apache.accumulo.core.client.Scanner), VolumeManager (org.apache.accumulo.server.fs.VolumeManager), ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), FileRef (org.apache.accumulo.server.fs.FileRef), Value (org.apache.accumulo.core.data.Value), Text (org.apache.hadoop.io.Text), TreeMap (java.util.TreeMap), Range (org.apache.accumulo.core.data.Range), Key (org.apache.accumulo.core.data.Key), PartialKey (org.apache.accumulo.core.data.PartialKey)
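
A small usage sketch for getDataFileSizes; the helper name is made up. Summing the per-file estimates gives a tablet's approximate on-disk size, the kind of figure split decisions are based on:

import java.io.IOException;

import org.apache.accumulo.core.client.impl.ClientContext;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.server.util.MetadataTableUtil;

// Hypothetical caller: estimate a tablet's total data size from its metadata entries.
static long estimateTabletSize(KeyExtent extent, ClientContext context) throws IOException {
    long total = 0;
    for (DataFileValue dfv : MetadataTableUtil.getDataFileSizes(extent, context).values()) {
        total += dfv.getSize();
    }
    return total;
}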

Aggregations

VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 57 uses
Path (org.apache.hadoop.fs.Path): 30 uses
IOException (java.io.IOException): 17 uses
Test (org.junit.Test): 17 uses
Key (org.apache.accumulo.core.data.Key): 14 uses
HashMap (java.util.HashMap): 13 uses
Value (org.apache.accumulo.core.data.Value): 13 uses
Scanner (org.apache.accumulo.core.client.Scanner): 12 uses
ArrayList (java.util.ArrayList): 11 uses
FileRef (org.apache.accumulo.server.fs.FileRef): 10 uses
Connector (org.apache.accumulo.core.client.Connector): 9 uses
Instance (org.apache.accumulo.core.client.Instance): 9 uses
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 7 uses
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 7 uses
AccumuloServerContext (org.apache.accumulo.server.AccumuloServerContext): 7 uses
ServerConfigurationFactory (org.apache.accumulo.server.conf.ServerConfigurationFactory): 7 uses
File (java.io.File): 6 uses
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 6 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 6 uses
Text (org.apache.hadoop.io.Text): 6 uses