
Example 51 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

The class BulkImporter, method assignMapFiles.

private List<KeyExtent> assignMapFiles(ClientContext context, HostAndPort location, Map<KeyExtent, List<PathSize>> assignmentsPerTablet) throws AccumuloException, AccumuloSecurityException {
    try {
        long timeInMillis = context.getConfiguration().getTimeInMillis(Property.TSERV_BULK_TIMEOUT);
        TabletClientService.Iface client = ThriftUtil.getTServerClient(location, context, timeInMillis);
        try {
            HashMap<KeyExtent, Map<String, org.apache.accumulo.core.dataImpl.thrift.MapFileInfo>> files = new HashMap<>();
            for (Entry<KeyExtent, List<PathSize>> entry : assignmentsPerTablet.entrySet()) {
                HashMap<String, org.apache.accumulo.core.dataImpl.thrift.MapFileInfo> tabletFiles = new HashMap<>();
                files.put(entry.getKey(), tabletFiles);
                for (PathSize pathSize : entry.getValue()) {
                    org.apache.accumulo.core.dataImpl.thrift.MapFileInfo mfi = new org.apache.accumulo.core.dataImpl.thrift.MapFileInfo(pathSize.estSize);
                    tabletFiles.put(pathSize.path.toString(), mfi);
                }
            }
            log.debug("Asking {} to bulk load {}", location, files);
            List<TKeyExtent> failures = client.bulkImport(TraceUtil.traceInfo(), context.rpcCreds(), tid, files.entrySet().stream().collect(Collectors.toMap(entry -> entry.getKey().toThrift(), Entry::getValue)), setTime);
            return failures.stream().map(KeyExtent::fromThrift).collect(Collectors.toList());
        } finally {
            ThriftUtil.returnClient((TServiceClient) client, context);
        }
    } catch (ThriftSecurityException e) {
        throw new AccumuloSecurityException(e.user, e.code, e);
    } catch (Exception t) {
        log.error("Encountered unknown exception in assignMapFiles.", t);
        throw new AccumuloException(t);
    }
}
Also used : HashMap(java.util.HashMap) TKeyExtent(org.apache.accumulo.core.dataImpl.thrift.TKeyExtent) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) Entry(java.util.Map.Entry) List(java.util.List) ArrayList(java.util.ArrayList) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) ThriftSecurityException(org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException) IOException(java.io.IOException) TabletClientService(org.apache.accumulo.core.tabletserver.thrift.TabletClientService) Map(java.util.Map) TreeMap(java.util.TreeMap)
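
The snippet above converts each KeyExtent to its thrift form (TKeyExtent) before the bulkImport RPC and converts the returned failures back. Below is a minimal, self-contained sketch of that round trip; it relies only on the toThrift()/fromThrift() calls visible above and on the KeyExtent(TableId, endRow, prevEndRow) constructor used in the later examples, and the table id and row values are illustrative rather than taken from the original code.

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent;
import org.apache.hadoop.io.Text;

public class KeyExtentThriftRoundTrip {
    public static void main(String[] args) {
        // An extent covering rows (m, z] of an illustrative table id "1a".
        KeyExtent extent = new KeyExtent(TableId.of("1a"), new Text("z"), new Text("m"));
        // Convert to the thrift form that crosses the RPC boundary ...
        TKeyExtent tExtent = extent.toThrift();
        // ... and back, as assignMapFiles does with the returned failures.
        KeyExtent roundTripped = KeyExtent.fromThrift(tExtent);
        System.out.println(extent.equals(roundTripped)); // expected: true
    }
}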

Example 52 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

The class TableDiskUsage, method getDiskUsage.

public static Map<TreeSet<String>, Long> getDiskUsage(Set<TableId> tableIds, VolumeManager fs, AccumuloClient client) throws IOException {
    TableDiskUsage tdu = new TableDiskUsage();
    // Add each tableID
    for (TableId tableId : tableIds) tdu.addTable(tableId);
    HashSet<TableId> tablesReferenced = new HashSet<>(tableIds);
    HashSet<TableId> emptyTableIds = new HashSet<>();
    HashSet<String> nameSpacesReferenced = new HashSet<>();
    // For each table ID
    for (TableId tableId : tableIds) {
        Scanner mdScanner;
        try {
            mdScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
        } catch (TableNotFoundException e) {
            throw new RuntimeException(e);
        }
        mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        mdScanner.setRange(new KeyExtent(tableId, null, null).toMetaRange());
        if (!mdScanner.iterator().hasNext()) {
            emptyTableIds.add(tableId);
        }
        // Read each file referenced by that table
        for (Entry<Key, Value> entry : mdScanner) {
            String file = entry.getKey().getColumnQualifier().toString();
            String[] parts = file.split("/");
            // the filename
            String uniqueName = parts[parts.length - 1];
            if (file.contains(":") || file.startsWith("../")) {
                String ref = parts[parts.length - 3];
                // Track any tables which are referenced externally by the current table
                if (!ref.equals(tableId.canonical())) {
                    tablesReferenced.add(TableId.of(ref));
                }
                if (file.contains(":") && parts.length > 3) {
                    List<String> base = Arrays.asList(Arrays.copyOf(parts, parts.length - 3));
                    nameSpacesReferenced.add(Joiner.on("/").join(base));
                }
            }
            // add this file to this table
            tdu.linkFileAndTable(tableId, uniqueName);
        }
    }
    // Each table seen (provided by the user, or referenced by a table the user provided)
    for (TableId tableId : tablesReferenced) {
        for (String tableDir : nameSpacesReferenced) {
            // Find each file and add its size
            Path path = new Path(tableDir + "/" + tableId);
            if (!fs.exists(path)) {
                log.debug("Table ID directory {} does not exist.", path);
                continue;
            }
            log.info("Get all files recursively in {}", path);
            RemoteIterator<LocatedFileStatus> ri = fs.listFiles(path, true);
            while (ri.hasNext()) {
                FileStatus status = ri.next();
                String name = status.getPath().getName();
                tdu.addFileSize(name, status.getLen());
            }
        }
    }
    Map<TableId, String> reverseTableIdMap = ((ClientContext) client).getTableIdToNameMap();
    TreeMap<TreeSet<String>, Long> usage = new TreeMap<>((o1, o2) -> {
        int len1 = o1.size();
        int len2 = o2.size();
        int min = Math.min(len1, len2);
        Iterator<String> iter1 = o1.iterator();
        Iterator<String> iter2 = o2.iterator();
        int count = 0;
        while (count < min) {
            String s1 = iter1.next();
            String s2 = iter2.next();
            int cmp = s1.compareTo(s2);
            if (cmp != 0)
                return cmp;
            count++;
        }
        return len1 - len2;
    });
    for (Entry<List<TableId>, Long> entry : tdu.calculateUsage().entrySet()) {
        TreeSet<String> tableNames = new TreeSet<>();
        // Convert size shared by each table id into size shared by each table name
        for (TableId tableId : entry.getKey()) tableNames.add(reverseTableIdMap.get(tableId));
        // Map the set of table names to the file size they share
        usage.put(tableNames, entry.getValue());
    }
    if (!emptyTableIds.isEmpty()) {
        TreeSet<String> emptyTables = new TreeSet<>();
        for (TableId tableId : emptyTableIds) {
            emptyTables.add(reverseTableIdMap.get(tableId));
        }
        usage.put(emptyTables, 0L);
    }
    return usage;
}
Also used : TableId(org.apache.accumulo.core.data.TableId) Scanner(org.apache.accumulo.core.client.Scanner) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) List(java.util.List) HashSet(java.util.HashSet) Path(org.apache.hadoop.fs.Path) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) TreeMap(java.util.TreeMap) Value(org.apache.accumulo.core.data.Value) Key(org.apache.accumulo.core.data.Key)
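
Both this example and Example 54 below restrict a metadata scan to a single table by turning a whole-table KeyExtent into a metadata range. A short sketch of that pattern follows; the helper name is made up for illustration, it uses only the Scanner calls shown above, and the imports are the same classes referenced in this example.

// Hypothetical helper showing the metadata-range pattern used in getDiskUsage above.
static Scanner metadataScannerForTable(AccumuloClient client, TableId tableId)
        throws TableNotFoundException {
    Scanner mdScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    // Only the data-file entries are needed for disk-usage accounting.
    mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    // A KeyExtent with null end row and prev end row spans the whole table;
    // toMetaRange() maps it onto the corresponding rows of the metadata table.
    mdScanner.setRange(new KeyExtent(tableId, null, null).toMetaRange());
    return mdScanner;
}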

Example 53 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

The class VerifyTabletAssignments, method checkTable.

private static void checkTable(final ClientContext context, final Opts opts, String tableName, HashSet<KeyExtent> check) throws AccumuloException, AccumuloSecurityException, TableNotFoundException, InterruptedException {
    if (check == null)
        System.out.println("Checking table " + tableName);
    else
        System.out.println("Checking table " + tableName + " again, failures " + check.size());
    TreeMap<KeyExtent, String> tabletLocations = new TreeMap<>();
    TableId tableId = context.getTableNameToIdMap().get(tableName);
    MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);
    final HashSet<KeyExtent> failures = new HashSet<>();
    Map<HostAndPort, List<KeyExtent>> extentsPerServer = new TreeMap<>();
    for (Entry<KeyExtent, String> entry : tabletLocations.entrySet()) {
        KeyExtent keyExtent = entry.getKey();
        String loc = entry.getValue();
        if (loc == null)
            System.out.println(" Tablet " + keyExtent + " has no location");
        else if (opts.verbose)
            System.out.println(" Tablet " + keyExtent + " is located at " + loc);
        if (loc != null) {
            final HostAndPort parsedLoc = HostAndPort.fromString(loc);
            List<KeyExtent> extentList = extentsPerServer.computeIfAbsent(parsedLoc, k -> new ArrayList<>());
            if (check == null || check.contains(keyExtent))
                extentList.add(keyExtent);
        }
    }
    ExecutorService tp = ThreadPools.createFixedThreadPool(20, "CheckTabletServer", false);
    for (final Entry<HostAndPort, List<KeyExtent>> entry : extentsPerServer.entrySet()) {
        Runnable r = () -> {
            try {
                checkTabletServer(context, entry, failures);
            } catch (Exception e) {
                log.error("Failure on tablet server '" + entry.getKey() + ".", e);
                failures.addAll(entry.getValue());
            }
        };
        tp.execute(r);
    }
    tp.shutdown();
    // Block until every per-server check task submitted above has finished.
    while (!tp.awaitTermination(1, TimeUnit.HOURS)) {
    }
    if (!failures.isEmpty())
        checkTable(context, opts, tableName, failures);
}
Also used : TableId(org.apache.accumulo.core.data.TableId) TreeMap(java.util.TreeMap) TKeyExtent(org.apache.accumulo.core.dataImpl.thrift.TKeyExtent) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) NoSuchScanIDException(org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException) ThriftSecurityException(org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException) TException(org.apache.thrift.TException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) HostAndPort(org.apache.accumulo.core.util.HostAndPort) ExecutorService(java.util.concurrent.ExecutorService) ArrayList(java.util.ArrayList) List(java.util.List) HashSet(java.util.HashSet)
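
checkTable fans the verification out by server: it first buckets every located tablet under the tablet server hosting it, then submits one task per server to the thread pool. A compact sketch of just that grouping step, using the same computeIfAbsent idiom as the code above (the method name is illustrative, and the imports match the classes listed in this example):

// Hypothetical sketch of the grouping step: bucket each tablet's extent
// under the server that currently hosts it, skipping unassigned tablets.
static Map<HostAndPort, List<KeyExtent>> groupByServer(Map<KeyExtent, String> tabletLocations) {
    Map<HostAndPort, List<KeyExtent>> extentsPerServer = new TreeMap<>();
    for (Entry<KeyExtent, String> entry : tabletLocations.entrySet()) {
        String loc = entry.getValue();
        if (loc == null)
            continue; // tablet has no current location, nothing to check
        HostAndPort server = HostAndPort.fromString(loc);
        extentsPerServer.computeIfAbsent(server, k -> new ArrayList<>()).add(entry.getKey());
    }
    return extentsPerServer;
}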

Example 54 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

The class FindOfflineTablets, method findOffline.

static int findOffline(ServerContext context, String tableName) throws TableNotFoundException {
    final AtomicBoolean scanning = new AtomicBoolean(false);
    LiveTServerSet tservers = new LiveTServerSet(context, new Listener() {

        @Override
        public void update(LiveTServerSet current, Set<TServerInstance> deleted, Set<TServerInstance> added) {
            if (!deleted.isEmpty() && scanning.get())
                log.warn("Tablet servers deleted while scanning: {}", deleted);
            if (!added.isEmpty() && scanning.get())
                log.warn("Tablet servers added while scanning: {}", added);
        }
    });
    tservers.startListeningForTabletServerChanges();
    scanning.set(true);
    Iterator<TabletLocationState> zooScanner = TabletStateStore.getStoreForLevel(DataLevel.ROOT, context).iterator();
    int offline = 0;
    System.out.println("Scanning zookeeper");
    if ((offline = checkTablets(context, zooScanner, tservers)) > 0)
        return offline;
    if (RootTable.NAME.equals(tableName))
        return 0;
    System.out.println("Scanning " + RootTable.NAME);
    Iterator<TabletLocationState> rootScanner = new MetaDataTableScanner(context, TabletsSection.getRange(), RootTable.NAME);
    if ((offline = checkTablets(context, rootScanner, tservers)) > 0)
        return offline;
    if (MetadataTable.NAME.equals(tableName))
        return 0;
    System.out.println("Scanning " + MetadataTable.NAME);
    Range range = TabletsSection.getRange();
    if (tableName != null) {
        TableId tableId = context.getTableId(tableName);
        range = new KeyExtent(tableId, null, null).toMetaRange();
    }
    try (MetaDataTableScanner metaScanner = new MetaDataTableScanner(context, range, MetadataTable.NAME)) {
        return checkTablets(context, metaScanner, tservers);
    }
}
Also used : TableId(org.apache.accumulo.core.data.TableId) Listener(org.apache.accumulo.server.manager.LiveTServerSet.Listener) Range(org.apache.accumulo.core.data.Range) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TServerInstance(org.apache.accumulo.core.metadata.TServerInstance) LiveTServerSet(org.apache.accumulo.server.manager.LiveTServerSet) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) MetaDataTableScanner(org.apache.accumulo.server.manager.state.MetaDataTableScanner) TabletLocationState(org.apache.accumulo.core.metadata.TabletLocationState)

Example 55 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

The class HostRegexTableLoadBalancerTest, method testBalance.

@Test
public void testBalance() {
    init();
    Set<KeyExtent> migrations = new HashSet<>();
    List<TabletMigration> migrationsOut = new ArrayList<>();
    long wait = this.balance(Collections.unmodifiableSortedMap(createCurrent(15)), migrations, migrationsOut);
    assertEquals(20000, wait);
    // should balance four tablets in one of the tables before reaching max
    assertEquals(4, migrationsOut.size());
    // now balance again passing in the new migrations
    for (TabletMigration m : migrationsOut) {
        migrations.add(m.tablet);
    }
    migrationsOut.clear();
    wait = this.balance(Collections.unmodifiableSortedMap(createCurrent(15)), migrations, migrationsOut);
    assertEquals(20000, wait);
    // should balance four tablets in one of the other tables before reaching max
    assertEquals(4, migrationsOut.size());
    // now balance again passing in the new migrations
    for (TabletMigration m : migrationsOut) {
        migrations.add(m.tablet);
    }
    migrationsOut.clear();
    wait = this.balance(Collections.unmodifiableSortedMap(createCurrent(15)), migrations, migrationsOut);
    assertEquals(20000, wait);
    // should balance four tablets in one of the other tables before reaching max
    assertEquals(4, migrationsOut.size());
    // now balance again passing in the new migrations
    for (TabletMigration m : migrationsOut) {
        migrations.add(m.tablet);
    }
    migrationsOut.clear();
    wait = this.balance(Collections.unmodifiableSortedMap(createCurrent(15)), migrations, migrationsOut);
    assertEquals(20000, wait);
    // no more balancing to do
    assertEquals(0, migrationsOut.size());
}
Also used : TabletMigration(org.apache.accumulo.server.master.state.TabletMigration) ArrayList(java.util.ArrayList) TKeyExtent(org.apache.accumulo.core.dataImpl.thrift.TKeyExtent) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) HashSet(java.util.HashSet) Test(org.junit.Test)

Aggregations

KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent) 239
Text (org.apache.hadoop.io.Text) 98
ArrayList (java.util.ArrayList) 72
HashMap (java.util.HashMap) 60
Value (org.apache.accumulo.core.data.Value) 57
Key (org.apache.accumulo.core.data.Key) 56
TableId (org.apache.accumulo.core.data.TableId) 53
Test (org.junit.Test) 52
Mutation (org.apache.accumulo.core.data.Mutation) 47
IOException (java.io.IOException) 40
List (java.util.List) 40
TKeyExtent (org.apache.accumulo.core.dataImpl.thrift.TKeyExtent) 39
HashSet (java.util.HashSet) 38
TreeMap (java.util.TreeMap) 38
Range (org.apache.accumulo.core.data.Range) 38
Map (java.util.Map) 33
Scanner (org.apache.accumulo.core.client.Scanner) 31
Entry (java.util.Map.Entry) 30
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient) 30
Test (org.junit.jupiter.api.Test) 30