
Example 11 with TableId

use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

the class TableOperationsImpl method clone.

@Override
public void clone(String srcTableName, String newTableName, CloneConfiguration config) throws AccumuloSecurityException, TableNotFoundException, AccumuloException, TableExistsException {
    NEW_TABLE_NAME.validate(newTableName);
    TableId srcTableId = context.getTableId(srcTableName);
    if (config.isFlush())
        _flush(srcTableId, null, null, true);
    Set<String> propertiesToExclude = config.getPropertiesToExclude();
    if (propertiesToExclude == null)
        propertiesToExclude = Collections.emptySet();
    Map<String, String> propertiesToSet = config.getPropertiesToSet();
    if (propertiesToSet == null)
        propertiesToSet = Collections.emptyMap();
    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(srcTableId.canonical().getBytes(UTF_8)), ByteBuffer.wrap(newTableName.getBytes(UTF_8)), ByteBuffer.wrap(Boolean.toString(config.isKeepOffline()).getBytes(UTF_8)));
    Map<String, String> opts = new HashMap<>();
    for (Entry<String, String> entry : propertiesToSet.entrySet()) {
        if (entry.getKey().startsWith(CLONE_EXCLUDE_PREFIX))
            throw new IllegalArgumentException("Property can not start with " + CLONE_EXCLUDE_PREFIX);
        opts.put(entry.getKey(), entry.getValue());
    }
    for (String prop : propertiesToExclude) {
        opts.put(CLONE_EXCLUDE_PREFIX + prop, "");
    }
    doTableFateOperation(newTableName, AccumuloException.class, FateOperation.TABLE_CLONE, args, opts);
}
Also used : TableId(org.apache.accumulo.core.data.TableId) HashMap(java.util.HashMap) ByteBuffer(java.nio.ByteBuffer)
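
A caller normally reaches this implementation through the public TableOperations API. Below is a minimal sketch, assuming Accumulo 2.x's CloneConfiguration.builder() and placeholder connection details (the instance name, ZooKeeper hosts, credentials, table names, and the chosen properties are hypothetical):

import java.util.Map;
import java.util.Set;

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.admin.CloneConfiguration;

public class CloneTableSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection details; substitute a real instance, ZooKeepers, and credentials.
        try (AccumuloClient client = Accumulo.newClient()
                .to("myInstance", "zk1:2181").as("root", "secret").build()) {
            // Flush the source first, override one property on the clone, and exclude another
            // from being copied over.
            CloneConfiguration config = CloneConfiguration.builder()
                    .setFlush(true)
                    .setPropertiesToSet(Map.of("table.compaction.major.ratio", "2"))
                    .setPropertiesToExclude(Set.of("table.split.threshold"))
                    .setKeepOffline(false)
                    .build();
            // Delegates to the TableOperationsImpl.clone(...) shown above.
            client.tableOperations().clone("sourceTable", "clonedTable", config);
        }
    }
}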

Example 12 with TableId

use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

the class ReplicationOperationsImpl method referencedFiles.

@Override
public Set<String> referencedFiles(String tableName) throws TableNotFoundException {
    log.debug("Collecting referenced files for replication of table {}", tableName);
    TableId tableId = context.getTableId(tableName);
    log.debug("Found id of {} for name {}", tableId, tableName);
    // Get the WALs currently referenced by the table
    BatchScanner metaBs = context.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
    metaBs.setRanges(Collections.singleton(TabletsSection.getRange(tableId)));
    metaBs.fetchColumnFamily(LogColumnFamily.NAME);
    Set<String> wals = new HashSet<>();
    try {
        for (Entry<Key, Value> entry : metaBs) {
            LogEntry logEntry = LogEntry.fromMetaWalEntry(entry);
            wals.add(new Path(logEntry.filename).toString());
        }
    } finally {
        metaBs.close();
    }
    // And the WALs that need to be replicated for this table
    metaBs = context.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
    metaBs.setRanges(Collections.singleton(ReplicationSection.getRange()));
    metaBs.fetchColumnFamily(ReplicationSection.COLF);
    try {
        Text buffer = new Text();
        for (Entry<Key, Value> entry : metaBs) {
            if (tableId.equals(ReplicationSection.getTableId(entry.getKey()))) {
                ReplicationSection.getFile(entry.getKey(), buffer);
                wals.add(buffer.toString());
            }
        }
    } finally {
        metaBs.close();
    }
    return wals;
}
Also used : TableId(org.apache.accumulo.core.data.TableId) Path(org.apache.hadoop.fs.Path) BatchScanner(org.apache.accumulo.core.client.BatchScanner) Value(org.apache.accumulo.core.data.Value) Text(org.apache.hadoop.io.Text) Key(org.apache.accumulo.core.data.Key) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) HashSet(java.util.HashSet)
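
Outside the implementation class, this method is exposed through the client's (since deprecated) ReplicationOperations interface. A minimal sketch, again with hypothetical connection details and table name:

import java.util.Set;

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;

public class ReferencedFilesSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection details.
        try (AccumuloClient client = Accumulo.newClient()
                .to("myInstance", "zk1:2181").as("root", "secret").build()) {
            // referencedFiles(...) resolves the table name to its TableId, then scans the
            // metadata table for WALs referenced by, or queued for replication for, that table.
            Set<String> wals = client.replicationOperations().referencedFiles("myTable");
            wals.forEach(System.out::println);
        }
    }
}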

Example 13 with TableId

use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

the class KeyExtent method fromThrift.

/**
 * Create a KeyExtent from its Thrift form.
 *
 * @param tke
 *          the KeyExtent in its Thrift object form
 */
public static KeyExtent fromThrift(TKeyExtent tke) {
    TableId tableId = TableId.of(new String(ByteBufferUtil.toBytes(tke.table), UTF_8));
    Text endRow = tke.endRow == null ? null : new Text(ByteBufferUtil.toBytes(tke.endRow));
    Text prevEndRow = tke.prevEndRow == null ? null : new Text(ByteBufferUtil.toBytes(tke.prevEndRow));
    return new KeyExtent(tableId, endRow, prevEndRow);
}
Also used : TableId(org.apache.accumulo.core.data.TableId) Text(org.apache.hadoop.io.Text) TKeyExtent(org.apache.accumulo.core.dataImpl.thrift.TKeyExtent)
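
fromThrift is the inverse of KeyExtent.toThrift, so a round trip preserves the table id and the row bounds. A small sketch of that round trip (KeyExtent and TKeyExtent live in internal dataImpl packages, so this is illustrative rather than public API; the table id and rows are made up):

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent;
import org.apache.hadoop.io.Text;

public class KeyExtentThriftRoundTrip {
    public static void main(String[] args) {
        // A tablet of table "2a" covering rows (g, m]; a null prevEndRow would mean the first tablet.
        KeyExtent extent = new KeyExtent(TableId.of("2a"), new Text("m"), new Text("g"));
        // Serialize to the Thrift form sent over the wire...
        TKeyExtent tke = extent.toThrift();
        // ...and rebuild it with the method shown above.
        KeyExtent copy = KeyExtent.fromThrift(tke);
        System.out.println(copy.equals(extent)); // expected: true
    }
}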

Example 14 with TableId

use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

the class SimpleLoadBalancer method move.

/**
 * Select a tablet based on differences between table loads; if the loads are even, use the
 * busiest table
 */
List<TabletMigration> move(ServerCounts tooMuch, ServerCounts tooLittle, int count, Map<TableId, Map<TabletId, TabletStatistics>> donerTabletStats) {
    if (count == 0) {
        return Collections.emptyList();
    }
    List<TabletMigration> result = new ArrayList<>();
    // Copy counts so we can update them as we propose migrations
    Map<TableId, Integer> tooMuchMap = tabletCountsPerTable(tooMuch.status);
    Map<TableId, Integer> tooLittleMap = tabletCountsPerTable(tooLittle.status);
    for (int i = 0; i < count; i++) {
        TableId table;
        Integer tooLittleCount;
        if (tableToBalance == null) {
            // find a table to migrate
            // look for an uneven table count
            int biggestDifference = 0;
            TableId biggestDifferenceTable = null;
            for (var tableEntry : tooMuchMap.entrySet()) {
                TableId tableID = tableEntry.getKey();
                tooLittleMap.putIfAbsent(tableID, 0);
                int diff = tableEntry.getValue() - tooLittleMap.get(tableID);
                if (diff > biggestDifference) {
                    biggestDifference = diff;
                    biggestDifferenceTable = tableID;
                }
            }
            if (biggestDifference < 2) {
                table = busiest(tooMuch.status.getTableMap());
            } else {
                table = biggestDifferenceTable;
            }
        } else {
            // just balance the given table
            table = tableToBalance;
        }
        Map<TabletId, TabletStatistics> onlineTabletsForTable = donerTabletStats.get(table);
        try {
            if (onlineTabletsForTable == null) {
                onlineTabletsForTable = new HashMap<>();
                List<TabletStatistics> stats = getOnlineTabletsForTable(tooMuch.server, table);
                if (stats == null) {
                    log.warn("Unable to find tablets to move");
                    return result;
                }
                for (TabletStatistics stat : stats) onlineTabletsForTable.put(stat.getTabletId(), stat);
                donerTabletStats.put(table, onlineTabletsForTable);
            }
        } catch (Exception ex) {
            log.error("Unable to select a tablet to move", ex);
            return result;
        }
        TabletId tabletId = selectTablet(onlineTabletsForTable);
        onlineTabletsForTable.remove(tabletId);
        if (tabletId == null)
            return result;
        tooMuchMap.put(table, tooMuchMap.get(table) - 1);
        /*
         * If a table grows from 1 tablet then tooLittleMap.get(table) can return a null, since
         * there is only one tabletserver that holds all of the tablets. Here we check to see if in
         * fact that is the case and if so set the value to 0.
         */
        tooLittleCount = tooLittleMap.get(table);
        if (tooLittleCount == null) {
            tooLittleCount = 0;
        }
        tooLittleMap.put(table, tooLittleCount + 1);
        tooMuch.count--;
        tooLittle.count++;
        result.add(new TabletMigration(tabletId, tooMuch.server, tooLittle.server));
    }
    return result;
}
Also used : TableId(org.apache.accumulo.core.data.TableId) TabletMigration(org.apache.accumulo.core.spi.balancer.data.TabletMigration) ArrayList(java.util.ArrayList) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) AccumuloException(org.apache.accumulo.core.client.AccumuloException) TabletId(org.apache.accumulo.core.data.TabletId) TabletStatistics(org.apache.accumulo.core.spi.balancer.data.TabletStatistics)
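
The table-selection step above picks the table whose tablet count differs most between the overloaded and underloaded servers, and falls back to the busiest table when no table differs by 2 or more. A standalone sketch of just that heuristic over plain maps (the table names and counts are made up):

import java.util.Map;

public class BiggestDifferenceSketch {
    // Returns the table whose count on the overloaded server exceeds its count on the
    // underloaded server by the largest margin, or null when no table differs by 2 or more
    // (the balancer then falls back to the busiest table).
    static String pickTable(Map<String, Integer> tooMuch, Map<String, Integer> tooLittle) {
        int biggestDifference = 0;
        String biggestDifferenceTable = null;
        for (var e : tooMuch.entrySet()) {
            int diff = e.getValue() - tooLittle.getOrDefault(e.getKey(), 0);
            if (diff > biggestDifference) {
                biggestDifference = diff;
                biggestDifferenceTable = e.getKey();
            }
        }
        return biggestDifference < 2 ? null : biggestDifferenceTable;
    }

    public static void main(String[] args) {
        Map<String, Integer> overloaded = Map.of("t1", 10, "t2", 7);
        Map<String, Integer> underloaded = Map.of("t1", 4, "t2", 6);
        System.out.println(pickTable(overloaded, underloaded)); // prints t1 (difference of 6)
    }
}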

Example 15 with TableId

use of org.apache.accumulo.core.data.TableId in project accumulo by apache.

the class TableLoadBalancer method getAssignments.

@Override
public void getAssignments(AssignmentParameters params) {
    // separate the unassigned into tables
    Map<TableId, Map<TabletId, TabletServerId>> groupedUnassigned = new HashMap<>();
    params.unassignedTablets().forEach((tid, lastTserver) -> groupedUnassigned.computeIfAbsent(tid.getTable(), k -> new HashMap<>()).put(tid, lastTserver));
    for (Entry<TableId, Map<TabletId, TabletServerId>> e : groupedUnassigned.entrySet()) {
        Map<TabletId, TabletServerId> newAssignments = new HashMap<>();
        getBalancerForTable(e.getKey()).getAssignments(new AssignmentParamsImpl(params.currentStatus(), e.getValue(), newAssignments));
        newAssignments.forEach(params::addAssignment);
    }
}
Also used : TableId(org.apache.accumulo.core.data.TableId) HashMap(java.util.HashMap) TabletServerId(org.apache.accumulo.core.spi.balancer.data.TabletServerId) TabletId(org.apache.accumulo.core.data.TabletId) AssignmentParamsImpl(org.apache.accumulo.core.manager.balancer.AssignmentParamsImpl) HashMap(java.util.HashMap) Map(java.util.Map)
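
TableLoadBalancer hands each table's unassigned tablets to a per-table balancer, which getBalancerForTable resolves from the table.balancer property. A minimal sketch of pointing one table at a specific balancer class (the connection details and table name are hypothetical):

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;

public class PerTableBalancerSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection details.
        try (AccumuloClient client = Accumulo.newClient()
                .to("myInstance", "zk1:2181").as("root", "secret").build()) {
            // getBalancerForTable(...) in the snippet above consults this per-table property
            // when choosing which balancer receives the table's unassigned tablets.
            client.tableOperations().setProperty("myTable", "table.balancer",
                    "org.apache.accumulo.core.spi.balancer.SimpleLoadBalancer");
        }
    }
}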

Aggregations

TableId (org.apache.accumulo.core.data.TableId): 169
Text (org.apache.hadoop.io.Text): 64
HashMap (java.util.HashMap): 55
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 55
ArrayList (java.util.ArrayList): 45
Test (org.junit.Test): 43
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 41
Map (java.util.Map): 37
Key (org.apache.accumulo.core.data.Key): 36
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 34
HashSet (java.util.HashSet): 31
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 31
Value (org.apache.accumulo.core.data.Value): 31
IOException (java.io.IOException): 28
Scanner (org.apache.accumulo.core.client.Scanner): 28
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 27
Mutation (org.apache.accumulo.core.data.Mutation): 27
List (java.util.List): 26
Range (org.apache.accumulo.core.data.Range): 24
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 23