Use of org.apache.accumulo.core.data.TableId in the Apache Accumulo project — class TableOperationsImpl, method clone.
@Override
public void clone(String srcTableName, String newTableName, CloneConfiguration config) throws AccumuloSecurityException, TableNotFoundException, AccumuloException, TableExistsException {
  NEW_TABLE_NAME.validate(newTableName);
  TableId srcTableId = context.getTableId(srcTableName);

  // Optionally flush the source table so the clone captures recent writes.
  if (config.isFlush()) {
    _flush(srcTableId, null, null, true);
  }

  // Treat absent exclusion/override collections as empty.
  Set<String> exclusions = config.getPropertiesToExclude();
  if (exclusions == null) {
    exclusions = Collections.emptySet();
  }
  Map<String, String> overrides = config.getPropertiesToSet();
  if (overrides == null) {
    overrides = Collections.emptyMap();
  }

  // Property overrides are passed straight through; exclusions are encoded
  // under CLONE_EXCLUDE_PREFIX, so override keys must not collide with it.
  Map<String, String> opts = new HashMap<>();
  for (Entry<String, String> override : overrides.entrySet()) {
    if (override.getKey().startsWith(CLONE_EXCLUDE_PREFIX))
      throw new IllegalArgumentException("Property can not start with " + CLONE_EXCLUDE_PREFIX);
    opts.put(override.getKey(), override.getValue());
  }
  for (String excluded : exclusions) {
    opts.put(CLONE_EXCLUDE_PREFIX + excluded, "");
  }

  // FATE arguments: source table id, new table name, keep-offline flag.
  List<ByteBuffer> args = Arrays.asList(
      ByteBuffer.wrap(srcTableId.canonical().getBytes(UTF_8)),
      ByteBuffer.wrap(newTableName.getBytes(UTF_8)),
      ByteBuffer.wrap(Boolean.toString(config.isKeepOffline()).getBytes(UTF_8)));

  doTableFateOperation(newTableName, AccumuloException.class, FateOperation.TABLE_CLONE, args, opts);
}
Use of org.apache.accumulo.core.data.TableId in the Apache Accumulo project — class ReplicationOperationsImpl, method referencedFiles.
@Override
public Set<String> referencedFiles(String tableName) throws TableNotFoundException {
  log.debug("Collecting referenced files for replication of table {}", tableName);
  TableId tableId = context.getTableId(tableName);
  log.debug("Found id of {} for name {}", tableId, tableName);

  Set<String> wals = new HashSet<>();

  // WALs currently referenced by the table's tablets. try-with-resources
  // closes the scanner even if setRanges/fetchColumnFamily throws; the
  // previous manual try/finally only covered the iteration, leaking the
  // scanner on a configuration failure.
  try (BatchScanner metaBs = context.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4)) {
    metaBs.setRanges(Collections.singleton(TabletsSection.getRange(tableId)));
    metaBs.fetchColumnFamily(LogColumnFamily.NAME);
    for (Entry<Key, Value> entry : metaBs) {
      LogEntry logEntry = LogEntry.fromMetaWalEntry(entry);
      wals.add(new Path(logEntry.filename).toString());
    }
  }

  // WALs queued in the replication section that still need to be replicated
  // for this table; keys for other tables are skipped.
  try (BatchScanner metaBs = context.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4)) {
    metaBs.setRanges(Collections.singleton(ReplicationSection.getRange()));
    metaBs.fetchColumnFamily(ReplicationSection.COLF);
    Text buffer = new Text();
    for (Entry<Key, Value> entry : metaBs) {
      if (tableId.equals(ReplicationSection.getTableId(entry.getKey()))) {
        ReplicationSection.getFile(entry.getKey(), buffer);
        wals.add(buffer.toString());
      }
    }
  }
  return wals;
}
Use of org.apache.accumulo.core.data.TableId in the Apache Accumulo project — class KeyExtent, method fromThrift.
/**
 * Create a KeyExtent from its Thrift form.
 *
 * @param tke
 *          the KeyExtent in its Thrift object form
 */
public static KeyExtent fromThrift(TKeyExtent tke) {
  // The table id travels over Thrift as raw UTF-8 bytes.
  TableId tableId = TableId.of(new String(ByteBufferUtil.toBytes(tke.table), UTF_8));
  // Row boundaries are optional: null marks an unbounded edge of the extent.
  Text endRow = null;
  if (tke.endRow != null) {
    endRow = new Text(ByteBufferUtil.toBytes(tke.endRow));
  }
  Text prevEndRow = null;
  if (tke.prevEndRow != null) {
    prevEndRow = new Text(ByteBufferUtil.toBytes(tke.prevEndRow));
  }
  return new KeyExtent(tableId, endRow, prevEndRow);
}
Use of org.apache.accumulo.core.data.TableId in the Apache Accumulo project — class SimpleLoadBalancer, method move.
/**
 * Select a tablet based on differences between table loads; if the loads are even, use the
 * busiest table.
 *
 * @param tooMuch server carrying the surplus of tablets (migration source)
 * @param tooLittle server carrying the deficit of tablets (migration destination)
 * @param count maximum number of migrations to propose
 * @param donerTabletStats per-table cache of the donor server's tablet statistics; populated
 *        lazily here and mutated as tablets are chosen, so repeated calls can reuse it
 */
List<TabletMigration> move(ServerCounts tooMuch, ServerCounts tooLittle, int count, Map<TableId, Map<TabletId, TabletStatistics>> donerTabletStats) {
if (count == 0) {
return Collections.emptyList();
}
List<TabletMigration> result = new ArrayList<>();
// Copy counts so we can update them as we propose migrations
Map<TableId, Integer> tooMuchMap = tabletCountsPerTable(tooMuch.status);
Map<TableId, Integer> tooLittleMap = tabletCountsPerTable(tooLittle.status);
for (int i = 0; i < count; i++) {
TableId table;
Integer tooLittleCount;
if (tableToBalance == null) {
// find a table to migrate
// look for an uneven table count
int biggestDifference = 0;
TableId biggestDifferenceTable = null;
for (var tableEntry : tooMuchMap.entrySet()) {
TableId tableID = tableEntry.getKey();
tooLittleMap.putIfAbsent(tableID, 0);
int diff = tableEntry.getValue() - tooLittleMap.get(tableID);
if (diff > biggestDifference) {
biggestDifference = diff;
biggestDifferenceTable = tableID;
}
}
// A spread of 0 or 1 tablets is as even as it can get; in that case fall
// back to moving a tablet from the table with the most tablets overall.
if (biggestDifference < 2) {
table = busiest(tooMuch.status.getTableMap());
} else {
table = biggestDifferenceTable;
}
} else {
// just balance the given table
table = tableToBalance;
}
Map<TabletId, TabletStatistics> onlineTabletsForTable = donerTabletStats.get(table);
try {
if (onlineTabletsForTable == null) {
// Cache miss: fetch the donor server's tablet stats for this table once.
onlineTabletsForTable = new HashMap<>();
List<TabletStatistics> stats = getOnlineTabletsForTable(tooMuch.server, table);
if (stats == null) {
log.warn("Unable to find tablets to move");
return result;
}
for (TabletStatistics stat : stats) onlineTabletsForTable.put(stat.getTabletId(), stat);
donerTabletStats.put(table, onlineTabletsForTable);
}
} catch (Exception ex) {
log.error("Unable to select a tablet to move", ex);
return result;
}
TabletId tabletId = selectTablet(onlineTabletsForTable);
// Remove the chosen tablet so a later iteration cannot pick it again.
// NOTE(review): remove() runs before the null check; a HashMap tolerates a
// null key as a no-op — confirm the cached map type does too.
onlineTabletsForTable.remove(tabletId);
if (tabletId == null)
return result;
// Keep the working per-table counts in sync with the proposed migration.
tooMuchMap.put(table, tooMuchMap.get(table) - 1);
/*
 * If a table grows from 1 tablet then tooLittleMap.get(table) can return a null, since there
 * is only one tabletserver that holds all of the tablets. Here we check to see if in fact
 * that is the case and if so set the value to 0.
 */
tooLittleCount = tooLittleMap.get(table);
if (tooLittleCount == null) {
tooLittleCount = 0;
}
tooLittleMap.put(table, tooLittleCount + 1);
tooMuch.count--;
tooLittle.count++;
result.add(new TabletMigration(tabletId, tooMuch.server, tooLittle.server));
}
return result;
}
Use of org.apache.accumulo.core.data.TableId in the Apache Accumulo project — class TableLoadBalancer, method getAssignments.
@Override
public void getAssignments(AssignmentParameters params) {
  // Bucket the unassigned tablets by their table id.
  Map<TableId, Map<TabletId, TabletServerId>> groupedUnassigned = new HashMap<>();
  params.unassignedTablets().forEach((tid, lastTserver) -> groupedUnassigned
      .computeIfAbsent(tid.getTable(), k -> new HashMap<>()).put(tid, lastTserver));

  // Delegate each table's tablets to that table's own balancer, then forward
  // whatever assignments it produced back to the caller.
  groupedUnassigned.forEach((tableId, tablets) -> {
    Map<TabletId, TabletServerId> newAssignments = new HashMap<>();
    getBalancerForTable(tableId)
        .getAssignments(new AssignmentParamsImpl(params.currentStatus(), tablets, newAssignments));
    newAssignments.forEach(params::addAssignment);
  });
}
Aggregations