Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class BulkImporter, method assignMapFiles.
private List<KeyExtent> assignMapFiles(ClientContext context, HostAndPort location,
    Map<KeyExtent,List<PathSize>> assignmentsPerTablet)
    throws AccumuloException, AccumuloSecurityException {
  try {
    long timeInMillis = context.getConfiguration().getTimeInMillis(Property.TSERV_BULK_TIMEOUT);
    TabletClientService.Iface client = ThriftUtil.getTServerClient(location, context, timeInMillis);
    try {
      HashMap<KeyExtent,Map<String,org.apache.accumulo.core.dataImpl.thrift.MapFileInfo>> files =
          new HashMap<>();
      for (Entry<KeyExtent,List<PathSize>> entry : assignmentsPerTablet.entrySet()) {
        HashMap<String,org.apache.accumulo.core.dataImpl.thrift.MapFileInfo> tabletFiles =
            new HashMap<>();
        files.put(entry.getKey(), tabletFiles);
        for (PathSize pathSize : entry.getValue()) {
          org.apache.accumulo.core.dataImpl.thrift.MapFileInfo mfi =
              new org.apache.accumulo.core.dataImpl.thrift.MapFileInfo(pathSize.estSize);
          tabletFiles.put(pathSize.path.toString(), mfi);
        }
      }
      log.debug("Asking {} to bulk load {}", location, files);
      List<TKeyExtent> failures = client.bulkImport(TraceUtil.traceInfo(), context.rpcCreds(), tid,
          files.entrySet().stream()
              .collect(Collectors.toMap(entry -> entry.getKey().toThrift(), Entry::getValue)),
          setTime);
      return failures.stream().map(KeyExtent::fromThrift).collect(Collectors.toList());
    } finally {
      ThriftUtil.returnClient((TServiceClient) client, context);
    }
  } catch (ThriftSecurityException e) {
    throw new AccumuloSecurityException(e.user, e.code, e);
  } catch (Exception t) {
    log.error("Encountered unknown exception in assignMapFiles.", t);
    throw new AccumuloException(t);
  }
}
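The conversion buried in the long bulkImport call above is easy to miss: the RPC wants tablets keyed by the thrift type TKeyExtent, not by KeyExtent, and the failure list comes back in thrift form as well. Below is a minimal, self-contained sketch of just that round trip, using only the toThrift/fromThrift calls already shown above; the table id "4", the split row, and the file name are hypothetical placeholders.

import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent;
import org.apache.hadoop.io.Text;

public class KeyExtentThriftSketch {
  public static void main(String[] args) {
    // One tablet of table "4" covering rows up to and including "m"; a null
    // prevEndRow means this is the first tablet of the table.
    Map<KeyExtent,String> byExtent = new HashMap<>();
    byExtent.put(new KeyExtent(TableId.of("4"), new Text("m"), null), "hypothetical-file.rf");

    // Re-key the map by the thrift form, as the bulkImport call above does.
    Map<TKeyExtent,String> byThrift = byExtent.entrySet().stream()
        .collect(Collectors.toMap(e -> e.getKey().toThrift(), Map.Entry::getValue));

    // Round-trip the keys back, as the failure handling above does.
    byThrift.keySet().forEach(tke -> System.out.println(KeyExtent.fromThrift(tke)));
  }
}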
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class TableDiskUsage, method getDiskUsage.
public static Map<TreeSet<String>,Long> getDiskUsage(Set<TableId> tableIds, VolumeManager fs,
    AccumuloClient client) throws IOException {
  TableDiskUsage tdu = new TableDiskUsage();
  // Add each table ID
  for (TableId tableId : tableIds)
    tdu.addTable(tableId);
  HashSet<TableId> tablesReferenced = new HashSet<>(tableIds);
  HashSet<TableId> emptyTableIds = new HashSet<>();
  HashSet<String> nameSpacesReferenced = new HashSet<>();
  // For each table ID
  for (TableId tableId : tableIds) {
    Scanner mdScanner;
    try {
      mdScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    } catch (TableNotFoundException e) {
      throw new RuntimeException(e);
    }
    mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    mdScanner.setRange(new KeyExtent(tableId, null, null).toMetaRange());
    if (!mdScanner.iterator().hasNext()) {
      emptyTableIds.add(tableId);
    }
    // Read each file referenced by that table
    for (Entry<Key,Value> entry : mdScanner) {
      String file = entry.getKey().getColumnQualifier().toString();
      String[] parts = file.split("/");
      // the filename
      String uniqueName = parts[parts.length - 1];
      if (file.contains(":") || file.startsWith("../")) {
        String ref = parts[parts.length - 3];
        // Track any tables referenced externally by the current table
        if (!ref.equals(tableId.canonical())) {
          tablesReferenced.add(TableId.of(ref));
        }
        if (file.contains(":") && parts.length > 3) {
          List<String> base = Arrays.asList(Arrays.copyOf(parts, parts.length - 3));
          nameSpacesReferenced.add(Joiner.on("/").join(base));
        }
      }
      // Add this file to this table
      tdu.linkFileAndTable(tableId, uniqueName);
    }
  }
  // For each table seen (provided by the user, or referenced by a table the user provided)
  for (TableId tableId : tablesReferenced) {
    for (String tableDir : nameSpacesReferenced) {
      // Find each file and add its size
      Path path = new Path(tableDir + "/" + tableId);
      if (!fs.exists(path)) {
        log.debug("Table ID directory {} does not exist.", path);
        continue;
      }
      log.info("Get all files recursively in {}", path);
      RemoteIterator<LocatedFileStatus> ri = fs.listFiles(path, true);
      while (ri.hasNext()) {
        FileStatus status = ri.next();
        String name = status.getPath().getName();
        tdu.addFileSize(name, status.getLen());
      }
    }
  }
  Map<TableId,String> reverseTableIdMap = ((ClientContext) client).getTableIdToNameMap();
  // Order table-name sets lexicographically, with shorter sets first on ties
  TreeMap<TreeSet<String>,Long> usage = new TreeMap<>((o1, o2) -> {
    int len1 = o1.size();
    int len2 = o2.size();
    int min = Math.min(len1, len2);
    Iterator<String> iter1 = o1.iterator();
    Iterator<String> iter2 = o2.iterator();
    int count = 0;
    while (count < min) {
      String s1 = iter1.next();
      String s2 = iter2.next();
      int cmp = s1.compareTo(s2);
      if (cmp != 0)
        return cmp;
      count++;
    }
    return len1 - len2;
  });
  for (Entry<List<TableId>,Long> entry : tdu.calculateUsage().entrySet()) {
    TreeSet<String> tableNames = new TreeSet<>();
    // Convert the size shared by each table ID into the size shared by each table name
    for (TableId tableId : entry.getKey())
      tableNames.add(reverseTableIdMap.get(tableId));
    // Map the set of table names to the shared file size
    usage.put(tableNames, entry.getValue());
  }
  if (!emptyTableIds.isEmpty()) {
    TreeSet<String> emptyTables = new TreeSet<>();
    for (TableId tableId : emptyTableIds) {
      emptyTables.add(reverseTableIdMap.get(tableId));
    }
    usage.put(emptyTables, 0L);
  }
  return usage;
}
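The setRange call above is the idiomatic way to restrict a metadata scan to one table: a KeyExtent built with null end row and null previous end row stands for the entire table, and toMetaRange turns it into the row range covering that table's tablet entries. A minimal sketch of just that construction, using only the KeyExtent and TableId calls shown above; the table id "2" is made up.

import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;

public class MetaRangeSketch {
  public static void main(String[] args) {
    // (null, null) bounds mean "the whole table", so the resulting range
    // spans every tablet row of table "2" in the metadata table.
    Range range = new KeyExtent(TableId.of("2"), null, null).toMetaRange();
    System.out.println(range);
  }
}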
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class VerifyTabletAssignments, method checkTable.
private static void checkTable(final ClientContext context, final Opts opts, String tableName,
    HashSet<KeyExtent> check) throws AccumuloException, AccumuloSecurityException,
    TableNotFoundException, InterruptedException {
  if (check == null)
    System.out.println("Checking table " + tableName);
  else
    System.out.println("Checking table " + tableName + " again, failures " + check.size());
  TreeMap<KeyExtent,String> tabletLocations = new TreeMap<>();
  TableId tableId = context.getTableNameToIdMap().get(tableName);
  MetadataServicer.forTableId(context, tableId).getTabletLocations(tabletLocations);
  final HashSet<KeyExtent> failures = new HashSet<>();
  Map<HostAndPort,List<KeyExtent>> extentsPerServer = new TreeMap<>();
  for (Entry<KeyExtent,String> entry : tabletLocations.entrySet()) {
    KeyExtent keyExtent = entry.getKey();
    String loc = entry.getValue();
    if (loc == null)
      System.out.println(" Tablet " + keyExtent + " has no location");
    else if (opts.verbose)
      System.out.println(" Tablet " + keyExtent + " is located at " + loc);
    if (loc != null) {
      final HostAndPort parsedLoc = HostAndPort.fromString(loc);
      List<KeyExtent> extentList =
          extentsPerServer.computeIfAbsent(parsedLoc, k -> new ArrayList<>());
      if (check == null || check.contains(keyExtent))
        extentList.add(keyExtent);
    }
  }
  ExecutorService tp = ThreadPools.createFixedThreadPool(20, "CheckTabletServer", false);
  for (final Entry<HostAndPort,List<KeyExtent>> entry : extentsPerServer.entrySet()) {
    Runnable r = () -> {
      try {
        checkTabletServer(context, entry, failures);
      } catch (Exception e) {
        log.error("Failure on tablet server '" + entry.getKey() + "'.", e);
        failures.addAll(entry.getValue());
      }
    };
    tp.execute(r);
  }
  tp.shutdown();
  while (!tp.awaitTermination(1, TimeUnit.HOURS)) {
  }
  if (!failures.isEmpty())
    checkTable(context, opts, tableName, failures);
}
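The grouping step above is a common pattern worth isolating: bucket each KeyExtent under its assigned server with computeIfAbsent so that every server receives one batch. A minimal sketch of just that step; the host string and the two extents (which split a hypothetical table "7" at row "g") are made up.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.hadoop.io.Text;

public class GroupByServerSketch {
  public static void main(String[] args) {
    // Two tablets of table "7", split at row "g": (null, "g"] and ("g", null].
    KeyExtent first = new KeyExtent(TableId.of("7"), new Text("g"), null);
    KeyExtent last = new KeyExtent(TableId.of("7"), null, new Text("g"));

    // computeIfAbsent creates the per-server list on first use, as above.
    Map<String,List<KeyExtent>> extentsPerServer = new HashMap<>();
    extentsPerServer.computeIfAbsent("host1:9997", k -> new ArrayList<>()).add(first);
    extentsPerServer.computeIfAbsent("host1:9997", k -> new ArrayList<>()).add(last);

    extentsPerServer.forEach((loc, extents) -> System.out.println(loc + " -> " + extents));
  }
}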
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class FindOfflineTablets, method findOffline.
static int findOffline(ServerContext context, String tableName) throws TableNotFoundException {
  final AtomicBoolean scanning = new AtomicBoolean(false);
  LiveTServerSet tservers = new LiveTServerSet(context, new Listener() {
    @Override
    public void update(LiveTServerSet current, Set<TServerInstance> deleted,
        Set<TServerInstance> added) {
      if (!deleted.isEmpty() && scanning.get())
        log.warn("Tablet servers deleted while scanning: {}", deleted);
      if (!added.isEmpty() && scanning.get())
        log.warn("Tablet servers added while scanning: {}", added);
    }
  });
  tservers.startListeningForTabletServerChanges();
  scanning.set(true);
  Iterator<TabletLocationState> zooScanner =
      TabletStateStore.getStoreForLevel(DataLevel.ROOT, context).iterator();
  int offline = 0;
  System.out.println("Scanning zookeeper");
  if ((offline = checkTablets(context, zooScanner, tservers)) > 0)
    return offline;
  if (RootTable.NAME.equals(tableName))
    return 0;
  System.out.println("Scanning " + RootTable.NAME);
  Iterator<TabletLocationState> rootScanner =
      new MetaDataTableScanner(context, TabletsSection.getRange(), RootTable.NAME);
  if ((offline = checkTablets(context, rootScanner, tservers)) > 0)
    return offline;
  if (MetadataTable.NAME.equals(tableName))
    return 0;
  System.out.println("Scanning " + MetadataTable.NAME);
  Range range = TabletsSection.getRange();
  if (tableName != null) {
    TableId tableId = context.getTableId(tableName);
    range = new KeyExtent(tableId, null, null).toMetaRange();
  }
  try (MetaDataTableScanner metaScanner =
      new MetaDataTableScanner(context, range, MetadataTable.NAME)) {
    return checkTablets(context, metaScanner, tservers);
  }
}
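The range selection near the end is the only place findOffline touches KeyExtent: scan every tablet entry via TabletsSection.getRange(), or narrow to a single table's tablets with toMetaRange. A small sketch of the two choices, assuming only the classes already used in the method above; the table id "3" is hypothetical.

import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;

public class ScanRangeSketch {
  public static void main(String[] args) {
    // Every tablet entry in the metadata table.
    Range allTablets = TabletsSection.getRange();
    // Only the tablet entries belonging to table "3".
    Range oneTable = new KeyExtent(TableId.of("3"), null, null).toMetaRange();
    System.out.println(allTablets);
    System.out.println(oneTable);
  }
}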
Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.
The class HostRegexTableLoadBalancerTest, method testBalance.
@Test
public void testBalance() {
  init();
  Set<KeyExtent> migrations = new HashSet<>();
  List<TabletMigration> migrationsOut = new ArrayList<>();
  long wait = this.balance(Collections.unmodifiableSortedMap(createCurrent(15)), migrations,
      migrationsOut);
  assertEquals(20000, wait);
  // should balance four tablets in one of the tables before reaching max
  assertEquals(4, migrationsOut.size());
  // now balance again passing in the new migrations
  for (TabletMigration m : migrationsOut) {
    migrations.add(m.tablet);
  }
  migrationsOut.clear();
  wait = this.balance(Collections.unmodifiableSortedMap(createCurrent(15)), migrations,
      migrationsOut);
  assertEquals(20000, wait);
  // should balance four tablets in one of the other tables before reaching max
  assertEquals(4, migrationsOut.size());
  // now balance again passing in the new migrations
  for (TabletMigration m : migrationsOut) {
    migrations.add(m.tablet);
  }
  migrationsOut.clear();
  wait = this.balance(Collections.unmodifiableSortedMap(createCurrent(15)), migrations,
      migrationsOut);
  assertEquals(20000, wait);
  // should balance four tablets in one of the other tables before reaching max
  assertEquals(4, migrationsOut.size());
  // now balance again passing in the new migrations
  for (TabletMigration m : migrationsOut) {
    migrations.add(m.tablet);
  }
  migrationsOut.clear();
  wait = this.balance(Collections.unmodifiableSortedMap(createCurrent(15)), migrations,
      migrationsOut);
  assertEquals(20000, wait);
  // no more balancing to do
  assertEquals(0, migrationsOut.size());
}
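The test leans on KeyExtent being a value object: accumulating m.tablet into the migrations HashSet across rounds only deduplicates correctly because KeyExtent defines equals and hashCode over its table id and row bounds. A minimal sketch of that property; the table id "9" and the row are made up.

import java.util.HashSet;
import java.util.Set;

import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.hadoop.io.Text;

public class MigrationSetSketch {
  public static void main(String[] args) {
    Set<KeyExtent> migrations = new HashSet<>();
    migrations.add(new KeyExtent(TableId.of("9"), new Text("k"), null));
    migrations.add(new KeyExtent(TableId.of("9"), new Text("k"), null)); // same tablet again
    System.out.println(migrations.size()); // prints 1: the duplicate collapses
  }
}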