Use of org.apache.accumulo.server.fs.VolumeManager in project Accumulo by Apache.
The class CollectTabletStats, method main:
public static void main(String[] args) throws Exception {
  final CollectOptions opts = new CollectOptions();
  final ScannerOpts scanOpts = new ScannerOpts();
  opts.parseArgs(CollectTabletStats.class.getName(), args, scanOpts);
  String[] columnsTmp = new String[] {};
  if (opts.columns != null)
    columnsTmp = opts.columns.split(",");
  final String[] columns = columnsTmp;
  final VolumeManager fs = VolumeManagerImpl.get();
  Instance instance = opts.getInstance();
  final ServerConfigurationFactory sconf = new ServerConfigurationFactory(instance);
  Credentials creds = new Credentials(opts.getPrincipal(), opts.getToken());
  ClientContext context = new ClientContext(instance, creds, sconf.getSystemConfiguration());
  Table.ID tableId = Tables.getTableId(instance, opts.getTableName());
  if (tableId == null) {
    log.error("Unable to find table named {}", opts.getTableName());
    System.exit(-1);
  }
  TreeMap<KeyExtent, String> tabletLocations = new TreeMap<>();
  List<KeyExtent> candidates = findTablets(context, !opts.selectFarTablets, opts.getTableName(), tabletLocations);
  if (candidates.size() < opts.numThreads) {
    System.err.println("ERROR : Unable to find " + opts.numThreads + " " + (opts.selectFarTablets ? "far" : "local") + " tablets");
    System.exit(-1);
  }
  List<KeyExtent> tabletsToTest = selectRandomTablets(opts.numThreads, candidates);
  Map<KeyExtent, List<FileRef>> tabletFiles = new HashMap<>();
  for (KeyExtent ke : tabletsToTest) {
    List<FileRef> files = getTabletFiles(context, ke);
    tabletFiles.put(ke, files);
  }
  System.out.println();
  System.out.println("run location : " + InetAddress.getLocalHost().getHostName() + "/" + InetAddress.getLocalHost().getHostAddress());
  System.out.println("num threads : " + opts.numThreads);
  System.out.println("table : " + opts.getTableName());
  System.out.println("table id : " + tableId);
  for (KeyExtent ke : tabletsToTest) {
    System.out.println("\t *** Information about tablet " + ke.getUUID() + " *** ");
    System.out.println("\t\t# files in tablet : " + tabletFiles.get(ke).size());
    System.out.println("\t\ttablet location : " + tabletLocations.get(ke));
    reportHdfsBlockLocations(tabletFiles.get(ke));
  }
  // %n is a java.util.Formatter directive, so printf (not println) must interpret it.
  System.out.printf("%n*** RUNNING TEST ***%n");
  ExecutorService threadPool = Executors.newFixedThreadPool(opts.numThreads);
  // Pass 1: read the tablets' files directly from the volumes.
  for (int i = 0; i < opts.iterations; i++) {
    ArrayList<Test> tests = new ArrayList<>();
    for (final KeyExtent ke : tabletsToTest) {
      final List<FileRef> files = tabletFiles.get(ke);
      Test test = new Test(ke) {
        @Override
        public int runTest() throws Exception {
          return readFiles(fs, sconf.getSystemConfiguration(), files, ke, columns);
        }
      };
      tests.add(test);
    }
    runTest("read files", tests, opts.numThreads, threadPool);
  }
  // Pass 2: read the same files through the system iterator stack.
  for (int i = 0; i < opts.iterations; i++) {
    ArrayList<Test> tests = new ArrayList<>();
    for (final KeyExtent ke : tabletsToTest) {
      final List<FileRef> files = tabletFiles.get(ke);
      Test test = new Test(ke) {
        @Override
        public int runTest() throws Exception {
          return readFilesUsingIterStack(fs, sconf, files, opts.auths, ke, columns, false);
        }
      };
      tests.add(test);
    }
    runTest("read tablet files w/ system iter stack", tests, opts.numThreads, threadPool);
  }
  // Pass 3: read through the table iterator stack as well.
  for (int i = 0; i < opts.iterations; i++) {
    ArrayList<Test> tests = new ArrayList<>();
    for (final KeyExtent ke : tabletsToTest) {
      final List<FileRef> files = tabletFiles.get(ke);
      Test test = new Test(ke) {
        @Override
        public int runTest() throws Exception {
          return readFilesUsingIterStack(fs, sconf, files, opts.auths, ke, columns, true);
        }
      };
      tests.add(test);
    }
    runTest("read tablet files w/ table iter stack", tests, opts.numThreads, threadPool);
  }
  // Pass 4: scan the tablets through a live Accumulo connection.
  for (int i = 0; i < opts.iterations; i++) {
    ArrayList<Test> tests = new ArrayList<>();
    final Connector conn = opts.getConnector();
    for (final KeyExtent ke : tabletsToTest) {
      Test test = new Test(ke) {
        @Override
        public int runTest() throws Exception {
          return scanTablet(conn, opts.getTableName(), opts.auths, scanOpts.scanBatchSize, ke.getPrevEndRow(), ke.getEndRow(), columns);
        }
      };
      tests.add(test);
    }
    runTest("read tablet data through accumulo", tests, opts.numThreads, threadPool);
  }
  for (final KeyExtent ke : tabletsToTest) {
    final Connector conn = opts.getConnector();
    threadPool.submit(new Runnable() {
      @Override
      public void run() {
        try {
          calcTabletStats(conn, opts.getTableName(), opts.auths, scanOpts.scanBatchSize, ke, columns);
        } catch (Exception e) {
          log.error("Failed to calculate tablet stats.", e);
        }
      }
    });
  }
  threadPool.shutdown();
}
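The helper reportHdfsBlockLocations(files) invoked above is not shown here. As a rough illustration, a block-location report can be built with the plain Hadoop FileSystem API; the sketch below is an assumption about what such a report might look like (the real helper resolves paths through the VolumeManager), not the actual implementation.

import java.io.IOException;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch: print which datanodes host each block of each file.
public class BlockLocationSketch {
  static void report(FileSystem fs, Iterable<Path> files) throws IOException {
    for (Path file : files) {
      FileStatus status = fs.getFileStatus(file);
      // Ask the namenode for the block placements of the whole file.
      BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
      for (BlockLocation block : blocks) {
        System.out.println(file + " : " + String.join(",", block.getHosts()));
      }
    }
  }
}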
Use of org.apache.accumulo.server.fs.VolumeManager in project Accumulo by Apache.
The class MetadataTableUtil, method getBulkFilesLoaded:
public static Map<Long, ? extends Collection<FileRef>> getBulkFilesLoaded(ClientContext context, KeyExtent extent) throws IOException {
  Text metadataRow = extent.getMetadataEntry();
  Map<Long, List<FileRef>> result = new HashMap<>();
  VolumeManager fs = VolumeManagerImpl.get();
  try (Scanner scanner = new ScannerImpl(context, extent.isMeta() ? RootTable.ID : MetadataTable.ID, Authorizations.EMPTY)) {
    scanner.setRange(new Range(metadataRow));
    scanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key, Value> entry : scanner) {
      Long tid = Long.parseLong(entry.getValue().toString());
      List<FileRef> lst = result.get(tid);
      if (lst == null) {
        result.put(tid, lst = new ArrayList<>());
      }
      lst.add(new FileRef(fs, entry.getKey()));
    }
  }
  return result;
}
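The grouping loop in getBulkFilesLoaded predates Map.computeIfAbsent; the same bucket-by-transaction-id shape can be written more compactly on JDK 8+. A self-contained sketch with made-up data:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Group values under a parsed long key, creating each list on first use.
public class GroupByTid {
  public static void main(String[] args) {
    Map<Long, List<String>> result = new HashMap<>();
    String[][] entries = { { "42", "fileA" }, { "42", "fileB" }, { "7", "fileC" } };
    for (String[] e : entries) {
      long tid = Long.parseLong(e[0]);
      result.computeIfAbsent(tid, k -> new ArrayList<>()).add(e[1]);
    }
    System.out.println(result); // e.g. {7=[fileC], 42=[fileA, fileB]}
  }
}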
Use of org.apache.accumulo.server.fs.VolumeManager in project Accumulo by Apache.
The class MetadataTableUtil, method getFileAndLogEntries:
public static Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>> getFileAndLogEntries(ClientContext context, KeyExtent extent) throws KeeperException, InterruptedException, IOException {
  ArrayList<LogEntry> result = new ArrayList<>();
  TreeMap<FileRef, DataFileValue> sizes = new TreeMap<>();
  VolumeManager fs = VolumeManagerImpl.get();
  if (extent.isRootTablet()) {
    getRootLogEntries(result);
    Path rootDir = new Path(getRootTabletDir());
    FileStatus[] files = fs.listStatus(rootDir);
    for (FileStatus fileStatus : files) {
      if (fileStatus.getPath().toString().endsWith("_tmp")) {
        continue;
      }
      DataFileValue dfv = new DataFileValue(0, 0);
      sizes.put(new FileRef(fileStatus.getPath().toString(), fileStatus.getPath()), dfv);
    }
  } else {
    Table.ID systemTableToCheck = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
    try (Scanner scanner = new ScannerImpl(context, systemTableToCheck, Authorizations.EMPTY)) {
      scanner.fetchColumnFamily(LogColumnFamily.NAME);
      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
      scanner.setRange(extent.toMetadataRange());
      for (Entry<Key, Value> entry : scanner) {
        if (!entry.getKey().getRow().equals(extent.getMetadataEntry())) {
          throw new RuntimeException("Unexpected row " + entry.getKey().getRow() + " expected " + extent.getMetadataEntry());
        }
        if (entry.getKey().getColumnFamily().equals(LogColumnFamily.NAME)) {
          result.add(LogEntry.fromKeyValue(entry.getKey(), entry.getValue()));
        } else if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          DataFileValue dfv = new DataFileValue(entry.getValue().get());
          sizes.put(new FileRef(fs, entry.getKey()), dfv);
        } else {
          throw new RuntimeException("Unexpected col fam " + entry.getKey().getColumnFamily());
        }
      }
    }
  }
  return new Pair<>(result, sizes);
}
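The else branch shows a common metadata-scan shape: fetch a fixed set of column families, then dispatch on the family of each returned entry. Below is a hedged sketch of the same shape against an ordinary table through the public client API; the table and family names are placeholders, not the real metadata schema constants.

import java.util.Map;
import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.client.TableNotFoundException;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;

// Scan one table, restrict to two families, and branch per entry.
public class ScanDispatchSketch {
  static void scanAndDispatch(Connector conn, String table) throws TableNotFoundException {
    try (Scanner scanner = conn.createScanner(table, Authorizations.EMPTY)) {
      scanner.fetchColumnFamily(new Text("log"));
      scanner.fetchColumnFamily(new Text("file"));
      for (Map.Entry<Key, Value> entry : scanner) {
        String fam = entry.getKey().getColumnFamily().toString();
        if (fam.equals("log")) {
          // handle a write-ahead log entry
        } else if (fam.equals("file")) {
          // handle a data file entry
        }
      }
    }
  }
}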
Use of org.apache.accumulo.server.fs.VolumeManager in project Accumulo by Apache.
The class RandomizeVolumes, method randomize:
public static int randomize(Connector c, String tableName) throws IOException, AccumuloSecurityException, AccumuloException, TableNotFoundException {
  final VolumeManager vm = VolumeManagerImpl.get();
  if (vm.getVolumes().size() < 2) {
    log.error("There are not enough volumes configured");
    return 1;
  }
  String tblStr = c.tableOperations().tableIdMap().get(tableName);
  if (null == tblStr) {
    log.error("Could not determine the table ID for table {}", tableName);
    return 2;
  }
  Table.ID tableId = Table.ID.of(tblStr);
  TableState tableState = TableManager.getInstance().getTableState(tableId);
  if (TableState.OFFLINE != tableState) {
    log.info("Taking {} offline", tableName);
    c.tableOperations().offline(tableName, true);
    log.info("{} offline", tableName);
  }
  SimpleThreadPool pool = new SimpleThreadPool(50, "directory maker");
  log.info("Rewriting entries for {}", tableName);
  Scanner scanner = c.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  DIRECTORY_COLUMN.fetch(scanner);
  scanner.setRange(TabletsSection.getRange(tableId));
  BatchWriter writer = c.createBatchWriter(MetadataTable.NAME, null);
  int count = 0;
  for (Entry<Key, Value> entry : scanner) {
    String oldLocation = entry.getValue().toString();
    String directory;
    if (oldLocation.contains(":")) {
      String[] parts = oldLocation.split(Path.SEPARATOR);
      Table.ID tableIdEntry = Table.ID.of(parts[parts.length - 2]);
      if (!tableIdEntry.equals(tableId)) {
        log.error("Unexpected table id found: {}, expected {}; skipping", tableIdEntry, tableId);
        continue;
      }
      directory = parts[parts.length - 1];
    } else {
      directory = oldLocation.substring(Path.SEPARATOR.length());
    }
    Key key = entry.getKey();
    Mutation m = new Mutation(key.getRow());
    VolumeChooserEnvironment chooserEnv = new VolumeChooserEnvironment(tableId);
    final String newLocation = vm.choose(chooserEnv, ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR + directory;
    m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value(newLocation.getBytes(UTF_8)));
    if (log.isTraceEnabled()) {
      log.trace("Replacing {} with {}", oldLocation, newLocation);
    }
    writer.addMutation(m);
    pool.submit(new Runnable() {
      @Override
      public void run() {
        try {
          vm.mkdirs(new Path(newLocation));
        } catch (IOException ex) {
          // ignore; the mkdirs here is best-effort
        }
      }
    });
    count++;
  }
  writer.close();
  pool.shutdown();
  while (!pool.isTerminated()) {
    log.trace("Waiting for mkdir() calls to finish");
    try {
      pool.awaitTermination(5, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      break;
    }
  }
  log.info("Updated {} entries for table {}", count, tableName);
  if (TableState.OFFLINE != tableState) {
    c.tableOperations().online(tableName, true);
    log.info("table {} back online", tableName);
  }
  return 0;
}
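The subtlest part of the loop is recovering the trailing directory name from the old location, which may arrive either as a full URI or as a relative path. A standalone sketch of just that parsing step, with made-up paths:

import org.apache.hadoop.fs.Path;

// Extract the tablet directory name from either form of metadata value.
public class DirectoryNameSketch {
  public static void main(String[] args) {
    System.out.println(directoryOf("hdfs://nn:8020/accumulo/tables/2a/t-0001")); // t-0001
    System.out.println(directoryOf("/t-0001")); // t-0001
  }

  static String directoryOf(String oldLocation) {
    if (oldLocation.contains(":")) {
      // Full URI: the directory is the last path component.
      String[] parts = oldLocation.split(Path.SEPARATOR);
      return parts[parts.length - 1];
    }
    // Relative form: strip the leading separator.
    return oldLocation.substring(Path.SEPARATOR.length());
  }
}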
Use of org.apache.accumulo.server.fs.VolumeManager in project Accumulo by Apache.
The class ChangeSecret, method main:
public static void main(String[] args) throws Exception {
  VolumeManager fs = VolumeManagerImpl.get();
  verifyHdfsWritePermission(fs);
  Opts opts = new Opts();
  List<String> argsList = new ArrayList<>(args.length + 2);
  argsList.add("--old");
  argsList.add("--new");
  argsList.addAll(Arrays.asList(args));
  opts.parseArgs(ChangeSecret.class.getName(), argsList.toArray(new String[0]));
  Instance inst = opts.getInstance();
  verifyAccumuloIsDown(inst, opts.oldPass);
  final String newInstanceId = UUID.randomUUID().toString();
  updateHdfs(fs, inst, newInstanceId);
  rewriteZooKeeperInstance(inst, newInstanceId, opts.oldPass, opts.newPass);
  if (opts.oldPass != null) {
    deleteInstance(inst, opts.oldPass);
  }
  System.out.println("New instance id is " + newInstanceId);
  System.out.println("Be sure to put your new secret in accumulo-site.xml");
}
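verifyHdfsWritePermission(fs) is not shown in this snippet. One plausible way to probe write access with the stock Hadoop API is FileSystem.access, sketched below; this is an assumption about the helper's intent, not its actual implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

// Hypothetical write-permission probe; the path to check is up to the caller.
public class HdfsWriteCheckSketch {
  static void verifyWritable(Path dir) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    try {
      fs.access(dir, FsAction.WRITE); // throws if the current user cannot write
    } catch (AccessControlException e) {
      throw new IOException("No write permission on " + dir, e);
    }
  }
}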