Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.
From the class CollectTabletStats, method reportHdfsBlockLocations.
/**
 * Prints an HDFS block-location report for the given tablet files: for each file, the byte
 * offset of every block and the hosts that store a replica of it.
 *
 * @param context server context used to obtain the volume manager
 * @param files tablet files to report on
 * @throws Exception if file status or block locations cannot be read
 */
private static void reportHdfsBlockLocations(ServerContext context, List<TabletFile> files) throws Exception {
    VolumeManager fs = context.getVolumeManager();
    System.out.println("\t\tFile block report : ");
    for (TabletFile file : files) {
        FileStatus status = fs.getFileStatus(file.getPath());
        if (status.isDirectory()) {
            // assume it is a map file; report on the "data" file inside the directory.
            // Build the child path from file.getPath() (as done above) rather than relying
            // on TabletFile.toString() producing a valid path string.
            status = fs.getFileStatus(new Path(file.getPath(), "data"));
        }
        // Block locations must come from the filesystem that actually holds the file,
        // since a volume manager can span multiple namespaces/volumes.
        FileSystem ns = fs.getFileSystemByPath(file.getPath());
        BlockLocation[] locs = ns.getFileBlockLocations(status, 0, status.getLen());
        System.out.println("\t\t\tBlocks for : " + file);
        for (BlockLocation blockLocation : locs) {
            System.out.printf("\t\t\t\t offset : %,13d hosts :", blockLocation.getOffset());
            for (String host : blockLocation.getHosts()) {
                System.out.print(" " + host);
            }
            System.out.println();
        }
    }
    System.out.println();
}
Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.
From the class CollectTabletStats, method readFilesUsingIterStack.
/**
 * Reads all entries for a tablet by building a merged iterator stack directly over its files,
 * optionally layering the table-configured iterators on top, and counting the entries seen.
 *
 * @param fs volume manager used to resolve each file's filesystem
 * @param context server context supplying table/site configuration
 * @param files the tablet's files to read
 * @param auths authorizations applied by the scan-time iterator stack
 * @param ke extent whose row range bounds the seek
 * @param columns column families/columns to fetch; empty means all
 * @param useTableIterators whether to include the table's configured iterators in the stack
 * @return number of key/value entries iterated
 * @throws Exception if a file cannot be opened or read
 */
private static int readFilesUsingIterStack(VolumeManager fs, ServerContext context, List<TabletFile> files, Authorizations auths, KeyExtent ke, String[] columns, boolean useTableIterators) throws Exception {
    // Track the per-file readers separately so they can be closed when done; the
    // original code leaked these readers.
    List<FileSKVIterator> fileReaders = new ArrayList<>(files.size());
    try {
        for (TabletFile file : files) {
            FileSystem ns = fs.getFileSystemByPath(file.getPath());
            fileReaders.add(FileOperations.getInstance().newReaderBuilder().forFile(file.getPathStr(), ns, ns.getConf(), CryptoServiceFactory.newDefaultInstance()).withTableConfiguration(context.getConfiguration()).build());
        }
        List<SortedKeyValueIterator<Key, Value>> readers = new ArrayList<>(fileReaders);
        List<IterInfo> emptyIterinfo = Collections.emptyList();
        Map<String, Map<String, String>> emptySsio = Collections.emptyMap();
        TableConfiguration tconf = context.getTableConfiguration(ke.tableId());
        SortedKeyValueIterator<Key, Value> reader = createScanIterator(ke, readers, auths, new byte[] {}, new HashSet<>(), emptyIterinfo, emptySsio, useTableIterators, tconf);
        HashSet<ByteSequence> columnSet = createColumnBSS(columns);
        // Seek to the tablet's row range: exclusive of prevEndRow, inclusive of endRow.
        reader.seek(new Range(ke.prevEndRow(), false, ke.endRow(), true), columnSet, !columnSet.isEmpty());
        int count = 0;
        while (reader.hasTop()) {
            count++;
            reader.next();
        }
        return count;
    } finally {
        // Close every file reader that was successfully opened, even if the scan failed.
        for (FileSKVIterator fileReader : fileReaders) {
            fileReader.close();
        }
    }
}
Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.
From the class CollectTabletStats, method main.
/**
 * Entry point for the tablet statistics collection tool. Selects a set of tablets from the
 * requested table, reports their file and HDFS block layout, then times several read
 * strategies over multiple iterations: direct file reads, reads through the system iterator
 * stack, reads through the table iterator stack, and scans through a live Accumulo client.
 *
 * @param args command-line arguments parsed into {@code CollectOptions}
 * @throws Exception on any failure locating tablets or running the read tests
 */
public static void main(String[] args) throws Exception {
    final CollectOptions opts = new CollectOptions();
    opts.parseArgs(CollectTabletStats.class.getName(), args);
    String[] columnsTmp = {};
    if (opts.columns != null)
        columnsTmp = opts.columns.split(",");
    final String[] columns = columnsTmp;
    ServerContext context = opts.getServerContext();
    final VolumeManager fs = context.getVolumeManager();
    TableId tableId = context.getTableId(opts.tableName);
    if (tableId == null) {
        log.error("Unable to find table named {}", opts.tableName);
        System.exit(-1);
    }
    TreeMap<KeyExtent, String> tabletLocations = new TreeMap<>();
    // Candidate tablets are either local or far, depending on the options.
    List<KeyExtent> candidates = findTablets(context, !opts.selectFarTablets, opts.tableName, tabletLocations);
    if (candidates.size() < opts.numThreads) {
        System.err.println("ERROR : Unable to find " + opts.numThreads + " " + (opts.selectFarTablets ? "far" : "local") + " tablets");
        System.exit(-1);
    }
    // One tablet per worker thread.
    List<KeyExtent> tabletsToTest = selectRandomTablets(opts.numThreads, candidates);
    Map<KeyExtent, List<TabletFile>> tabletFiles = new HashMap<>();
    for (KeyExtent ke : tabletsToTest) {
        List<TabletFile> files = getTabletFiles(context, ke);
        tabletFiles.put(ke, files);
    }
    System.out.println();
    System.out.println("run location : " + InetAddress.getLocalHost().getHostName() + "/" + InetAddress.getLocalHost().getHostAddress());
    System.out.println("num threads : " + opts.numThreads);
    System.out.println("table : " + opts.tableName);
    System.out.println("table id : " + tableId);
    for (KeyExtent ke : tabletsToTest) {
        System.out.println("\t *** Information about tablet " + ke.getUUID() + " *** ");
        System.out.println("\t\t# files in tablet : " + tabletFiles.get(ke).size());
        System.out.println("\t\ttablet location : " + tabletLocations.get(ke));
        reportHdfsBlockLocations(context, tabletFiles.get(ke));
    }
    // Use printf so the %n format specifiers are interpreted as newlines;
    // println would print them literally.
    System.out.printf("%n*** RUNNING TEST ***%n");
    ExecutorService threadPool = Executors.newFixedThreadPool(opts.numThreads);
    // Test 1: read the tablet files directly.
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<TabletFile> files = tabletFiles.get(ke);
            Test test = new Test(ke) {
                @Override
                public int runTest() throws Exception {
                    return readFiles(fs, context.getConfiguration(), files, ke, columns);
                }
            };
            tests.add(test);
        }
        runTest("read files", tests, opts.numThreads, threadPool);
    }
    // Test 2: read through the system iterator stack (no table iterators).
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<TabletFile> files = tabletFiles.get(ke);
            Test test = new Test(ke) {
                @Override
                public int runTest() throws Exception {
                    return readFilesUsingIterStack(fs, context, files, opts.auths, ke, columns, false);
                }
            };
            tests.add(test);
        }
        runTest("read tablet files w/ system iter stack", tests, opts.numThreads, threadPool);
    }
    // Test 3: read through the full table iterator stack.
    for (int i = 0; i < opts.iterations; i++) {
        ArrayList<Test> tests = new ArrayList<>();
        for (final KeyExtent ke : tabletsToTest) {
            final List<TabletFile> files = tabletFiles.get(ke);
            Test test = new Test(ke) {
                @Override
                public int runTest() throws Exception {
                    return readFilesUsingIterStack(fs, context, files, opts.auths, ke, columns, true);
                }
            };
            tests.add(test);
        }
        runTest("read tablet files w/ table iter stack", tests, opts.numThreads, threadPool);
    }
    // Test 4: scan through a live Accumulo client, then compute per-tablet stats.
    try (AccumuloClient client = Accumulo.newClient().from(opts.getClientProps()).build()) {
        for (int i = 0; i < opts.iterations; i++) {
            ArrayList<Test> tests = new ArrayList<>();
            for (final KeyExtent ke : tabletsToTest) {
                Test test = new Test(ke) {
                    @Override
                    public int runTest() throws Exception {
                        return scanTablet(client, opts.tableName, opts.auths, ke.prevEndRow(), ke.endRow(), columns);
                    }
                };
                tests.add(test);
            }
            runTest("read tablet data through accumulo", tests, opts.numThreads, threadPool);
        }
        for (final KeyExtent ke : tabletsToTest) {
            threadPool.submit(() -> {
                try {
                    calcTabletStats(client, opts.tableName, opts.auths, ke, columns);
                } catch (Exception e) {
                    log.error("Failed to calculate tablet stats.", e);
                }
            });
        }
    }
    // Allow already-submitted stats tasks to finish; the JVM exits once the
    // pool's worker threads terminate.
    threadPool.shutdown();
}
Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.
From the class CollectTabletStats, method readFiles.
/**
 * Reads each tablet file directly (no merging iterator stack) and counts the entries that
 * fall within the tablet's row range.
 *
 * @param fs volume manager used to resolve each file's filesystem
 * @param aconf table configuration for the file readers
 * @param files the tablet's files to read
 * @param ke extent whose row range bounds the read
 * @param columns column families/columns to fetch; empty means all
 * @return total number of in-range entries across all files
 * @throws Exception if a file cannot be opened or read
 */
private static int readFiles(VolumeManager fs, AccumuloConfiguration aconf, List<TabletFile> files, KeyExtent ke, String[] columns) throws Exception {
    int count = 0;
    HashSet<ByteSequence> columnSet = createColumnBSS(columns);
    for (TabletFile file : files) {
        FileSystem ns = fs.getFileSystemByPath(file.getPath());
        FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder().forFile(file.getPathStr(), ns, ns.getConf(), CryptoServiceFactory.newDefaultInstance()).withTableConfiguration(aconf).build();
        // try/finally ensures the reader is closed even if seek/next throws;
        // the original leaked the reader on failure.
        try {
            // Tablet row range: exclusive of prevEndRow, inclusive of endRow.
            Range range = new Range(ke.prevEndRow(), false, ke.endRow(), true);
            reader.seek(range, columnSet, !columnSet.isEmpty());
            // A raw file reader does not stop at the range end, so check afterEndKey explicitly.
            while (reader.hasTop() && !range.afterEndKey(reader.getTopKey())) {
                count++;
                reader.next();
            }
        } finally {
            reader.close();
        }
    }
    return count;
}
Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.
From the class Tablet, method importMapFiles.
/**
 * Adds a set of bulk-imported files to this tablet as part of bulk-load transaction
 * {@code tid}. Files already imported for this transaction, or currently being imported by
 * another thread, are silently skipped. The surviving files are registered in the metadata
 * table via the datafile manager and the tablet may be scheduled for a split afterwards.
 *
 * @param tid bulk-load transaction id
 * @param fileMap files to import mapped to their estimated sizes; NOTE: this map is mutated
 *        (already-imported and in-progress files are removed from it)
 * @param setTime whether the datafile manager should assign logical times to the entries
 * @throws IOException if the tablet is closed or the tablet lock cannot be acquired in time
 */
public void importMapFiles(long tid, Map<TabletFile, MapFileInfo> fileMap, boolean setTime) throws IOException {
    Map<TabletFile, DataFileValue> entries = new HashMap<>(fileMap.size());
    List<String> files = new ArrayList<>();
    for (Entry<TabletFile, MapFileInfo> entry : fileMap.entrySet()) {
        entries.put(entry.getKey(), new DataFileValue(entry.getValue().estimatedSize, 0L));
        files.add(entry.getKey().getPathStr());
    }
    // Clients timeout and will think that this operation failed.
    // Don't do it if we spent too long waiting for the lock
    long now = System.currentTimeMillis();
    synchronized (this) {
        if (isClosed()) {
            throw new IOException("tablet " + extent + " is closed");
        }
        // TODO check seems unneeded now - ACCUMULO-1291
        long lockWait = System.currentTimeMillis() - now;
        if (lockWait > getTabletServer().getConfiguration().getTimeInMillis(Property.GENERAL_RPC_TIMEOUT)) {
            throw new IOException("Timeout waiting " + (lockWait / 1000.) + " seconds to get tablet lock for " + extent);
        }
        // Drop files this transaction already imported (e.g. a retried RPC).
        List<TabletFile> alreadyImported = bulkImported.get(tid);
        if (alreadyImported != null) {
            for (TabletFile entry : alreadyImported) {
                if (fileMap.remove(entry) != null) {
                    log.trace("Ignoring import of bulk file already imported: {}", entry);
                }
            }
        }
        // Drop files another thread is importing right now.
        fileMap.keySet().removeIf(file -> {
            if (bulkImporting.contains(file)) {
                log.info("Ignoring import of bulk file currently importing: " + file);
                return true;
            }
            return false;
        });
        // Nothing left to import; return before claiming any state, so the
        // finally block's bookkeeping below operates on an empty key set.
        if (fileMap.isEmpty()) {
            return;
        }
        incrementWritesInProgress();
        // prevent other threads from processing this file while its added to the metadata table.
        bulkImporting.addAll(fileMap.keySet());
    }
    try {
        tabletServer.updateBulkImportState(files, BulkImportState.LOADING);
        var storedTabletFile = getDatafileManager().importMapFiles(tid, entries, setTime);
        lastMapFileImportTime = System.currentTimeMillis();
        if (needsSplit()) {
            getTabletServer().executeSplit(this);
        } else {
            compactable.filesAdded(false, storedTabletFile);
        }
    } finally {
        synchronized (this) {
            decrementWritesInProgress();
            // fileMap still holds exactly the keys added to bulkImporting above
            // (nothing mutates it after that point), so removeAll must remove something.
            if (!bulkImporting.removeAll(fileMap.keySet())) {
                throw new AssertionError("Likely bug in code, always expect to remove something. Please open an Accumulo issue.");
            }
            try {
                // Record these files as imported for this transaction so retries skip them.
                bulkImported.computeIfAbsent(tid, k -> new ArrayList<>()).addAll(fileMap.keySet());
            } catch (Exception ex) {
                log.info(ex.toString(), ex);
            }
            tabletServer.removeBulkImportState(files);
        }
    }
}
Aggregations