Use of org.apache.accumulo.core.file.FileSKVIterator in the Apache Accumulo project — class Tablet, method getFirstAndLastKeys.
/**
 * Reads the first and last key of every tablet file in the given map.
 *
 * @param allFiles tablet files to inspect (only the keys of the map are used)
 * @return a map from each file to a (firstKey, lastKey) pair read from that file
 * @throws IOException if a file cannot be opened or read
 */
private Map<FileRef, Pair<Key, Key>> getFirstAndLastKeys(SortedMap<FileRef, DataFileValue> allFiles) throws IOException {
  Map<FileRef, Pair<Key, Key>> firstAndLastKeys = new HashMap<>();
  FileOperations fileFactory = FileOperations.getInstance();
  VolumeManager vm = getTabletServer().getFileSystem();
  for (FileRef fileRef : allFiles.keySet()) {
    FileSystem ns = vm.getVolumeByPath(fileRef.path()).getFileSystem();
    // try-with-resources guarantees the reader is closed even if a read fails
    try (FileSKVIterator reader = fileFactory.newReaderBuilder()
        .forFile(fileRef.path().toString(), ns, ns.getConf())
        .withTableConfiguration(this.getTableConfiguration())
        .seekToBeginning()
        .build()) {
      firstAndLastKeys.put(fileRef, new Pair<>(reader.getFirstKey(), reader.getLastKey()));
    }
  }
  return firstAndLastKeys;
}
Use of org.apache.accumulo.core.file.FileSKVIterator in the Apache Accumulo project — class CollectTabletStats, method readFiles.
/**
 * Counts the entries falling within the extent's row range across the given files.
 *
 * @param fs volume manager used to resolve each file's filesystem
 * @param aconf table configuration used when opening readers
 * @param files files to scan
 * @param ke extent whose (prevEndRow, endRow] range bounds the scan
 * @param columns column names used to build the column-family filter; an empty
 *        array means no column filtering
 * @return total number of entries seen within the range, summed over all files
 * @throws Exception if a file cannot be opened or read
 */
private static int readFiles(VolumeManager fs, AccumuloConfiguration aconf, List<FileRef> files, KeyExtent ke, String[] columns) throws Exception {
  int count = 0;
  HashSet<ByteSequence> columnSet = createColumnBSS(columns);
  for (FileRef file : files) {
    FileSystem ns = fs.getVolumeByPath(file.path()).getFileSystem();
    // try-with-resources: the original leaked the reader when seek() or next() threw
    try (FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
        .forFile(file.path().toString(), ns, ns.getConf())
        .withTableConfiguration(aconf)
        .build()) {
      Range range = new Range(ke.getPrevEndRow(), false, ke.getEndRow(), true);
      // only enable column-family filtering when columns were actually requested
      reader.seek(range, columnSet, !columnSet.isEmpty());
      while (reader.hasTop() && !range.afterEndKey(reader.getTopKey())) {
        count++;
        reader.next();
      }
    }
  }
  return count;
}
Use of org.apache.accumulo.core.file.FileSKVIterator in the Apache Accumulo project — class MockTableOperations, method importDirectory.
/**
 * Imports every file under {@code dir} into the named mock table, converting each
 * key/value into a mutation. Files that fail to import are copied into
 * {@code failureDir}; every processed file is deleted from the import directory.
 *
 * @param tableName name of the mock table to import into
 * @param dir directory containing the files to import (must be a directory)
 * @param failureDir writable, empty directory that receives copies of failed files
 * @param setTime if true, overwrite each key's timestamp with the current time
 * @throws IOException if either path is not a usable directory, the failure
 *         directory is not writable or not empty, or a copy fails
 * @throws TableNotFoundException if the table does not exist
 */
@Override
public void importDirectory(String tableName, String dir, String failureDir, boolean setTime) throws IOException, AccumuloException, AccumuloSecurityException, TableNotFoundException {
  long time = System.currentTimeMillis();
  MockTable table = acu.tables.get(tableName);
  if (table == null) {
    throw new TableNotFoundException(null, tableName, "The table was not found");
  }
  Path importPath = new Path(dir);
  Path failurePath = new Path(failureDir);
  FileSystem fs = acu.getFileSystem();
  // both paths must be directories
  if (fs.isFile(importPath)) {
    throw new IOException("Import path must be a directory.");
  }
  if (fs.isFile(failurePath)) {
    throw new IOException("Failure path must be a directory.");
  }
  // probe that the failure directory is writable by creating and removing a marker file
  Path createPath = failurePath.suffix("/.createFile");
  try (FSDataOutputStream createStream = fs.create(createPath)) {
    // nothing to write; creation alone proves writability
  } catch (IOException e) {
    // preserve the underlying cause instead of discarding it
    throw new IOException("Error path is not writable.", e);
  }
  fs.delete(createPath, false);
  // the failure directory must be empty
  FileStatus[] failureChildStats = fs.listStatus(failurePath);
  if (failureChildStats.length > 0) {
    throw new IOException("Error path must be empty.");
  }
  /*
   * Begin the import - iterate the files in the path
   */
  for (FileStatus importStatus : fs.listStatus(importPath)) {
    // try-with-resources: the original never closed importIterator, leaking a
    // reader per file on both the success and failure paths
    try (FileSKVIterator importIterator = FileOperations.getInstance().newReaderBuilder()
        .forFile(importStatus.getPath().toString(), fs, fs.getConf())
        .withTableConfiguration(DefaultConfiguration.getInstance())
        .seekToBeginning()
        .build()) {
      while (importIterator.hasTop()) {
        Key key = importIterator.getTopKey();
        Value value = importIterator.getTopValue();
        if (setTime) {
          key.setTimestamp(time);
        }
        Mutation mutation = new Mutation(key.getRow());
        if (!key.isDeleted()) {
          mutation.put(key.getColumnFamily(), key.getColumnQualifier(), new ColumnVisibility(key.getColumnVisibilityData().toArray()), key.getTimestamp(), value);
        } else {
          mutation.putDelete(key.getColumnFamily(), key.getColumnQualifier(), new ColumnVisibility(key.getColumnVisibilityData().toArray()), key.getTimestamp());
        }
        table.addMutation(mutation);
        importIterator.next();
      }
    } catch (Exception e) {
      // import failed: copy the offending file into the failure directory
      copyToFailureDir(fs, importStatus, failurePath);
    }
    fs.delete(importStatus.getPath(), true);
  }
}

/** Copies a file that failed to import into the failure directory, byte for byte. */
private static void copyToFailureDir(FileSystem fs, FileStatus importStatus, Path failurePath) throws IOException {
  try (FSDataOutputStream failureWriter = fs.create(failurePath.suffix("/" + importStatus.getPath().getName()));
      DataInputStream failureReader = fs.open(importStatus.getPath())) {
    byte[] buffer = new byte[1024];
    int read;
    while (-1 != (read = failureReader.read(buffer))) {
      failureWriter.write(buffer, 0, read);
    }
  }
}
Use of org.apache.accumulo.core.file.FileSKVIterator in the Apache Accumulo project — class FileUtil, method findLastKey.
/**
 * Finds the greatest last key across the given map files.
 *
 * @param fs volume manager used to resolve each file's filesystem
 * @param acuConf table configuration used when opening readers
 * @param mapFiles files to inspect; empty files are skipped
 * @return the maximum last key over all non-empty files, or null if every file is empty
 * @throws IOException if a file cannot be opened or read
 */
public static WritableComparable<Key> findLastKey(VolumeManager fs, AccumuloConfiguration acuConf, Collection<FileRef> mapFiles) throws IOException {
  Key lastKey = null;
  for (FileRef ref : mapFiles) {
    Path path = ref.path();
    FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
    FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder()
        .forFile(path.toString(), ns, ns.getConf())
        .withTableConfiguration(acuConf)
        .seekToBeginning()
        .build();
    try {
      if (!reader.hasTop()) {
        // file is empty, so there is no last key
        continue;
      }
      Key key = reader.getLastKey();
      if (lastKey == null || key.compareTo(lastKey) > 0) {
        lastKey = key;
      }
    } finally {
      try {
        // reader is assigned before the try block, so the original null check was dead code
        reader.close();
      } catch (IOException e) {
        // best-effort close: log and keep scanning the remaining files
        log.error("{}", e.getMessage(), e);
      }
    }
  }
  return lastKey;
}
Use of org.apache.accumulo.core.file.FileSKVIterator in the Apache Accumulo project — class FileUtil, method estimatePercentageLTE.
/**
 * Estimates the fraction of index entries, restricted to rows in (prevEndRow, endRow],
 * whose row is less than or equal to {@code splitRow}.
 *
 * @param fs volume manager used to resolve filesystems
 * @param tabletDir tablet directory used when a temporary reduction directory is needed
 * @param acuconf configuration supplying the max-open-files limit
 * @param prevEndRow exclusive lower row bound; null means scan from the beginning
 * @param endRow inclusive upper row bound
 * @param mapFiles files whose index entries are sampled
 * @param splitRow candidate split row to compare entries against
 * @return estimated fraction in (0, 1); exactly .5 when no index entries fall in range
 * @throws IOException if index files cannot be opened or read
 */
public static double estimatePercentageLTE(VolumeManager fs, String tabletDir, AccumuloConfiguration acuconf, Text prevEndRow, Text endRow, Collection<String> mapFiles, Text splitRow) throws IOException {
  Configuration conf = CachedConfiguration.getInstance();
  Path tmpDir = null;
  // cap on how many index files may be open at once
  int maxToOpen = acuconf.getCount(Property.TSERV_TABLET_SPLIT_FINDMIDPOINT_MAXOPEN);
  ArrayList<FileSKVIterator> readers = new ArrayList<>(mapFiles.size());
  try {
    if (mapFiles.size() > maxToOpen) {
      // too many files to open together: merge index data into fewer temp files first
      tmpDir = createTmpDir(acuconf, fs, tabletDir);
      log.debug("Too many indexes ({}) to open at once for {} {}, reducing in tmpDir = {}", mapFiles.size(), endRow, prevEndRow, tmpDir);
      long t1 = System.currentTimeMillis();
      mapFiles = reduceFiles(acuconf, conf, fs, prevEndRow, endRow, mapFiles, maxToOpen, tmpDir, 0);
      long t2 = System.currentTimeMillis();
      log.debug("Finished reducing indexes for {} {} in {}", endRow, prevEndRow, String.format("%6.2f secs", (t2 - t1) / 1000.0));
    }
    if (prevEndRow == null)
      prevEndRow = new Text();
    long numKeys = 0;
    // side effect: countIndexEntries also populates 'readers' with open index iterators
    numKeys = countIndexEntries(acuconf, prevEndRow, endRow, mapFiles, true, conf, fs, readers);
    if (numKeys == 0) {
      // no index entries in range; punt and return .5
      return .5;
    }
    List<SortedKeyValueIterator<Key, Value>> iters = new ArrayList<>(readers);
    // merge all index iterators into one sorted stream
    MultiIterator mmfi = new MultiIterator(iters, true);
    // skip the prevendrow
    while (mmfi.hasTop() && mmfi.getTopKey().compareRow(prevEndRow) <= 0) {
      mmfi.next();
    }
    // count entries at or before the candidate split row
    int numLte = 0;
    while (mmfi.hasTop() && mmfi.getTopKey().compareRow(splitRow) <= 0) {
      numLte++;
      mmfi.next();
    }
    if (numLte > numKeys) {
      // something went wrong
      throw new RuntimeException("numLte > numKeys " + numLte + " " + numKeys + " " + prevEndRow + " " + endRow + " " + splitRow + " " + mapFiles);
    }
    // do not want to return 0% or 100%, so add 1 and 2 below
    return (numLte + 1) / (double) (numKeys + 2);
  } finally {
    // close readers and remove the temp reduction directory, if one was created
    cleanupIndexOp(tmpDir, fs, readers);
  }
}
Aggregations