Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.
The class MetadataTableUtil, method updateTabletVolumes.
public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRemove, List<LogEntry> logsToAdd,
    List<FileRef> filesToRemove, SortedMap<FileRef, DataFileValue> filesToAdd, String newDir, ZooLock zooLock,
    AccumuloServerContext context) {
  if (extent.isRootTablet()) {
    if (newDir != null)
      throw new IllegalArgumentException("newDir not expected for " + extent);
    if (filesToRemove.size() != 0 || filesToAdd.size() != 0)
      throw new IllegalArgumentException("files not expected for " + extent);
    // add before removing in case of process death
    for (LogEntry logEntry : logsToAdd)
      addRootLogEntry(context, zooLock, logEntry);
    removeUnusedWALEntries(context, extent, logsToRemove, zooLock);
  } else {
    Mutation m = new Mutation(extent.getMetadataEntry());
    for (LogEntry logEntry : logsToRemove)
      m.putDelete(logEntry.getColumnFamily(), logEntry.getColumnQualifier());
    for (LogEntry logEntry : logsToAdd)
      m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue());
    for (FileRef fileRef : filesToRemove)
      m.putDelete(DataFileColumnFamily.NAME, fileRef.meta());
    for (Entry<FileRef, DataFileValue> entry : filesToAdd.entrySet())
      m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode()));
    if (newDir != null)
      ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(newDir.getBytes(UTF_8)));
    update(context, m, extent);
  }
}
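The root tablet is special-cased because its write-ahead log entries live in ZooKeeper rather than in the metadata table, so only log changes are accepted for it; every other tablet gets all of its log and file changes folded into a single metadata mutation, which keeps the update to one row atomic. Below is a minimal sketch, not from the Accumulo source, of how a caller might swap one data file for another on an ordinary tablet. The context and zooLock arguments are assumed to come from the surrounding server code, the file paths and class name are hypothetical, and the import paths assume the 1.8-era package layout.

import java.util.Collections;
import java.util.SortedMap;
import java.util.TreeMap;

import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.fate.zookeeper.ZooLock;
import org.apache.accumulo.server.AccumuloServerContext;
import org.apache.accumulo.server.fs.FileRef;
import org.apache.accumulo.server.util.MetadataTableUtil;

public class VolumeUpdateSketch {
  // Hypothetical helper: replace oldFile with newFile on a non-root tablet.
  // No log changes and no directory change, so those arguments stay empty/null.
  static void replaceFile(AccumuloServerContext context, ZooLock zooLock, KeyExtent extent,
      String oldFile, String newFile, DataFileValue newFileValue) {
    SortedMap<FileRef, DataFileValue> filesToAdd = new TreeMap<>();
    filesToAdd.put(new FileRef(newFile), newFileValue);
    MetadataTableUtil.updateTabletVolumes(extent, Collections.emptyList(), Collections.emptyList(),
        Collections.singletonList(new FileRef(oldFile)), filesToAdd, null, zooLock, context);
  }
}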
Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.
The class MetadataTableUtil, method getFileAndLogEntries.
public static Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>> getFileAndLogEntries(ClientContext context,
    KeyExtent extent) throws KeeperException, InterruptedException, IOException {
  ArrayList<LogEntry> result = new ArrayList<>();
  TreeMap<FileRef, DataFileValue> sizes = new TreeMap<>();
  VolumeManager fs = VolumeManagerImpl.get();
  if (extent.isRootTablet()) {
    getRootLogEntries(result);
    Path rootDir = new Path(getRootTabletDir());
    FileStatus[] files = fs.listStatus(rootDir);
    for (FileStatus fileStatus : files) {
      if (fileStatus.getPath().toString().endsWith("_tmp")) {
        continue;
      }
      DataFileValue dfv = new DataFileValue(0, 0);
      sizes.put(new FileRef(fileStatus.getPath().toString(), fileStatus.getPath()), dfv);
    }
  } else {
    Table.ID systemTableToCheck = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
    try (Scanner scanner = new ScannerImpl(context, systemTableToCheck, Authorizations.EMPTY)) {
      scanner.fetchColumnFamily(LogColumnFamily.NAME);
      scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
      scanner.setRange(extent.toMetadataRange());
      for (Entry<Key, Value> entry : scanner) {
        if (!entry.getKey().getRow().equals(extent.getMetadataEntry())) {
          throw new RuntimeException("Unexpected row " + entry.getKey().getRow() + " expected " + extent.getMetadataEntry());
        }
        if (entry.getKey().getColumnFamily().equals(LogColumnFamily.NAME)) {
          result.add(LogEntry.fromKeyValue(entry.getKey(), entry.getValue()));
        } else if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          DataFileValue dfv = new DataFileValue(entry.getValue().get());
          sizes.put(new FileRef(fs, entry.getKey()), dfv);
        } else {
          throw new RuntimeException("Unexpected col fam " + entry.getKey().getColumnFamily());
        }
      }
    }
  }
  return new Pair<>(result, sizes);
}
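The returned Pair bundles the tablet's write-ahead log entries (first) with a sorted map from each data file to its size metadata (second); for the root tablet the sizes are placeholders, since they are synthesized as DataFileValue(0, 0) from a directory listing rather than read from the metadata table. A small sketch, not from the source, of how a caller might print that state; the class name is hypothetical and the import paths assume the same 1.8-era layout.

import java.util.List;
import java.util.Map.Entry;
import java.util.SortedMap;

import org.apache.accumulo.core.client.impl.ClientContext;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.core.tabletserver.log.LogEntry;
import org.apache.accumulo.core.util.Pair;
import org.apache.accumulo.server.fs.FileRef;
import org.apache.accumulo.server.util.MetadataTableUtil;

class TabletStateDumpSketch {
  // Sketch only: list a tablet's WALs and data files with their recorded sizes.
  static void dump(ClientContext context, KeyExtent extent) throws Exception {
    Pair<List<LogEntry>, SortedMap<FileRef, DataFileValue>> state = MetadataTableUtil.getFileAndLogEntries(context, extent);
    for (LogEntry logEntry : state.getFirst())
      System.out.println("WAL:  " + logEntry);
    for (Entry<FileRef, DataFileValue> entry : state.getSecond().entrySet())
      System.out.println("file: " + entry.getKey() + " -> " + entry.getValue());
  }
}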
Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.
The class FileUtil, method findLastKey.
public static WritableComparable<Key> findLastKey(VolumeManager fs, AccumuloConfiguration acuConf,
    Collection<FileRef> mapFiles) throws IOException {
  Key lastKey = null;
  for (FileRef ref : mapFiles) {
    Path path = ref.path();
    FileSystem ns = fs.getVolumeByPath(path).getFileSystem();
    FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder().forFile(path.toString(), ns, ns.getConf())
        .withTableConfiguration(acuConf).seekToBeginning().build();
    try {
      if (!reader.hasTop())
        // file is empty, so there is no last key
        continue;
      Key key = reader.getLastKey();
      if (lastKey == null || key.compareTo(lastKey) > 0)
        lastKey = key;
    } finally {
      try {
        if (reader != null)
          reader.close();
      } catch (IOException e) {
        log.error("{}", e.getMessage(), e);
      }
    }
  }
  return lastKey;
}
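The method opens each file once, keeps the greatest last key it sees, and always closes the reader in the finally block; empty files are skipped, so a null return means every file was empty. A trivial caller sketch, not from the source, with fs and conf assumed to be supplied by the surrounding code and import paths assumed as elsewhere in this page:

import java.util.Collection;

import org.apache.accumulo.core.conf.AccumuloConfiguration;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.server.fs.FileRef;
import org.apache.accumulo.server.fs.VolumeManager;
import org.apache.accumulo.server.util.FileUtil;
import org.apache.hadoop.io.WritableComparable;

class LastKeySketch {
  // Sketch only: print the greatest key across a set of tablet files, if any.
  static void printLastKey(VolumeManager fs, AccumuloConfiguration conf, Collection<FileRef> files) throws Exception {
    WritableComparable<Key> last = FileUtil.findLastKey(fs, conf, files);
    System.out.println(last == null ? "all files empty, no last key" : "last key: " + last);
  }
}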
Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.
The class CopyFailed, method call.
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  // This needs to execute after the arbiter is stopped
  master.updateBulkImportStatus(source, BulkImportState.COPY_FILES);
  VolumeManager fs = master.getFileSystem();
  if (!fs.exists(new Path(error, BulkImport.FAILURES_TXT)))
    return new CleanUpBulkImport(tableId, source, bulk, error);
  HashMap<FileRef, String> failures = new HashMap<>();
  HashMap<FileRef, String> loadedFailures = new HashMap<>();
  try (BufferedReader in = new BufferedReader(
      new InputStreamReader(fs.open(new Path(error, BulkImport.FAILURES_TXT)), UTF_8))) {
    String line = null;
    while ((line = in.readLine()) != null) {
      Path path = new Path(line);
      if (!fs.exists(new Path(error, path.getName())))
        failures.put(new FileRef(line, path), line);
    }
  }
  /*
   * I thought I could move files that have no file references in the table. However, it's possible a clone
   * references a file. Therefore only move files that have no loaded markers.
   */
  // determine which failed files were loaded
  Connector conn = master.getConnector();
  try (Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY))) {
    mscanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key, Value> entry : mscanner) {
      if (Long.parseLong(entry.getValue().toString()) == tid) {
        FileRef loadedFile = new FileRef(fs, entry.getKey());
        String absPath = failures.remove(loadedFile);
        if (absPath != null) {
          loadedFailures.put(loadedFile, absPath);
        }
      }
    }
  }
  // move failed files that were not loaded
  for (String failure : failures.values()) {
    Path orig = new Path(failure);
    Path dest = new Path(error, orig.getName());
    fs.rename(orig, dest);
    log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed");
  }
  if (loadedFailures.size() > 0) {
    DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(
        Constants.ZROOT + "/" + master.getInstance().getInstanceID() + Constants.ZBULK_FAILED_COPYQ,
        master.getConfiguration());
    HashSet<String> workIds = new HashSet<>();
    for (String failure : loadedFailures.values()) {
      Path orig = new Path(failure);
      Path dest = new Path(error, orig.getName());
      if (fs.exists(dest))
        continue;
      bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes(UTF_8));
      workIds.add(orig.getName());
      log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
    }
    bifCopyQueue.waitUntilDone(workIds);
  }
  fs.deleteRecursively(new Path(error, BulkImport.FAILURES_TXT));
  return new CleanUpBulkImport(tableId, source, bulk, error);
}
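The method first collects every path named in FAILURES_TXT that has not already been moved into the error directory, then walks the metadata table's bulk-load markers for this transaction id and pulls anything it finds out of that candidate set. Files that were never loaded can simply be renamed away; files with loaded markers must be copied through the distributed work queue because, as the comment above notes, a clone may still reference them. A standalone sketch of that subtract-then-partition bookkeeping, using plain strings and hypothetical names in place of FileRef:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Standalone sketch (hypothetical names, not from the source): start from every
// reported failure, subtract the ones observed as loaded, and keep the removed
// entries aside so the two groups can be handled differently.
class FailedFilePartitionSketch {
  static void partition(List<String> reportedFailures, List<String> loadedPaths) {
    Map<String, String> failures = new HashMap<>();
    for (String path : reportedFailures)
      failures.put(path, path); // the real code keys by FileRef rather than String
    Map<String, String> loadedFailures = new HashMap<>();
    for (String loaded : loadedPaths) {
      String absPath = failures.remove(loaded); // null when this file never failed
      if (absPath != null)
        loadedFailures.put(loaded, absPath);
    }
    System.out.println("rename directly:     " + failures.keySet());
    System.out.println("copy via work queue: " + loadedFailures.keySet());
  }
}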
Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.
The class SplitRecoveryIT, method runSplitRecoveryTest.
private void runSplitRecoveryTest(AccumuloServerContext context, int failPoint, String mr, int extentToSplit,
    ZooLock zl, KeyExtent... extents) throws Exception {
  Text midRow = new Text(mr);
  SortedMap<FileRef, DataFileValue> splitMapFiles = null;
  for (int i = 0; i < extents.length; i++) {
    KeyExtent extent = extents[i];
    String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId() + "/dir_" + i;
    MetadataTableUtil.addTablet(extent, tdir, context, TabletTime.LOGICAL_TIME_ID, zl);
    SortedMap<FileRef, DataFileValue> mapFiles = new TreeMap<>();
    mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
    if (i == extentToSplit) {
      splitMapFiles = mapFiles;
    }
    int tid = 0;
    TransactionWatcher.ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
    MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, "L0", context, zl);
  }
  KeyExtent extent = extents[extentToSplit];
  KeyExtent high = new KeyExtent(extent.getTableId(), extent.getEndRow(), midRow);
  KeyExtent low = new KeyExtent(extent.getTableId(), midRow, extent.getPrevEndRow());
  splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
}
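The split convention used here is worth making explicit: the high child keeps the parent's end row and takes midRow as its new previous end row, while the low child ends at midRow and keeps the parent's previous end row, so together the two children cover exactly the parent's range. A hypothetical helper capturing that construction, with the KeyExtent import path assumed for this branch:

import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.hadoop.io.Text;

class SplitExtentsSketch {
  // Hypothetical helper: partition a tablet's range at midRow, mirroring the
  // high/low construction in runSplitRecoveryTest above.
  static KeyExtent[] childrenOf(KeyExtent parent, Text midRow) {
    KeyExtent high = new KeyExtent(parent.getTableId(), parent.getEndRow(), midRow);
    KeyExtent low = new KeyExtent(parent.getTableId(), midRow, parent.getPrevEndRow());
    return new KeyExtent[] {low, high};
  }
}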