Use of org.apache.accumulo.server.fs.VolumeManager in the Apache Accumulo project: class BulkFailedCopyProcessor, method process.
/**
 * Copies a failed bulk-import file from its original location to a destination
 * path. The copy is written to a ".tmp" sibling first and then renamed, so the
 * destination path only ever appears fully written. If the copy fails for any
 * reason, an empty "failure flag" file is created at the destination instead.
 *
 * @param workID identifier of the work item (not used by this implementation)
 * @param data   UTF-8 bytes of the form "origPath,destPath"
 */
@Override
public void process(String workID, byte[] data) {
  String[] paths = new String(data, UTF_8).split(",");
  Path orig = new Path(paths[0]);
  Path dest = new Path(paths[1]);
  // Copy into a temp file first so a partially-copied file is never visible at dest.
  Path tmp = new Path(dest.getParent(), dest.getName() + ".tmp");
  try {
    VolumeManager vm = VolumeManagerImpl.get(SiteConfiguration.getInstance());
    FileSystem origFs = vm.getVolumeByPath(orig).getFileSystem();
    FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
    FileUtil.copy(origFs, orig, destFs, tmp, false, true, CachedConfiguration.getInstance());
    // FileSystem.rename signals failure by returning false rather than by
    // throwing; the original code ignored the result, so a failed rename was
    // logged as a successful copy. Treat a false return as a copy failure.
    if (!destFs.rename(tmp, dest)) {
      throw new IOException("Failed to rename " + tmp + " to " + dest);
    }
    log.debug("copied {} to {}", orig, dest);
  } catch (IOException ex) {
    try {
      VolumeManager vm = VolumeManagerImpl.get(SiteConfiguration.getInstance());
      FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
      // An empty file at dest marks this copy as permanently failed.
      destFs.create(dest).close();
      log.warn(" marked " + dest + " failed", ex);
    } catch (IOException e) {
      log.error("Unable to create failure flag file " + dest, e);
    }
  }
}
Use of org.apache.accumulo.server.fs.VolumeManager in the Apache Accumulo project: class LocalityCheck, method run.
/**
 * Scans the metadata table and prints, for each tablet server, the percentage
 * of HDFS blocks belonging to its tablets' data files that are stored locally
 * on that server, along with the total block count.
 *
 * @param args command-line arguments parsed by {@link ClientOpts}
 * @return 0 on success
 * @throws Exception if argument parsing, connecting, or scanning fails
 */
public int run(String[] args) throws Exception {
  ClientOpts opts = new ClientOpts();
  opts.parseArgs(LocalityCheck.class.getName(), args);
  VolumeManager fs = VolumeManagerImpl.get();
  Connector connector = opts.getConnector();
  Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
  scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
  scanner.setRange(MetadataSchema.TabletsSection.getRange());
  Map<String, Long> totalBlocks = new HashMap<>();
  Map<String, Long> localBlocks = new HashMap<>();
  ArrayList<String> files = new ArrayList<>();
  // Within a tablet's metadata row, data-file entries are read before the
  // current-location entry, so accumulate file paths until the location is
  // seen, then attribute the batch to that server and reset.
  for (Entry<Key, Value> entry : scanner) {
    Key key = entry.getKey();
    if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
      String location = entry.getValue().toString();
      // The location value is "host:port"; only the host matters for locality.
      String[] parts = location.split(":");
      String host = parts[0];
      addBlocks(fs, host, files, totalBlocks, localBlocks);
      files.clear();
    } else if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
      files.add(fs.getFullPath(key).toString());
    }
  }
  System.out.println(" Server %local total blocks");
  for (Entry<String, Long> entry : totalBlocks.entrySet()) {
    final String host = entry.getKey();
    final Long blocksForHost = entry.getValue();
    // Guard against a host present in totalBlocks but absent from
    // localBlocks; the previous unguarded get() would NPE during unboxing.
    final long localForHost = localBlocks.getOrDefault(host, 0L);
    System.out.println(String.format("%15s %5.1f %8d", host, (localForHost * 100.) / blocksForHost, blocksForHost));
  }
  return 0;
}
Use of org.apache.accumulo.server.fs.VolumeManager in the Apache Accumulo project: class MasterMetadataUtil, method fixSplit.
/**
 * Repairs a tablet whose split was interrupted partway through. Looks for the
 * metadata entry of the would-be previous (low) tablet: if it is absent the
 * split is rolled back; if it is present the split is completed by re-deriving
 * which data files belong to the high tablet.
 *
 * @param metadataPrevEndRow the prev end row recorded for the tablet; must not be null
 * @param oper encoded prev-end-row value used when rolling back the split
 * @param splitRatio ratio used to re-partition data files between low and high tablets
 * @return the tablet's extent after the split is rolled back or completed
 * @throws AccumuloException if the tablet has no prev end row recorded
 * @throws IOException if reading metadata fails
 */
private static KeyExtent fixSplit(ClientContext context, Table.ID tableId, Text metadataEntry, Text metadataPrevEndRow, Value oper, double splitRatio, TServerInstance tserver, String time, long initFlushID, long initCompactID, ZooLock lock) throws AccumuloException, IOException {
if (metadataPrevEndRow == null)
// a split tablet should always have its prev end row recorded; its absence
// means the metadata is corrupt and nothing here can be repaired safely
throw new AccumuloException("Split tablet does not have prev end row, something is amiss, extent = " + metadataEntry);
// check to see if prev tablet exist in metadata tablet
Key prevRowKey = new Key(new Text(KeyExtent.getMetadataEntry(tableId, metadataPrevEndRow)));
try (ScannerImpl scanner2 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
// restrict the scan to exactly the prev tablet's metadata row
scanner2.setRange(new Range(prevRowKey, prevRowKey.followingKey(PartialKey.ROW)));
VolumeManager fs = VolumeManagerImpl.get();
if (!scanner2.iterator().hasNext()) {
// the low tablet was never written: undo the half-done split
log.info("Rolling back incomplete split {} {}", metadataEntry, metadataPrevEndRow);
MetadataTableUtil.rollBackSplit(metadataEntry, KeyExtent.decodePrevEndRow(oper), context, lock);
return new KeyExtent(metadataEntry, KeyExtent.decodePrevEndRow(oper));
} else {
// the low tablet exists: finish the split by assigning files to the high tablet
log.info("Finishing incomplete split {} {}", metadataEntry, metadataPrevEndRow);
List<FileRef> highDatafilesToRemove = new ArrayList<>();
SortedMap<FileRef, DataFileValue> origDatafileSizes = new TreeMap<>();
SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<>();
SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<>();
// collect this tablet's current data files and their sizes
try (Scanner scanner3 = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
Key rowKey = new Key(metadataEntry);
scanner3.fetchColumnFamily(DataFileColumnFamily.NAME);
scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));
for (Entry<Key, Value> entry : scanner3) {
if (entry.getKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
}
}
}
// re-split the files at metadataPrevEndRow and persist the high tablet's share
MetadataTableUtil.splitDatafiles(metadataPrevEndRow, splitRatio, new HashMap<>(), origDatafileSizes, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
MetadataTableUtil.finishSplit(metadataEntry, highDatafileSizes, highDatafilesToRemove, context, lock);
return new KeyExtent(metadataEntry, KeyExtent.encodePrevEndRow(metadataPrevEndRow));
}
}
}
Use of org.apache.accumulo.server.fs.VolumeManager in the Apache Accumulo project: class MetadataTableUtil, method getBulkFilesLoaded.
/**
 * Returns the bulk-loaded file references recorded in the metadata for the
 * given extent whose bulk-load transaction id matches {@code tid}.
 *
 * @param conn   connector used to scan the (root or regular) metadata table
 * @param extent tablet extent whose bulk-file entries are scanned
 * @param tid    bulk-load transaction id to match
 * @return file references loaded under transaction {@code tid}
 * @throws IOException if resolving the volume manager fails
 */
public static List<FileRef> getBulkFilesLoaded(Connector conn, KeyExtent extent, long tid) throws IOException {
  List<FileRef> result = new ArrayList<>();
  // Root-tablet metadata lives in the root table; all other tablets' metadata
  // lives in the regular metadata table.
  try (Scanner mscanner = new IsolatedScanner(conn.createScanner(extent.isMeta() ? RootTable.NAME : MetadataTable.NAME, Authorizations.EMPTY))) {
    VolumeManager fs = VolumeManagerImpl.get();
    mscanner.setRange(extent.toMetadataRange());
    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key, Value> entry : mscanner) {
      // The value of a bulk-file entry is the load transaction id.
      if (Long.parseLong(entry.getValue().toString()) == tid) {
        result.add(new FileRef(fs, entry.getKey()));
      }
    }
    return result;
  } catch (TableNotFoundException ex) {
    // The metadata table should always exist; chain the cause instead of
    // discarding it so the failure remains diagnosable.
    throw new RuntimeException("Metadata table does not exist", ex);
  }
}
Use of org.apache.accumulo.server.fs.VolumeManager in the Apache Accumulo project: class MetadataTableUtil, method getDataFileSizes.
/**
 * Reads the sizes of all data files recorded in the metadata table for the
 * given extent.
 *
 * @param extent  tablet extent whose data-file entries are read
 * @param context client context used to open the metadata scanner
 * @return map from each data file reference to its recorded size information
 * @throws IOException if resolving the volume manager fails
 */
public static SortedMap<FileRef, DataFileValue> getDataFileSizes(KeyExtent extent, ClientContext context) throws IOException {
  TreeMap<FileRef, DataFileValue> sizes = new TreeMap<>();
  try (Scanner mdScanner = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY)) {
    mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);
    Text row = extent.getMetadataEntry();
    VolumeManager fs = VolumeManagerImpl.get();
    // Scan from the start of this tablet's row through the end of its
    // data-file column family.
    Key startKey = new Key(row);
    Key stopKey = new Key(row, DataFileColumnFamily.NAME, new Text("")).followingKey(PartialKey.ROW_COLFAM);
    mdScanner.setRange(new Range(startKey, stopKey));
    for (Entry<Key, Value> fileEntry : mdScanner) {
      // Defensive check: stop if the scan strays past this tablet's row.
      if (!fileEntry.getKey().getRow().equals(row)) {
        break;
      }
      sizes.put(new FileRef(fs, fileEntry.getKey()), new DataFileValue(fileEntry.getValue().get()));
    }
    return sizes;
  }
}
Aggregations