Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.
The class MetadataTableUtil, method updateTabletDataFile:
public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef, DataFileValue> estSizes, String time, ClientContext context, ZooLock zooLock) {
  Mutation m = new Mutation(extent.getMetadataEntry());
  byte[] tidBytes = Long.toString(tid).getBytes(UTF_8);
  for (Entry<FileRef, DataFileValue> entry : estSizes.entrySet()) {
    Text file = entry.getKey().meta();
    m.put(DataFileColumnFamily.NAME, file, new Value(entry.getValue().encode()));
    m.put(TabletsSection.BulkFileColumnFamily.NAME, file, new Value(tidBytes));
  }
  TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(UTF_8)));
  update(context, zooLock, m, extent);
}
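For each file, the mutation records the estimated size/entry count under the data-file column family and the bulk transaction id under the bulk-file column family, then stamps the tablet's time, so everything lands in one atomic metadata mutation. A hypothetical caller sketch (the HDFS path, the size estimates, and the "M" time string are invented for illustration; a real caller already holds a ClientContext and the tablet server's ZooLock):

// Hypothetical caller sketch: record one bulk-imported file against a tablet.
void recordBulkFile(long tid, KeyExtent extent, ClientContext context, ZooLock zooLock) {
  Map<FileRef, DataFileValue> estSizes = new HashMap<>();
  // invented path; ~10 MB file estimated to hold ~50,000 entries
  estSizes.put(new FileRef("hdfs://nn:8020/accumulo/tables/1/t-0001/I0000abc.rf"),
      new DataFileValue(10000000, 50000));
  MetadataTableUtil.updateTabletDataFile(tid, extent, estSizes, "M" + System.currentTimeMillis(), context, zooLock);
}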
Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.
The class MetadataTableUtil, method deleteTable:
public static void deleteTable(Table.ID tableId, boolean insertDeletes, ClientContext context, ZooLock lock) throws AccumuloException, IOException {
  try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
      BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID,
          new BatchWriterConfig().setMaxMemory(1000000).setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
    // scan metadata for our table and delete everything we find
    Mutation m = null;
    ms.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
    // insert deletes before deleting data from metadata... this makes the code fault-tolerant
    if (insertDeletes) {
      ms.fetchColumnFamily(DataFileColumnFamily.NAME);
      TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
      for (Entry<Key, Value> cell : ms) {
        Key key = cell.getKey();
        if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
          FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
          bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
        }
        if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
          bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
        }
      }
      bw.flush();
      ms.clearColumns();
    }
    for (Entry<Key, Value> cell : ms) {
      Key key = cell.getKey();
      if (m == null) {
        m = new Mutation(key.getRow());
        if (lock != null)
          putLockID(lock, m);
      }
      if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
        bw.addMutation(m);
        m = new Mutation(key.getRow());
        if (lock != null)
          putLockID(lock, m);
      }
      m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
    }
    if (m != null)
      bw.addMutation(m);
  }
}
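When insertDeletes is true, the first pass queues garbage-collector delete markers for every data file and tablet directory before any metadata rows are removed, so a crash part-way through cannot orphan files on disk. A minimal invocation sketch (table id "3" is invented, and it assumes the Table.ID.of(String) factory; passing null for the lock is permitted, since the lock id is only written when a lock is supplied):

// Hypothetical sketch: purge all metadata for table id "3", queuing
// file/directory delete markers first for fault tolerance.
MetadataTableUtil.deleteTable(Table.ID.of("3"), true, context, null);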
Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.
The class FileUtil, method tryToGetFirstAndLastRows:
public static Map<FileRef, FileInfo> tryToGetFirstAndLastRows(VolumeManager fs, AccumuloConfiguration acuConf, Set<FileRef> mapfiles) {
  HashMap<FileRef, FileInfo> mapFilesInfo = new HashMap<>();
  long t1 = System.currentTimeMillis();
  for (FileRef mapfile : mapfiles) {
    FileSKVIterator reader = null;
    FileSystem ns = fs.getVolumeByPath(mapfile.path()).getFileSystem();
    try {
      reader = FileOperations.getInstance().newReaderBuilder().forFile(mapfile.toString(), ns, ns.getConf()).withTableConfiguration(acuConf).build();
      Key firstKey = reader.getFirstKey();
      if (firstKey != null) {
        mapFilesInfo.put(mapfile, new FileInfo(firstKey, reader.getLastKey()));
      }
    } catch (IOException ioe) {
      log.warn("Failed to read map file to determine first and last key: " + mapfile, ioe);
    } finally {
      if (reader != null) {
        try {
          reader.close();
        } catch (IOException ioe) {
          log.warn("Failed to close " + mapfile, ioe);
        }
      }
    }
  }
  long t2 = System.currentTimeMillis();
  log.debug(String.format("Found first and last keys for %d map files in %6.2f secs", mapfiles.size(), (t2 - t1) / 1000.0));
  return mapFilesInfo;
}
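A hypothetical usage sketch (the path is invented, and it assumes FileInfo exposes the recovered endpoints via getFirstRow()/getLastRow(); note that files which fail to open are logged and simply left out of the returned map rather than failing the whole call):

// Hypothetical sketch: probe a set of files for the row range they cover.
Set<FileRef> files = Collections.singleton(new FileRef("hdfs://nn:8020/accumulo/tables/1/t-0001/F0000def.rf"));
Map<FileRef, FileInfo> info = FileUtil.tryToGetFirstAndLastRows(fs, acuConf, files);
for (Entry<FileRef, FileInfo> e : info.entrySet())
  log.debug("{} spans rows {} .. {}", e.getKey(), e.getValue().getFirstRow(), e.getValue().getLastRow());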
Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.
The class FileUtilTest, method testToPathStrings:
@Test
public void testToPathStrings() {
  Collection<FileRef> c = new java.util.ArrayList<>();
  FileRef r1 = createMock(FileRef.class);
  expect(r1.path()).andReturn(new Path("/foo"));
  replay(r1);
  c.add(r1);
  FileRef r2 = createMock(FileRef.class);
  expect(r2.path()).andReturn(new Path("/bar"));
  replay(r2);
  c.add(r2);
  Collection<String> cs = FileUtil.toPathStrings(c);
  Assert.assertEquals(2, cs.size());
  Iterator<String> iter = cs.iterator();
  Assert.assertEquals("/foo", iter.next());
  Assert.assertEquals("/bar", iter.next());
}
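The mocked path() expectations pin down the contract under test: toPathStrings maps each FileRef to its path string and preserves iteration order. A sketch consistent with that contract (not necessarily the verbatim implementation):

public static Collection<String> toPathStrings(Collection<FileRef> refs) {
  ArrayList<String> paths = new ArrayList<>(refs.size());
  for (FileRef ref : refs)
    paths.add(ref.path().toString()); // ArrayList keeps the input iteration order
  return paths;
}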
Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.
The class Compactor, method openMapDataFiles:
private List<SortedKeyValueIterator<Key, Value>> openMapDataFiles(String lgName, ArrayList<FileSKVIterator> readers) throws IOException {
  List<SortedKeyValueIterator<Key, Value>> iters = new ArrayList<>(filesToCompact.size());
  for (FileRef mapFile : filesToCompact.keySet()) {
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      FileSystem fs = this.fs.getVolumeByPath(mapFile.path()).getFileSystem();
      FileSKVIterator reader = fileFactory.newReaderBuilder().forFile(mapFile.path().toString(), fs, fs.getConf())
          .withTableConfiguration(acuTableConf).withRateLimiter(env.getReadLimiter()).build();
      readers.add(reader);
      SortedKeyValueIterator<Key, Value> iter = new ProblemReportingIterator(context, extent.getTableId(), mapFile.path().toString(), false, reader);
      if (filesToCompact.get(mapFile).isTimeSet()) {
        iter = new TimeSettingIterator(iter, filesToCompact.get(mapFile).getTime());
      }
      iters.add(iter);
    } catch (Throwable e) {
      ProblemReports.getInstance(context).report(new ProblemReport(extent.getTableId(), ProblemType.FILE_READ, mapFile.path().toString(), e));
      log.warn("Some problem opening map file {} {}", mapFile, e.getMessage(), e);
      // failed to open some map file... close the ones that were opened
      for (FileSKVIterator reader : readers) {
        try {
          reader.close();
        } catch (Throwable e2) {
          log.warn("Failed to close map file", e2);
        }
      }
      readers.clear();
      if (e instanceof IOException)
        throw (IOException) e;
      throw new IOException("Failed to open map data files", e);
    }
  }
  return iters;
}
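The catch block illustrates a general open-many-resources idiom: on any failure, close everything opened so far before rethrowing, so no reader leaks. Reduced to its essentials as a standalone sketch (open(FileRef) is a hypothetical helper, and using addSuppressed instead of logging the close failures is a deliberate variation on the method above):

List<FileSKVIterator> openAll(Collection<FileRef> files) throws IOException {
  List<FileSKVIterator> opened = new ArrayList<>();
  try {
    for (FileRef f : files)
      opened.add(open(f)); // hypothetical helper; may throw part-way through
  } catch (IOException e) {
    for (FileSKVIterator r : opened) {
      try {
        r.close();
      } catch (IOException suppressed) {
        e.addSuppressed(suppressed); // keep the original failure as the primary exception
      }
    }
    throw e;
  }
  return opened;
}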