Use of org.apache.hadoop.fs.LocalFileSystem in the Apache Accumulo project.
From the class HadoopLogCloser, method close().
@Override
public long close(AccumuloConfiguration conf, VolumeManager fs, Path source) throws IOException {
    // Resolve the filesystem that actually holds the write-ahead log being closed.
    FileSystem ns = fs.getVolumeByPath(source).getFileSystem();

    // If the path points into a viewfs mount table, resolve it down to the
    // concrete underlying filesystem so lease recovery talks to the real namenode.
    if (ViewFSUtils.isViewFS(ns)) {
        Path newSource = ns.resolvePath(source);
        if (!newSource.equals(source) && newSource.toUri().getScheme() != null) {
            ns = newSource.getFileSystem(CachedConfiguration.getInstance());
            source = newSource;
        }
    }

    if (ns instanceof DistributedFileSystem) {
        DistributedFileSystem dfs = (DistributedFileSystem) ns;
        try {
            if (!dfs.recoverLease(source)) {
                // Lease recovery is asynchronous; report how long the caller
                // should wait before checking again.
                log.info("Waiting for file to be closed {}", source);
                return conf.getTimeInMillis(Property.MASTER_LEASE_RECOVERY_WAITING_PERIOD);
            }
            log.info("Recovered lease on {}", source);
        } catch (FileNotFoundException ex) {
            // The file is gone; nothing to recover — let the caller handle it.
            throw ex;
        } catch (Exception ex) {
            // Fall back to open-for-append, which also forces lease recovery.
            // Parameterized form: SLF4J logs the trailing Throwable's stack trace.
            log.warn("Error recovering lease on {}", source, ex);
            ns.append(source).close();
            log.info("Recovered lease on {} using append", source);
        }
    } else if (ns instanceof LocalFileSystem || ns instanceof RawLocalFileSystem) {
        // Local filesystems have no lease concept; nothing to do.
    } else {
        throw new IllegalStateException("Don't know how to recover a lease for " + ns.getClass().getName());
    }
    // 0 signals the log is closed and recovery is complete.
    return 0;
}
Use of org.apache.hadoop.fs.LocalFileSystem in the Apache Accumulo project.
From the class RFileTest, method testCache().
@Test
public void testCache() throws Exception {
    // Build a 10k-row file and open it with both index and data caches enabled.
    SortedMap<Key, Value> data = createTestData(10000, 1, 1);
    String file = createRFile(data);
    LocalFileSystem fs = FileSystem.getLocal(new Configuration());

    Scanner scanner = RFile.newScanner()
        .from(file)
        .withFileSystem(fs)
        .withIndexCache(1000000)
        .withDataCache(10000000)
        .build();

    // Probe 100 random rows; each lookup must yield exactly one matching entry.
    Random random = new Random(5);
    for (int attempt = 0; attempt < 100; attempt++) {
        int row = random.nextInt(10000);
        scanner.setRange(new Range(rowStr(row)));
        Iterator<Entry<Key, Value>> entries = scanner.iterator();
        Assert.assertTrue(entries.hasNext());
        Assert.assertEquals(rowStr(row), entries.next().getKey().getRow().toString());
        Assert.assertFalse(entries.hasNext());
    }
    scanner.close();
}
Use of org.apache.hadoop.fs.LocalFileSystem in the Apache Accumulo project.
From the class RFileTest, method testOutOfOrderIterable().
@Test(expected = IllegalArgumentException.class)
public void testOutOfOrderIterable() throws Exception {
    // Verify the exception declared in the API is thrown when entries are
    // appended out of sorted order: r2 is deliberately placed before r1.
    ArrayList<Entry<Key, Value>> entries = new ArrayList<>();
    entries.add(new AbstractMap.SimpleEntry<>(new Key("r2", "f1", "q1"), new Value("2".getBytes())));
    entries.add(new AbstractMap.SimpleEntry<>(new Key("r1", "f1", "q1"), new Value("1".getBytes())));

    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
    String file = createTmpTestFile();
    try (RFileWriter writer = RFile.newWriter().to(file).withFileSystem(fs).build()) {
        writer.append(entries);
    }
}
Use of org.apache.hadoop.fs.LocalFileSystem in the Apache Accumulo project.
From the class RFileTest, method testStartAfter().
@Test(expected = IllegalStateException.class)
public void testStartAfter() throws Exception {
    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
    String file = createTmpTestFile();
    try (RFileWriter writer = RFile.newWriter().to(file).withFileSystem(fs).build()) {
        // Appending to the default group first makes starting a named
        // locality group afterwards illegal.
        writer.append(new Key("r1", "f1", "q1"), new Value("".getBytes()));
        writer.startNewLocalityGroup("lg1", "fam1");
    }
}
Use of org.apache.hadoop.fs.LocalFileSystem in the Apache Accumulo project.
From the class RFileTest, method testAppendStartDefault().
@Test(expected = IllegalStateException.class)
public void testAppendStartDefault() throws Exception {
    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
    String file = createTmpTestFile();
    try (RFileWriter writer = RFile.newWriter().to(file).withFileSystem(fs).build()) {
        // Once data has been appended, starting the default locality group
        // is no longer a legal transition.
        writer.append(new Key("r1", "f1", "q1"), new Value("1".getBytes()));
        writer.startDefaultLocalityGroup();
    }
}
Aggregations