Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in the Apache Hadoop project.
From the class TestWrites, method testCheckCommit.
@Test
// Validates the OpenFileCtx.COMMIT_STATUS return codes, including COMMIT_FINISHED, COMMIT_WAIT,
// COMMIT_INACTIVE_CTX, COMMIT_INACTIVE_WITH_PENDING_WRITE, COMMIT_ERROR, and COMMIT_DO_SYNC.
public void testCheckCommit() throws IOException {
DFSClient dfsClient = Mockito.mock(DFSClient.class);
Nfs3FileAttributes attr = new Nfs3FileAttributes();
HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
Mockito.when(fos.getPos()).thenReturn((long) 0);
NfsConfiguration conf = new NfsConfiguration();
conf.setBoolean(NfsConfigKeys.LARGE_FILE_UPLOAD, false);
OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(conf), false, conf);
COMMIT_STATUS ret;
// Test inactive open file context
ctx.setActiveStatusForTest(false);
Channel ch = Mockito.mock(Channel.class);
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_CTX);
ctx.getPendingWritesForTest().put(new OffsetRange(5, 10), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE);
// Test request with non zero commit offset
ctx.setActiveStatusForTest(true);
Mockito.when(fos.getPos()).thenReturn((long) 10);
ctx.setNextOffsetForTest(10);
COMMIT_STATUS status = ctx.checkCommitInternal(5, null, 1, attr, false);
Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
// COMMIT_DO_SYNC is updated to COMMIT_FINISHED after the data sync
ret = ctx.checkCommit(dfsClient, 5, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
status = ctx.checkCommitInternal(10, ch, 1, attr, false);
Assert.assertTrue(status == COMMIT_STATUS.COMMIT_DO_SYNC);
ret = ctx.checkCommit(dfsClient, 10, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
ConcurrentNavigableMap<Long, CommitCtx> commits = ctx.getPendingCommitsForTest();
Assert.assertTrue(commits.size() == 0);
ret = ctx.checkCommit(dfsClient, 11, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
Assert.assertTrue(commits.size() == 1);
long key = commits.firstKey();
Assert.assertTrue(key == 11);
// Test request with zero commit offset
commits.remove(new Long(11));
// There is one pending write [5,10]
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_WAIT);
Assert.assertTrue(commits.size() == 1);
key = commits.firstKey();
Assert.assertTrue(key == 9);
// Empty pending writes
ctx.getPendingWritesForTest().remove(new OffsetRange(5, 10));
ret = ctx.checkCommit(dfsClient, 0, ch, 1, attr, false);
Assert.assertTrue(ret == COMMIT_STATUS.COMMIT_FINISHED);
}
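Read together, the assertions above pin down how checkCommit reacts to the commit offset relative to the flushed position. The following is a hedged paraphrase of that behaviour as a hypothetical helper method, written only from the assertions in this test; it is not the actual OpenFileCtx implementation.

// Hypothetical summary of the behaviour asserted above (not the real OpenFileCtx code).
COMMIT_STATUS expectedStatus(long commitOffset, long flushedPos, boolean hasPendingWrites, boolean activeCtx) {
  if (!activeCtx) {
    // Inactive context: the result only depends on whether writes are still queued.
    return hasPendingWrites ? COMMIT_STATUS.COMMIT_INACTIVE_WITH_PENDING_WRITE : COMMIT_STATUS.COMMIT_INACTIVE_CTX;
  }
  if (commitOffset > 0) {
    // Offsets 5 and 10 (<= flushed position 10) yield COMMIT_DO_SYNC, then COMMIT_FINISHED after the sync;
    // offset 11 (> flushed position) is queued as a pending commit and yields COMMIT_WAIT.
    return commitOffset <= flushedPos ? COMMIT_STATUS.COMMIT_DO_SYNC : COMMIT_STATUS.COMMIT_WAIT;
  }
  // A zero commit offset means "commit everything": wait while writes are pending, otherwise finished.
  return hasPendingWrites ? COMMIT_STATUS.COMMIT_WAIT : COMMIT_STATUS.COMMIT_FINISHED;
}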
Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in the Apache Hadoop project.
From the class TestMountd, method testStart.
@Test
public void testStart() throws IOException {
// Start minicluster
NfsConfiguration config = new NfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
cluster.waitActive();
// Use an ephemeral port in case tests are running in parallel
config.setInt("nfs3.mountd.port", 0);
config.setInt("nfs3.server.port", 0);
// 1s
int newTimeoutMillis = 1000;
// Set the new portmap rpc timeout values and check
config.setInt(NfsConfigKeys.NFS_UDP_CLIENT_PORTMAP_TIMEOUT_MILLIS_KEY, newTimeoutMillis);
assertTrue(config.getInt(NfsConfigKeys.NFS_UDP_CLIENT_PORTMAP_TIMEOUT_MILLIS_KEY, 0) == newTimeoutMillis);
// Start nfs
Nfs3 nfs3 = new Nfs3(config);
nfs3.startServiceInternal(false);
RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd().getRpcProgram();
mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));
assertTrue(mountd.getPortmapUdpTimeoutMillis() == newTimeoutMillis);
RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
nfsd.nullProcedure();
assertTrue(nfsd.getPortmapUdpTimeoutMillis() == newTimeoutMillis);
cluster.shutdown();
}
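The NfsConfiguration work in this test boils down to three setInt calls made before the gateway is constructed. A minimal sketch of just that setup, assuming an HDFS cluster is already reachable through the default configuration (the MiniDFSCluster scaffolding and the mountd/nfsd assertions are omitted):

NfsConfiguration config = new NfsConfiguration();
config.setInt("nfs3.mountd.port", 0);  // 0 lets the OS pick an ephemeral port
config.setInt("nfs3.server.port", 0);
config.setInt(NfsConfigKeys.NFS_UDP_CLIENT_PORTMAP_TIMEOUT_MILLIS_KEY, 1000);  // 1s portmap RPC timeout
Nfs3 nfs3 = new Nfs3(config);
nfs3.startServiceInternal(false);  // same start call the test uses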
Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in the Apache Hadoop project.
From the class TestOpenFileCtxCache, method testEviction.
@Test
public void testEviction() throws IOException, InterruptedException {
NfsConfiguration conf = new NfsConfiguration();
// Only two entries will be in the cache
conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
DFSClient dfsClient = Mockito.mock(DFSClient.class);
Nfs3FileAttributes attr = new Nfs3FileAttributes();
HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
Mockito.when(fos.getPos()).thenReturn((long) 0);
OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtx context5 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
boolean ret = cache.put(new FileHandle(1), context1);
assertTrue(ret);
Thread.sleep(1000);
ret = cache.put(new FileHandle(2), context2);
assertTrue(ret);
ret = cache.put(new FileHandle(3), context3);
assertFalse(ret);
assertTrue(cache.size() == 2);
// Wait for the oldest stream to become evictable, then insert again
Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
assertTrue(cache.size() == 2);
ret = cache.put(new FileHandle(3), context3);
assertTrue(ret);
assertTrue(cache.size() == 2);
assertTrue(cache.get(new FileHandle(1)) == null);
// Test inactive entry is evicted immediately
context3.setActiveStatusForTest(false);
ret = cache.put(new FileHandle(4), context4);
assertTrue(ret);
// Now the cache has context2 and context4
// Test eviction failure if all entries have pending work.
context2.getPendingWritesForTest().put(new OffsetRange(0, 100), new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
context4.getPendingCommitsForTest().put(new Long(100), new CommitCtx(0, null, 0, attr));
Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
ret = cache.put(new FileHandle(5), context5);
assertFalse(ret);
}
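The only NfsConfiguration key the eviction test depends on is the open-file limit. A minimal sketch of the capacity-bounded cache usage, assuming contexts built with the same mocked HdfsDataOutputStream as above:

NfsConfiguration conf = new NfsConfiguration();
conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);  // at most two open-file contexts cached
OpenFileCtxCache cache = new OpenFileCtxCache(conf, NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
boolean accepted = cache.put(new FileHandle(1), context1);
// put() returns false once the cache is full and no existing entry is idle or inactive enough to evict,
// which is exactly what the assertFalse calls above check.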
Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in the Apache Hadoop project.
From the class TestOpenFileCtxCache, method testScan.
@Test
public void testScan() throws IOException, InterruptedException {
NfsConfiguration conf = new NfsConfiguration();
// Only two entries will be in the cache
conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
DFSClient dfsClient = Mockito.mock(DFSClient.class);
Nfs3FileAttributes attr = new Nfs3FileAttributes();
HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
Mockito.when(fos.getPos()).thenReturn((long) 0);
OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient, new ShellBasedIdMapping(new NfsConfiguration()));
OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
// Test cleaning expired entry
boolean ret = cache.put(new FileHandle(1), context1);
assertTrue(ret);
ret = cache.put(new FileHandle(2), context2);
assertTrue(ret);
Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT + 1);
cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
assertTrue(cache.size() == 0);
// Test cleaning inactive entry
ret = cache.put(new FileHandle(3), context3);
assertTrue(ret);
ret = cache.put(new FileHandle(4), context4);
assertTrue(ret);
context3.setActiveStatusForTest(false);
cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT);
assertTrue(cache.size() == 1);
assertTrue(cache.get(new FileHandle(3)) == null);
assertTrue(cache.get(new FileHandle(4)) != null);
}
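The two scan() calls above exercise both cleanup rules: the first passes the short minimum timeout, so entries idle longer than that are dropped; the second passes the much longer default timeout, so only the entry explicitly marked inactive goes away. A brief hedged recap of that pattern, reusing the cache and contexts from the snippet:

// First rule: idle-time expiry. Entries untouched for longer than the timeout passed to scan() are removed.
cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
// Second rule: explicit deactivation. An inactive entry is removed on the next scan regardless of idle time.
context3.setActiveStatusForTest(false);
cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT);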
Use of org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration in the Apache Hadoop project.
From the class PrivilegedNfsGatewayStarter, method init.
@Override
public void init(DaemonContext context) throws Exception {
System.err.println("Initializing privileged NFS client socket...");
NfsConfiguration conf = new NfsConfiguration();
int clientPort = conf.getInt(NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY, NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_DEFAULT);
if (clientPort < 1 || clientPort > 1023) {
throw new RuntimeException("Must start privileged NFS server with '" + NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY + "' configured to a " + "privileged port.");
}
try {
InetSocketAddress socketAddress = new InetSocketAddress("localhost", clientPort);
registrationSocket = new DatagramSocket(null);
registrationSocket.setReuseAddress(true);
registrationSocket.bind(socketAddress);
} catch (SocketException e) {
LOG.error("Init failed for port=" + clientPort, e);
throw e;
}
args = context.getArguments();
}
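For completeness, the registration-port key checked in init() is set the same way as any other NfsConfiguration key. A minimal hedged sketch of configuring and validating it up front; the port value 871 is only an illustration and must be a privileged port (1 to 1023) that is free on the host:

NfsConfiguration conf = new NfsConfiguration();
conf.setInt(NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY, 871);  // illustrative value; must lie in [1, 1023]
int clientPort = conf.getInt(NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY, NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_DEFAULT);
if (clientPort < 1 || clientPort > 1023) {
  // Same validation init() performs before binding the registration DatagramSocket on localhost.
  throw new RuntimeException("registration port must be a privileged port (1-1023)");
}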