Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.
The class TestWrites, method testWriteStableHow.
@Test
public void testWriteStableHow() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  DFSClient client = null;
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
  config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();
    client = new DFSClient(DFSUtilClient.getNNAddress(config), config);
    // Use an ephemeral port in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    // Start the NFS gateway
    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    HdfsFileStatus status = client.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());
    // Create file1
    CREATE3Request createReq = new CREATE3Request(rootHandle, "file1", Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();
    // Test DATA_SYNC
    byte[] buffer = new byte[10];
    for (int i = 0; i < 10; i++) {
      buffer[i] = (byte) i;
    }
    WRITE3Request writeReq = new WRITE3Request(handle, 0, 10, WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
    XDR writeXdr = new XDR();
    writeReq.serialize(writeXdr);
    nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
    waitWrite(nfsd, handle, 60000);
    // Read back
    READ3Request readReq = new READ3Request(handle, 0, 10);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    assertTrue(Arrays.equals(buffer, readRsp.getData().array()));
    // Test FILE_SYNC
    // Create file2
    CREATE3Request createReq2 = new CREATE3Request(rootHandle, "file2", Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr2 = new XDR();
    createReq2.serialize(createXdr2);
    CREATE3Response createRsp2 = nfsd.create(createXdr2.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle2 = createRsp2.getObjHandle();
    WRITE3Request writeReq2 = new WRITE3Request(handle2, 0, 10, WriteStableHow.FILE_SYNC, ByteBuffer.wrap(buffer));
    XDR writeXdr2 = new XDR();
    writeReq2.serialize(writeXdr2);
    nfsd.write(writeXdr2.asReadOnlyWrap(), null, 1, securityHandler, new InetSocketAddress("localhost", 1234));
    waitWrite(nfsd, handle2, 60000);
    // Read back
    READ3Request readReq2 = new READ3Request(handle2, 0, 10);
    XDR readXdr2 = new XDR();
    readReq2.serialize(readXdr2);
    READ3Response readRsp2 = nfsd.read(readXdr2.asReadOnlyWrap(), securityHandler, new InetSocketAddress("localhost", 1234));
    assertTrue(Arrays.equals(buffer, readRsp2.getData().array()));
    // FILE_SYNC should sync the file size
    status = client.getFileInfo("/file2");
    assertTrue(status.getLen() == 10);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
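The only direct DFSClient calls in this test are the constructor against the NameNode address and getFileInfo(), which the test uses to confirm that FILE_SYNC made the 10-byte length visible. Below is a minimal standalone sketch of that same lookup; the class name, the reliance on fs.defaultFS for the NameNode address, and the /file2 path are illustrative assumptions rather than part of the test.

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class GetFileInfoSketch {
  public static void main(String[] args) throws IOException {
    // Assumes fs.defaultFS in the loaded configuration points at a running
    // NameNode (illustrative assumption).
    HdfsConfiguration conf = new HdfsConfiguration();
    DFSClient client = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
    try {
      // getFileInfo() returns null when the path does not exist.
      HdfsFileStatus status = client.getFileInfo("/file2");
      if (status != null) {
        System.out.println("length = " + status.getLen());
      }
    } finally {
      client.close();
    }
  }
}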
Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.
The class WebHdfsHandler, method onOpen.
private void onOpen(ChannelHandlerContext ctx) throws IOException {
  final String nnId = params.namenodeId();
  final int bufferSize = params.bufferSize();
  final long offset = params.offset();
  final long length = params.length();
  resp = new DefaultHttpResponse(HTTP_1_1, OK);
  HttpHeaders headers = resp.headers();
  // Allow the UI to access the file
  headers.set(ACCESS_CONTROL_ALLOW_METHODS, GET);
  headers.set(ACCESS_CONTROL_ALLOW_ORIGIN, "*");
  headers.set(CONTENT_TYPE, APPLICATION_OCTET_STREAM);
  headers.set(CONNECTION, CLOSE);
  final DFSClient dfsclient = newDfsClient(nnId, conf);
  HdfsDataInputStream in = dfsclient.createWrappedInputStream(dfsclient.open(path, bufferSize, true));
  in.seek(offset);
  long contentLength = in.getVisibleLength() - offset;
  if (length >= 0) {
    contentLength = Math.min(contentLength, length);
  }
  final InputStream data;
  if (contentLength >= 0) {
    headers.set(CONTENT_LENGTH, contentLength);
    data = new LimitInputStream(in, contentLength);
  } else {
    data = in;
  }
  ctx.write(resp);
  ctx.writeAndFlush(new ChunkedStream(data) {
    @Override
    public void close() throws Exception {
      super.close();
      dfsclient.close();
    }
  }).addListener(ChannelFutureListener.CLOSE);
}
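The DFSClient-specific part of onOpen is the pairing of open() with createWrappedInputStream(), which yields an HdfsDataInputStream so the handler can seek() to the requested offset and size the response from getVisibleLength(). A stripped-down sketch of that read path outside Netty follows; the NameNode URI, file path, offset, and buffer size are illustrative assumptions.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class RangedReadSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Illustrative NameNode URI; substitute the cluster's fs.defaultFS.
    DFSClient dfsclient = new DFSClient(URI.create("hdfs://localhost:8020"), conf);
    try {
      // open() returns a DFSInputStream; wrapping it gives an
      // HdfsDataInputStream with seek() and getVisibleLength().
      HdfsDataInputStream in = dfsclient.createWrappedInputStream(
          dfsclient.open("/webhdfs/demo.bin", 4096, true));
      try {
        long offset = 128;  // illustrative range start
        in.seek(offset);
        long remaining = in.getVisibleLength() - offset;
        byte[] buf = new byte[4096];
        while (remaining > 0) {
          int n = in.read(buf, 0, (int) Math.min(buf.length, remaining));
          if (n < 0) {
            break;
          }
          remaining -= n;
          // hand buf[0..n) to the HTTP response here
        }
      } finally {
        in.close();
      }
    } finally {
      dfsclient.close();
    }
  }
}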
Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.
The class WebHdfsHandler, method onAppend.
private void onAppend(ChannelHandlerContext ctx) throws IOException {
  writeContinueHeader(ctx);
  final String nnId = params.namenodeId();
  final int bufferSize = params.bufferSize();
  DFSClient dfsClient = newDfsClient(nnId, conf);
  OutputStream out = dfsClient.append(path, bufferSize, EnumSet.of(CreateFlag.APPEND), null, null);
  resp = new DefaultHttpResponse(HTTP_1_1, OK);
  resp.headers().set(CONTENT_LENGTH, 0);
  ctx.pipeline().replace(this, HdfsWriter.class.getSimpleName(), new HdfsWriter(dfsClient, out, resp));
}
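The append() call here is the five-argument DFSClient overload taking the path, buffer size, CreateFlag set, a Progressable, and FileSystem.Statistics (the last two passed as null); the returned stream must be closed to complete the append, which in the handler happens later via HdfsWriter. A minimal sketch of the same call, with an illustrative NameNode URI, path, and payload:

import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.hdfs.DFSClient;

public class AppendSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Illustrative NameNode URI; the target file must already exist.
    DFSClient dfsClient = new DFSClient(URI.create("hdfs://localhost:8020"), conf);
    try {
      OutputStream out = dfsClient.append("/webhdfs/demo.log", 4096,
          EnumSet.of(CreateFlag.APPEND), null, null); // no Progressable, no Statistics
      try {
        out.write("appended line\n".getBytes(StandardCharsets.UTF_8));
      } finally {
        out.close(); // closing the stream completes the append
      }
    } finally {
      dfsClient.close();
    }
  }
}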
Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.
The class NamenodeFsck, method copyBlocksToLostFound.
private void copyBlocksToLostFound(String parent, HdfsFileStatus file, LocatedBlocks blocks) throws IOException {
  final DFSClient dfs = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
  final String fullName = file.getFullName(parent);
  OutputStream fos = null;
  try {
    if (!lfInited) {
      lostFoundInit(dfs);
    }
    if (!lfInitedOk) {
      throw new IOException("failed to initialize lost+found");
    }
    String target = lostFound + fullName;
    if (hdfsPathExists(target)) {
      LOG.warn("Fsck: can't copy the remains of " + fullName + " to " + "lost+found, because " + target + " already exists.");
      return;
    }
    if (!namenode.getRpcServer().mkdirs(target, file.getPermission(), true)) {
      throw new IOException("failed to create directory " + target);
    }
    // create chains
    int chain = 0;
    boolean copyError = false;
    for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
      LocatedBlock lblock = lBlk;
      DatanodeInfo[] locs = lblock.getLocations();
      if (locs == null || locs.length == 0) {
        if (fos != null) {
          fos.flush();
          fos.close();
          fos = null;
        }
        continue;
      }
      if (fos == null) {
        fos = dfs.create(target + "/" + chain, true);
        chain++;
      }
      // copy the block. It's a pity it's not abstracted from DFSInputStream ...
      try {
        copyBlock(dfs, lblock, fos);
      } catch (Exception e) {
        LOG.error("Fsck: could not copy block " + lblock.getBlock() + " to " + target, e);
        fos.flush();
        fos.close();
        fos = null;
        internalError = true;
        copyError = true;
      }
    }
    if (copyError) {
      LOG.warn("Fsck: there were errors copying the remains of the " + "corrupted file " + fullName + " to /lost+found");
    } else {
      LOG.info("Fsck: copied the remains of the corrupted file " + fullName + " to /lost+found");
    }
  } catch (Exception e) {
    LOG.error("copyBlocksToLostFound: error processing " + fullName, e);
    internalError = true;
  } finally {
    if (fos != null)
      fos.close();
    dfs.close();
  }
}
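For the lost+found copy, the DFSClient calls that matter are the constructor against the NameNode address and create(src, overwrite), which opens a new output stream for each chain file. A trimmed-down sketch of creating and writing one such file is below; the target path and the bytes written are illustrative stand-ins for the copied block data.

import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class LostFoundWriteSketch {
  public static void main(String[] args) throws IOException {
    // Uses the NameNode from fs.defaultFS, as NamenodeFsck does with its conf.
    HdfsConfiguration conf = new HdfsConfiguration();
    DFSClient dfs = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
    OutputStream fos = null;
    try {
      // create(src, overwrite): overwrite=true replaces an existing file.
      fos = dfs.create("/lost+found/example/0", true); // illustrative target path
      fos.write(new byte[] { 1, 2, 3 });               // stand-in for block data
      fos.flush();
    } finally {
      if (fos != null) {
        fos.close();
      }
      dfs.close();
    }
  }
}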
Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.
The class TestResolveHdfsSymlink, method testLinkTargetNonSymlink.
/**
 * Verifies that attempting to resolve a non-symlink results in a client
 * exception.
 */
@Test
public void testLinkTargetNonSymlink() throws UnsupportedFileSystemException, IOException {
  FileContext fc = null;
  Path notSymlink = new Path("/notasymlink");
  try {
    fc = FileContext.getFileContext(cluster.getFileSystem().getUri());
    fc.create(notSymlink, EnumSet.of(CreateFlag.CREATE));
    DFSClient client = new DFSClient(cluster.getFileSystem().getUri(), cluster.getConfiguration(0));
    try {
      client.getLinkTarget(notSymlink.toString());
      fail("Expected exception for resolving non-symlink");
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains("is not a symbolic link", e);
    }
  } finally {
    if (fc != null) {
      fc.delete(notSymlink, false);
    }
  }
}
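getLinkTarget() resolves an HDFS symlink to its target path; for a regular file it throws an IOException whose message contains "is not a symbolic link", which is what the test asserts. A standalone sketch of the same call with the expected failure handled; the NameNode URI is an illustrative assumption and /notasymlink is assumed to be a plain file.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;

public class LinkTargetSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Illustrative NameNode URI; /notasymlink is assumed to be a regular file.
    DFSClient client = new DFSClient(URI.create("hdfs://localhost:8020"), conf);
    try {
      try {
        String target = client.getLinkTarget("/notasymlink");
        System.out.println("resolves to " + target);
      } catch (IOException e) {
        // Expected when the path is not a symlink, per the test above.
        System.err.println("not a symlink: " + e.getMessage());
      }
    } finally {
      client.close();
    }
  }
}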