Use of org.apache.hadoop.hdfs.DFSClient in project hbase by apache.
The class HFileSystem, method addLocationsOrderInterceptor.
/**
* Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient
* linked to this FileSystem. See HBASE-6435 for the background.
* <p/>
* There should be no reason, except testing, to create a specific ReorderBlocks.
*
* @return true if the interceptor was added, false otherwise.
*/
static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlocks lrb) {
  if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) { // activated by default
    LOG.debug("addLocationsOrderInterceptor configured to false");
    return false;
  }

  FileSystem fs;
  try {
    fs = FileSystem.get(conf);
  } catch (IOException e) {
    LOG.warn("Can't get the file system from the conf.", e);
    return false;
  }

  if (!(fs instanceof DistributedFileSystem)) {
    LOG.debug("The file system is not a DistributedFileSystem. " +
        "Skipping on block location reordering");
    return false;
  }

  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  DFSClient dfsc = dfs.getClient();
  if (dfsc == null) {
    LOG.warn("The DistributedFileSystem does not contain a DFSClient. Can't add the location " +
        "block reordering interceptor. Continuing, but this is unexpected.");
    return false;
  }

  try {
    // Swap the DFSClient's (final) namenode field for an intercepting proxy.
    Field nf = DFSClient.class.getDeclaredField("namenode");
    nf.setAccessible(true);
    Field modifiersField = Field.class.getDeclaredField("modifiers");
    modifiersField.setAccessible(true);
    modifiersField.setInt(nf, nf.getModifiers() & ~Modifier.FINAL);

    ClientProtocol namenode = (ClientProtocol) nf.get(dfsc);
    if (namenode == null) {
      LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block " +
          "reordering interceptor. Continuing, but this is unexpected.");
      return false;
    }

    ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf);
    nf.set(dfsc, cp1);
    LOG.info("Added intercepting call to namenode#getBlockLocations so we can do block " +
        "reordering using class " + lrb.getClass().getName());
  } catch (NoSuchFieldException | IllegalAccessException e) {
    LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
    return false;
  }
  return true;
}
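createReorderingProxy is referenced above but not shown. Below is a minimal sketch of how such an interceptor can be built with java.lang.reflect.Proxy; the exact ReorderBlocks callback signature (Configuration, LocatedBlocks, source path) and the use of args[0] as the source path are assumptions, not confirmed by the snippet.

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

private static ClientProtocol createReorderingProxy(final ClientProtocol cp,
    final ReorderBlocks lrb, final Configuration conf) {
  return (ClientProtocol) Proxy.newProxyInstance(cp.getClass().getClassLoader(),
      new Class<?>[] { ClientProtocol.class },
      new InvocationHandler() {
        @Override
        public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
          try {
            Object res = method.invoke(cp, args);
            if (res instanceof LocatedBlocks && "getBlockLocations".equals(method.getName())) {
              // Let the callback reorder the replica list before the client sees it.
              // Treating args[0] as the source path is an assumption of this sketch.
              lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]);
            }
            return res;
          } catch (InvocationTargetException ite) {
            // Rethrow the underlying RPC exception, not the reflection wrapper.
            throw ite.getCause();
          }
        }
      });
}

Every call still goes to the real namenode; only the getBlockLocations result is post-processed, which is why the reflection hack above only has to replace a single field.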
Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.
The class TestLeaseRenewer, method createMockClient.
private DFSClient createMockClient() {
  final DfsClientConf mockConf = Mockito.mock(DfsClientConf.class);
  Mockito.doReturn((int) FAST_GRACE_PERIOD).when(mockConf).getHdfsTimeout();
  DFSClient mock = Mockito.mock(DFSClient.class);
  Mockito.doReturn(true).when(mock).isClientRunning();
  Mockito.doReturn(mockConf).when(mock).getConf();
  Mockito.doReturn("myclient").when(mock).getClientName();
  return mock;
}
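The doReturn(...).when(mock).call() form is used rather than when(mock.call()).thenReturn(...) so that stubbing never invokes the real method, which is the safer pattern for spies and side-effecting calls. As a hedged usage sketch, the stubbed answers can be exercised directly (assuming JUnit's assertTrue/assertEquals are statically imported):

DFSClient client = createMockClient();
assertTrue(client.isClientRunning());                                 // stubbed to true
assertEquals("myclient", client.getClientName());                     // stubbed name
assertEquals((int) FAST_GRACE_PERIOD, client.getConf().getHdfsTimeout()); // via mockConf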
Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.
The class TestLeaseRenewer, method testManyDfsClientsWhereSomeNotOpen.
/**
* Regression test for HDFS-2810. In this bug, the LeaseRenewer has handles
* to several DFSClients with the same name, the first of which has no files
* open. Previously, this was causing the lease to not get renewed.
*/
@Test
public void testManyDfsClientsWhereSomeNotOpen() throws Exception {
  // First DFSClient has no files open so doesn't renew leases.
  final DFSClient mockClient1 = createMockClient();
  Mockito.doReturn(false).when(mockClient1).renewLease();
  assertSame(renewer, LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, mockClient1));

  // Set up a file so that we start renewing our lease.
  DFSOutputStream mockStream1 = Mockito.mock(DFSOutputStream.class);
  long fileId = 456L;
  renewer.put(fileId, mockStream1, mockClient1);

  // Second DFSClient does renew lease.
  final DFSClient mockClient2 = createMockClient();
  Mockito.doReturn(true).when(mockClient2).renewLease();
  assertSame(renewer, LeaseRenewer.getInstance(FAKE_AUTHORITY, FAKE_UGI_A, mockClient2));

  // Set up a file so that we start renewing our lease.
  DFSOutputStream mockStream2 = Mockito.mock(DFSOutputStream.class);
  renewer.put(fileId, mockStream2, mockClient2);

  // Wait for lease to get renewed.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      try {
        Mockito.verify(mockClient1, Mockito.atLeastOnce()).renewLease();
        Mockito.verify(mockClient2, Mockito.atLeastOnce()).renewLease();
        return true;
      } catch (AssertionError err) {
        LeaseRenewer.LOG.warn("Not yet satisfied", err);
        return false;
      } catch (IOException e) {
        // should not throw!
        throw new RuntimeException(e);
      }
    }
  }, 100, 10000);

  renewer.closeFile(fileId, mockClient1);
  renewer.closeFile(fileId, mockClient2);
}
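GenericTestUtils.waitFor polls the supplier every 100 ms until it returns true or the 10-second timeout elapses, so the test tolerates renewal happening at any point in that window. On Java 8+ the anonymous class can be a lambda, since Guava's Supplier is a single-method interface; a sketch of the same wait:

GenericTestUtils.waitFor(() -> {
  try {
    Mockito.verify(mockClient1, Mockito.atLeastOnce()).renewLease();
    Mockito.verify(mockClient2, Mockito.atLeastOnce()).renewLease();
    return true;                    // both clients renewed at least once
  } catch (AssertionError err) {
    return false;                   // not yet; keep polling
  } catch (IOException e) {
    throw new RuntimeException(e);  // renewLease() should not throw here
  }
}, 100, 10000);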
Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.
The class LeaseRenewer, method closeClient.
/** Close the given client. */
public synchronized void closeClient(final DFSClient dfsc) {
  dfsclients.remove(dfsc);
  if (dfsclients.isEmpty()) {
    if (!isRunning() || isRenewerExpired()) {
      Factory.INSTANCE.remove(LeaseRenewer.this);
      return;
    }
    if (emptyTime == Long.MAX_VALUE) {
      // record the first time the client list became empty
      emptyTime = Time.monotonicNow();
    }
  }

  // update the renewal time
  if (renewal == dfsc.getConf().getHdfsTimeout() / 2) {
    long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
    for (DFSClient c : dfsclients) {
      final int timeout = c.getConf().getHdfsTimeout();
      if (timeout > 0 && timeout < min) {
        min = timeout;
      }
    }
    renewal = min / 2;
  }
}
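The recomputation only runs when the departing client was the one pinning the current interval (renewal equals half its timeout); it then picks half the smallest positive timeout among the remaining clients, bounded by the lease soft limit. A standalone sketch of that rule (the helper name and parameters are illustrative, not part of the Hadoop API):

static long computeRenewalMillis(Iterable<Integer> clientTimeoutsMs, long softLimitMs) {
  long min = softLimitMs;
  for (int timeout : clientTimeoutsMs) {
    if (timeout > 0 && timeout < min) {
      min = timeout; // smallest positive per-client timeout wins
    }
  }
  return min / 2;   // renew at half the effective timeout
}

For example, with remaining clients configured at 40 s and 60 s and a 60 s soft limit, the renewer would ping every 20 s.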
Use of org.apache.hadoop.hdfs.DFSClient in project hadoop by apache.
The class RpcProgramNfs3, method access.
@VisibleForTesting
ACCESS3Response access(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK);
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
return response;
}
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
if (dfsClient == null) {
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
return response;
}
ACCESS3Request request;
try {
request = ACCESS3Request.deserialize(xdr);
} catch (IOException e) {
LOG.error("Invalid ACCESS request");
return new ACCESS3Response(Nfs3Status.NFS3ERR_INVAL);
}
FileHandle handle = request.getHandle();
Nfs3FileAttributes attrs;
if (LOG.isDebugEnabled()) {
LOG.debug("NFS ACCESS fileId: " + handle.getFileId() + " client: " + remoteAddress);
}
try {
attrs = writeManager.getFileAttr(dfsClient, handle, iug);
if (attrs == null) {
LOG.error("Can't get path for fileId: " + handle.getFileId());
return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE);
}
if (iug.getUserName(securityHandler.getUid(), "unknown").equals(superuser)) {
int access = Nfs3Constant.ACCESS3_LOOKUP | Nfs3Constant.ACCESS3_DELETE | Nfs3Constant.ACCESS3_EXECUTE | Nfs3Constant.ACCESS3_EXTEND | Nfs3Constant.ACCESS3_MODIFY | Nfs3Constant.ACCESS3_READ;
return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access);
}
int access = Nfs3Utils.getAccessRightsForUserGroup(securityHandler.getUid(), securityHandler.getGid(), securityHandler.getAuxGids(), attrs);
return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access);
} catch (RemoteException r) {
LOG.warn("Exception ", r);
IOException io = r.unwrapRemoteException();
/**
* AuthorizationException can be thrown if the user can't be proxy'ed.
*/
if (io instanceof AuthorizationException) {
return new ACCESS3Response(Nfs3Status.NFS3ERR_ACCES);
} else {
return new ACCESS3Response(Nfs3Status.NFS3ERR_IO);
}
} catch (IOException e) {
LOG.warn("Exception ", e);
int status = mapErrorStatus(e);
return new ACCESS3Response(status);
}
}
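The access value returned to the NFS client is a plain bitmask of ACCESS3_* flags, and clients test individual bits to decide which operations to attempt. A small illustrative check (variable names here are hypothetical):

int access = Nfs3Constant.ACCESS3_READ | Nfs3Constant.ACCESS3_LOOKUP;
boolean canRead = (access & Nfs3Constant.ACCESS3_READ) != 0;     // true
boolean canModify = (access & Nfs3Constant.ACCESS3_MODIFY) != 0; // false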