Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.
The class NameNodeProxiesClient, method createNonHAProxyWithClientProtocol.
public static ClientProtocol createNonHAProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth) throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);
  final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(conf,
      HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY,
      HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
      HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
      HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
      SafeModeException.class.getName());
  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(ClientNamenodeProtocolPB.class,
      version, address, ugi, conf, NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();
  if (withRetries) {
    // create the proxy with retries
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
    ClientProtocol translatorProxy = new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(ClientProtocol.class,
        new DefaultFailoverProxyProvider<>(ClientProtocol.class, translatorProxy),
        methodNameToPolicyMap, defaultPolicy);
  } else {
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}
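For orientation, here is a minimal caller sketch, assuming a reachable non-HA NameNode; the host name, the configuration, and the getFileInfo call are illustrative assumptions, not part of the original snippet.

// Hypothetical caller: obtain a ClientProtocol proxy for a non-HA NameNode and issue one RPC.
static HdfsFileStatus statThroughProxy(Configuration conf) throws IOException {
  InetSocketAddress nnAddr = NetUtils.createSocketAddr("nn.example.com:8020"); // assumed address
  UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  ClientProtocol namenode = NameNodeProxiesClient.createNonHAProxyWithClientProtocol(
      nnAddr, conf, ugi, true /* withRetries */, new AtomicBoolean(false));
  return namenode.getFileInfo("/user/example"); // a plain ClientProtocol RPC through the proxy
}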
Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hbase by apache.
The class TestBlockReorder, method getNamenode.
private static ClientProtocol getNamenode(DFSClient dfsc) throws Exception {
  Field nf = DFSClient.class.getDeclaredField("namenode");
  nf.setAccessible(true);
  return (ClientProtocol) nf.get(dfsc);
}
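A natural companion in such tests is writing a replacement (for example a spy or mock) back into the same field. Here is a sketch, under the assumption that the field is not declared final in the Hadoop version at hand; if it is, the final modifier must first be cleared via reflection, as the HFileSystem example further down does.

// Hypothetical companion helper: inject a substitute ClientProtocol into the private field.
private static void setNamenode(DFSClient dfsc, ClientProtocol namenode) throws Exception {
  Field nf = DFSClient.class.getDeclaredField("namenode");
  nf.setAccessible(true);
  nf.set(dfsc, namenode);
}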
Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hbase by apache.
The class FanOutOneBlockAsyncDFSOutputHelper, method createBlockAdder.
private static BlockAdder createBlockAdder() throws NoSuchMethodException {
  for (Method method : ClientProtocol.class.getMethods()) {
    if (method.getName().equals("addBlock")) {
      Method addBlockMethod = method;
      Class<?>[] paramTypes = addBlockMethod.getParameterTypes();
      if (paramTypes[paramTypes.length - 1] == String[].class) {
        // older signature: the last parameter is String[] favoredNodes, so the
        // arguments can be passed through unchanged
        return new BlockAdder() {

          @Override
          public LocatedBlock addBlock(ClientProtocol namenode, String src, String clientName,
              ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
              String[] favoredNodes) throws IOException {
            try {
              return (LocatedBlock) addBlockMethod.invoke(namenode, src, clientName, previous,
                  excludeNodes, fileId, favoredNodes);
            } catch (IllegalAccessException e) {
              throw new RuntimeException(e);
            } catch (InvocationTargetException e) {
              Throwables.propagateIfPossible(e.getTargetException(), IOException.class);
              throw new RuntimeException(e);
            }
          }
        };
      } else {
        // newer signature: an extra trailing parameter (the addBlockFlags argument
        // introduced in later Hadoop versions) follows favoredNodes; pass null for it
        return new BlockAdder() {

          @Override
          public LocatedBlock addBlock(ClientProtocol namenode, String src, String clientName,
              ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
              String[] favoredNodes) throws IOException {
            try {
              return (LocatedBlock) addBlockMethod.invoke(namenode, src, clientName, previous,
                  excludeNodes, fileId, favoredNodes, null);
            } catch (IllegalAccessException e) {
              throw new RuntimeException(e);
            } catch (InvocationTargetException e) {
              Throwables.propagateIfPossible(e.getTargetException(), IOException.class);
              throw new RuntimeException(e);
            }
          }
        };
      }
    }
  }
  throw new NoSuchMethodException("Can not find addBlock method in ClientProtocol");
}
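The BlockAdder type itself is not part of the snippet; reconstructed from how the anonymous classes implement it, it is a single-method adapter that hides the signature drift between Hadoop versions. Treat the following as a sketch inferred from usage, not the verbatim HBase source.

// Reconstructed from usage: mirrors ClientProtocol#addBlock with the older parameter list.
private interface BlockAdder {
  LocatedBlock addBlock(ClientProtocol namenode, String src, String clientName,
      ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId, String[] favoredNodes)
      throws IOException;
}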
Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hbase by apache.
The class FanOutOneBlockAsyncDFSOutputHelper, method createOutput.
private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
    boolean overwrite, boolean createParent, short replication, long blockSize,
    EventLoop eventLoop) throws IOException {
  Configuration conf = dfs.getConf();
  FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
  DFSClient client = dfs.getClient();
  String clientName = client.getClientName();
  ClientProtocol namenode = client.getNamenode();
  HdfsFileStatus stat;
  try {
    // create the file entry on the namenode
    stat = namenode.create(src,
        FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
        new EnumSetWritable<>(overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)),
        createParent, replication, blockSize, CryptoProtocolVersion.supported());
  } catch (Exception e) {
    if (e instanceof RemoteException) {
      throw (RemoteException) e;
    } else {
      throw new NameNodeException(e);
    }
  }
  beginFileLease(client, stat.getFileId());
  boolean succ = false;
  LocatedBlock locatedBlock = null;
  List<Future<Channel>> futureList = null;
  try {
    DataChecksum summer = createChecksum(client);
    // allocate the single block and connect to every datanode in its pipeline
    locatedBlock = BLOCK_ADDER.addBlock(namenode, src, client.getClientName(), null, null,
        stat.getFileId(), null);
    List<Channel> datanodeList = new ArrayList<>();
    futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L,
        PIPELINE_SETUP_CREATE, summer, eventLoop);
    for (Future<Channel> future : futureList) {
      // fail the creation if there are connection failures since we are fail-fast. The upper
      // layer should retry itself if needed.
      datanodeList.add(future.syncUninterruptibly().getNow());
    }
    Encryptor encryptor = createEncryptor(conf, stat, client);
    FanOutOneBlockAsyncDFSOutput output = new FanOutOneBlockAsyncDFSOutput(conf, fsUtils, dfs,
        client, namenode, clientName, src, stat.getFileId(), locatedBlock, encryptor, eventLoop,
        datanodeList, summer, ALLOC);
    succ = true;
    return output;
  } finally {
    if (!succ) {
      if (futureList != null) {
        for (Future<Channel> f : futureList) {
          f.addListener(new FutureListener<Channel>() {

            @Override
            public void operationComplete(Future<Channel> future) throws Exception {
              // close any connection that did succeed, since the creation is being aborted
              if (future.isSuccess()) {
                future.getNow().close();
              }
            }
          });
        }
      }
      endFileLease(client, stat.getFileId());
      fsUtils.recoverFileLease(dfs, new Path(src), conf, new CancelOnClose(client));
    }
  }
}
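A hedged caller sketch, written as if from inside the same helper class since createOutput is private; the event loop setup, path, replication, and payload are assumptions, and the exact flush/close API of FanOutOneBlockAsyncDFSOutput varies between HBase versions.

// Hypothetical usage: create the output, write a payload, then close.
EventLoop eventLoop = new NioEventLoopGroup(1).next(); // assumed Netty setup
FanOutOneBlockAsyncDFSOutput out = createOutput(dfs, "/hbase/demo", true /* overwrite */,
    true /* createParent */, (short) 3, dfs.getDefaultBlockSize(), eventLoop);
out.write(Bytes.toBytes("payload")); // buffered until flushed to the single-block pipeline
out.close();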
Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hbase by apache.
The class HFileSystem, method addLocationsOrderInterceptor.
/**
 * Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient
 * linked to this FileSystem. See HBASE-6435 for the background.
 * <p/>
 * There should be no reason, except testing, to create a specific ReorderBlocks.
 *
 * @return true if the interceptor was added, false otherwise.
 */
static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlocks lrb) {
  if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) {
    // activated by default
    LOG.debug("addLocationsOrderInterceptor configured to false");
    return false;
  }
  FileSystem fs;
  try {
    fs = FileSystem.get(conf);
  } catch (IOException e) {
    LOG.warn("Can't get the file system from the conf.", e);
    return false;
  }
  if (!(fs instanceof DistributedFileSystem)) {
    LOG.debug("The file system is not a DistributedFileSystem. "
        + "Skipping on block location reordering");
    return false;
  }
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  DFSClient dfsc = dfs.getClient();
  if (dfsc == null) {
    LOG.warn("The DistributedFileSystem does not contain a DFSClient. Can't add the location "
        + "block reordering interceptor. Continuing, but this is unexpected.");
    return false;
  }
  try {
    // swap the DFSClient's private namenode field for a reordering proxy
    Field nf = DFSClient.class.getDeclaredField("namenode");
    nf.setAccessible(true);
    Field modifiersField = Field.class.getDeclaredField("modifiers");
    modifiersField.setAccessible(true);
    // strip the final modifier so the field can be overwritten via reflection
    modifiersField.setInt(nf, nf.getModifiers() & ~Modifier.FINAL);
    ClientProtocol namenode = (ClientProtocol) nf.get(dfsc);
    if (namenode == null) {
      LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block"
          + " reordering interceptor. Continuing, but this is unexpected.");
      return false;
    }
    ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf);
    nf.set(dfsc, cp1);
    LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering"
        + " using class " + lrb.getClass().getName());
  } catch (NoSuchFieldException | IllegalAccessException e) {
    LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
    return false;
  }
  return true;
}
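For completeness, a hypothetical test-style installation of the interceptor; the ReorderBlocks callback signature is assumed from the HBase sources of this era, and the call must come from a class in the same package since the method is package-private.

// Hypothetical usage: install a ReorderBlocks that merely logs each interception.
Configuration conf = HBaseConfiguration.create();
boolean installed = HFileSystem.addLocationsOrderInterceptor(conf,
    new HFileSystem.ReorderBlocks() {
      @Override
      public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src)
          throws IOException {
        LOG.debug("getBlockLocations intercepted for " + src); // no actual reordering here
      }
    });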