Use of org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB in project hadoop by apache.
Example: the NameNodeProxiesClient method createNonHAProxyWithClientProtocol, which opens a raw protobuf RPC proxy to a NameNode, wraps it in a ClientNamenodeProtocolTranslatorPB to expose ClientProtocol, and optionally adds retry handling.
public static ClientProtocol createNonHAProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth) throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);
  // Build the default retry policy from the client retry configuration keys.
  final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(conf,
      HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
      HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY, HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
      SafeModeException.class.getName());
  // Open the raw protobuf RPC proxy to the NameNode at the given address.
  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();
  if (withRetries) {
    // Create the proxy with retries: wrap the translator in a RetryProxy
    // that applies the default policy to every ClientProtocol method.
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
    ClientProtocol translatorProxy = new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(ClientProtocol.class,
        new DefaultFailoverProxyProvider<>(ClientProtocol.class, translatorProxy),
        methodNameToPolicyMap, defaultPolicy);
  } else {
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}
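A minimal usage sketch of the method above. NameNodeProxiesClient is an internal Hadoop class, so this is illustrative only; the NameNode host and port, the use of the current UGI, and the getFileInfo call are assumptions for the example, not part of the original code.

import java.net.InetSocketAddress;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxiesClient;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;

public class NonHAProxyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode RPC endpoint; substitute your cluster's address.
    InetSocketAddress nnAddr = new InetSocketAddress("namenode.example.com", 8020);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);
    // withRetries=true wraps the translator in a RetryProxy built from the
    // HdfsClientConfigKeys.Retry.* settings, as shown in the method above.
    ClientProtocol namenode = NameNodeProxiesClient.createNonHAProxyWithClientProtocol(
        nnAddr, conf, ugi, true, fallbackToSimpleAuth);
    try {
      // Issue a simple RPC through the proxy: look up the root directory.
      HdfsFileStatus status = namenode.getFileInfo("/");
      System.out.println("Root exists: " + (status != null));
    } finally {
      // Close the underlying translator and RPC connection.
      RPC.stopProxy(namenode);
    }
  }
}

Note the design choice in the retry branch: the empty methodNameToPolicyMap means no per-method overrides are registered, so every ClientProtocol call falls back to defaultPolicy, and DefaultFailoverProxyProvider supplies the single fixed proxy since there is no HA failover target.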