Usage example of org.apache.hadoop.hdds.scm.proxy.SCMClientConfig in the Apache Ozone project: method testFailover of class TestFailoverWithSCMHA.
/**
 * Verifies that the SCM client failover proxy providers perform a failover
 * to the suggested leader SCM and log it.
 *
 * Exercises both {@link SCMBlockLocationFailoverProxyProvider} and
 * {@link SCMContainerLocationFailoverProxyProvider}: each is pointed at a
 * specific SCM node, a request is issued through it, and the DEBUG log is
 * checked for the failover message.
 */
@Test
public void testFailover() throws Exception {
  SCMClientConfig scmClientConfig = conf.getObject(SCMClientConfig.class);
  scmClientConfig.setRetryCount(1);
  scmClientConfig.setRetryInterval(100);
  // NOTE(review): setMaxRetryTimeout appears to recompute the retry count as
  // maxRetryTimeout / retryInterval (1500 / 100 = 15) — the assertion below
  // relies on that; confirm against SCMClientConfig.
  scmClientConfig.setMaxRetryTimeout(1500);
  // Fixed: JUnit's assertEquals takes (expected, actual). The original had the
  // arguments reversed, which produces a misleading failure message.
  Assert.assertEquals(15, scmClientConfig.getRetryCount());
  conf.setFromObject(scmClientConfig);

  StorageContainerManager scm = getLeader(cluster);
  Assert.assertNotNull(scm);

  // Block-location provider: point it at a known SCM node, issue a call, and
  // verify that a failover to the suggested leader was logged.
  SCMBlockLocationFailoverProxyProvider failoverProxyProvider =
      new SCMBlockLocationFailoverProxyProvider(conf);
  failoverProxyProvider.changeCurrentProxy(scm.getSCMNodeId());
  ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
      new ScmBlockLocationProtocolClientSideTranslatorPB(failoverProxyProvider);
  GenericTestUtils.setLogLevel(SCMBlockLocationFailoverProxyProvider.LOG,
      Level.DEBUG);
  GenericTestUtils.LogCapturer logCapture =
      GenericTestUtils.LogCapturer
          .captureLogs(SCMBlockLocationFailoverProxyProvider.LOG);
  ScmBlockLocationProtocol scmBlockLocationProtocol =
      TracingUtil.createProxy(scmBlockLocationClient,
          ScmBlockLocationProtocol.class, conf);
  scmBlockLocationProtocol.getScmInfo();
  Assert.assertTrue(logCapture.getOutput()
      .contains("Performing failover to suggested leader"));

  // Same check for the container-location provider.
  scm = getLeader(cluster);
  SCMContainerLocationFailoverProxyProvider proxyProvider =
      new SCMContainerLocationFailoverProxyProvider(conf, null);
  GenericTestUtils.setLogLevel(SCMContainerLocationFailoverProxyProvider.LOG,
      Level.DEBUG);
  logCapture = GenericTestUtils.LogCapturer
      .captureLogs(SCMContainerLocationFailoverProxyProvider.LOG);
  proxyProvider.changeCurrentProxy(scm.getSCMNodeId());
  StorageContainerLocationProtocol scmContainerClient =
      TracingUtil.createProxy(
          new StorageContainerLocationProtocolClientSideTranslatorPB(
              proxyProvider),
          StorageContainerLocationProtocol.class, conf);
  scmContainerClient.allocateContainer(HddsProtos.ReplicationType.RATIS,
      HddsProtos.ReplicationFactor.ONE, "ozone");
  Assert.assertTrue(logCapture.getOutput()
      .contains("Performing failover to suggested leader"));
}
Usage example of org.apache.hadoop.hdds.scm.proxy.SCMClientConfig in the Apache Ozone project: method getScmSecurityClientWithFixedDuration of class HddsServerUtil.
/**
 * Creates an SCM security protocol client whose retry count is sized so the
 * client gives up after roughly {@code OZONE_SCM_INFO_WAIT_DURATION} rather
 * than retrying forever.
 *
 * As for OM during init, we need to wait for a specific duration so that we
 * can give a response to the user-performed init operation in a definite
 * period, instead of being stuck forever.
 *
 * @param conf source configuration; a copy is modified, the original is
 *             left untouched.
 * @return a translator backed by an SCM security failover proxy provider.
 * @throws IOException if the current UGI cannot be resolved.
 */
public static SCMSecurityProtocolClientSideTranslatorPB
    getScmSecurityClientWithFixedDuration(OzoneConfiguration conf)
    throws IOException {
  OzoneConfiguration configuration = new OzoneConfiguration(conf);
  long duration = conf.getTimeDuration(OZONE_SCM_INFO_WAIT_DURATION,
      OZONE_SCM_INFO_WAIT_DURATION_DEFAULT, TimeUnit.SECONDS);
  // Read from the copy for consistency (it holds the same values as conf).
  SCMClientConfig scmClientConfig = configuration.getObject(SCMClientConfig.class);
  // Fixed: the original computed duration / (retryInterval / 1000), which
  // throws ArithmeticException (divide by zero) for retry intervals below
  // 1000 ms and truncates otherwise. Convert the duration to millis instead;
  // the interval is in milliseconds. Guard against a zero interval.
  long retryIntervalMs = Math.max(1, scmClientConfig.getRetryInterval());
  int retryCount = (int) (TimeUnit.SECONDS.toMillis(duration) / retryIntervalMs);
  // Only raise the retry count; never lower a larger configured value.
  if (retryCount > scmClientConfig.getRetryCount()) {
    scmClientConfig.setRetryCount(retryCount);
    configuration.setFromObject(scmClientConfig);
  }
  return new SCMSecurityProtocolClientSideTranslatorPB(
      new SCMSecurityProtocolFailoverProxyProvider(configuration,
          UserGroupInformation.getCurrentUser()));
}
Usage example of org.apache.hadoop.hdds.scm.proxy.SCMClientConfig in the Apache Ozone project: method getScmInfo of class HAUtils.
/**
 * Fetches {@link ScmInfo} from SCM, raising the client retry count so the
 * call keeps retrying for roughly {@code OZONE_SCM_INFO_WAIT_DURATION}
 * before failing.
 *
 * @param conf source configuration; a copy is modified, the original is
 *             left untouched.
 * @return the SCM info reported by the block-location client.
 * @throws IOException on RPC failure, or wrapping any other failure.
 */
public static ScmInfo getScmInfo(OzoneConfiguration conf) throws IOException {
  OzoneConfiguration configuration = new OzoneConfiguration(conf);
  try {
    long duration = conf.getTimeDuration(OZONE_SCM_INFO_WAIT_DURATION,
        OZONE_SCM_INFO_WAIT_DURATION_DEFAULT, TimeUnit.SECONDS);
    SCMClientConfig scmClientConfig =
        configuration.getObject(SCMClientConfig.class);
    // Fixed: the original computed duration / (retryInterval / 1000), which
    // throws ArithmeticException (divide by zero) for retry intervals below
    // 1000 ms and truncates otherwise. Convert the duration to millis
    // instead; the interval is in milliseconds. Guard against zero.
    long retryIntervalMs = Math.max(1, scmClientConfig.getRetryInterval());
    int retryCount =
        (int) (TimeUnit.SECONDS.toMillis(duration) / retryIntervalMs);
    // Only raise the retry count; never lower a larger configured value.
    if (retryCount > scmClientConfig.getRetryCount()) {
      scmClientConfig.setRetryCount(retryCount);
      configuration.setFromObject(scmClientConfig);
    }
    return getScmBlockClient(configuration).getScmInfo();
  } catch (IOException e) {
    // Propagate IOExceptions unchanged so callers can act on the exact type.
    throw e;
  } catch (Exception e) {
    // Wrap anything else, preserving the cause.
    throw new IOException("Failed to get SCM info", e);
  }
}
Usage example of org.apache.hadoop.hdds.scm.proxy.SCMClientConfig in the Apache Ozone project: method getScmSecurityClientWithMaxRetry of class HddsServerUtil.
/**
 * Creates an SCM security protocol client configured to retry effectively
 * forever.
 *
 * A certificate from SCM is required for DN startup to succeed, so the
 * client retries indefinitely; this makes DN startup resilient to the SCM
 * service's running status.
 *
 * @param conf source configuration; a copy is modified, the original is
 *             left untouched.
 * @return a translator backed by an SCM security failover proxy provider.
 * @throws IOException if the current UGI cannot be resolved.
 */
public static SCMSecurityProtocolClientSideTranslatorPB
    getScmSecurityClientWithMaxRetry(OzoneConfiguration conf)
    throws IOException {
  OzoneConfiguration retryConf = new OzoneConfiguration(conf);
  SCMClientConfig clientConfig = conf.getObject(SCMClientConfig.class);
  clientConfig.setRetryCount(Integer.MAX_VALUE);
  retryConf.setFromObject(clientConfig);
  SCMSecurityProtocolFailoverProxyProvider proxyProvider =
      new SCMSecurityProtocolFailoverProxyProvider(retryConf,
          UserGroupInformation.getCurrentUser());
  return new SCMSecurityProtocolClientSideTranslatorPB(proxyProvider);
}
Usage example of org.apache.hadoop.hdds.scm.proxy.SCMClientConfig in the Apache Ozone project: method createDatanodeScmClient of class SCMThroughputBenchmark.
/**
 * Builds an RPC client for the datanode-to-SCM protocol, pointing at the
 * SCM address configured for this benchmark.
 *
 * Uses the RPC timeout from {@link SCMClientConfig} and a fixed-sleep retry
 * policy derived from the SCM RPC retry settings.
 *
 * @return a client-side translator wrapping the raw protobuf RPC proxy.
 * @throws IOException if the proxy cannot be created.
 */
private StorageContainerDatanodeProtocol createDatanodeScmClient()
    throws IOException {
  int dnPort = conf.getInt(OZONE_SCM_DATANODE_PORT_KEY,
      OZONE_SCM_DATANODE_PORT_DEFAULT);
  InetSocketAddress scmAddr = NetUtils.createSocketAddr(scm, dnPort);
  Configuration hadoopConf =
      LegacyHadoopConfigurationSource.asHadoopConfiguration(this.conf);
  // The protocol engine must be registered before the proxy is created.
  RPC.setProtocolEngine(hadoopConf, StorageContainerDatanodeProtocolPB.class,
      ProtobufRpcEngine.class);
  long protoVersion =
      RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class);
  SCMClientConfig clientConfig = conf.getObject(SCMClientConfig.class);
  int rpcTimeoutMs = (int) clientConfig.getRpcTimeOut();
  RetryPolicy retryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
      getScmRpcRetryCount(conf), getScmRpcRetryInterval(conf),
      TimeUnit.MILLISECONDS);
  StorageContainerDatanodeProtocolPB rawProxy = RPC.getProtocolProxy(
      StorageContainerDatanodeProtocolPB.class, protoVersion, scmAddr,
      UserGroupInformation.getCurrentUser(), hadoopConf,
      NetUtils.getDefaultSocketFactory(hadoopConf), rpcTimeoutMs,
      retryPolicy).getProxy();
  return new StorageContainerDatanodeProtocolClientSideTranslatorPB(rawProxy);
}
Aggregations