Use of org.apache.hadoop.io.retry.RetryPolicy in project hadoop by apache.
From the class TestWebHDFS, the method testReadRetryExceptionHelper:
private void testReadRetryExceptionHelper(WebHdfsFileSystem fs, Path fn,
    final IOException ex, String msg, boolean shouldAttemptRetry,
    int numTimesTried) throws Exception {
  // Override WebHdfsInputStream#getInputStream so that it returns
  // an input stream that throws the specified exception when read
  // is called.
  FSDataInputStream in = fs.open(fn);
  // Connection is made only when the first read() occurs.
  in.read();

  final WebHdfsInputStream webIn = (WebHdfsInputStream) (in.getWrappedStream());

  final InputStream spyInputStream = spy(webIn.getReadRunner().getInputStream());
  doThrow(ex).when(spyInputStream).read((byte[]) any(), anyInt(), anyInt());
  final WebHdfsFileSystem.ReadRunner rr = spy(webIn.getReadRunner());
  doReturn(spyInputStream).when(rr).initializeInputStream((HttpURLConnection) any());
  rr.setInputStream(spyInputStream);
  webIn.setReadRunner(rr);

  // Override the filesystem's retry policy in order to verify that
  // WebHdfsInputStream is calling shouldRetry for the appropriate
  // exceptions.
  final RetryAction retryAction = new RetryAction(RetryDecision.RETRY);
  final RetryAction failAction = new RetryAction(RetryDecision.FAIL);
  RetryPolicy rp = new RetryPolicy() {

    @Override
    public RetryAction shouldRetry(Exception e, int retries, int failovers,
        boolean isIdempotentOrAtMostOnce) throws Exception {
      attemptedRetry = true;
      if (retries > 3) {
        return failAction;
      } else {
        return retryAction;
      }
    }
  };
  fs.setRetryPolicy(rp);

  // If the retry logic is exercised, attemptedRetry will be true. Some
  // exceptions should exercise the retry logic and others should not.
  // Either way, the value of attemptedRetry should match shouldAttemptRetry.
  attemptedRetry = false;
  try {
    webIn.read();
    fail(msg + ": Read should have thrown exception.");
  } catch (Exception e) {
    assertTrue(e.getMessage().contains(msg));
  }
  assertEquals(msg + ": Read should " + (shouldAttemptRetry ? "" : "not ")
      + "have called shouldRetry. ", attemptedRetry, shouldAttemptRetry);
  verify(rr, times(numTimesTried)).getResponse((HttpURLConnection) any());
  webIn.close();
  in.close();
}
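For context, a call site for this helper passes the exception to inject, the message to expect, whether the retry policy should be consulted, and how many attempts to verify. The invocation below is a hypothetical sketch; the file path, exception message, and expected try count are illustrative assumptions, not taken from the snippet above.

// Hypothetical invocation: inject a SocketTimeoutException (an IOException),
// expect shouldRetry to have been consulted, and expect getResponse() to have
// been attempted 5 times (1 initial try + 4 retries before the policy fails).
Path file = new Path("/test/file.dat");
testReadRetryExceptionHelper(fs, file,
    new SocketTimeoutException("Socket Timeout Exception"),
    "Socket Timeout Exception", true, 5);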
Use of org.apache.hadoop.io.retry.RetryPolicy in project hadoop by apache.
From the class TestReuseRpcConnections, the method testDefaultRetryPolicyReuseConnections:
@Test(timeout = 60000)
public void testDefaultRetryPolicyReuseConnections() throws Exception {
  RetryPolicy rp1 = null;
  RetryPolicy rp2 = null;
  RetryPolicy rp3 = null;

  /* test the same setting */
  rp1 = getDefaultRetryPolicy(true, "10000,2");
  rp2 = getDefaultRetryPolicy(true, "10000,2");
  verifyRetryPolicyReuseConnections(rp1, rp2, RetryPolicies.RETRY_FOREVER);

  /* test enabled and different specifications */
  rp1 = getDefaultRetryPolicy(true, "20000,3");
  rp2 = getDefaultRetryPolicy(true, "20000,3");
  rp3 = getDefaultRetryPolicy(true, "30000,4");
  verifyRetryPolicyReuseConnections(rp1, rp2, rp3);

  /* test disabled and the same specifications */
  rp1 = getDefaultRetryPolicy(false, "40000,5");
  rp2 = getDefaultRetryPolicy(false, "40000,5");
  verifyRetryPolicyReuseConnections(rp1, rp2, RetryPolicies.RETRY_FOREVER);

  /* test disabled and different specifications */
  rp1 = getDefaultRetryPolicy(false, "50000,6");
  rp2 = getDefaultRetryPolicy(false, "60000,7");
  verifyRetryPolicyReuseConnections(rp1, rp2, RetryPolicies.RETRY_FOREVER);

  /* test different remoteExceptionToRetry */
  rp1 = getDefaultRetryPolicy(true, "70000,8",
      new RemoteException(RpcNoSuchMethodException.class.getName(),
          "no such method exception").getClassName());
  rp2 = getDefaultRetryPolicy(true, "70000,8",
      new RemoteException(PathIOException.class.getName(),
          "path IO exception").getClassName());
  verifyRetryPolicyReuseConnections(rp1, rp2, RetryPolicies.RETRY_FOREVER);
}
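The spec strings here follow the format accepted by the default retry policy: comma-separated pairs of sleep time in milliseconds and number of retries, so "10000,2" means retry up to 2 times with a roughly 10-second pause. The test's getDefaultRetryPolicy helper is not shown above; the following is a minimal sketch of what such a helper plausibly does, using the real RetryUtils.getDefaultRetryPolicy API. The configuration key names are hypothetical placeholders, not the test's actual keys.

// Sketch of a getDefaultRetryPolicy helper (an assumption, not the test's
// actual code): build a RetryPolicy from an enabled flag and a
// "sleepTimeMs,numRetries" spec string via RetryUtils.getDefaultRetryPolicy.
private static RetryPolicy getDefaultRetryPolicy(boolean enabled, String spec) {
  Configuration conf = new Configuration();
  String enabledKey = "test.retry.policy.enabled"; // hypothetical key name
  String specKey = "test.retry.policy.spec";       // hypothetical key name
  conf.setBoolean(enabledKey, enabled);
  conf.set(specKey, spec);
  return RetryUtils.getDefaultRetryPolicy(conf, enabledKey, enabled,
      specKey, spec, RemoteException.class.getName());
}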
Use of org.apache.hadoop.io.retry.RetryPolicy in project hadoop by apache.
From the class NameNodeProxiesClient, the method createNonHAProxyWithClientProtocol:
public static ClientProtocol createNonHAProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth) throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
      ProtobufRpcEngine.class);

  final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(
      conf,
      HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY,
      HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
      HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
      HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
      SafeModeException.class.getName());

  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();

  if (withRetries) {
    // create the proxy with retries
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
    ClientProtocol translatorProxy =
        new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(
        ClientProtocol.class,
        new DefaultFailoverProxyProvider<>(ClientProtocol.class, translatorProxy),
        methodNameToPolicyMap,
        defaultPolicy);
  } else {
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}
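For reference, a plausible call site looks like the following; the host, port, and flag values are illustrative assumptions, not taken from the method above.

// Hypothetical call site: obtain a retrying ClientProtocol proxy for a
// NameNode RPC endpoint at nn-host:8020 as the current user.
InetSocketAddress nnAddr = NetUtils.createSocketAddr("nn-host:8020");
ClientProtocol namenode = NameNodeProxiesClient.createNonHAProxyWithClientProtocol(
    nnAddr, new HdfsConfiguration(), UserGroupInformation.getCurrentUser(),
    true, new AtomicBoolean(false));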
Use of org.apache.hadoop.io.retry.RetryPolicy in project hadoop by apache.
From the class NativeS3FileSystem, the method createDefaultStore:
private static NativeFileSystemStore createDefaultStore(Configuration conf) {
  NativeFileSystemStore store = new Jets3tNativeFileSystemStore();

  RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
      conf.getInt(S3_NATIVE_MAX_RETRIES_KEY, S3_NATIVE_MAX_RETRIES_DEFAUL),
      conf.getLong(S3_NATIVE_SLEEP_TIME_KEY, S3_NATIVE_SLEEP_TIME_DEFAULT),
      TimeUnit.SECONDS);
  Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
      new HashMap<Class<? extends Exception>, RetryPolicy>();
  exceptionToPolicyMap.put(IOException.class, basePolicy);
  exceptionToPolicyMap.put(S3Exception.class, basePolicy);

  RetryPolicy methodPolicy = RetryPolicies.retryByException(
      RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
  Map<String, RetryPolicy> methodNameToPolicyMap =
      new HashMap<String, RetryPolicy>();
  methodNameToPolicyMap.put("storeFile", methodPolicy);
  methodNameToPolicyMap.put("rename", methodPolicy);

  return (NativeFileSystemStore) RetryProxy.create(
      NativeFileSystemStore.class, store, methodNameToPolicyMap);
}
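The dynamic-proxy pattern here generalizes beyond S3: RetryProxy.create wraps any interface implementation so that calls to the methods named in the map are retried under their policies, while unnamed methods run once and fail fast. A minimal self-contained sketch with a hypothetical interface (not Hadoop code) shows the moving parts:

// Hypothetical interface and wrapper illustrating the same pattern as above.
interface KeyValueStore {
  void put(String key, byte[] value) throws IOException;
  byte[] get(String key) throws IOException;
}

static KeyValueStore withRetries(KeyValueStore raw) {
  // Retry on IOException up to 4 times, sleeping 10 seconds between attempts.
  RetryPolicy base = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
      4, 10, TimeUnit.SECONDS);
  Map<Class<? extends Exception>, RetryPolicy> byException = new HashMap<>();
  byException.put(IOException.class, base);
  RetryPolicy putPolicy = RetryPolicies.retryByException(
      RetryPolicies.TRY_ONCE_THEN_FAIL, byException);
  // Only "put" is retried; "get" is absent from the map, so it is tried
  // once and fails fast under the default TRY_ONCE_THEN_FAIL behavior.
  Map<String, RetryPolicy> byMethod = new HashMap<>();
  byMethod.put("put", putPolicy);
  return (KeyValueStore) RetryProxy.create(KeyValueStore.class, raw, byMethod);
}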
Use of org.apache.hadoop.io.retry.RetryPolicy in project hadoop by apache.
From the class ServerProxy, the method createRetryPolicy:
protected static RetryPolicy createRetryPolicy(Configuration conf,
    String maxWaitTimeStr, long defMaxWaitTime,
    String connectRetryIntervalStr, long defRetryInterval) {
  long maxWaitTime = conf.getLong(maxWaitTimeStr, defMaxWaitTime);
  long retryIntervalMS = conf.getLong(connectRetryIntervalStr, defRetryInterval);

  Preconditions.checkArgument((maxWaitTime == -1 || maxWaitTime > 0),
      "Invalid Configuration. " + maxWaitTimeStr
          + " should be either a positive value or -1.");
  Preconditions.checkArgument(retryIntervalMS > 0,
      "Invalid Configuration. " + connectRetryIntervalStr
          + " should be a positive value.");

  RetryPolicy retryPolicy = null;
  if (maxWaitTime == -1) {
    // wait forever.
    retryPolicy = RetryPolicies.retryForeverWithFixedSleep(retryIntervalMS,
        TimeUnit.MILLISECONDS);
  } else {
    retryPolicy = RetryPolicies.retryUpToMaximumTimeWithFixedSleep(maxWaitTime,
        retryIntervalMS, TimeUnit.MILLISECONDS);
  }

  Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
      new HashMap<Class<? extends Exception>, RetryPolicy>();
  exceptionToPolicyMap.put(EOFException.class, retryPolicy);
  exceptionToPolicyMap.put(ConnectException.class, retryPolicy);
  exceptionToPolicyMap.put(NoRouteToHostException.class, retryPolicy);
  exceptionToPolicyMap.put(UnknownHostException.class, retryPolicy);
  exceptionToPolicyMap.put(ConnectTimeoutException.class, retryPolicy);
  exceptionToPolicyMap.put(RetriableException.class, retryPolicy);
  exceptionToPolicyMap.put(SocketException.class, retryPolicy);
  exceptionToPolicyMap.put(NMNotYetReadyException.class, retryPolicy);

  return RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL,
      exceptionToPolicyMap);
}
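ServerProxy is subclassed by YARN client-side proxies, which pass in their own wait-time and retry-interval configuration keys. A plausible call site, modeled on YARN's NMProxy, follows; the YarnConfiguration constant names are cited from memory and should be treated as assumptions.

// Plausible call site (assumed constant names): build the retry policy used
// when connecting to a NodeManager, bounded by the client's max wait time.
RetryPolicy nmRetryPolicy = createRetryPolicy(conf,
    YarnConfiguration.CLIENT_NM_CONNECT_MAX_WAIT_MS,
    YarnConfiguration.DEFAULT_CLIENT_NM_CONNECT_MAX_WAIT_MS,
    YarnConfiguration.CLIENT_NM_CONNECT_RETRY_INTERVAL_MS,
    YarnConfiguration.DEFAULT_CLIENT_NM_CONNECT_RETRY_INTERVAL_MS);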