Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
From the class TestHDFSConcat, method testConcatWithQuotaIncrease.
@Test
public void testConcatWithQuotaIncrease() throws IOException {
  final short repl = 3;
  final int srcNum = 10;
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final Path[] srcs = new Path[srcNum];
  final Path target = new Path(bar, "target");
  DFSTestUtil.createFile(dfs, target, blockSize, repl, 0L);
  // Set the space quota to exactly what the target plus the src files below
  // will consume, leaving no headroom for the concat.
  final long dsQuota = blockSize * repl + blockSize * srcNum * REPL_FACTOR;
  dfs.setQuota(foo, Long.MAX_VALUE - 1, dsQuota);
  for (int i = 0; i < srcNum; i++) {
    srcs[i] = new Path(bar, "src" + i);
    DFSTestUtil.createFile(dfs, srcs[i], blockSize, REPL_FACTOR, 0L);
  }
  ContentSummary summary = dfs.getContentSummary(bar);
  Assert.assertEquals(11, summary.getFileCount());
  Assert.assertEquals(dsQuota, summary.getSpaceConsumed());
  // concat counts the src blocks at the target's replication factor, which
  // would exceed the quota, so the NameNode must reject it.
  try {
    dfs.concat(target, srcs);
    fail("QuotaExceededException expected");
  } catch (RemoteException e) {
    Assert.assertTrue(
        e.unwrapRemoteException() instanceof QuotaExceededException);
  }
  // With the quota effectively unlimited, the same concat succeeds.
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  dfs.concat(target, srcs);
  summary = dfs.getContentSummary(bar);
  Assert.assertEquals(1, summary.getFileCount());
  Assert.assertEquals(blockSize * repl * (srcNum + 1),
      summary.getSpaceConsumed());
}
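The assertion above uses the no-argument RemoteException.unwrapRemoteException(), which unwraps whatever class the server reported. A more selective variant accepts the expected exception classes and only unwraps on a match; a minimal sketch of the same assertion written that way, reusing the dfs, target, and srcs fixtures from the test, might look like this:

try {
  dfs.concat(target, srcs);
  fail("QuotaExceededException expected");
} catch (RemoteException e) {
  // unwrapRemoteException(Class...) returns the wrapped exception only when it
  // is one of the listed types; otherwise the RemoteException itself comes back.
  IOException unwrapped = e.unwrapRemoteException(QuotaExceededException.class);
  Assert.assertTrue(unwrapped instanceof QuotaExceededException);
}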
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
From the class TestConnectionRetryPolicy, method testDefaultRetryPolicyEquivalence.
@Test(timeout = 60000)
public void testDefaultRetryPolicyEquivalence() {
  RetryPolicy rp1 = null;
  RetryPolicy rp2 = null;
  RetryPolicy rp3 = null;
  /* test the same setting */
  rp1 = getDefaultRetryPolicy(true, "10000,2");
  rp2 = getDefaultRetryPolicy(true, "10000,2");
  rp3 = getDefaultRetryPolicy(true, "10000,2");
  verifyRetryPolicyEquivalence(new RetryPolicy[] { rp1, rp2, rp3 });
  /* test different remoteExceptionToRetry */
  rp1 = getDefaultRetryPolicy(true, "10000,2",
      new RemoteException(PathIOException.class.getName(),
          "path IO exception").getClassName());
  rp2 = getDefaultRetryPolicy(true, "10000,2",
      new RemoteException(RpcNoSuchMethodException.class.getName(),
          "no such method exception").getClassName());
  rp3 = getDefaultRetryPolicy(true, "10000,2",
      new RemoteException(RetriableException.class.getName(),
          "retriable exception").getClassName());
  verifyRetryPolicyEquivalence(new RetryPolicy[] { rp1, rp2, rp3 });
  /* test enabled and different specifications */
  rp1 = getDefaultRetryPolicy(true, "20000,3");
  rp2 = getDefaultRetryPolicy(true, "30000,4");
  assertNotEquals("should not be equal", rp1, rp2);
  assertNotEquals("should not have the same hash code",
      rp1.hashCode(), rp2.hashCode());
  /* test disabled and the same specifications */
  rp1 = getDefaultRetryPolicy(false, "40000,5");
  rp2 = getDefaultRetryPolicy(false, "40000,5");
  assertEquals("should be equal", rp1, rp2);
  assertEquals("should have the same hash code",
      rp1.hashCode(), rp2.hashCode());
  /* test the disabled and different specifications */
  rp1 = getDefaultRetryPolicy(false, "50000,6");
  rp2 = getDefaultRetryPolicy(false, "60000,7");
  assertEquals("should be equal", rp1, rp2);
  assertEquals("should have the same hash code",
      rp1.hashCode(), rp2.hashCode());
}
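getDefaultRetryPolicy and verifyRetryPolicyEquivalence are helpers defined in the test class itself, not part of the RemoteException API. As a rough sketch of what such a helper can delegate to (the configuration key names here are invented for illustration), RetryUtils.getDefaultRetryPolicy turns a "sleepTimeMillis,numberOfRetries" spec such as "10000,2" into a retry policy when retries are enabled and a fail-fast policy when they are not, while the remoteExceptionToRetry argument names the one server-side exception class that should still be retried:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryUtils;

// Illustrative helper only; the real test uses its own keys and overloads.
static RetryPolicy getDefaultRetryPolicy(boolean enabled, String spec,
    String remoteExceptionToRetry) {
  return RetryUtils.getDefaultRetryPolicy(
      new Configuration(),
      "example.retry.policy.enabled",   // hypothetical enable/disable key
      enabled,
      "example.retry.policy.spec",      // hypothetical spec key
      spec,                             // e.g. "10000,2": up to 2 retries, ~10s apart
      remoteExceptionToRetry);
}

The two-argument calls earlier in the test presumably forward a default remoteExceptionToRetry; the equivalence checks then confirm that policies built from the same inputs compare equal and hash alike.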
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
From the class RpcProgramNfs3, method getattr.
@VisibleForTesting
GETATTR3Response getattr(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK);
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  GETATTR3Request request;
  try {
    request = GETATTR3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid GETATTR request");
    response.setStatus(Nfs3Status.NFS3ERR_INVAL);
    return response;
  }
  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("GETATTR for fileId: " + handle.getFileId()
        + " client: " + remoteAddress);
  }
  Nfs3FileAttributes attrs = null;
  try {
    attrs = writeManager.getFileAttr(dfsClient, handle, iug);
  } catch (RemoteException r) {
    LOG.warn("Exception ", r);
    IOException io = r.unwrapRemoteException();
    /**
     * AuthorizationException can be thrown if the user can't be proxy'ed.
     */
    if (io instanceof AuthorizationException) {
      return new GETATTR3Response(Nfs3Status.NFS3ERR_ACCES);
    } else {
      return new GETATTR3Response(Nfs3Status.NFS3ERR_IO);
    }
  } catch (IOException e) {
    LOG.info("Can't get file attribute, fileId=" + handle.getFileId(), e);
    int status = mapErrorStatus(e);
    response.setStatus(status);
    return response;
  }
  if (attrs == null) {
    LOG.error("Can't get path for fileId: " + handle.getFileId());
    response.setStatus(Nfs3Status.NFS3ERR_STALE);
    return response;
  }
  response.setPostOpAttr(attrs);
  return response;
}
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
From the class WriteManager, method handleWrite.
void handleWrite(DFSClient dfsClient, WRITE3Request request, Channel channel,
    int xid, Nfs3FileAttributes preOpAttr) throws IOException {
  int count = request.getCount();
  byte[] data = request.getData().array();
  if (data.length < count) {
    WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
    Nfs3Utils.writeChannel(channel,
        response.serialize(new XDR(), xid, new VerifierNone()), xid);
    return;
  }
  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("handleWrite " + request);
  }
  // Check if there is a stream to write
  FileHandle fileHandle = request.getHandle();
  OpenFileCtx openFileCtx = fileContextCache.get(fileHandle);
  if (openFileCtx == null) {
    LOG.info("No opened stream for fileId: " + fileHandle.getFileId());
    String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId());
    HdfsDataOutputStream fos = null;
    Nfs3FileAttributes latestAttr = null;
    try {
      int bufferSize = config.getInt(
          CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
          CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
      fos = dfsClient.append(fileIdPath, bufferSize,
          EnumSet.of(CreateFlag.APPEND), null, null);
      latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug);
    } catch (RemoteException e) {
      IOException io = e.unwrapRemoteException();
      if (io instanceof AlreadyBeingCreatedException) {
        LOG.warn("Can't append file: " + fileIdPath
            + ". Possibly the file is being closed. Drop the request: "
            + request + ", wait for the client to retry...");
        return;
      }
      throw e;
    } catch (IOException e) {
      LOG.error("Can't append to file: " + fileIdPath, e);
      if (fos != null) {
        fos.close();
      }
      WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), preOpAttr);
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
          fileWcc, count, request.getStableHow(),
          Nfs3Constant.WRITE_COMMIT_VERF);
      Nfs3Utils.writeChannel(channel,
          response.serialize(new XDR(), xid, new VerifierNone()), xid);
      return;
    }
    // Add open stream
    String writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY,
        NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT);
    openFileCtx = new OpenFileCtx(fos, latestAttr,
        writeDumpDir + "/" + fileHandle.getFileId(), dfsClient, iug,
        aixCompatMode, config);
    if (!addOpenFileStream(fileHandle, openFileCtx)) {
      LOG.info("Can't add new stream. Close it. Tell client to retry.");
      try {
        fos.close();
      } catch (IOException e) {
        LOG.error("Can't close stream for fileId: " + handle.getFileId(), e);
      }
      // Notify client to retry
      WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
      WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_JUKEBOX,
          fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
      Nfs3Utils.writeChannel(channel,
          response.serialize(new XDR(), xid, new VerifierNone()), xid);
      return;
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Opened stream for appending file: " + fileHandle.getFileId());
    }
  }
  // Add write into the async job queue
  openFileCtx.receivedNewWrite(dfsClient, request, channel, xid,
      asyncDataService, iug);
}
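The RemoteException handling here is a narrow unwrap-and-branch: only a wrapped AlreadyBeingCreatedException is treated as a transient condition (the file is still being closed, so the client should simply retry), and every other remote failure is rethrown. The same test, pulled out into a hypothetical helper for clarity:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.ipc.RemoteException;

// Hypothetical helper, not part of WriteManager: true means "drop the write
// request and let the NFS client retry", false means the failure is fatal.
static boolean isTransientAppendFailure(RemoteException e) {
  IOException unwrapped = e.unwrapRemoteException();
  return unwrapped instanceof AlreadyBeingCreatedException;
}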
Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
From the class TestLease, method testLeaseAbort.
@Test
public void testLeaseAbort() throws Exception {
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    cluster.waitActive();
    NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
    NamenodeProtocols spyNN = spy(preSpyNN);
    DFSClient dfs = new DFSClient(null, spyNN, conf, null);
    byte[] buf = new byte[1024];
    FSDataOutputStream c_out = createFsOut(dfs, dirString + "c");
    c_out.write(buf, 0, 1024);
    c_out.close();
    DFSInputStream c_in = dfs.open(dirString + "c");
    FSDataOutputStream d_out = createFsOut(dfs, dirString + "d");
    // stub the renew method.
    doThrow(new RemoteException(InvalidToken.class.getName(),
        "Your token is worthless")).when(spyNN).renewLease(anyString());
    // We don't need to wait the lease renewer thread to act.
    // call renewLease() manually.
    // make it look like the soft limit has been exceeded.
    LeaseRenewer originalRenewer = dfs.getLeaseRenewer();
    dfs.lastLeaseRenewal = Time.monotonicNow()
        - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
    try {
      dfs.renewLease();
    } catch (IOException e) {
    }
    // Writes should continue to work until the hard limit passes without
    // renewing.
    try {
      d_out.write(buf, 0, 1024);
      LOG.info("Write worked beyond the soft limit as expected.");
    } catch (IOException e) {
      Assert.fail("Write failed.");
    }
    // make it look like the hard limit has been exceeded.
    dfs.lastLeaseRenewal = Time.monotonicNow()
        - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
    dfs.renewLease();
    // this should not work.
    try {
      d_out.write(buf, 0, 1024);
      d_out.close();
      Assert.fail("Write did not fail even after the fatal lease renewal failure");
    } catch (IOException e) {
      LOG.info("Write failed as expected. ", e);
    }
    // If aborted, the renewer should be empty. (no reference to clients)
    Thread.sleep(1000);
    Assert.assertTrue(originalRenewer.isEmpty());
    // unstub
    doNothing().when(spyNN).renewLease(anyString());
    // existing input streams should work
    try {
      int num = c_in.read(buf, 0, 1);
      if (num != 1) {
        Assert.fail("Failed to read 1 byte");
      }
      c_in.close();
    } catch (IOException e) {
      LOG.error("Read failed with ", e);
      Assert.fail("Read after lease renewal failure failed");
    }
    // new file writes should work.
    try {
      c_out = createFsOut(dfs, dirString + "c");
      c_out.write(buf, 0, 1024);
      c_out.close();
    } catch (IOException e) {
      LOG.error("Write failed with ", e);
      Assert.fail("Write failed");
    }
  } finally {
    cluster.shutdown();
  }
}
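Because a client only ever observes server-side failures as RemoteException over RPC, the test stubs renewLease to throw the wrapper rather than InvalidToken directly. A minimal Mockito sketch of that stub-and-restore pattern, assuming a spied NamenodeProtocols named spyNN as in the test:

import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;

// Inside a test method, after building the spy:
// simulate the NameNode rejecting every lease renewal with an InvalidToken,
// delivered to the client as a RemoteException wrapper.
doThrow(new RemoteException(InvalidToken.class.getName(), "Your token is worthless"))
    .when(spyNN).renewLease(anyString());

// ... exercise the client past the soft and hard lease limits ...

// Restore normal behavior so new reads and writes succeed again.
doNothing().when(spyNN).renewLease(anyString());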