Use of com.google.common.annotations.VisibleForTesting in project hadoop by apache.
The class ByteRangeInputStream, method openInputStream.
@VisibleForTesting
protected InputStreamAndFileLength openInputStream(long startOffset)
    throws IOException {
  if (startOffset < 0) {
    throw new EOFException("Negative Position");
  }
  // Use the original url if no resolved url exists, e.g. if
  // it's the first time a request is made.
  final boolean resolved = resolvedURL.getURL() != null;
  final URLOpener opener = resolved ? resolvedURL : originalURL;
  final HttpURLConnection connection = opener.connect(startOffset, resolved);
  resolvedURL.setURL(getResolvedUrl(connection));
  InputStream in = connection.getInputStream();
  final Long length;
  final Map<String, List<String>> headers = connection.getHeaderFields();
  if (isChunkedTransferEncoding(headers)) {
    // file length is not known
    length = null;
  } else {
    // for non-chunked transfer-encoding, get content-length
    final String cl = connection.getHeaderField(HttpHeaders.CONTENT_LENGTH);
    if (cl == null) {
      throw new IOException(
          HttpHeaders.CONTENT_LENGTH + " is missing: " + headers);
    }
    final long streamlength = Long.parseLong(cl);
    length = startOffset + streamlength;
    // Java has a bug with >2GB request streams. It won't bounds check
    // the reads so the transfer blocks until the server times out
    in = new BoundedInputStream(in, streamlength);
  }
  return new InputStreamAndFileLength(length, in);
}
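The BoundedInputStream wrapper is what guards against the >2GB issue noted in the comment: it caps how many bytes can be read no matter how much the underlying stream could supply. A minimal standalone sketch of that capping behavior, using commons-io's BoundedInputStream as Hadoop does (the byte counts here are arbitrary):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.input.BoundedInputStream;

public class BoundedReadSketch {
  public static void main(String[] args) throws IOException {
    // The underlying stream holds 16 bytes, but reads are bounded at 8,
    // just as openInputStream bounds the stream at the reported
    // Content-Length.
    InputStream in =
        new BoundedInputStream(new ByteArrayInputStream(new byte[16]), 8);
    int total = 0;
    while (in.read() != -1) {
      total++;
    }
    System.out.println(total); // prints 8, not 16
  }
}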
Use of com.google.common.annotations.VisibleForTesting in project hadoop by apache.
The class BlockTokenIdentifier, method writeProtobuf.
@VisibleForTesting
void writeProtobuf(DataOutput out) throws IOException {
  BlockTokenSecretProto secret = PBHelperClient.convert(this);
  out.write(secret.toByteArray());
}
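writeProtobuf emits the serialized message as raw bytes with no length prefix. A minimal sketch of the same pattern, with com.google.protobuf.StringValue standing in for BlockTokenSecretProto (which would require Hadoop's generated protobuf classes):

import com.google.protobuf.StringValue;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteProtobufSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    StringValue msg = StringValue.newBuilder().setValue("block-token").build();
    // Raw message bytes, no length prefix, exactly like writeProtobuf.
    out.write(msg.toByteArray());
    out.flush();
    System.out.println(buf.size() + " bytes on the wire");
  }
}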
Use of com.google.common.annotations.VisibleForTesting in project hadoop by apache.
The class BlockTokenIdentifier, method readFieldsProtobuf.
@VisibleForTesting
void readFieldsProtobuf(DataInput in) throws IOException {
  BlockTokenSecretProto blockTokenSecretProto =
      BlockTokenSecretProto.parseFrom((DataInputStream) in);
  expiryDate = blockTokenSecretProto.getExpiryDate();
  keyId = blockTokenSecretProto.getKeyId();
  if (blockTokenSecretProto.hasUserId()) {
    userId = blockTokenSecretProto.getUserId();
  } else {
    userId = null;
  }
  if (blockTokenSecretProto.hasBlockPoolId()) {
    blockPoolId = blockTokenSecretProto.getBlockPoolId();
  } else {
    blockPoolId = null;
  }
  blockId = blockTokenSecretProto.getBlockId();
  for (int i = 0; i < blockTokenSecretProto.getModesCount(); i++) {
    AccessModeProto accessModeProto = blockTokenSecretProto.getModes(i);
    modes.add(PBHelperClient.convert(accessModeProto));
  }
  useProto = true;
}
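The matching read side works because parseFrom(InputStream) consumes the stream to its end, which is why writeProtobuf needs no length prefix; optional fields are then probed with hasXxx() and defaulted to null, as above. A round-trip sketch under the same StringValue stand-in:

import com.google.protobuf.StringValue;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class ReadProtobufSketch {
  public static void main(String[] args) throws IOException {
    byte[] wire =
        StringValue.newBuilder().setValue("block-token").build().toByteArray();
    // parseFrom reads until end-of-stream, mirroring the
    // (DataInputStream) cast and parse in readFieldsProtobuf.
    StringValue parsed =
        StringValue.parseFrom(new DataInputStream(new ByteArrayInputStream(wire)));
    System.out.println(parsed.getValue()); // block-token
  }
}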
Use of com.google.common.annotations.VisibleForTesting in project hadoop by apache.
The class RpcProgramNfs3, method write.
@VisibleForTesting
WRITE3Response write(XDR xdr, Channel channel, int xid,
    SecurityHandler securityHandler, SocketAddress remoteAddress) {
  WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK);
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  WRITE3Request request;
  try {
    request = WRITE3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid WRITE request");
    return new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  long offset = request.getOffset();
  int count = request.getCount();
  WriteStableHow stableHow = request.getStableHow();
  byte[] data = request.getData().array();
  if (data.length < count) {
    LOG.error("Invalid argument, data size is less than count in request");
    return new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle handle = request.getHandle();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS WRITE fileId: " + handle.getFileId() + " offset: " + offset
        + " length: " + count + " stableHow: " + stableHow.getValue()
        + " xid: " + xid + " client: " + remoteAddress);
  }
  Nfs3FileAttributes preOpAttr = null;
  try {
    preOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
    if (preOpAttr == null) {
      LOG.error("Can't get path for fileId: " + handle.getFileId());
      return new WRITE3Response(Nfs3Status.NFS3ERR_STALE);
    }
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
      return new WRITE3Response(Nfs3Status.NFS3ERR_ACCES,
          new WccData(Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), 0,
          stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("requested offset=" + offset + " and current filesize="
          + preOpAttr.getSize());
    }
    writeManager.handleWrite(dfsClient, request, channel, xid, preOpAttr);
  } catch (IOException e) {
    LOG.info("Error writing to fileId " + handle.getFileId() + " at offset "
        + offset + " and length " + data.length, e);
    // Try to return WccData
    Nfs3FileAttributes postOpAttr = null;
    try {
      postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug);
    } catch (IOException e1) {
      LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1);
    }
    WccAttr attr = preOpAttr == null ? null : Nfs3Utils.getWccAttr(preOpAttr);
    WccData fileWcc = new WccData(attr, postOpAttr);
    int status = mapErrorStatus(e);
    return new WRITE3Response(status, fileWcc, 0, request.getStableHow(),
        Nfs3Constant.WRITE_COMMIT_VERF);
  }
  // On the success path the reply is sent asynchronously by the write
  // manager once the write is processed, so no response is returned here.
  return null;
}
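Because write is package-private rather than public, @VisibleForTesting marks it as a seam that tests in the same package can drive directly. A hypothetical sketch of such a call (nfsd, handle, and securityHandler are assumed fixtures; this is not code from the Hadoop test suite):

// Hypothetical test-side sketch: nfsd, handle, and securityHandler are
// assumed to be set up elsewhere in the test.
byte[] data = new byte[10];
XDR xdr = new XDR();
WRITE3Request req = new WRITE3Request(handle, 0, data.length,
    WriteStableHow.UNSTABLE, java.nio.ByteBuffer.wrap(data));
req.serialize(xdr);
WRITE3Response resp = nfsd.write(xdr.asReadOnlyWrap(), null, 1,
    securityHandler, new java.net.InetSocketAddress("localhost", 1234));
// resp == null means the request was handed to the write manager and the
// actual WRITE3Response will be sent asynchronously over the channel.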
Use of com.google.common.annotations.VisibleForTesting in project hadoop by apache.
The class RpcProgramNfs3, method lookup.
@VisibleForTesting
LOOKUP3Response lookup(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK);
  if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
    response.setStatus(Nfs3Status.NFS3ERR_ACCES);
    return response;
  }
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  LOOKUP3Request request;
  try {
    request = LOOKUP3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid LOOKUP request");
    return new LOOKUP3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle dirHandle = request.getHandle();
  String fileName = request.getName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS LOOKUP dir fileId: " + dirHandle.getFileId() + " name: "
        + fileName + " client: " + remoteAddress);
  }
  try {
    String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
    Nfs3FileAttributes postOpObjAttr =
        writeManager.getFileAttr(dfsClient, dirHandle, fileName);
    if (postOpObjAttr == null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("NFS LOOKUP fileId: " + dirHandle.getFileId() + " name: "
            + fileName + " does not exist");
      }
      Nfs3FileAttributes postOpDirAttr =
          Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      return new LOOKUP3Response(Nfs3Status.NFS3ERR_NOENT, null, null,
          postOpDirAttr);
    }
    Nfs3FileAttributes postOpDirAttr =
        Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
    if (postOpDirAttr == null) {
      LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
      return new LOOKUP3Response(Nfs3Status.NFS3ERR_STALE);
    }
    FileHandle fileHandle = new FileHandle(postOpObjAttr.getFileId());
    return new LOOKUP3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr,
        postOpDirAttr);
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    int status = mapErrorStatus(e);
    return new LOOKUP3Response(status);
  }
}
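lookup can be exercised the same way. A hypothetical sketch with the same assumed fixtures, probing a name that does not exist, which should come back as NFS3ERR_NOENT with the directory's post-op attributes attached:

// Hypothetical sketch: nfsd, dirHandle, and securityHandler are assumed
// fixtures set up elsewhere in the test.
XDR xdr = new XDR();
new LOOKUP3Request(dirHandle, "no-such-file").serialize(xdr);
LOOKUP3Response resp = nfsd.lookup(xdr.asReadOnlyWrap(), securityHandler,
    new java.net.InetSocketAddress("localhost", 1234));
// Expect NFS3ERR_NOENT with post-op directory attributes attached, so the
// client can keep its attribute cache coherent even on a miss.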