Use of org.apache.hadoop.io.MD5Hash in project hadoop by apache.
The class MD5MD5CRC32FileChecksum, method valueOf.
/** Return the object represented in the attributes. */
public static MD5MD5CRC32FileChecksum valueOf(Attributes attrs) throws SAXException {
  final String bytesPerCRC = attrs.getValue("bytesPerCRC");
  final String crcPerBlock = attrs.getValue("crcPerBlock");
  final String md5 = attrs.getValue("md5");
  String crcType = attrs.getValue("crcType");
  DataChecksum.Type finalCrcType;
  if (bytesPerCRC == null || crcPerBlock == null || md5 == null) {
    return null;
  }
  try {
    // Old versions don't supply crcType; default to CRC32.
    if (crcType == null || crcType.equals("")) {
      finalCrcType = DataChecksum.Type.CRC32;
    } else {
      finalCrcType = DataChecksum.Type.valueOf(crcType);
    }
    switch (finalCrcType) {
      case CRC32:
        return new MD5MD5CRC32GzipFileChecksum(Integer.parseInt(bytesPerCRC), Integer.parseInt(crcPerBlock), new MD5Hash(md5));
      case CRC32C:
        return new MD5MD5CRC32CastagnoliFileChecksum(Integer.parseInt(bytesPerCRC), Integer.parseInt(crcPerBlock), new MD5Hash(md5));
      default:
        // Unreachable: finalCrcType holds a valid type, or valueOf above already threw.
        return null;
    }
  } catch (Exception e) {
    throw new SAXException("Invalid attributes: bytesPerCRC=" + bytesPerCRC + ", crcPerBlock=" + crcPerBlock + ", crcType=" + crcType + ", md5=" + md5, e);
  }
}
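A minimal driver sketch for the method above: the AttributesImpl setup stands in for the SAX attributes a checksum XML element would carry, and all attribute values are made up for illustration.

import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.AttributesImpl;

// Hypothetical driver: assemble the four attributes valueOf() expects and
// let it pick the concrete checksum subclass from crcType.
static MD5MD5CRC32FileChecksum parseExample() throws SAXException {
  AttributesImpl attrs = new AttributesImpl();
  attrs.addAttribute("", "bytesPerCRC", "bytesPerCRC", "CDATA", "512");
  attrs.addAttribute("", "crcPerBlock", "crcPerBlock", "CDATA", "262144");
  attrs.addAttribute("", "crcType", "crcType", "CDATA", "CRC32C"); // omit for pre-CRC32C servers
  attrs.addAttribute("", "md5", "md5", "CDATA", "00112233445566778899aabbccddeeff"); // 32 hex chars, made up
  return MD5MD5CRC32FileChecksum.valueOf(attrs); // yields the Castagnoli subclass here
}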
Use of org.apache.hadoop.io.MD5Hash in project hadoop by apache.
The class StripedBlockChecksumReconstructor, method reconstruct.
public void reconstruct() throws IOException {
  MessageDigest digester = MD5Hash.getDigester();
  long maxTargetLength = getMaxTargetLength();
  while (requestedLen > 0 && getPositionInBlock() < maxTargetLength) {
    long remaining = maxTargetLength - getPositionInBlock();
    final int toReconstructLen = (int) Math.min(getStripedReader().getBufferSize(), remaining);
    // Step 1: read from the minimum number of source DNs required for
    // reconstruction. The returned success list is the set of source DNs
    // we actually read from.
    getStripedReader().readMinimumSources(toReconstructLen);
    // Step 2: decode to reconstruct the targets.
    reconstructTargets(toReconstructLen);
    // Step 3: fold the reconstructed bytes into the running checksum.
    checksumDataLen += checksumWithTargetOutput(targetBuffer.array(), toReconstructLen, digester);
    updatePositionInBlock(toReconstructLen);
    requestedLen -= toReconstructLen;
    clearBuffers();
  }
  byte[] digest = digester.digest();
  md5 = new MD5Hash(digest);
  md5.write(checksumWriter);
}
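The digest pattern above (MD5Hash.getDigester() fed incrementally, with the final digest wrapped in an MD5Hash and serialized as a Writable) can be exercised in isolation. A standalone sketch; the class name and sample chunks are invented:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.security.MessageDigest;
import org.apache.hadoop.io.MD5Hash;

// Hypothetical standalone example of the reconstruct() digest flow.
public class Md5StreamExample {
  public static void main(String[] args) throws IOException {
    MessageDigest digester = MD5Hash.getDigester();
    byte[][] chunks = { "stripe-0".getBytes(), "stripe-1".getBytes() };
    for (byte[] chunk : chunks) {
      digester.update(chunk); // incremental update, like each loop iteration above
    }
    MD5Hash md5 = new MD5Hash(digester.digest()); // wraps the 16-byte MD5 digest
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    md5.write(new DataOutputStream(bytes)); // Writable form, as checksumWriter receives
    System.out.println(md5 + " -> " + bytes.size() + " bytes");
  }
}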
Use of org.apache.hadoop.io.MD5Hash in project hadoop by apache.
The class ImageServlet, method setVerificationHeadersForGet.
/**
 * Set headers for content length, and, if available, md5.
 * @throws IOException
 */
public static void setVerificationHeadersForGet(HttpServletResponse response, File file) throws IOException {
  response.setHeader(Util.CONTENT_LENGTH, String.valueOf(file.length()));
  MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file);
  if (hash != null) {
    response.setHeader(Util.MD5_HEADER, hash.toString());
  }
}
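A client fetching the file can verify the body against that header. The sketch below is illustrative only: the header name "X-MD5-Digest" is assumed to be Util.MD5_HEADER's value, the URL is a placeholder, and Hadoop's real client-side check lives in TransferFsImage.

import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import org.apache.hadoop.io.MD5Hash;

// Hypothetical GET-side verification against the MD5 header.
static void fetchAndVerify() throws IOException {
  URL url = new URL("http://namenode.example:9870/imagetransfer"); // placeholder URL
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  MessageDigest md = MD5Hash.getDigester();
  try (InputStream in = new DigestInputStream(conn.getInputStream(), md)) {
    byte[] buf = new byte[8192];
    while (in.read(buf) != -1) {
      // Draining the stream updates the digest as a side effect.
    }
  }
  String expected = conn.getHeaderField("X-MD5-Digest"); // assumed header name
  MD5Hash actual = new MD5Hash(md.digest());
  if (expected != null && !actual.toString().equals(expected)) {
    throw new IOException("Image transfer corrupted: expected " + expected + ", got " + actual);
  }
}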
Use of org.apache.hadoop.io.MD5Hash in project hadoop by apache.
The class ImageServlet, method setVerificationHeadersForPut.
/**
 * Set headers for image length and, if available, md5.
 *
 * @throws IOException
 */
static void setVerificationHeadersForPut(HttpURLConnection connection, File file) throws IOException {
  connection.setRequestProperty(Util.CONTENT_LENGTH, String.valueOf(file.length()));
  MD5Hash hash = MD5FileUtils.readStoredMd5ForFile(file);
  if (hash != null) {
    connection.setRequestProperty(Util.MD5_HEADER, hash.toString());
  }
}
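Both helpers depend on a sidecar ".md5" file sitting next to the image, managed by MD5FileUtils. A hedged sketch of that round trip; the image path is a placeholder:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;

// Hypothetical round trip through the ".md5" sidecar file that both
// header helpers read via readStoredMd5ForFile().
static void sidecarRoundTrip() throws IOException {
  File image = new File("/tmp/fsimage_0000000000000000042"); // placeholder path
  MD5Hash computed = MD5FileUtils.computeMd5ForFile(image);  // hash the file contents
  MD5FileUtils.saveMD5File(image, computed);                 // writes "<image>.md5"
  MD5Hash stored = MD5FileUtils.readStoredMd5ForFile(image); // null if sidecar missing
  assert computed.equals(stored);
}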
Use of org.apache.hadoop.io.MD5Hash in project hadoop by apache.
The class ImageServlet, method doPut.
@Override
protected void doPut(final HttpServletRequest request, final HttpServletResponse response) throws ServletException, IOException {
  try {
    ServletContext context = getServletContext();
    final FSImage nnImage = NameNodeHttpServer.getFsImageFromContext(context);
    final Configuration conf = (Configuration) getServletContext().getAttribute(JspHelper.CURRENT_CONF);
    final PutImageParams parsedParams = new PutImageParams(request, response, conf);
    final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
    validateRequest(context, conf, request, response, nnImage, parsedParams.getStorageInfoString());
    UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<Void>() {

      @Override
      public Void run() throws Exception {
        // If this is not the active NN, notify the caller that it targeted
        // the wrong node (regardless of the fact that we got the image).
        HAServiceProtocol.HAServiceState state = NameNodeHttpServer.getNameNodeStateFromContext(getServletContext());
        if (state != HAServiceProtocol.HAServiceState.ACTIVE) {
          // We need a different response type here so the client can
          // differentiate this from a failure to upload due to (1) security,
          // or (2) other checkpoints already present.
          response.sendError(HttpServletResponse.SC_EXPECTATION_FAILED, "NameNode " + request.getLocalAddr() + " is currently not in a state which can " + "accept uploads of new fsimages. State: " + state);
          return null;
        }
        final long txid = parsedParams.getTxId();
        String remoteAddr = request.getRemoteAddr();
        ImageUploadRequest imageRequest = new ImageUploadRequest(txid, remoteAddr);
        final NameNodeFile nnf = parsedParams.getNameNodeFile();
        // If the node is attempting to upload an older transaction, ignore it.
        SortedSet<ImageUploadRequest> larger = currentlyDownloadingCheckpoints.tailSet(imageRequest);
        if (larger.size() > 0) {
          response.sendError(HttpServletResponse.SC_CONFLICT, "Another checkpointer is already in the process of uploading a" + " checkpoint made up to transaction ID " + larger.last());
          return null;
        }
        // Make sure no one else has started uploading one.
        if (!currentlyDownloadingCheckpoints.add(imageRequest)) {
          response.sendError(HttpServletResponse.SC_CONFLICT, "Either current namenode is checkpointing or another" + " checkpointer is already in the process of " + "uploading a checkpoint made at transaction ID " + txid);
          return null;
        }
        try {
          if (nnImage.getStorage().findImageFile(nnf, txid) != null) {
            response.sendError(HttpServletResponse.SC_CONFLICT, "Either current namenode has checkpointed or " + "another checkpointer already uploaded a " + "checkpoint for txid " + txid);
            return null;
          }
          InputStream stream = request.getInputStream();
          try {
            long start = monotonicNow();
            MD5Hash downloadImageDigest = TransferFsImage.handleUploadImageRequest(request, txid, nnImage.getStorage(), stream, parsedParams.getFileSize(), getThrottler(conf));
            nnImage.saveDigestAndRenameCheckpointImage(nnf, txid, downloadImageDigest);
            // Metrics are non-null only when running inside the NameNode.
            if (metrics != null) {
              long elapsed = monotonicNow() - start;
              metrics.addPutImage(elapsed);
            }
            // Now that we have a new checkpoint, we might be able to
            // remove some old ones.
            nnImage.purgeOldStorage(nnf);
          } finally {
            // Remove the request once we've processed it (or it threw an
            // error), so we aren't holding the slot either way.
            currentlyDownloadingCheckpoints.remove(imageRequest);
            stream.close();
          }
        } finally {
          nnImage.removeFromCheckpointing(txid);
        }
        return null;
      }
    });
  } catch (Throwable t) {
    String errMsg = "PutImage failed. " + StringUtils.stringifyException(t);
    response.sendError(HttpServletResponse.SC_GONE, errMsg);
    throw new IOException(errMsg);
  }
}
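The upload serialization above hinges on currentlyDownloadingCheckpoints being a sorted set ordered by transaction ID: a non-empty tailSet(imageRequest) means a checkpoint at an equal or higher txid is already in flight. A hypothetical stand-in for ImageUploadRequest makes that comparison concrete (ordering by txid alone is an assumption for illustration):

import java.util.SortedSet;
import java.util.TreeSet;

// Hypothetical stand-in showing how the tailSet() check in doPut
// turns away stale or duplicate uploads.
public class UploadOrderingExample {
  static class UploadReq implements Comparable<UploadReq> {
    final long txid;
    final String addr;
    UploadReq(long txid, String addr) { this.txid = txid; this.addr = addr; }
    @Override public int compareTo(UploadReq o) { return Long.compare(txid, o.txid); }
    @Override public String toString() { return "txid=" + txid + " from " + addr; }
  }

  public static void main(String[] args) {
    SortedSet<UploadReq> inFlight = new TreeSet<>();
    inFlight.add(new UploadReq(100, "10.0.0.5")); // checkpoint at txid 100 in flight

    // A newcomer offering txid 90 sees a non-empty tailSet (100 >= 90),
    // which is the condition that draws SC_CONFLICT in the servlet.
    UploadReq stale = new UploadReq(90, "10.0.0.6");
    System.out.println("reject stale: " + !inFlight.tailSet(stale).isEmpty()); // true
  }
}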