use of java.security.DigestInputStream in project nikita-noark5-core by HiOA-ABI.
the class DocumentObjectService method storeAndCalculateChecksum.
/**
 * Store an incoming file associated with a DocumentObject. While writing the incoming
 * file stream, calculate the checksum at the same time and update the DocumentObject
 * with referenceToFile, size (bytes), checksum and checksum algorithm.
 *
 * inputStream.read calculates the checksum while reading the input file, as it is a
 * DigestInputStream.
 */
@Override
public void storeAndCalculateChecksum(InputStream inputStream, DocumentObject documentObject) {
    String checksumAlgorithm = documentObject.getChecksumAlgorithm();
    if (null == checksumAlgorithm) {
        checksumAlgorithm = defaultChecksumAlgorithm;
        documentObject.setChecksumAlgorithm(checksumAlgorithm);
    }
    if (null != documentObject.getReferenceDocumentFile()) {
        throw new StorageException("There is already a file associated with " + documentObject);
    }
    try {
        MessageDigest md = MessageDigest.getInstance(checksumAlgorithm);
        String filename = UUID.randomUUID().toString();
        Path directory = Paths.get(rootLocation + File.separator);
        Path file = Paths.get(rootLocation + File.separator + filename);
        // TODO perhaps better to raise an error if init somehow failed to create it?
        if (!Files.exists(directory)) {
            Files.createDirectory(directory);
        }
        // Check if we actually can create the file
        Path path = Files.createFile(file);
        // Check if we can write something to the file
        if (!Files.isWritable(file)) {
            throw new StorageException("The file (" + file.getFileName() + ") is not writable server-side. This" + " file is being associated with " + documentObject);
        }
        // Create a DigestInputStream to be read with the
        // checksumAlgorithm identified in the properties file
        DigestInputStream digestInputStream = new DigestInputStream(inputStream, md);
        FileOutputStream outputStream = new FileOutputStream(path.toFile());
        long bytesTotal = -1;
        try {
            // Copy the stream to disk; the digest is updated as a side effect of reading
            bytesTotal = IOUtils.copyLarge(digestInputStream, outputStream);
            // Tidy up and close outputStream
            outputStream.flush();
            outputStream.close();
            // Finished with inputStream now as well
            digestInputStream.close();
        } finally {
            try {
                // Try to close without exceptions if copy() threw an exception
                digestInputStream.close();
            } catch (IOException e) {
                // swallow any error to expose exceptions from IOUtils.copyLarge()
            }
            try {
                // same for outputStream
                outputStream.close();
            } catch (IOException e) {
                // empty
            }
        }
        if (bytesTotal == 0L) {
            Files.delete(file);
            logger.warn("The file (" + file.getFileName() + ") has 0 length content and should have been deleted");
            throw new StorageException("The file (" + file.getFileName() + ") has 0 length content. Rejecting " + "upload! This file is being associated with " + documentObject);
        }
        if (!documentObject.getFileSize().equals(bytesTotal)) {
            Files.delete(file);
            String logmsg = "The uploaded file (" + file.getFileName() + ") length " + bytesTotal + " did not match the dokumentobjekt filstoerrelse " + documentObject.getFileSize() + " and was deleted.";
            logger.warn(logmsg);
            String msg = logmsg + " Rejecting upload! This file is being associated with " + documentObject;
            throw new StorageException(msg);
        }
        // Get the digest
        byte[] digest = digestInputStream.getMessageDigest().digest();
        // Convert digest to HEX
        StringBuilder sb = new StringBuilder();
        for (byte b : digest) {
            sb.append(Integer.toString((b & 0xff) + 0x100, 16).substring(1));
        }
        // TODO figure out what the spec will say about existing
        // values in documentObject. For now, only set the values
        // if they are blank, and reject the file if the checksum
        // did not match.
        String olddigest = documentObject.getChecksum();
        String newdigest = sb.toString();
        if (null == olddigest) {
            documentObject.setChecksum(newdigest);
        } else if (!olddigest.equals(newdigest)) {
            Files.delete(file);
            String msg = "The file (" + file.getFileName() + ") checksum " + newdigest + " does not match the already stored checksum. Rejecting " + "upload! This file is being associated with " + documentObject;
            throw new StorageException(msg);
        }
        documentObject.setReferenceDocumentFile(file.toString());
    } catch (IOException e) {
        logger.error("When associating an uploaded file with " + documentObject + " an exception occurred. " + "Exception is " + e);
        throw new StorageException("Failed to store file to be associated with " + documentObject + " " + e.toString());
    } catch (NoSuchAlgorithmException e) {
        logger.error("When associating an uploaded file with " + documentObject + " an exception occurred. " + "Exception is " + e);
        throw new StorageException("Internal error, could not load checksum algorithm (" + checksumAlgorithm + ") when attempting to store a file associated with " + documentObject);
    }
}
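Stripped of the Noark-specific bookkeeping, the pattern above is: wrap the upload stream in a DigestInputStream, copy it to disk, then hex-encode the digest. A minimal standalone sketch of that pattern, assuming Apache Commons IO on the classpath (the class and method names here are illustrative, not part of the project):

import org.apache.commons.io.IOUtils;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public final class ChecksumCopy {

    // Copies in to target and returns the hex-encoded digest of the bytes written
    static String copyWithChecksum(InputStream in, Path target, String algorithm)
            throws IOException, NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance(algorithm);
        try (DigestInputStream din = new DigestInputStream(in, md);
             OutputStream out = Files.newOutputStream(target)) {
            // the digest is updated as a side effect of every read
            IOUtils.copyLarge(din, out);
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md.digest()) {
            hex.append(Integer.toString((b & 0xff) + 0x100, 16).substring(1));
        }
        return hex.toString();
    }
}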
use of java.security.DigestInputStream in project zm-mailbox by Zimbra.
the class ExternalStoreManager method stage.
@Override
public StagedBlob stage(InputStream in, long actualSize, Mailbox mbox) throws ServiceException, IOException {
    if (actualSize < 0) {
        Blob blob = storeIncoming(in);
        try {
            return stage(blob, mbox);
        } finally {
            quietDelete(blob);
        }
    }
    MessageDigest digest;
    try {
        digest = MessageDigest.getInstance("SHA-256");
    } catch (NoSuchAlgorithmException e) {
        throw ServiceException.FAILURE("SHA-256 digest not found", e);
    }
    ByteUtil.PositionInputStream pin = new ByteUtil.PositionInputStream(new DigestInputStream(in, digest));
    try {
        String locator = writeStreamToStore(pin, actualSize, mbox);
        if (locator != null) {
            ZimbraLog.store.debug("wrote to locator %s", locator);
        } else {
            ZimbraLog.store.warn("blob staging returned null locator");
        }
        return new ExternalStagedBlob(mbox, ByteUtil.encodeFSSafeBase64(digest.digest()), pin.getPosition(), locator);
    } catch (IOException e) {
        throw ServiceException.FAILURE("unable to stage blob", e);
    }
}
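Zimbra's ByteUtil.PositionInputStream is internal, but the decorate-once, read-once composition can be reproduced with Commons IO's CountingInputStream. A rough sketch under that assumption; BlobStore is a hypothetical stand-in for writeStreamToStore, and URL-safe base64 only approximates ByteUtil.encodeFSSafeBase64:

import org.apache.commons.io.input.CountingInputStream;

import java.io.InputStream;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.util.Base64;

// hypothetical stand-in for writeStreamToStore(...)
interface BlobStore {
    String write(InputStream in) throws Exception;
}

class StagingSketch {

    // One pass over the stream yields the locator, the digest and the byte count
    static String describeStagedBlob(InputStream in, BlobStore store) throws Exception {
        MessageDigest digest = MessageDigest.getInstance("SHA-256");
        CountingInputStream counted = new CountingInputStream(new DigestInputStream(in, digest));
        String locator = store.write(counted);
        // URL-safe base64 without padding, approximating encodeFSSafeBase64
        String hash = Base64.getUrlEncoder().withoutPadding().encodeToString(digest.digest());
        return locator + " sha256=" + hash + " size=" + counted.getByteCount();
    }
}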
use of java.security.DigestInputStream in project zm-mailbox by Zimbra.
the class HttpStoreManager method writeStreamToStore.
@Override
public String writeStreamToStore(InputStream in, long actualSize, Mailbox mbox) throws IOException, ServiceException {
    MessageDigest digest;
    try {
        digest = MessageDigest.getInstance("SHA-256");
    } catch (NoSuchAlgorithmException e) {
        throw ServiceException.FAILURE("SHA-256 digest not found", e);
    }
    ByteUtil.PositionInputStream pin = new ByteUtil.PositionInputStream(new DigestInputStream(in, digest));
    HttpClient client = ZimbraHttpConnectionManager.getInternalHttpConnMgr().newHttpClient();
    PostMethod post = new PostMethod(getPostUrl(mbox));
    try {
        HttpClientUtil.addInputStreamToHttpMethod(post, pin, actualSize, "application/octet-stream");
        int statusCode = HttpClientUtil.executeMethod(client, post);
        if (statusCode == HttpStatus.SC_OK || statusCode == HttpStatus.SC_CREATED || statusCode == HttpStatus.SC_NO_CONTENT) {
            return getLocator(post, ByteUtil.encodeFSSafeBase64(digest.digest()), pin.getPosition(), mbox);
        } else {
            throw ServiceException.FAILURE("error POSTing blob: " + post.getStatusText(), null);
        }
    } finally {
        post.releaseConnection();
    }
}
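The snippet above uses the legacy Commons HttpClient 3 API (PostMethod). With the JDK's built-in java.net.http client (Java 11+), the same digest-while-uploading idea looks roughly like this; the URL and status handling are illustrative:

import java.io.InputStream;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.security.DigestInputStream;
import java.security.MessageDigest;

class HttpDigestUpload {

    // Streams the body through a DigestInputStream so the hash is available
    // as soon as the POST completes
    static byte[] postAndDigest(InputStream in, String url) throws Exception {
        MessageDigest digest = MessageDigest.getInstance("SHA-256");
        DigestInputStream din = new DigestInputStream(in, digest);
        HttpRequest request = HttpRequest.newBuilder(URI.create(url))
                .header("Content-Type", "application/octet-stream")
                .POST(HttpRequest.BodyPublishers.ofInputStream(() -> din))
                .build();
        HttpResponse<Void> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.discarding());
        if (response.statusCode() / 100 != 2) {
            throw new IllegalStateException("upload failed with HTTP " + response.statusCode());
        }
        return digest.digest();
    }
}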
use of java.security.DigestInputStream in project zm-mailbox by Zimbra.
the class TritonBlobStoreManager method getHash.
@Override
public byte[] getHash(Blob blob) throws ServiceException, IOException {
    MessageDigest digest = newDigest();
    DigestInputStream dis = null;
    InputStream bis = null;
    try {
        bis = blob.getInputStream();
        dis = new DigestInputStream(bis, digest);
        // drain the stream one byte at a time; only the digest side effect matters
        while (dis.read() >= 0) {
        }
        return digest.digest();
    } finally {
        ByteUtil.closeStream(bis);
        ByteUtil.closeStream(dis);
    }
}
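Draining the stream one byte per read() call is correct but costs a method call per byte; reading through a buffer yields the identical digest far faster. A minimal sketch, independent of the Triton-specific types:

import java.io.IOException;
import java.io.InputStream;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

class StreamHash {

    // Drains the stream in 8 KiB chunks; the result matches a byte-by-byte read
    static byte[] hashStream(InputStream in) throws IOException, NoSuchAlgorithmException {
        MessageDigest digest = MessageDigest.getInstance("SHA-256");
        try (DigestInputStream dis = new DigestInputStream(in, digest)) {
            byte[] buffer = new byte[8192];
            while (dis.read(buffer) >= 0) {
                // nothing to do: DigestInputStream updates the digest on every read
            }
        }
        return digest.digest();
    }
}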
use of java.security.DigestInputStream in project jackrabbit by apache.
the class DbDataStore method addRecord.
public DataRecord addRecord(InputStream stream) throws DataStoreException {
    InputStream fileInput = null;
    String tempId = null;
    ResultSet rs = null;
    try {
        long tempModified;
        while (true) {
            try {
                tempModified = System.currentTimeMillis();
                String id = UUID.randomUUID().toString();
                tempId = TEMP_PREFIX + id;
                temporaryInUse.add(tempId);
                // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=?
                rs = conHelper.query(selectMetaSQL, tempId);
                boolean hasNext = rs.next();
                DbUtility.close(rs);
                rs = null;
                if (hasNext) {
                    // re-try in the very, very unlikely event that the row already exists
                    continue;
                }
                // INSERT INTO DATASTORE VALUES(?, 0, ?, NULL)
                conHelper.exec(insertTempSQL, tempId, tempModified);
                break;
            } catch (Exception e) {
                throw convert("Can not insert new record", e);
            } finally {
                DbUtility.close(rs);
                // prevent rs.close() from being called again
                rs = null;
            }
        }
        MessageDigest digest = getDigest();
        DigestInputStream dIn = new DigestInputStream(stream, digest);
        CountingInputStream in = new CountingInputStream(dIn);
        StreamWrapper wrapper;
        if (STORE_SIZE_MINUS_ONE.equals(storeStream)) {
            wrapper = new StreamWrapper(in, -1);
        } else if (STORE_SIZE_MAX.equals(storeStream)) {
            wrapper = new StreamWrapper(in, Integer.MAX_VALUE);
        } else if (STORE_TEMP_FILE.equals(storeStream)) {
            File temp = moveToTempFile(in);
            long length = temp.length();
            wrapper = new StreamWrapper(new ResettableTempFileInputStream(temp), length);
        } else {
            throw new DataStoreException("Unsupported stream store algorithm: " + storeStream);
        }
        // UPDATE DATASTORE SET DATA=? WHERE ID=?
        conHelper.exec(updateDataSQL, wrapper, tempId);
        long length = in.getByteCount();
        DataIdentifier identifier = new DataIdentifier(encodeHexString(digest.digest()));
        usesIdentifier(identifier);
        String id = identifier.toString();
        long newModified;
        while (true) {
            newModified = System.currentTimeMillis();
            if (checkExisting(tempId, length, identifier)) {
                touch(identifier, newModified);
                conHelper.exec(deleteSQL, tempId);
                break;
            }
            try {
                // UPDATE DATASTORE SET ID=?, LENGTH=?, LAST_MODIFIED=?
                // WHERE ID=? AND LAST_MODIFIED=?
                int count = conHelper.update(updateSQL, id, length, newModified, tempId, tempModified);
                // if the update count is 0, the temporary row was modified
                // concurrently (e.g. a concurrent garbage collection could delete rows)
                if (count != 0) {
                    // update was successful
                    break;
                }
            } catch (SQLException e) {
                // duplicate key (the row already exists) - repeat
                // we use exception handling for flow control here, which is bad,
                // but the alternative is to use UPDATE ... WHERE ... (SELECT ...)
                // which could cause a deadlock in some databases - also,
                // duplicate key will only occur if somebody else concurrently
                // added the same record (which is very unlikely)
            }
            // SELECT LENGTH, LAST_MODIFIED FROM DATASTORE WHERE ID=?
            rs = conHelper.query(selectMetaSQL, tempId);
            if (!rs.next()) {
                // the row was deleted, which is unexpected / not allowed
                String msg = DIGEST + " temporary entry deleted: id=" + tempId + " length=" + length;
                log.error(msg);
                throw new DataStoreException(msg);
            }
            tempModified = rs.getLong(2);
            DbUtility.close(rs);
            rs = null;
        }
        usesIdentifier(identifier);
        DbDataRecord record = new DbDataRecord(this, identifier, length, newModified);
        return record;
    } catch (Exception e) {
        throw convert("Can not insert new record", e);
    } finally {
        if (tempId != null) {
            temporaryInUse.remove(tempId);
        }
        DbUtility.close(rs);
        if (fileInput != null) {
            try {
                fileInput.close();
            } catch (IOException e) {
                throw convert("Can not close temporary file", e);
            }
        }
    }
}
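The shape of addRecord - stream the content under a temporary ID while digesting, then move it to a name derived from the digest - also works against a plain filesystem. A minimal sketch assuming Commons IO and Java 17+ (for HexFormat); the directory layout is illustrative:

import org.apache.commons.io.IOUtils;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HexFormat;
import java.util.UUID;

class ContentAddressedStore {

    // Writes the stream under a random temporary name, then renames it to its
    // SHA-256 hex digest, so identical content always lands at the same path
    static Path addRecord(InputStream stream, Path storeDir)
            throws IOException, NoSuchAlgorithmException {
        MessageDigest digest = MessageDigest.getInstance("SHA-256");
        Path temp = storeDir.resolve("tmp-" + UUID.randomUUID());
        try (DigestInputStream din = new DigestInputStream(stream, digest);
             OutputStream out = Files.newOutputStream(temp)) {
            IOUtils.copyLarge(din, out);
        }
        Path target = storeDir.resolve(HexFormat.of().formatHex(digest.digest()));
        if (Files.exists(target)) {
            // identical content already stored; the temp copy is redundant
            Files.delete(temp);
        } else {
            Files.move(temp, target, StandardCopyOption.ATOMIC_MOVE);
        }
        return target;
    }
}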