Use of org.apache.hadoop.util.DataChecksum in project hadoop by apache.
From the class DFSClient, method primitiveCreate.
/**
 * Same as {@link #create(String, FsPermission, EnumSet, short, long,
 * Progressable, int, ChecksumOpt)} except that the permission
 * is absolute (i.e. it has already been masked with the umask).
 */
public DFSOutputStream primitiveCreate(String src, FsPermission absPermission,
    EnumSet<CreateFlag> flag, boolean createParent, short replication,
    long blockSize, Progressable progress, int buffersize,
    ChecksumOpt checksumOpt) throws IOException {
  checkOpen();
  CreateFlag.validate(flag);
  DFSOutputStream result = primitiveAppend(src, flag, progress);
  if (result == null) {
    DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
    result = DFSOutputStream.newStreamForCreate(this, src, absPermission, flag,
        createParent, replication, blockSize, progress, checksum, null);
  }
  beginFileLease(result.getFileId(), result);
  return result;
}
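In this snippet the actual construction of the DataChecksum is hidden behind dfsClientConf.createChecksum(checksumOpt). For context, here is a minimal standalone sketch of building one directly through the public DataChecksum.newDataChecksum factory; the CRC32C type and 512-byte chunk size are illustrative choices, not values taken from the snippet or from any particular configuration.

import org.apache.hadoop.util.DataChecksum;

public class ChecksumFactoryExample {
  public static void main(String[] args) {
    // Illustrative parameters: CRC32C checksums over 512-byte data chunks.
    DataChecksum checksum =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);

    // Each 512-byte chunk of data is covered by one 4-byte CRC32C value.
    System.out.println("bytesPerChecksum = " + checksum.getBytesPerChecksum());
    System.out.println("checksumSize     = " + checksum.getChecksumSize());
  }
}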
Use of org.apache.hadoop.util.DataChecksum in project hadoop by apache.
From the class DFSStripedOutputStream, method writeParity.
void writeParity(int index, ByteBuffer buffer, byte[] checksumBuf)
    throws IOException {
  final StripedDataStreamer current = setCurrentStreamer(index);
  final int len = buffer.limit();
  final long oldBytes = current.getBytesCurBlock();
  if (current.isHealthy()) {
    try {
      DataChecksum sum = getDataChecksum();
      if (buffer.isDirect()) {
        ByteBuffer directCheckSumBuf =
            BUFFER_POOL.getBuffer(true, checksumBuf.length);
        sum.calculateChunkedSums(buffer, directCheckSumBuf);
        directCheckSumBuf.get(checksumBuf);
        BUFFER_POOL.putBuffer(directCheckSumBuf);
      } else {
        sum.calculateChunkedSums(buffer.array(), 0, len, checksumBuf, 0);
      }
      for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
        int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
        int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
        super.writeChunk(buffer, chunkLen, checksumBuf, ckOffset,
            getChecksumSize());
      }
    } catch (Exception e) {
      handleCurrentStreamerFailure("oldBytes=" + oldBytes + ", len=" + len, e);
    }
  }
}
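The loop above writes one chunk per bytesPerChecksum bytes and looks up its checksum at offset (i / bytesPerChecksum) * checksumSize in checksumBuf. The following self-contained sketch shows the same chunked-sum layout on a plain byte array; the 8-byte chunk size and sample data are illustrative only, chosen so the offsets are easy to follow.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.util.DataChecksum;

public class ChunkedSumsExample {
  public static void main(String[] args) {
    // Illustrative setup: CRC32C over 8-byte chunks so the layout is easy to see.
    DataChecksum sum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 8);
    byte[] data = "0123456789abcdefghij".getBytes(StandardCharsets.UTF_8); // 20 bytes

    // One checksum per (possibly partial) chunk: ceil(20 / 8) = 3 checksums.
    int numChunks = (data.length + sum.getBytesPerChecksum() - 1)
        / sum.getBytesPerChecksum();
    byte[] checksums = new byte[numChunks * sum.getChecksumSize()];
    sum.calculateChunkedSums(data, 0, data.length, checksums, 0);

    // Same offset arithmetic as writeParity: the chunk starting at byte i has
    // its checksum at (i / bytesPerChecksum) * checksumSize.
    for (int i = 0; i < data.length; i += sum.getBytesPerChecksum()) {
      int ckOffset = i / sum.getBytesPerChecksum() * sum.getChecksumSize();
      System.out.println("chunk at " + i + " -> checksum offset " + ckOffset);
    }
  }
}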
Use of org.apache.hadoop.util.DataChecksum in project hadoop by apache.
From the class FsVolumeImpl, method loadLastPartialChunkChecksum.
@Override
public byte[] loadLastPartialChunkChecksum(File blockFile, File metaFile)
    throws IOException {
  // readHeader closes the temporary FileInputStream.
  DataChecksum dcs;
  try (FileInputStream fis =
      fileIoProvider.getFileInputStream(this, metaFile)) {
    dcs = BlockMetadataHeader.readHeader(fis).getChecksum();
  }
  final int checksumSize = dcs.getChecksumSize();
  final long onDiskLen = blockFile.length();
  final int bytesPerChecksum = dcs.getBytesPerChecksum();
  if (onDiskLen % bytesPerChecksum == 0) {
    // The last chunk is a complete one, so there is no partial chunk checksum
    // to preserve because it will not be modified.
    return null;
  }
  long offsetInChecksum = BlockMetadataHeader.getHeaderSize()
      + (onDiskLen / bytesPerChecksum) * checksumSize;
  byte[] lastChecksum = new byte[checksumSize];
  try (RandomAccessFile raf =
      fileIoProvider.getRandomAccessFile(this, metaFile, "r")) {
    raf.seek(offsetInChecksum);
    int readBytes = raf.read(lastChecksum, 0, checksumSize);
    if (readBytes == -1) {
      throw new IOException("Expected to read " + checksumSize
          + " bytes from offset " + offsetInChecksum
          + " but reached end of file.");
    } else if (readBytes != checksumSize) {
      throw new IOException("Expected to read " + checksumSize
          + " bytes from offset " + offsetInChecksum + " but read "
          + readBytes + " bytes.");
    }
  }
  return lastChecksum;
}
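The seek position is derived from the meta file layout: a fixed-size header followed by one checksum entry per data chunk, so the checksum of the trailing partial chunk starts immediately after the entries for the full chunks. A worked example with hypothetical numbers follows; the real code reads the header size from BlockMetadataHeader.getHeaderSize() and the sizes from the parsed DataChecksum instead of hard-coding them.

public class PartialChunkChecksumOffset {
  public static void main(String[] args) {
    // Hypothetical values: a 7-byte metadata header, 4-byte (CRC32C) checksums,
    // 512-byte chunks, and a block that is 1300 bytes long on disk.
    long headerSize = 7;
    int checksumSize = 4;
    int bytesPerChecksum = 512;
    long onDiskLen = 1300;

    // 1300 bytes = 2 full chunks (1024 bytes) plus a 276-byte partial chunk.
    long fullChunks = onDiskLen / bytesPerChecksum;                  // 2
    long offsetInChecksum = headerSize + fullChunks * checksumSize;  // 7 + 8 = 15

    System.out.println("partial-chunk checksum starts at byte " + offsetInChecksum);
  }
}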
Use of org.apache.hadoop.util.DataChecksum in project hbase by apache.
From the class FanOutOneBlockAsyncDFSOutputHelper, method createOutput.
private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs,
    String src, boolean overwrite, boolean createParent, short replication,
    long blockSize, EventLoop eventLoop) throws IOException {
  Configuration conf = dfs.getConf();
  FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
  DFSClient client = dfs.getClient();
  String clientName = client.getClientName();
  ClientProtocol namenode = client.getNamenode();
  HdfsFileStatus stat;
  try {
    stat = namenode.create(src,
        FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)),
        clientName,
        new EnumSetWritable<>(
            overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)),
        createParent, replication, blockSize, CryptoProtocolVersion.supported());
  } catch (Exception e) {
    if (e instanceof RemoteException) {
      throw (RemoteException) e;
    } else {
      throw new NameNodeException(e);
    }
  }
  beginFileLease(client, stat.getFileId());
  boolean succ = false;
  LocatedBlock locatedBlock = null;
  List<Future<Channel>> futureList = null;
  try {
    DataChecksum summer = createChecksum(client);
    locatedBlock = BLOCK_ADDER.addBlock(namenode, src, client.getClientName(),
        null, null, stat.getFileId(), null);
    List<Channel> datanodeList = new ArrayList<>();
    futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L,
        0L, PIPELINE_SETUP_CREATE, summer, eventLoop);
    for (Future<Channel> future : futureList) {
      // fail the creation if there are connection failures since we are fail-fast.
      // The upper layer should retry itself if needed.
      datanodeList.add(future.syncUninterruptibly().getNow());
    }
    Encryptor encryptor = createEncryptor(conf, stat, client);
    FanOutOneBlockAsyncDFSOutput output = new FanOutOneBlockAsyncDFSOutput(conf,
        fsUtils, dfs, client, namenode, clientName, src, stat.getFileId(),
        locatedBlock, encryptor, eventLoop, datanodeList, summer, ALLOC);
    succ = true;
    return output;
  } finally {
    if (!succ) {
      if (futureList != null) {
        for (Future<Channel> f : futureList) {
          f.addListener(new FutureListener<Channel>() {

            @Override
            public void operationComplete(Future<Channel> future) throws Exception {
              if (future.isSuccess()) {
                future.getNow().close();
              }
            }
          });
        }
      }
      endFileLease(client, stat.getFileId());
      fsUtils.recoverFileLease(dfs, new Path(src), conf,
          new CancelOnClose(client));
    }
  }
}
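The summer built by createChecksum(client) is passed both to connectToDataNodes and to the output stream, presumably so outgoing packets carry checksums the datanodes can verify. As a side note, DataChecksum also offers the verification counterpart of calculateChunkedSums; the sketch below is a minimal round trip with an illustrative CRC32C/512-byte configuration rather than whatever the client configuration would actually supply.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.util.DataChecksum;

public class VerifyChunkedSumsExample {
  public static void main(String[] args) throws ChecksumException {
    // Illustrative parameters, not taken from the HBase snippet.
    DataChecksum summer =
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);

    byte[] payload = "some packet payload".getBytes(StandardCharsets.UTF_8);
    ByteBuffer data = ByteBuffer.wrap(payload);
    ByteBuffer sums = ByteBuffer.allocate(summer.getChecksumSize());
    summer.calculateChunkedSums(data, sums);

    // Throws ChecksumException if data and checksums disagree; the file name
    // and base position are only used to build the error message.
    summer.verifyChunkedSums(data, sums, "example-block", 0);
    System.out.println("checksums verified");
  }
}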
Use of org.apache.hadoop.util.DataChecksum in project hadoop by apache.
From the class BlockReaderLocalLegacy, method newBlockReader.
/**
* The only way this object can be instantiated.
*/
static BlockReaderLocalLegacy newBlockReader(DfsClientConf conf,
    UserGroupInformation userGroupInformation, Configuration configuration,
    String file, ExtendedBlock blk, Token<BlockTokenIdentifier> token,
    DatanodeInfo node, long startOffset, long length, StorageType storageType,
    Tracer tracer) throws IOException {
  final ShortCircuitConf scConf = conf.getShortCircuitConf();
  LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
  // check the cache first
  BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
  if (pathinfo == null) {
    if (userGroupInformation == null) {
      userGroupInformation = UserGroupInformation.getCurrentUser();
    }
    pathinfo = getBlockPathInfo(userGroupInformation, blk, node, configuration,
        conf.getSocketTimeout(), token, conf.isConnectToDnViaHostname(),
        storageType);
  }
  // check to see if the file exists. It may so happen that the
  // HDFS file has been deleted and this block-lookup is occurring
  // on behalf of a new HDFS file. This time, the block file could
  // be residing in a different portion of the fs.data.dir directory.
  // In this case, we remove this entry from the cache. The next
  // call to this method will re-populate the cache.
  FileInputStream dataIn = null;
  FileInputStream checksumIn = null;
  BlockReaderLocalLegacy localBlockReader = null;
  final boolean skipChecksumCheck =
      scConf.isSkipShortCircuitChecksums() || storageType.isTransient();
  try {
    // get a local file system
    File blkfile = new File(pathinfo.getBlockPath());
    dataIn = new FileInputStream(blkfile);
    LOG.debug("New BlockReaderLocalLegacy for file {} of size {} startOffset "
        + "{} length {} short circuit checksum {}", blkfile, blkfile.length(),
        startOffset, length, !skipChecksumCheck);
    if (!skipChecksumCheck) {
      // get the metadata file
      File metafile = new File(pathinfo.getMetaPath());
      checksumIn = new FileInputStream(metafile);
      final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
          new DataInputStream(checksumIn), blk);
      long firstChunkOffset =
          startOffset - (startOffset % checksum.getBytesPerChecksum());
      localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk,
          startOffset, checksum, true, dataIn, firstChunkOffset, checksumIn,
          tracer);
    } else {
      localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk,
          startOffset, dataIn, tracer);
    }
  } catch (IOException e) {
    // remove from cache
    localDatanodeInfo.removeBlockLocalPathInfo(blk);
    LOG.warn("BlockReaderLocalLegacy: Removing " + blk
        + " from cache because local file " + pathinfo.getBlockPath()
        + " could not be opened.");
    throw e;
  } finally {
    if (localBlockReader == null) {
      if (dataIn != null) {
        dataIn.close();
      }
      if (checksumIn != null) {
        checksumIn.close();
      }
    }
  }
  return localBlockReader;
}
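One detail worth noting is the chunk alignment: startOffset is rounded down to a multiple of bytesPerChecksum so that checksum verification can begin on a full chunk boundary. A tiny sketch of that arithmetic with hypothetical numbers:

public class ChunkAlignmentExample {
  public static void main(String[] args) {
    // Hypothetical read request: start at byte 1000 of a block whose
    // checksums cover 512-byte chunks.
    long startOffset = 1000;
    int bytesPerChecksum = 512;

    // Same arithmetic as newBlockReader: back up to the chunk boundary.
    long firstChunkOffset = startOffset - (startOffset % bytesPerChecksum); // 512
    System.out.println("firstChunkOffset = " + firstChunkOffset);
  }
}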