Use of alluxio.worker.block.io.BlockReader in project alluxio by Alluxio.
The class BlockDataServerHandler, method handleUnderFileSystemBlockReadRequest.
/**
* Handles a {@link RPCUnderFileSystemBlockReadRequest} by reading the data through a
* {@link BlockReader} provided by the block worker. This method assumes the data is available
* in the UFS and returns an error status if the data is not available.
*
* @param ctx The context of this request which handles the result of this operation
* @param req The initiating {@link RPCUnderFileSystemBlockReadRequest}
* @throws IOException if an I/O error occurs when reading the data requested
*/
public void handleUnderFileSystemBlockReadRequest(final ChannelHandlerContext ctx,
    final RPCUnderFileSystemBlockReadRequest req) throws IOException {
  final long blockId = req.getBlockId();
  final long offset = req.getOffset();
  final long len = req.getLength();
  final long sessionId = req.getSessionId();
  final boolean noCache = req.getNoCache();
  try {
    DataBuffer buffer = null;
    req.validate();
    BlockReader reader = mWorker.readUfsBlock(sessionId, blockId, offset, noCache);
    ByteBuffer data = reader.read(offset, len);
    if (data != null && data.remaining() > 0) {
      buffer = new DataByteBuffer(data, data.remaining());
      Metrics.BYTES_READ_UFS.inc(buffer.getLength());
    }
    RPCBlockReadResponse resp =
        new RPCBlockReadResponse(blockId, offset, data.remaining(), buffer,
            RPCResponse.Status.SUCCESS);
    ChannelFuture future = ctx.writeAndFlush(resp);
    if (buffer != null) {
      future.addListener(new ReleasableResourceChannelListener(buffer));
    }
    LOG.debug("Preparation for responding to remote block request for: {} done.", blockId);
  } catch (Exception e) {
    LOG.error("Exception reading block {}", blockId, e);
    RPCBlockReadResponse resp;
    if (e instanceof BlockDoesNotExistException) {
      resp = RPCBlockReadResponse.createErrorResponse(req, RPCResponse.Status.FILE_DNE);
    } else {
      resp = RPCBlockReadResponse.createErrorResponse(req, RPCResponse.Status.UFS_READ_FAILED);
    }
    ChannelFuture future = ctx.writeAndFlush(resp);
    future.addListener(ChannelFutureListener.CLOSE);
  }
}
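The handler above only wraps the ByteBuffer returned by BlockReader.read into a DataByteBuffer for the Netty response. The sketch below isolates that same read call; the readRange helper is hypothetical (not part of Alluxio), assumes the same imports as the snippet above (BlockReader, ByteBuffer, IOException), and treats a null or empty result as an empty range.

// Hypothetical helper: read one byte range through a BlockReader and copy it
// out, using only the read(offset, length) call shown in the handler above.
byte[] readRange(BlockReader reader, long offset, long length) throws IOException {
  ByteBuffer data = reader.read(offset, length);
  if (data == null || !data.hasRemaining()) {
    return new byte[0];  // nothing available in the requested range
  }
  byte[] copy = new byte[data.remaining()];
  data.get(copy);  // copy out so the result's lifetime is independent of the reader
  return copy;
}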
Use of alluxio.worker.block.io.BlockReader in project alluxio by Alluxio.
The class DataServerUfsBlockReadHandler, method getDataBuffer.
@Override
protected DataBuffer getDataBuffer(Channel channel, long offset, int len) throws IOException {
  BlockReader blockReader = ((UfsBlockReadRequestInternal) mRequest).mBlockReader;
  // This buf is released by netty.
  ByteBuf buf = channel.alloc().buffer(len, len);
  try {
    while (buf.writableBytes() > 0 && blockReader.transferTo(buf) != -1) {
    }
    return new DataNettyBufferV2(buf);
  } catch (Throwable e) {
    buf.release();
    throw e;
  }
}
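Here BlockReader.transferTo(ByteBuf) fills a fixed-size Netty buffer until it is full or the reader reaches end of stream. The sketch below shows the same fill-until-full-or-EOF pattern against a plain NIO channel instead of a BlockReader; fillBuffer is illustrative only, assumes io.netty.buffer.ByteBuf/ByteBufAllocator and java.nio.channels.ScatteringByteChannel imports, and leaves releasing the returned buffer to the caller (in the handler, Netty releases it after the response is written).

// Illustrative only: fill a fixed-capacity ByteBuf from an NIO channel until it
// is full or the channel signals end-of-stream (-1), mirroring the loop above.
ByteBuf fillBuffer(ByteBufAllocator alloc, ScatteringByteChannel src, int len)
    throws IOException {
  ByteBuf buf = alloc.buffer(len, len);  // capacity and max capacity both fixed at len
  try {
    while (buf.writableBytes() > 0 && buf.writeBytes(src, buf.writableBytes()) != -1) {
    }
    return buf;  // ownership passes to the caller, who must release() it
  } catch (Throwable e) {
    buf.release();  // avoid leaking the pooled buffer on failure
    throw e;
  }
}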
Use of alluxio.worker.block.io.BlockReader in project alluxio by Alluxio.
The class FileDataManager, method persistFile.
/**
* Persists the blocks of a file into the under file system.
*
* @param fileId the id of the file
* @param blockIds the list of block ids
* @throws AlluxioException if an unexpected Alluxio exception is thrown
* @throws IOException if the file persistence fails
*/
public void persistFile(long fileId, List<Long> blockIds) throws AlluxioException, IOException {
  Map<Long, Long> blockIdToLockId;
  synchronized (mLock) {
    blockIdToLockId = mPersistingInProgressFiles.get(fileId);
    if (blockIdToLockId == null || !blockIdToLockId.keySet().equals(new HashSet<>(blockIds))) {
      throw new IOException("Not all the blocks of file " + fileId + " are locked");
    }
  }
  String dstPath = prepareUfsFilePath(fileId);
  UnderFileSystem ufs = UnderFileSystem.Factory.get(dstPath);
  FileInfo fileInfo = mBlockWorker.getFileInfo(fileId);
  OutputStream outputStream = ufs.create(dstPath, CreateOptions.defaults()
      .setOwner(fileInfo.getOwner()).setGroup(fileInfo.getGroup())
      .setMode(new Mode((short) fileInfo.getMode())));
  final WritableByteChannel outputChannel = Channels.newChannel(outputStream);
  List<Throwable> errors = new ArrayList<>();
  try {
    for (long blockId : blockIds) {
      long lockId = blockIdToLockId.get(blockId);
      if (Configuration.getBoolean(PropertyKey.WORKER_FILE_PERSIST_RATE_LIMIT_ENABLED)) {
        BlockMeta blockMeta =
            mBlockWorker.getBlockMeta(Sessions.CHECKPOINT_SESSION_ID, blockId, lockId);
        mPersistenceRateLimiter.acquire((int) blockMeta.getBlockSize());
      }
      // obtain block reader
      BlockReader reader =
          mBlockWorker.readBlockRemote(Sessions.CHECKPOINT_SESSION_ID, blockId, lockId);
      // write content out
      ReadableByteChannel inputChannel = reader.getChannel();
      BufferUtils.fastCopy(inputChannel, outputChannel);
      reader.close();
    }
  } catch (BlockDoesNotExistException | InvalidWorkerStateException e) {
    errors.add(e);
  } finally {
    // make sure all the locks are released
    for (long lockId : blockIdToLockId.values()) {
      try {
        mBlockWorker.unlockBlock(lockId);
      } catch (BlockDoesNotExistException e) {
        errors.add(e);
      }
    }
    // Process any errors
    if (!errors.isEmpty()) {
      StringBuilder errorStr = new StringBuilder();
      errorStr.append("The blocks of file ").append(fileId).append(" failed to persist\n");
      for (Throwable e : errors) {
        errorStr.append(e).append('\n');
      }
      throw new IOException(errorStr.toString());
    }
  }
  outputStream.flush();
  outputChannel.close();
  outputStream.close();
  synchronized (mLock) {
    mPersistingInProgressFiles.remove(fileId);
    mPersistedFiles.add(fileId);
  }
}
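The per-block copy above hands a ReadableByteChannel from the BlockReader to BufferUtils.fastCopy, which drains it into the UFS output channel. As a rough illustration only (not Alluxio's BufferUtils implementation), a plain NIO channel-to-channel copy loop with a reusable direct buffer looks like this; the 64 KB buffer size is arbitrary.

// Sketch of a channel-to-channel copy: read into a reusable buffer, then drain
// the buffer fully into the destination before the next read.
static void copyChannel(java.nio.channels.ReadableByteChannel src,
    java.nio.channels.WritableByteChannel dst) throws IOException {
  java.nio.ByteBuffer buffer = java.nio.ByteBuffer.allocateDirect(64 * 1024);
  while (src.read(buffer) != -1) {
    buffer.flip();                 // switch the buffer to draining mode
    while (buffer.hasRemaining()) {
      dst.write(buffer);           // writes may be partial, so loop until drained
    }
    buffer.clear();                // ready for the next read
  }
}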
Use of alluxio.worker.block.io.BlockReader in project alluxio by Alluxio.
The class BlockDataServerHandler, method handleBlockReadRequest.
/**
* Handles a {@link RPCBlockReadRequest} by reading the data through a {@link BlockReader}
* provided by the block worker. This method assumes the data is available in the local storage
* of the worker and returns an error status if the data is not available.
*
* @param ctx The context of this request which handles the result of this operation
* @param req The initiating {@link RPCBlockReadRequest}
* @throws IOException if an I/O error occurs when reading the data requested
*/
void handleBlockReadRequest(final ChannelHandlerContext ctx, final RPCBlockReadRequest req)
    throws IOException {
  final long blockId = req.getBlockId();
  final long offset = req.getOffset();
  final long len = req.getLength();
  final long lockId = req.getLockId();
  final long sessionId = req.getSessionId();
  BlockReader reader = null;
  DataBuffer buffer;
  try {
    req.validate();
    reader = mWorker.readBlockRemote(sessionId, blockId, lockId);
    final long fileLength = reader.getLength();
    validateBounds(req, fileLength);
    final long readLength = returnLength(offset, len, fileLength);
    buffer = getDataBuffer(req, reader, readLength);
    Metrics.BYTES_READ_REMOTE.inc(buffer.getLength());
    RPCBlockReadResponse resp =
        new RPCBlockReadResponse(blockId, offset, readLength, buffer, RPCResponse.Status.SUCCESS);
    ChannelFuture future = ctx.writeAndFlush(resp);
    future.addListener(new ClosableResourceChannelListener(reader));
    future.addListener(new ReleasableResourceChannelListener(buffer));
    mWorker.accessBlock(sessionId, blockId);
    LOG.debug("Preparation for responding to remote block request for: {} done.", blockId);
  } catch (Exception e) {
    LOG.error("Exception reading block {}", blockId, e);
    RPCBlockReadResponse resp;
    if (e instanceof BlockDoesNotExistException) {
      resp = RPCBlockReadResponse.createErrorResponse(req, RPCResponse.Status.FILE_DNE);
    } else {
      resp = RPCBlockReadResponse.createErrorResponse(req, RPCResponse.Status.UFS_READ_FAILED);
    }
    ChannelFuture future = ctx.writeAndFlush(resp);
    future.addListener(ChannelFutureListener.CLOSE);
    if (reader != null) {
      reader.close();
    }
  }
}
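Before the buffer is built, validateBounds and returnLength (defined elsewhere in the handler) clamp the requested range against the block length reported by the reader. The sketch below is a hedged restatement of that clamping, not the handler's actual helpers; the "length of -1 means read to the end of the block" convention is an assumption made for illustration.

// Hedged sketch of the bounds handling the handler relies on; clampReadLength
// is hypothetical, and the -1 convention below is assumed, not confirmed.
static long clampReadLength(long offset, long requestedLength, long fileLength) {
  if (offset < 0 || offset > fileLength) {
    throw new IllegalArgumentException(
        "offset " + offset + " is out of range [0, " + fileLength + "]");
  }
  if (requestedLength == -1) {
    return fileLength - offset;  // assumed convention: -1 means read the remainder
  }
  if (offset + requestedLength > fileLength) {
    throw new IllegalArgumentException("requested range reads past the end of the block");
  }
  return requestedLength;
}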
Use of alluxio.worker.block.io.BlockReader in project alluxio by Alluxio.
The class DataServerBlockReadHandler, method getDataBuffer.
@Override
protected DataBuffer getDataBuffer(Channel channel, long offset, int len) throws IOException {
  BlockReader blockReader = ((BlockReadRequestInternal) mRequest).mBlockReader;
  Preconditions.checkArgument(blockReader.getChannel() instanceof FileChannel,
      "Only FileChannel is supported!");
  switch (mTransferType) {
    case MAPPED:
      ByteBuf buf = channel.alloc().buffer(len, len);
      try {
        FileChannel fileChannel = (FileChannel) blockReader.getChannel();
        Preconditions.checkState(fileChannel.position() == offset);
        while (buf.writableBytes() > 0 && buf.writeBytes(fileChannel, buf.writableBytes()) != -1) {
        }
        return new DataNettyBufferV2(buf);
      } catch (Throwable e) {
        buf.release();
        throw e;
      }
    case TRANSFER: // intend to fall through as TRANSFER is the default type.
    default:
      return new DataFileChannel((FileChannel) blockReader.getChannel(), offset, len);
  }
}
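The two branches trade a copy for simplicity: MAPPED pulls the block region into a Netty buffer, while TRANSFER wraps the reader's FileChannel in a DataFileChannel so Netty can stream the region without an intermediate copy. The sketch below restates both strategies with stock Netty types only, using DefaultFileRegion in place of DataFileChannel; writeRegion and its zeroCopy flag are hypothetical and assume io.netty.channel.Channel and DefaultFileRegion imports.

// Hypothetical sketch, not the handler above: write one block region to a Netty
// channel either zero-copy (FileRegion) or by copying into a ByteBuf first.
void writeRegion(Channel channel, FileChannel file, long offset, int len, boolean zeroCopy)
    throws IOException {
  if (zeroCopy) {
    // DefaultFileRegion lets Netty transfer bytes from the file to the socket directly;
    // note Netty closes the wrapped FileChannel when the region is released.
    channel.writeAndFlush(new DefaultFileRegion(file, offset, len));
    return;
  }
  file.position(offset);  // start copying at the requested offset
  ByteBuf buf = channel.alloc().buffer(len, len);
  try {
    while (buf.writableBytes() > 0 && buf.writeBytes(file, buf.writableBytes()) != -1) {
    }
    channel.writeAndFlush(buf);  // Netty releases the buffer once it is written
  } catch (Throwable e) {
    buf.release();
    throw e;
  }
}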