Use of java.nio.channels.WritableByteChannel in project openhab1-addons by openhab.
The class CULNetworkHandlerImpl, method processWrite.
private void processWrite(SelectionKey key) throws IOException {
    WritableByteChannel ch = (WritableByteChannel) key.channel();
    synchronized (writeBuf) {
        writeBuf.flip();
        int bytesOp = 0, bytesTotal = 0;
        while (writeBuf.hasRemaining() && (bytesOp = ch.write(writeBuf)) > 0) {
            bytesTotal += bytesOp;
        }
        logger.debug("Written {} bytes to the network", bytesTotal);
        if (writeBuf.remaining() == 0) {
            key.interestOps(key.interestOps() ^ SelectionKey.OP_WRITE);
        }
        if (bytesTotal > 0) {
            writeBuf.notify();
        } else if (bytesOp == -1) {
            logger.info("peer closed write channel");
            ch.close();
        }
        writeBuf.compact();
    }
}
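The write loop above is the standard drain pattern for channels: on a non-blocking socket a single write() may make only partial progress, so the handler retries until the buffer is empty or the channel stops accepting bytes. (Per the WritableByteChannel contract, write() returns the number of bytes written, possibly zero, and never -1, so the bytesOp == -1 branch is defensive rather than required.) A minimal, self-contained sketch of the same drain pattern, with an in-memory sink standing in for the socket (class name and sink are illustrative):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;

public class DrainLoopSketch {
    public static void main(String[] args) throws IOException {
        ByteBuffer buf = ByteBuffer.wrap("hello channel".getBytes(StandardCharsets.UTF_8));
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        try (WritableByteChannel ch = Channels.newChannel(sink)) {
            // A single write() may accept only part of the buffer on a
            // non-blocking channel; loop until the buffer is drained or
            // the channel stops making progress, as in the handler above.
            while (buf.hasRemaining() && ch.write(buf) > 0) {
                // keep draining
            }
        }
        System.out.println(sink.size() + " bytes written"); // 13 bytes written
    }
}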
Use of java.nio.channels.WritableByteChannel in project alluxio by Alluxio.
The class LocalFileBlockWriterTest, method getChannel.
/**
* Test for the {@link LocalFileBlockWriter#getChannel()} method.
*/
@Test
public void getChannel() throws Exception {
    WritableByteChannel channel = mWriter.getChannel();
    Assert.assertNotNull(channel);
    ByteBuffer buffer = BufferUtils.getIncreasingByteBuffer((int) TEST_BLOCK_SIZE);
    Assert.assertEquals(TEST_BLOCK_SIZE, channel.write(buffer));
    channel.close();
    Assert.assertEquals(TEST_BLOCK_SIZE, new File(mTestFilePath).length());
}
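mWriter and BufferUtils.getIncreasingByteBuffer are Alluxio test fixtures. The same write-then-verify-length pattern can be reproduced with plain JDK classes; a minimal sketch, with the 64-byte buffer size chosen arbitrarily:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;

public class ChannelWriteSketch {
    public static void main(String[] args) throws IOException {
        File file = File.createTempFile("block", ".tmp");
        ByteBuffer buffer = ByteBuffer.allocate(64);
        for (int i = 0; i < buffer.capacity(); i++) {
            buffer.put((byte) i); // increasing bytes, like getIncreasingByteBuffer
        }
        buffer.flip(); // switch the buffer from filling to draining
        try (WritableByteChannel channel = Channels.newChannel(new FileOutputStream(file))) {
            System.out.println("wrote " + channel.write(buffer) + " bytes");
        }
        System.out.println("file length: " + file.length()); // 64
        file.delete();
    }
}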
Use of java.nio.channels.WritableByteChannel in project alluxio by Alluxio.
The class UnderFileSystemDataServerHandler, method handleFileWriteRequest.
/**
* Handles a {@link RPCFileWriteRequest} by writing the data through an output stream provided
* by the file worker. This method only allows appending data to the file and does not support
* writing at arbitrary offsets.
*
* @param ctx The context of this request which handles the result of this operation
* @param req The initiating {@link RPCFileWriteRequest}
* @throws IOException if an I/O error occurs when interacting with the UFS
*/
public void handleFileWriteRequest(ChannelHandlerContext ctx, RPCFileWriteRequest req) throws IOException {
    long ufsFileId = req.getTempUfsFileId();
    // Currently unused, as only sequential writes are supported
    long offset = req.getOffset();
    long length = req.getLength();
    final DataBuffer data = req.getPayloadDataBuffer();
    try {
        OutputStream out = mWorker.getUfsOutputStream(ufsFileId);
        // This channel is not closed here because the underlying stream must stay
        // open; the channel is cleaned up when the underlying stream is closed.
        WritableByteChannel channel = Channels.newChannel(out);
        channel.write(data.getReadOnlyByteBuffer());
        RPCFileWriteResponse resp =
            new RPCFileWriteResponse(ufsFileId, offset, length, RPCResponse.Status.SUCCESS);
        ctx.writeAndFlush(resp);
    } catch (Exception e) {
        // TODO(peis): Fix this. The exception here should never be caused by a Netty-related issue.
        LOG.error("Failed to write ufs file.", e);
        RPCFileWriteResponse resp =
            RPCFileWriteResponse.createErrorResponse(req, RPCResponse.Status.UFS_WRITE_FAILED);
        ChannelFuture future = ctx.writeAndFlush(resp);
        future.addListener(ChannelFutureListener.CLOSE);
    }
}
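The comment in the handler hinges on a property of Channels.newChannel(OutputStream): the returned channel only wraps the stream, and closing the channel closes the stream underneath it, so the handler must leave the channel open to keep the UFS stream usable. A small sketch demonstrating that delegation (the FilterOutputStream subclass exists only to make the close observable):

import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;

public class NewChannelCloseSketch {
    public static void main(String[] args) throws IOException {
        OutputStream out = new FilterOutputStream(System.out) {
            @Override
            public void close() throws IOException {
                flush();
                System.out.println("\nunderlying stream closed");
                // deliberately do not close System.out itself
            }
        };
        WritableByteChannel channel = Channels.newChannel(out);
        channel.write(ByteBuffer.wrap("payload".getBytes(StandardCharsets.UTF_8)));
        channel.close(); // closes the wrapped stream, printing the message above
    }
}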
Use of java.nio.channels.WritableByteChannel in project alluxio by Alluxio.
The class FileDataManager, method persistFile.
/**
* Persists the blocks of a file into the under file system.
*
* @param fileId the id of the file
* @param blockIds the list of block ids
* @throws AlluxioException if an unexpected Alluxio exception is thrown
* @throws IOException if the file persistence fails
*/
public void persistFile(long fileId, List<Long> blockIds) throws AlluxioException, IOException {
    Map<Long, Long> blockIdToLockId;
    synchronized (mLock) {
        blockIdToLockId = mPersistingInProgressFiles.get(fileId);
        if (blockIdToLockId == null || !blockIdToLockId.keySet().equals(new HashSet<>(blockIds))) {
            throw new IOException("Not all the blocks of file " + fileId + " are locked");
        }
    }
    String dstPath = prepareUfsFilePath(fileId);
    UnderFileSystem ufs = UnderFileSystem.Factory.get(dstPath);
    FileInfo fileInfo = mBlockWorker.getFileInfo(fileId);
    OutputStream outputStream = ufs.create(dstPath,
        CreateOptions.defaults()
            .setOwner(fileInfo.getOwner())
            .setGroup(fileInfo.getGroup())
            .setMode(new Mode((short) fileInfo.getMode())));
    final WritableByteChannel outputChannel = Channels.newChannel(outputStream);
    List<Throwable> errors = new ArrayList<>();
    try {
        for (long blockId : blockIds) {
            long lockId = blockIdToLockId.get(blockId);
            if (Configuration.getBoolean(PropertyKey.WORKER_FILE_PERSIST_RATE_LIMIT_ENABLED)) {
                BlockMeta blockMeta =
                    mBlockWorker.getBlockMeta(Sessions.CHECKPOINT_SESSION_ID, blockId, lockId);
                mPersistenceRateLimiter.acquire((int) blockMeta.getBlockSize());
            }
            // obtain a block reader
            BlockReader reader =
                mBlockWorker.readBlockRemote(Sessions.CHECKPOINT_SESSION_ID, blockId, lockId);
            // write the block's content out
            ReadableByteChannel inputChannel = reader.getChannel();
            BufferUtils.fastCopy(inputChannel, outputChannel);
            reader.close();
        }
    } catch (BlockDoesNotExistException | InvalidWorkerStateException e) {
        errors.add(e);
    } finally {
        // make sure all the locks are released
        for (long lockId : blockIdToLockId.values()) {
            try {
                mBlockWorker.unlockBlock(lockId);
            } catch (BlockDoesNotExistException e) {
                errors.add(e);
            }
        }
        // process any errors
        if (!errors.isEmpty()) {
            StringBuilder errorStr = new StringBuilder();
            errorStr.append("The blocks of file ").append(fileId).append(" failed to persist\n");
            for (Throwable e : errors) {
                errorStr.append(e).append('\n');
            }
            throw new IOException(errorStr.toString());
        }
    }
    outputStream.flush();
    outputChannel.close();
    outputStream.close();
    synchronized (mLock) {
        mPersistingInProgressFiles.remove(fileId);
        mPersistedFiles.add(fileId);
    }
}
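BufferUtils.fastCopy is an Alluxio helper that pumps one channel into another. Channel-to-channel copies of this kind are typically written as a read/flip/write/compact loop over a single reusable buffer; a sketch of that pattern (the 16 KiB direct buffer and class name are illustrative, not taken from Alluxio's implementation):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;

public class ChannelCopy {
    /** Copies everything from src to dest using a single reusable buffer. */
    public static void copy(ReadableByteChannel src, WritableByteChannel dest) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocateDirect(16 * 1024);
        while (src.read(buffer) != -1) {
            buffer.flip();    // switch the buffer to drain mode
            dest.write(buffer);
            buffer.compact(); // preserve any bytes write() did not take
        }
        // drain whatever is still buffered after EOF
        buffer.flip();
        while (buffer.hasRemaining()) {
            dest.write(buffer);
        }
    }
}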
Use of java.nio.channels.WritableByteChannel in project beam by apache.
The class FakeJobService, method writeRowsHelper.
private void writeRowsHelper(List<TableRow> rows, Schema avroSchema, String destinationPattern, int shard) throws IOException {
    String filename = destinationPattern.replace("*", String.format("%012d", shard));
    try (WritableByteChannel channel = FileSystems.create(
             FileSystems.matchNewResource(filename, false), MimeTypes.BINARY);
         DataFileWriter<GenericRecord> tableRowWriter =
             new DataFileWriter<>(new GenericDatumWriter<GenericRecord>(avroSchema))
                 .create(avroSchema, Channels.newOutputStream(channel))) {
        for (Map<String, Object> record : rows) {
            GenericRecordBuilder genericRecordBuilder = new GenericRecordBuilder(avroSchema);
            for (Map.Entry<String, Object> field : record.entrySet()) {
                genericRecordBuilder.set(field.getKey(), field.getValue());
            }
            tableRowWriter.append(genericRecordBuilder.build());
        }
    } catch (IOException e) {
        throw new IllegalStateException(
            String.format("Could not create destination for extract job %s", filename), e);
    }
}
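Here the adaptation runs in the opposite direction from the Alluxio examples: Channels.newOutputStream(channel) turns Beam's WritableByteChannel into the OutputStream that Avro's DataFileWriter expects. A self-contained sketch of the same bridge, with a local file channel standing in for FileSystems.create (path and payload are illustrative):

import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class ChannelToStreamSketch {
    public static void main(String[] args) throws IOException {
        Path target = Files.createTempFile("extract-", ".bin");
        try (WritableByteChannel channel =
                 Files.newByteChannel(target, StandardOpenOption.WRITE);
             OutputStream out = Channels.newOutputStream(channel)) {
            // Stream-oriented writers (e.g. Avro's DataFileWriter) take an
            // OutputStream, so the channel is adapted rather than used directly.
            out.write("record bytes".getBytes(StandardCharsets.UTF_8));
        }
        System.out.println(Files.size(target) + " bytes written"); // 12
        Files.delete(target);
    }
}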