Use of java.nio.channels.WritableByteChannel in project deeplearning4j by deeplearning4j.
The compile method of the ConnectionCostsCompiler class:
@Override
public void compile() throws IOException {
    DataOutputStream dataOutput = new DataOutputStream(new BufferedOutputStream(output));
    dataOutput.writeInt(cardinality);
    dataOutput.writeInt(bufferSize * SHORT_BYTES);
    ByteBuffer byteBuffer = ByteBuffer.allocate(costs.array().length * SHORT_BYTES);
    for (short cost : this.costs.array()) {
        byteBuffer.putShort(cost);
    }
    WritableByteChannel channel = Channels.newChannel(dataOutput);
    byteBuffer.flip();
    channel.write(byteBuffer);
    dataOutput.close();
}
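The method above buffers the cost table as shorts, flips the buffer, and pushes it through a channel wrapped around the buffered stream. Below is a minimal, self-contained sketch of the same pattern; the class name, file path, and header fields are illustrative assumptions, not part of the deeplearning4j code.

import java.io.BufferedOutputStream;
import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;

public class ShortArrayWriter {
    private static final int SHORT_BYTES = Short.BYTES;

    // Writes a small header followed by the raw short values, mirroring compile().
    public static void write(String path, short[] values) throws IOException {
        try (DataOutputStream dataOutput =
                 new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)))) {
            dataOutput.writeInt(values.length);                // header: element count
            dataOutput.writeInt(values.length * SHORT_BYTES);  // header: payload size in bytes

            ByteBuffer byteBuffer = ByteBuffer.allocate(values.length * SHORT_BYTES);
            for (short v : values) {
                byteBuffer.putShort(v);
            }
            byteBuffer.flip(); // switch from filling the buffer to draining it

            // A stream-backed channel forwards the bytes to the same buffered stream
            // that the header was written to, so everything ends up in order.
            WritableByteChannel channel = Channels.newChannel(dataOutput);
            channel.write(byteBuffer);
        }
    }
}

Wrapping the DataOutputStream with Channels.newChannel lets the primitive header writes and the bulk ByteBuffer write share a single buffered stream.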
Use of java.nio.channels.WritableByteChannel in project tomcat by apache.
The drainChannel method of the NioReplicationTask class:
/**
 * The actual code which drains the channel associated with
 * the given key. This method assumes the key has been
 * modified prior to invocation to turn off selection
 * interest in OP_READ. When this method completes it
 * re-enables OP_READ and calls wakeup() on the selector
 * so the selector will resume watching this channel.
 * @param key The key to process
 * @param reader The reader
 * @throws Exception IO error
 */
protected void drainChannel(final SelectionKey key, ObjectReader reader) throws Exception {
    reader.access();
    ReadableByteChannel channel = (ReadableByteChannel) key.channel();
    int count = -1;
    // make buffer empty
    buffer.clear();
    SocketAddress saddr = null;
    if (channel instanceof SocketChannel) {
        // loop while data is available; the channel is non-blocking
        while ((count = channel.read(buffer)) > 0) {
            // make buffer readable
            buffer.flip();
            if (buffer.hasArray())
                reader.append(buffer.array(), 0, count, false);
            else
                reader.append(buffer, count, false);
            // make buffer empty
            buffer.clear();
            // do we have at least one package?
            if (reader.hasPackage())
                break;
        }
    } else if (channel instanceof DatagramChannel) {
        DatagramChannel dchannel = (DatagramChannel) channel;
        saddr = dchannel.receive(buffer);
        // make buffer readable
        buffer.flip();
        if (buffer.hasArray())
            reader.append(buffer.array(), 0, buffer.limit() - buffer.position(), false);
        else
            reader.append(buffer, buffer.limit() - buffer.position(), false);
        // make buffer empty
        buffer.clear();
        // did we get a package?
        count = reader.hasPackage() ? 1 : -1;
    }
    int pkgcnt = reader.count();
    if (count < 0 && pkgcnt == 0) {
        // end of stream, and no more packages to process
        remoteEof(key);
        return;
    }
    ChannelMessage[] msgs = pkgcnt == 0 ? ChannelData.EMPTY_DATA_ARRAY : reader.execute();
    // register to read new data before we send it off, to avoid deadlocks
    registerForRead(key, reader);
    for (int i = 0; i < msgs.length; i++) {
        /*
         * Use send ack here if you want to ack the request to the remote
         * server before completing the request.
         * This is considered an asynchronous request.
         */
        if (ChannelData.sendAckAsync(msgs[i].getOptions()))
            sendAck(key, (WritableByteChannel) channel, Constants.ACK_COMMAND, saddr);
        try {
            if (Logs.MESSAGES.isTraceEnabled()) {
                try {
                    Logs.MESSAGES.trace("NioReplicationThread - Received msg:" + new UniqueId(msgs[i].getUniqueId()) + " at " + new java.sql.Timestamp(System.currentTimeMillis()));
                } catch (Throwable t) {
                }
            }
            // process the message
            getCallback().messageDataReceived(msgs[i]);
            /*
             * Use send ack here if you want the request to complete on this
             * server before sending the ack to the remote server.
             * This is considered a synchronized request.
             */
            if (ChannelData.sendAckSync(msgs[i].getOptions()))
                sendAck(key, (WritableByteChannel) channel, Constants.ACK_COMMAND, saddr);
        } catch (RemoteProcessException e) {
            if (log.isDebugEnabled())
                log.error(sm.getString("nioReplicationTask.process.clusterMsg.failed"), e);
            if (ChannelData.sendAckSync(msgs[i].getOptions()))
                sendAck(key, (WritableByteChannel) channel, Constants.FAIL_ACK_COMMAND, saddr);
        } catch (Exception e) {
            log.error(sm.getString("nioReplicationTask.process.clusterMsg.failed"), e);
            if (ChannelData.sendAckSync(msgs[i].getOptions()))
                sendAck(key, (WritableByteChannel) channel, Constants.FAIL_ACK_COMMAND, saddr);
        }
        if (getUseBufferPool()) {
            BufferPool.getBufferPool().returnBuffer(msgs[i].getMessage());
            msgs[i].setMessage(null);
        }
    }
    if (count < 0) {
        remoteEof(key);
        return;
    }
}
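The SocketChannel branch above is the classic non-blocking drain loop: clear the buffer, read while data is available, flip, hand the bytes to the reader, clear again. A stripped-down sketch of that loop, without Tomcat's ObjectReader machinery, might look like the following; the helper class and buffer size are assumptions for illustration.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

public final class ChannelDrain {
    // Reads whatever is currently available from a (possibly non-blocking) channel.
    // Returns the bytes read, or null if the channel hit end-of-stream with nothing buffered.
    public static byte[] drain(ReadableByteChannel channel) throws IOException {
        ByteArrayOutputStream collected = new ByteArrayOutputStream();
        ByteBuffer buffer = ByteBuffer.allocate(4096);
        int count;
        while ((count = channel.read(buffer)) > 0) {
            buffer.flip();                  // make the buffer readable
            byte[] chunk = new byte[count];
            buffer.get(chunk);
            collected.write(chunk);
            buffer.clear();                 // make the buffer writable again
        }
        if (count < 0 && collected.size() == 0) {
            return null;                    // end of stream, nothing read
        }
        return collected.toByteArray();
    }
}

On a non-blocking channel, read() returning 0 simply means no more data is available right now, which is why the loop only continues while the return value is strictly positive.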
Use of java.nio.channels.WritableByteChannel in project AndroidAsync by koush.
The interpretResponseEmitter method of the NetworkEventReporterWrapper class:
public DataEmitter interpretResponseEmitter(final String requestId, @Nullable DataEmitter body, final boolean b64Encode) {
    final NetworkPeerManager peerManager = getPeerManagerIfEnabled();
    if (peerManager == null)
        return null;
    final WritableByteChannel channel;
    try {
        if (b64Encode) {
            final Base64OutputStream b64out = new Base64OutputStream(peerManager.getResponseBodyFileManager().openResponseBodyFile(requestId, false), Base64.DEFAULT);
            channel = Channels.newChannel(b64out);
        } else {
            channel = ((FileOutputStream) peerManager.getResponseBodyFileManager().openResponseBodyFile(requestId, false)).getChannel();
        }
    } catch (IOException e) {
        return null;
    }
    FilteredDataEmitter ret = new FilteredDataEmitter() {
        ByteBufferList pending = new ByteBufferList();

        @Override
        protected void report(Exception e) {
            super.report(e);
            StreamUtility.closeQuietly(channel);
            if (e == null)
                responseReadFinished(requestId);
            else
                responseReadFailed(requestId, e.toString());
        }

        @Override
        public void onDataAvailable(DataEmitter emitter, ByteBufferList bb) {
            int amount = bb.remaining();
            ByteBuffer[] original = bb.getAllArray();
            ByteBuffer[] copy = new ByteBuffer[original.length];
            for (int i = 0; i < original.length; i++) {
                copy[i] = original[i].duplicate();
            }
            try {
                for (ByteBuffer c : copy) {
                    channel.write(c);
                }
            } catch (IOException ignored) {
                StreamUtility.closeQuietly(channel);
            }
            pending.addAll(original);
            dataReceived(requestId, amount, amount);
            super.onDataAvailable(emitter, pending);
        }
    };
    ret.setDataEmitter(body);
    return ret;
}
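In onDataAvailable, duplicates of the incoming buffers are written to the channel so that the original buffers keep their positions for the downstream emitter. The idea in isolation is sketched below; the helper class and method name are invented for illustration.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

public final class BufferTee {
    // Writes copies of the buffers to the channel; the originals' positions and
    // limits are untouched, so they can still be handed to another consumer.
    public static void teeTo(WritableByteChannel channel, ByteBuffer[] original) throws IOException {
        for (ByteBuffer b : original) {
            ByteBuffer copy = b.duplicate(); // shares the content, independent position/limit
            while (copy.hasRemaining()) {
                channel.write(copy);
            }
        }
    }
}

duplicate() is cheaper than copying the bytes: both buffers view the same backing data, so draining the duplicate never disturbs the original's read position.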
Use of java.nio.channels.WritableByteChannel in project AndroidAsync by koush.
The copyStream method of the StreamUtility class:
public static void copyStream(InputStream input, OutputStream output) throws IOException {
    final ReadableByteChannel inputChannel = Channels.newChannel(input);
    final WritableByteChannel outputChannel = Channels.newChannel(output);
    // copy the channels
    fastChannelCopy(inputChannel, outputChannel);
}
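fastChannelCopy itself is not shown in this snippet; it presumably implements the usual read/flip/write/compact loop between the two channels. A sketch of that loop under this assumption follows; the class name and buffer size are illustrative, not AndroidAsync's actual code.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import java.nio.channels.WritableByteChannel;

final class ChannelCopy {
    // Copies everything from src to dest using one reusable buffer.
    static void fastChannelCopy(ReadableByteChannel src, WritableByteChannel dest) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocateDirect(16 * 1024);
        while (src.read(buffer) != -1) {
            buffer.flip();       // prepare the buffer to be drained
            dest.write(buffer);  // the write may not drain everything
            buffer.compact();    // keep any leftover bytes and continue filling
        }
        buffer.flip();           // EOF reached: drain whatever is still buffered
        while (buffer.hasRemaining()) {
            dest.write(buffer);
        }
    }
}

The compact() call is what makes a partial write safe: unwritten bytes are moved to the front of the buffer before the next read appends to them.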
Use of java.nio.channels.WritableByteChannel in project hadoop by apache.
The testCustomShuffleTransfer method of the TestFadvisedFileRegion class:
@Test(timeout = 100000)
public void testCustomShuffleTransfer() throws IOException {
    File absLogDir = new File("target", TestFadvisedFileRegion.class.getSimpleName() + "LocDir").getAbsoluteFile();
    String testDirPath = StringUtils.join(Path.SEPARATOR, new String[] { absLogDir.getAbsolutePath(), "testCustomShuffleTransfer" });
    File testDir = new File(testDirPath);
    testDir.mkdirs();
    System.out.println(testDir.getAbsolutePath());
    File inFile = new File(testDir, "fileIn.out");
    File outFile = new File(testDir, "fileOut.out");

    // Initialize input file
    byte[] initBuff = new byte[FILE_SIZE];
    Random rand = new Random();
    rand.nextBytes(initBuff);
    FileOutputStream out = new FileOutputStream(inFile);
    try {
        out.write(initBuff);
    } finally {
        IOUtils.cleanup(LOG, out);
    }

    // define position and count to read from a file region
    int position = 2 * 1024 * 1024;
    int count = 4 * 1024 * 1024 - 1;
    RandomAccessFile inputFile = null;
    RandomAccessFile targetFile = null;
    WritableByteChannel target = null;
    FadvisedFileRegion fileRegion = null;
    try {
        inputFile = new RandomAccessFile(inFile.getAbsolutePath(), "r");
        targetFile = new RandomAccessFile(outFile.getAbsolutePath(), "rw");
        target = targetFile.getChannel();
        Assert.assertEquals(FILE_SIZE, inputFile.length());
        // create FadvisedFileRegion
        fileRegion = new FadvisedFileRegion(inputFile, position, count, false, 0, null, null, 1024, false);
        // test corner cases
        customShuffleTransferCornerCases(fileRegion, target, count);
        long pos = 0;
        long size;
        while ((size = fileRegion.customShuffleTransfer(target, pos)) > 0) {
            pos += size;
        }
        // assert size
        Assert.assertEquals(count, (int) pos);
        Assert.assertEquals(count, targetFile.length());
    } finally {
        if (fileRegion != null) {
            fileRegion.releaseExternalResources();
        }
        IOUtils.cleanup(LOG, target);
        IOUtils.cleanup(LOG, targetFile);
        IOUtils.cleanup(LOG, inputFile);
    }

    // Read the target file and verify that copy is done correctly
    byte[] buff = new byte[FILE_SIZE];
    FileInputStream in = new FileInputStream(outFile);
    try {
        int total = in.read(buff, 0, count);
        Assert.assertEquals(count, total);
        for (int i = 0; i < count; i++) {
            Assert.assertEquals(initBuff[position + i], buff[i]);
        }
    } finally {
        IOUtils.cleanup(LOG, in);
    }

    // delete files and folders
    inFile.delete();
    outFile.delete();
    testDir.delete();
    absLogDir.delete();
}
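The while loop around customShuffleTransfer exists because a single transfer call may move fewer bytes than requested, so the test keeps advancing the position until the call returns zero. The same looping pattern expressed with plain FileChannel.transferTo is sketched below; the helper class and method are hypothetical, not part of Hadoop's FadvisedFileRegion.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.nio.channels.WritableByteChannel;

final class RegionTransfer {
    // Transfers 'count' bytes starting at 'position' from the source file to the
    // target channel, looping because transferTo may move fewer bytes per call.
    static long transferRegion(RandomAccessFile source, WritableByteChannel target,
                               long position, long count) throws IOException {
        FileChannel in = source.getChannel();
        long transferred = 0;
        while (transferred < count) {
            long n = in.transferTo(position + transferred, count - transferred, target);
            if (n <= 0) {
                break; // the target is temporarily full or the region is exhausted
            }
            transferred += n;
        }
        return transferred;
    }
}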