Usage of org.apache.ignite.internal.igfs.common.IgfsStreamControlRequest in the Apache Ignite project:
the method HadoopIgfsIpcIo.sendPlain.
/**
 * {@inheritDoc}
 */
@Override
public void sendPlain(IgfsMessage msg) throws IgniteCheckedException {
    // Refuse to send if the client is shutting down concurrently.
    if (!busyLock.readLock().tryLock())
        throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently closed).");

    try {
        if (stopping)
            throw new HadoopIgfsCommunicationException("Failed to send message (client is being concurrently closed).");

        assert msg.command() == IgfsIpcCommand.WRITE_BLOCK;

        IgfsStreamControlRequest writeReq = (IgfsStreamControlRequest)msg;

        // Header carries stream ID at offset 12 and payload length at offset 20
        // (offsets are part of the wire protocol — do not change).
        byte[] header = IgfsMarshaller.createHeader(-1, IgfsIpcCommand.WRITE_BLOCK);

        U.longToBytes(writeReq.streamId(), header, 12);
        U.intToBytes(writeReq.length(), header, 20);

        // Serialize header + payload writes so concurrent senders cannot interleave bytes.
        synchronized (this) {
            out.write(header);
            out.write(writeReq.data(), (int)writeReq.position(), writeReq.length());
            out.flush();
        }
    }
    catch (IOException e) {
        throw new HadoopIgfsCommunicationException(e);
    }
    finally {
        busyLock.readLock().unlock();
    }
}
Usage of org.apache.ignite.internal.igfs.common.IgfsStreamControlRequest in the Apache Ignite project:
the method HadoopIgfsOutProc.readData.
/**
 * {@inheritDoc}
 */
@Override
public IgniteInternalFuture<byte[]> readData(HadoopIgfsStreamDelegate desc, long pos, int len,
    @Nullable final byte[] outBuf, final int outOff, final int outLen) {
    assert len > 0;

    // Build a READ_BLOCK control request addressing the delegate's target stream.
    final IgfsStreamControlRequest readReq = new IgfsStreamControlRequest();

    readReq.command(READ_BLOCK);
    readReq.streamId((long)desc.target());
    readReq.position(pos);
    readReq.length(len);

    try {
        return io.send(readReq, outBuf, outOff, outLen);
    }
    catch (IgniteCheckedException e) {
        // Surface the failure through the returned future instead of throwing.
        return new GridFinishedFuture<>(e);
    }
}
Usage of org.apache.ignite.internal.igfs.common.IgfsStreamControlRequest in the Apache Ignite project:
the method HadoopIgfsOutProc.writeData.
/**
 * {@inheritDoc}
 */
@Override
public void writeData(HadoopIgfsStreamDelegate desc, byte[] data, int off, int len) throws IOException {
    // Build a WRITE_BLOCK control request carrying the payload slice [off, off + len).
    final IgfsStreamControlRequest writeReq = new IgfsStreamControlRequest();

    writeReq.command(WRITE_BLOCK);
    writeReq.streamId((long)desc.target());
    writeReq.data(data);
    writeReq.position(off);
    writeReq.length(len);

    try {
        io.sendPlain(writeReq);
    }
    catch (IgniteCheckedException e) {
        // Translate the Ignite exception into the IOException callers expect.
        throw HadoopIgfsUtils.cast(e);
    }
}
Usage of org.apache.ignite.internal.igfs.common.IgfsStreamControlRequest in the Apache Ignite project:
the method IgfsIpcHandler.processStreamControlRequest.
/**
 * Processes stream control request (CLOSE / READ_BLOCK / WRITE_BLOCK) against a
 * per-session stream resource identified by the request's stream ID.
 *
 * @param ses Session.
 * @param cmd Command.
 * @param msg Message.
 * @param in Data input to read.
 * @return Response message if needed ({@code null} for a successful WRITE_BLOCK,
 *      which sends no response).
 * @throws IgniteCheckedException If failed.
 * @throws IOException If failed.
 */
private IgfsMessage processStreamControlRequest(IgfsClientSession ses, IgfsIpcCommand cmd, IgfsMessage msg, DataInput in) throws IgniteCheckedException, IOException {
    IgfsStreamControlRequest req = (IgfsStreamControlRequest) msg;

    Long rsrcId = req.streamId();

    IgfsControlResponse resp = new IgfsControlResponse();

    switch(cmd) {
        case CLOSE:
        {
            Closeable res = resource(ses, rsrcId);

            if (log.isDebugEnabled())
                log.debug("Requested to close resource [igfsName=" + igfs.name() + ", rsrcId=" + rsrcId + ", res=" + res + ']');

            if (res == null)
                throw new IgniteCheckedException("Resource to close not found: " + rsrcId);

            try {
                res.close();
            } catch (IOException e) {
                // Unwrap OutOfSpaceException, if has one.
                IgfsOutOfSpaceException space = X.cause(e, IgfsOutOfSpaceException.class);

                if (space != null)
                    throw space;

                throw e;
            }

            // Resource was found above, so unregistration must succeed.
            boolean success = ses.unregisterResource(rsrcId, res);

            assert success : "Failed to unregister resource [igfsName=" + igfs.name() + ", rsrcId=" + rsrcId + ", res=" + res + ']';

            if (log.isDebugEnabled())
                log.debug("Closed IGFS stream [igfsName=" + igfs.name() + ", streamId=" + rsrcId + ", ses=" + ses + ']');

            resp.response(true);

            break;
        }

        case READ_BLOCK:
        {
            long pos = req.position();
            int size = req.length();

            IgfsInputStreamImpl igfsIn = (IgfsInputStreamImpl) resource(ses, rsrcId);

            if (igfsIn == null)
                throw new IgniteCheckedException("Input stream not found (already closed?): " + rsrcId);

            byte[][] chunks = igfsIn.readChunks(pos, size);

            resp.response(chunks);

            // Calculate number of read bytes.
            // len = len(first) + (n - 2) * len(block) + len(last).
            // NOTE(review): assumes all middle chunks have equal length
            // (chunks[1].length) — presumably guaranteed by readChunks; verify.
            int len = 0;

            if (chunks.length > 0)
                len += chunks[0].length;

            if (chunks.length > 1)
                len += chunks[chunks.length - 1].length;

            if (chunks.length > 2)
                len += chunks[1].length * (chunks.length - 2);

            resp.length(len);

            break;
        }

        case WRITE_BLOCK:
        {
            IgfsOutputStream out = (IgfsOutputStream) resource(ses, rsrcId);

            if (out == null)
                throw new IgniteCheckedException("Output stream not found (already closed?): " + rsrcId);

            int writeLen = req.length();

            try {
                // Copy the payload straight from the request's data input into the stream.
                out.transferFrom(in, writeLen);

                // errWrite is a test hook forcing a write failure.
                if (errWrite)
                    throw new IOException("Failed to write data to server (test).");

                // No response needed.
                return null;
            } catch (IOException e) {
                // Report the write failure to the client instead of propagating.
                resp.error(rsrcId, e.getMessage());

                break;
            }
        }

        default:
            assert false;

            break;
    }

    return resp;
}
Usage of org.apache.ignite.internal.igfs.common.IgfsStreamControlRequest in the Apache Ignite project:
the method HadoopIgfsOutProc.closeStream.
/**
 * {@inheritDoc}
 */
@Override
public void closeStream(HadoopIgfsStreamDelegate desc) throws IOException {
    // Build a CLOSE control request for the delegate's target stream.
    final IgfsStreamControlRequest closeReq = new IgfsStreamControlRequest();

    closeReq.command(CLOSE);
    closeReq.streamId((long)desc.target());

    try {
        // Block until the server acknowledges the close.
        io.send(closeReq).chain(BOOL_RES).get();
    }
    catch (IgniteCheckedException e) {
        // Translate the Ignite exception into the IOException callers expect.
        throw HadoopIgfsUtils.cast(e);
    }
}
Aggregations