use of org.dcache.nfs.v4.NFS4Client in project dcache by dCache.
the class NFSv41Door method messageArrived.
public DoorValidateMoverMessage<org.dcache.chimera.nfs.v4.xdr.stateid4> messageArrived(DoorValidateMoverMessage<org.dcache.chimera.nfs.v4.xdr.stateid4> message) {
    org.dcache.chimera.nfs.v4.xdr.stateid4 legacyStateid = message.getChallenge();
    stateid4 stateid = new stateid4(legacyStateid.other, legacyStateid.seqid.value);
    boolean isValid = false;
    try {
        NFS4Client nfsClient = _nfs4.getStateHandler().getClientIdByStateId(stateid);
        // throws an exception if the state no longer exists
        nfsClient.state(stateid);
        isValid = true;
    } catch (BadStateidException e) {
        // stale or unknown stateid: the mover is no longer valid
    } catch (ChimeraNFSException e) {
        _log.warn("Unexpected NFS exception: {}", e.getMessage());
    }
    message.setIsValid(isValid);
    return message;
}
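The lookup-and-touch pattern used above (resolve the owning NFS4Client through the state handler, then call state() to confirm the stateid is still known) can be isolated into a small helper. The sketch below is illustrative only: the helper name isStateidKnown is hypothetical, and it assumes the caller holds a reference to the door's NFSv4StateHandler.

// Hypothetical helper mirroring the validation above: true if the stateid is
// still tracked by the state handler, false if it has gone stale.
private boolean isStateidKnown(NFSv4StateHandler stateHandler, stateid4 stateid) {
    try {
        NFS4Client client = stateHandler.getClientIdByStateId(stateid);
        client.state(stateid); // throws if the state no longer exists
        return true;
    } catch (BadStateidException e) {
        return false;
    } catch (ChimeraNFSException e) {
        _log.warn("Unexpected NFS exception: {}", e.getMessage());
        return false;
    }
}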
use of org.dcache.nfs.v4.NFS4Client in project dcache by dCache.
the class NFSv41Door method layoutReturn.
/*
 * (non-Javadoc)
 *
 * @see org.dcache.chimera.nfsv4.NFSv41DeviceManager#releaseDevice(stateid4 stateid)
 */
@Override
public void layoutReturn(CompoundContext context, LAYOUTRETURN4args args) throws IOException {
    if (args.lora_layoutreturn.lr_returntype == layoutreturn_type4.LAYOUTRETURN4_FILE) {
        layouttype4 layoutType = layouttype4.valueOf(args.lora_layout_type);
        final stateid4 stateid = Stateids.getCurrentStateidIfNeeded(context, args.lora_layoutreturn.lr_layout.lrf_stateid);
        final NFS4Client client;
        if (context.getMinorversion() > 0) {
            client = context.getSession().getClient();
        } else {
            // NFSv4.0 clients use the proxy-io adapter, which calls layoutreturn
            client = context.getStateHandler().getClientIdByStateId(stateid);
        }
        final NFS4State layoutState = client.state(stateid);
        final NFS4State openState = layoutState.getOpenState();
        _log.debug("Releasing layout by stateid: {}, open-state: {}", stateid, openState.stateid());
        getLayoutDriver(layoutType).acceptLayoutReturnData(context, args.lora_layoutreturn.lr_layout.lrf_body);
        NfsTransfer transfer = _transfers.get(openState.stateid());
        if (transfer != null) {
            transfer.shutdownMover();
        }
        // any further use of this layout stateid must fail with NFS4ERR_BAD_STATEID
        client.releaseState(stateid);
    }
}
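This method and the ones below resolve the NFS4Client differently depending on the protocol minor version: NFSv4.1+ requests carry a session, while NFSv4.0 requests have to be mapped back through the state handler. A minimal sketch of that shared pattern, using only the CompoundContext calls seen above; the helper name resolveClient and the declared exception are assumptions.

// Hypothetical helper: take the client from the session (v4.1 and later) or
// look it up by stateid (v4.0, where requests arrive via the proxy-io adapter).
private static NFS4Client resolveClient(CompoundContext context, stateid4 stateid)
      throws ChimeraNFSException {
    if (context.getMinorversion() > 0) {
        return context.getSession().getClient();
    }
    return context.getStateHandler().getClientIdByStateId(stateid);
}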
use of org.dcache.nfs.v4.NFS4Client in project dcache by dCache.
the class NFSv41Door method layoutGet.
/**
 * Ask the pool manager for a file.
 * <p>
 * On a successful reply from the pool manager, a corresponding IO request is sent to the pool
 * to start an NFS mover.
 *
 * @throws ChimeraNFSException in case of NFS-friendly errors (like ACCESS)
 * @throws IOException in case of any other errors
 */
@Override
public Layout layoutGet(CompoundContext context, LAYOUTGET4args args) throws IOException {
    Inode nfsInode = context.currentInode();
    layouttype4 layoutType = layouttype4.valueOf(args.loga_layout_type);
    final stateid4 stateid = Stateids.getCurrentStateidIfNeeded(context, args.loga_stateid);
    LayoutDriver layoutDriver = getLayoutDriver(layoutType);
    final NFS4Client client;
    if (context.getMinorversion() == 0) {
        /* NFSv4.0 clients may use the proxy-io path, so resolve the client by stateid */
        client = context.getStateHandler().getClientIdByStateId(stateid);
    } else {
        client = context.getSession().getClient();
    }
    CDC cdcContext = CDC.reset(getCellName(), getCellDomainName());
    try {
        FsInode inode = _chimeraVfs.inodeFromBytes(nfsInode.getFileId());
        PnfsId pnfsId = new PnfsId(inode.getId());
        deviceid4[] devices;
        final NFS4State openStateId = client.state(stateid).getOpenState();
        final NFS4State layoutStateId;
        // serialize all requests that share the same open-stateid
        synchronized (openStateId) {
            if (inode.type() != FsInodeType.INODE || inode.getLevel() != 0) {
                /*
                 * All non-regular files (aka pnfs dot files) are served by the door itself.
                 */
                throw new LayoutUnavailableException("special DOT file");
            }
            final InetSocketAddress remote = context.getRpcCall().getTransport().getRemoteSocketAddress();
            final NFS4ProtocolInfo protocolInfo = new NFS4ProtocolInfo(remote,
                  new org.dcache.chimera.nfs.v4.xdr.stateid4(stateid), nfsInode.toNfsHandle());
            NfsTransfer transfer = _transfers.get(openStateId.stateid());
            if (transfer == null) {
                Transfer.initSession(false, false);
                NDC.push(pnfsId.toString());
                NDC.push(context.getRpcCall().getTransport().getRemoteSocketAddress().toString());
                transfer = args.loga_iomode == layoutiomode4.LAYOUTIOMODE4_RW
                      ? new WriteTransfer(_pnfsHandler, client, openStateId, nfsInode,
                            context.getRpcCall().getCredential().getSubject())
                      : new ReadTransfer(_pnfsHandler, client, openStateId, nfsInode,
                            context.getRpcCall().getCredential().getSubject());
                transfer.setProtocolInfo(protocolInfo);
                transfer.setCellAddress(getCellAddress());
                transfer.setBillingStub(_billingStub);
                transfer.setPoolStub(_poolStub);
                transfer.setPoolManagerStub(_poolManagerStub);
                transfer.setPnfsId(pnfsId);
                transfer.setClientAddress(remote);
                transfer.setIoQueue(_ioQueue);
                transfer.setKafkaSender(_kafkaSender);
                /*
                 * As all our layouts are marked 'return-on-close', stop the mover when
                 * the open-state is disposed on CLOSE.
                 */
                final NfsTransfer t = transfer;
                openStateId.addDisposeListener(state -> {
                    /*
                     * Clean up the transfer when the state is invalidated.
                     */
                    if (t.hasMover()) {
                        // required to work correctly with proxy-io; as modern RHEL clients (>= 7) use flex_files by default, this is acceptable.
                        if (layoutType == layouttype4.LAYOUT4_FLEX_FILES && client.isLeaseValid() && client.getCB() != null) {
                            /*
                             * Due to a race in the Linux kernel client, the server might see CLOSE before
                             * the last WRITE operation has been processed by a data server. Thus,
                             * recall the layout (to enforce dirty page flushing) and return NFS4ERR_DELAY.
                             *
                             * see: https://bugzilla.redhat.com/show_bug.cgi?id=1901524
                             */
                            _log.warn("Deploying work-around for buggy client {} issuing CLOSE before LAYOUT_RETURN for transfer {}@{} of {}",
                                  t.getMoverId(), t.getPool(), t.getPnfsId(), t.getClient().getRemoteAddress());
                            t.recallLayout(_callbackExecutor);
                            throw new DelayException("Close before layoutreturn");
                        } else {
                            _log.warn("Removing orphan mover: {}@{} for {} by {}",
                                  t.getMoverId(), t.getPool(), t.getPnfsId(), t.getClient().getRemoteAddress());
                            t.shutdownMover();
                        }
                    }
                    if (t.isWrite()) {
                        /* Write requests are kept in the transfers map (to avoid re-creating
                         * movers and triggering errors), so remove the entry once the state
                         * is disposed.
                         */
                        _transfers.remove(openStateId.stateid());
                    }
                });
                _transfers.put(openStateId.stateid(), transfer);
            } else {
                // keep the debug context in sync
                transfer.restoreSession();
                NDC.push(pnfsId.toString());
                NDC.push(context.getRpcCall().getTransport().getRemoteSocketAddress().toString());
            }
            layoutStateId = transfer.getStateid();
            devices = transfer.getPoolDataServers(NFS_REQUEST_BLOCKING);
        }
        // NFS4_UINT64_MAX (all ones, i.e. -1) is a special length value meaning the entire file
        layout4 layout = new layout4();
        layout.lo_iomode = args.loga_iomode;
        layout.lo_offset = new offset4(0);
        layout.lo_length = new length4(nfs4_prot.NFS4_UINT64_MAX);
        layout.lo_content = layoutDriver.getLayoutContent(stateid, NFSv4Defaults.NFS4_STRIPE_SIZE,
              new nfs_fh4(nfsInode.toNfsHandle()), devices);
        layoutStateId.bumpSeqid();
        if (args.loga_iomode == layoutiomode4.LAYOUTIOMODE4_RW) {
            // in case of WRITE, invalidate the vfs stat cache on close
            layoutStateId.addDisposeListener(state -> _vfsCache.invalidateStatCache(nfsInode));
        }
        return new Layout(true, layoutStateId.stateid(), new layout4[] { layout });
    } catch (FileNotFoundCacheException e) {
        /*
         * The file was removed before we were able to start a mover.
         * Invalidate the state, as the client will not send a CLOSE for a stale
         * file handle.
         *
         * NOTICE: according to POSIX, an opened file must still be accessible
         * after removal as long as it is not closed. We violate that requirement
         * in favor of keeping dCache's shared state simple.
         */
        client.releaseState(stateid);
        throw new StaleException("File is removed", e);
    } catch (CacheException | ChimeraFsException | TimeoutException | ExecutionException e) {
        throw asNfsException(e, LayoutTryLaterException.class);
    } catch (InterruptedException e) {
        throw new LayoutTryLaterException(e.getMessage(), e);
    } finally {
        cdcContext.close();
    }
}
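The cleanup above hangs off NFS4State.addDisposeListener(), the general hook for running code when a stateid is released (on CLOSE, lease expiry, or revocation). A minimal sketch of that pattern in isolation, assuming only the NFS4State and NfsTransfer calls already used above; the helper name attachCleanup and the explicit Map type are hypothetical.

// Hypothetical example: tie a per-open resource to the open-state so it is
// cleaned up automatically when the state is disposed.
void attachCleanup(NFS4State openState, Map<stateid4, NfsTransfer> transfers) {
    openState.addDisposeListener(state -> {
        NfsTransfer t = transfers.remove(state.stateid());
        if (t != null && t.hasMover()) {
            t.shutdownMover(); // stop the now-orphaned mover on the pool
        }
    });
}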
use of org.dcache.nfs.v4.NFS4Client in project dcache by dCache.
the class NFSv41Door method layoutCommit.
@Override
public OptionalLong layoutCommit(CompoundContext context, LAYOUTCOMMIT4args args) throws IOException {
    final stateid4 stateid = Stateids.getCurrentStateidIfNeeded(context, args.loca_stateid);
    final NFS4Client client = context.getStateHandler().getClientIdByStateId(stateid);
    final NFS4State layoutState = client.state(stateid);
    final NFS4State openState = layoutState.getOpenState();
    Inode nfsInode = context.currentInode();
    _log.debug("Committing layout by stateid: {}, open-state: {}", stateid, openState.stateid());
    if (args.loca_last_write_offset.no_newoffset) {
        long currentSize = _chimeraVfs.getattr(nfsInode).getSize();
        long newSize = args.loca_last_write_offset.no_offset.value + 1;
        if (newSize > currentSize) {
            Stat newStat = new Stat();
            newStat.setSize(newSize);
            _chimeraVfs.setattr(nfsInode, newStat);
            _vfsCache.invalidateStatCache(nfsInode);
            return OptionalLong.of(newSize);
        }
    }
    return OptionalLong.empty();
}
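The size update turns the client's last-written byte offset into a file length: offsets are zero-based, so the implied size is offset + 1, and it is applied only when it would grow the file. A minimal sketch of that rule with hypothetical names:

// Hypothetical helper: compute the new file size implied by LAYOUTCOMMIT's
// loca_last_write_offset, returning empty when no size change is needed.
static OptionalLong newSizeAfterCommit(long lastWriteOffset, long currentSize) {
    long newSize = lastWriteOffset + 1; // offset of the last byte written, zero-based
    return newSize > currentSize ? OptionalLong.of(newSize) : OptionalLong.empty();
}

For example, newSizeAfterCommit(4095, 0) yields OptionalLong.of(4096): after the client reports the last written byte at offset 4095, the file grows to 4096 bytes.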
use of org.dcache.nfs.v4.NFS4Client in project dcache by dCache.
the class NfsProxyIoFactory method getOrCreateProxy.
@Override
public ProxyIoAdapter getOrCreateProxy(Inode inode, stateid4 stateid, CompoundContext context, boolean isWrite) throws IOException {
    try {
        return _proxyIO.get(stateid, () -> {
            final NFS4Client nfsClient;
            if (context.getMinorversion() == 1) {
                nfsClient = context.getSession().getClient();
            } else {
                nfsClient = context.getStateHandler().getClientIdByStateId(stateid);
            }
            final NFS4State state = nfsClient.state(stateid);
            final ProxyIoAdapter adapter = createIoAdapter(inode, stateid, context, isWrite);
            state.addDisposeListener(s -> {
                tryToClose(adapter);
                _proxyIO.invalidate(s.stateid());
            });
            return adapter;
        });
    } catch (ExecutionException e) {
        Throwable t = e.getCause();
        _log.debug("failed to create IO adapter: {}", t.getMessage());
        throw asNfsException(t, NfsIoException.class);
    }
}
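Here _proxyIO behaves like a loading cache keyed by stateid: get(key, loader) creates the adapter on first use, and the dispose listener evicts and closes it when the state goes away. A minimal sketch of how such a cache could be declared, assuming a Guava Cache (com.google.common.cache.Cache and CacheBuilder); the actual field type and limits in dCache may differ.

// Illustrative only: a Guava cache of proxy-io adapters keyed by stateid.
// getOrCreateProxy above populates it lazily via get(key, loader), and the
// state's dispose listener evicts entries with invalidate(stateid).
private final Cache<stateid4, ProxyIoAdapter> _proxyIO = CacheBuilder.newBuilder()
      .maximumSize(1024) // assumed bound; the real configuration may differ
      .build();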