Use of diskCacheV111.poolManager.Pool in project dcache by dCache:
the class NFSv41Door, method getDeviceInfo.
// NFSv41DeviceManager interface
/*
The most important calls are LAYOUTGET, OPEN, CLOSE and LAYOUTRETURN.
READ, WRITE and COMMIT go to the storage device.
We assume the following mapping between nfs and dcache:
NFS | dCache
_____________|________________________________________
LAYOUTGET : get pool, bind the answer to the client
OPEN : send IO request to the pool
CLOSE : send end-of-IO to the pool, LAYOUTRECALL
LAYOUTRETURN : unbind pool from client
*/
/**
 * Resolve a pNFS device id to the set of socket addresses a client may use to
 * reach the corresponding pool, encoded by the requested layout driver.
 * Returns {@code null} when the device id is unknown.
 */
@Override
public device_addr4 getDeviceInfo(CompoundContext context, GETDEVICEINFO4args args) throws ChimeraNFSException {
    LayoutDriver layoutDriver = getLayoutDriver(layouttype4.valueOf(args.gdia_layout_type));

    PoolDS dataServer = _poolDeviceMap.getByDeviceId(args.gdia_device_id);
    if (dataServer == null) {
        return null;
    }

    // limit addresses returned to client to the same 'type' as clients own address
    // NOTICE: according to rfc1918 we allow access to private networks from public ip address
    // Site must take care that private IP space is not visible to site external clients.
    InetAddress clientAddress = context.getRemoteSocketAddress().getAddress();
    InetSocketAddress[] usableAddresses = Stream.of(dataServer.getDeviceAddr())
          .filter(a -> isExposableTo(a.getAddress(), clientAddress))
          .toArray(InetSocketAddress[]::new);

    return layoutDriver.getDeviceAddress(usableAddresses);
}

/**
 * Decide whether a pool address may be revealed to a client connecting from
 * the given address: loopback only to loopback clients, link-local only to
 * link-local clients, and never a "wider" address family than the client's
 * own (an IPv4 client is not offered IPv6 addresses).
 */
private static boolean isExposableTo(InetAddress poolAddress, InetAddress clientAddress) {
    if (poolAddress.isLoopbackAddress() && !clientAddress.isLoopbackAddress()) {
        return false;
    }
    if (poolAddress.isLinkLocalAddress() && !clientAddress.isLinkLocalAddress()) {
        return false;
    }
    return clientAddress.getAddress().length >= poolAddress.getAddress().length;
}
Use of diskCacheV111.poolManager.Pool in project dcache by dCache:
the class RemoteHttpDataTransferProtocol, method verifyRemoteFile.
/**
 * Verify that the remote copy matches the local replica by issuing HEAD
 * requests until the retry deadline expires.  The remote Content-Length is
 * compared against the local file size and any RFC 3230 Digest header is
 * compared against the locally known checksums.
 *
 * @param info describes the remote endpoint and whether verification is required
 * @throws ThirdPartyTransferFailedCacheException if the remote file cannot be
 *         verified before the deadline, a size or checksum mismatch is found,
 *         or the pool is shutting down
 */
private void verifyRemoteFile(RemoteHttpDataTransferProtocolInfo info)
      throws ThirdPartyTransferFailedCacheException {
    FileAttributes attributes = _channel.getFileAttributes();
    boolean isFirstAttempt = true;

    // The deadline scales with file size: large uploads may need more time
    // before the remote server can report a final length.
    long t_max = maxRetryDuration(attributes.getSize());
    long deadline = System.currentTimeMillis() + t_max;

    try {
        while (System.currentTimeMillis() < deadline) {
            long sleepFor = Math.min(deadline - System.currentTimeMillis(), DELAY_BETWEEN_REQUESTS);
            if (!isFirstAttempt && sleepFor > 0) {
                Thread.sleep(sleepFor);
            }
            isFirstAttempt = false;

            HttpClientContext context = storeContext(new HttpClientContext());
            HttpHead head = buildHeadRequest(info, deadline);
            buildWantDigest().ifPresent(v -> head.addHeader("Want-Digest", v));

            try {
                try (CloseableHttpResponse response = _client.execute(head, context)) {
                    StatusLine status = response.getStatusLine();

                    if (status.getStatusCode() >= 300) {
                        // A failed HEAD is fatal only if verification is required.
                        checkThirdPartyTransferSuccessful(!info.isVerificationRequired(),
                              "rejected HEAD: %d %s", status.getStatusCode(),
                              status.getReasonPhrase());
                        return;
                    }

                    if (shouldRetry(response)) {
                        continue;
                    }

                    OptionalLong contentLengthHeader = contentLength(response);
                    if (contentLengthHeader.isPresent()) {
                        long contentLength = contentLengthHeader.getAsLong();
                        long fileSize = attributes.getSize();
                        checkThirdPartyTransferSuccessful(contentLength == fileSize,
                              "HEAD Content-Length (%d) does not match file size (%d)",
                              contentLength, fileSize);
                    } else {
                        LOGGER.debug("HEAD response did not contain Content-Length");
                    }

                    String rfc3230 = headerValue(response, "Digest");
                    checkChecksums(info, rfc3230, attributes.getChecksumsIfPresent());
                    return;
                } catch (IOException e) {
                    throw new ThirdPartyTransferFailedCacheException("failed to "
                          + "connect to server: " + e.toString(), e);
                }
            } catch (ThirdPartyTransferFailedCacheException e) {
                // Augment the failure with any redirects that were followed,
                // to make the error actionable for admins.
                List<URI> redirections = context.getRedirectLocations();
                if (redirections != null && !redirections.isEmpty()) {
                    throw new ThirdPartyTransferFailedCacheException(
                          e.getMessage() + "; redirections " + redirections, e.getCause());
                } else {
                    throw e;
                }
            }
        }
    } catch (InterruptedException e) {
        // FIX: restore the interrupt status so the shutdown remains visible to
        // callers, consistent with how interruption is handled elsewhere.
        Thread.currentThread().interrupt();
        throw new ThirdPartyTransferFailedCacheException("pool is shutting down", e);
    }

    // FIX: report the duration actually used for the deadline (t_max, derived
    // from the file size) rather than the unrelated GET_RETRY_DURATION constant.
    throw new ThirdPartyTransferFailedCacheException("remote server failed "
          + "to provide length after " + describeDuration(t_max, MILLISECONDS));
}
Use of diskCacheV111.poolManager.Pool in project dcache by dCache:
the class RemoteHttpDataTransferProtocol, method receiveFile.
/**
 * Download the remote file with a GET request, writing the entity into the
 * repository channel.  Checksums advertised in the GET response (RFC 3230
 * Digest header) are registered with the integrity checker; if none were
 * supplied and verification is required, a follow-up HEAD request is made to
 * obtain them.
 *
 * @param info describes the remote endpoint and verification policy
 * @throws ThirdPartyTransferFailedCacheException if the transfer fails, times
 *         out, is interrupted, or required checksums cannot be obtained
 */
private void receiveFile(final RemoteHttpDataTransferProtocolInfo info)
      throws ThirdPartyTransferFailedCacheException {
    Set<Checksum> checksums;

    long deadline = System.currentTimeMillis() + GET_RETRY_DURATION;

    HttpClientContext context = storeContext(new HttpClientContext());
    try {
        try (CloseableHttpResponse response = doGet(info, context, deadline)) {
            String rfc3230 = headerValue(response, "Digest");
            checksums = Checksums.decodeRfc3230(rfc3230);
            checksums.forEach(_integrityChecker);

            HttpEntity entity = response.getEntity();
            if (entity == null) {
                throw new ThirdPartyTransferFailedCacheException("GET response contains no content");
            }

            long length = entity.getContentLength();
            if (length > 0) {
                // Pre-size the replica so the pool can allocate space up front.
                _channel.truncate(length);
            }

            if (response.getStatusLine() != null
                  && response.getStatusLine().getStatusCode() < 300 && length > -1) {
                _expectedTransferSize = length;
            }
            entity.writeTo(Channels.newOutputStream(_channel));
        } catch (SocketTimeoutException e) {
            String message = "socket timeout on GET (received "
                  + describeSize(_channel.getBytesTransferred()) + " of data; "
                  + describeSize(e.bytesTransferred) + " pending)";
            if (e.getMessage() != null) {
                message += ": " + e.getMessage();
            }
            throw new ThirdPartyTransferFailedCacheException(message, e);
        } catch (IOException e) {
            throw new ThirdPartyTransferFailedCacheException(messageOrClassName(e), e);
        } catch (InterruptedException e) {
            // FIX: restore the interrupt status so the shutdown remains visible
            // to callers, consistent with how interruption is handled elsewhere.
            Thread.currentThread().interrupt();
            throw new ThirdPartyTransferFailedCacheException("pool is shutting down", e);
        }
    } catch (ThirdPartyTransferFailedCacheException e) {
        // Augment the failure with any redirects that were followed.
        List<URI> redirections = context.getRedirectLocations();
        if (redirections != null && !redirections.isEmpty()) {
            StringBuilder message = new StringBuilder(e.getMessage());
            message.append("; redirects ").append(redirections);
            throw new ThirdPartyTransferFailedCacheException(message.toString(), e.getCause());
        } else {
            throw e;
        }
    }

    // If the GET response advertised no checksums but verification is required,
    // fall back to asking for them explicitly with a HEAD request.
    if (checksums.isEmpty() && info.isVerificationRequired()) {
        HttpHead head = buildHeadRequest(info, deadline);
        head.addHeader("Want-Digest", WANT_DIGEST_VALUE);

        try {
            try (CloseableHttpResponse response = _client.execute(head)) {
                String rfc3230 = headerValue(response, "Digest");
                checkThirdPartyTransferSuccessful(rfc3230 != null,
                      "no checksums in HEAD response");
                checksums = Checksums.decodeRfc3230(rfc3230);
                checkThirdPartyTransferSuccessful(!checksums.isEmpty(),
                      "no useful checksums in HEAD response: %s", rfc3230);

                // Ensure integrity. If we're lucky, this won't trigger
                // rescanning the uploaded file.
                checksums.forEach(_integrityChecker);
            }
        } catch (IOException e) {
            throw new ThirdPartyTransferFailedCacheException(
                  "HEAD request failed: " + messageOrClassName(e), e);
        }
    }
}
Use of diskCacheV111.poolManager.Pool in project dcache by dCache:
the class ConsistentReplicaStore, method get.
/**
 * Retrieves a CacheRepositoryEntry from the wrapped meta data store. If the entry is missing or
 * fails consistency checks, the entry is reconstructed with information from PNFS.
 *
 * @param id PNFS id of the replica to look up
 * @return the (possibly recovered) record, or null when the replica was
 *         removed during recovery or its name space entry no longer exists
 * @throws CacheException on name space, disk or interruption failures
 */
@Override
public ReplicaRecord get(PnfsId id) throws IllegalArgumentException, CacheException {
ReplicaRecord entry = _replicaStore.get(id);
// Only attempt recovery on entries that exist but fail the consistency check.
if (entry != null && isBroken(entry)) {
LOGGER.warn("Recovering {}...", id);
try {
/* It is safe to remove FROM_STORE/FROM_POOL replicas: We have
 * another copy anyway. Files in REMOVED or DESTROYED
 * were about to be deleted, so we can finish the job.
 */
switch(entry.getState()) {
case FROM_POOL:
case FROM_STORE:
case REMOVED:
case DESTROYED:
_replicaStore.remove(id);
// Also drop the cache location from the name space so PNFS stays consistent.
_pnfsHandler.clearCacheLocation(id);
LOGGER.info("Recovering: Removed {} because it was not fully staged.", id);
return null;
}
// Any other state: rebuild the record from name space information.
entry = rebuildEntry(entry);
} catch (IOException e) {
// Local disk problem - surface as a disk error rather than a generic failure.
throw new DiskErrorCacheException("I/O error in healer: " + messageOrClassName(e), e);
} catch (FileNotFoundCacheException e) {
// Name space entry is gone; the replica is an orphan and can be deleted.
_replicaStore.remove(id);
LOGGER.warn("Recovering: Removed {} because name space entry was deleted.", id);
return null;
} catch (FileIsNewCacheException e) {
// An incomplete upload; safe to remove.
_replicaStore.remove(id);
LOGGER.warn("Recovering: Removed {}: {}", id, e.getMessage());
return null;
} catch (TimeoutCacheException e) {
// Transient: let the caller retry rather than marking the replica broken.
throw e;
} catch (InterruptedException e) {
// Preserve the interrupt status for the shutting-down pool.
Thread.currentThread().interrupt();
throw new CacheException("Pool is shutting down", e);
} catch (CacheException | NoSuchAlgorithmException e) {
// Unrecoverable: keep the replica but mark it BROKEN and raise an alarm.
entry.update("Failed to recover replica: " + e.getMessage(), r -> r.setState(ReplicaState.BROKEN));
LOGGER.error(AlarmMarkerFactory.getMarker(PredefinedAlarm.BROKEN_FILE, id.toString(), _poolName), "Marked {} bad: {}.", id, e.getMessage());
}
}
return entry;
}
Use of diskCacheV111.poolManager.Pool in project dcache by dCache:
the class NettyTransferService, method startServer.
/**
 * Start the netty server if it is not already running.  Binds a server socket
 * within the configured port range and remembers the resulting local address.
 *
 * @throws IOException Starting the server failed
 */
protected synchronized void startServer() throws IOException {
    if (serverChannel != null) {
        // Server already running; nothing to do.
        return;
    }

    ChannelInitializer<Channel> initializer = new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(Channel ch) throws Exception {
            NettyTransferService.this.initChannel(ch);
            // Tag the channel with a CDC session id for log correlation.
            ChannelCdcSessionHandlerWrapper.bindSessionToChannel(ch,
                  "pool:" + address + ":" + name + ":" + ch.id());
        }
    };

    ServerBootstrap bootstrap = new ServerBootstrap()
          .group(acceptGroup, socketGroup)
          .channel(NioServerSocketChannel.class)
          .childOption(ChannelOption.TCP_NODELAY, false)
          .childOption(ChannelOption.SO_KEEPALIVE, true)
          .childHandler(initializer);

    serverChannel = portRange.bind(bootstrap);
    lastServerAddress = (InetSocketAddress) serverChannel.localAddress();
    LOGGER.debug("Started {} on {}", getClass().getSimpleName(), lastServerAddress);
}
Aggregations