Use of diskCacheV111.util.CheckStagePermission in the dCache project.
Class DcapDoorSettings, method init().
/**
 * Derives runtime settings from the raw configuration values.
 *
 * <p>Computes the authorization flags, the anonymous access level, the
 * optional client-version window, the door restriction, and the stage
 * permission checker.  When Kafka is enabled, a producer is created.
 */
public void init() {
    // "strong" implies authorization is both strong and required.
    isAuthorizationStrong = (auth != null) && auth.equals("strong");
    isAuthorizationRequired = (auth != null) && (auth.equals("strong") || auth.equals("required"));
    // Default to READONLY when no anonymous access level is configured.
    // NOTE(review): toUpperCase() is locale-sensitive; toUpperCase(Locale.ROOT)
    // would be safer for enum lookup — confirm and change separately.
    anonymousAccessLevel = (anon != null)
            ? UnionLoginStrategy.AccessLevel.valueOf(anon.toUpperCase())
            : UnionLoginStrategy.AccessLevel.READONLY;
    if (clientVersion != null) {
        try {
            // Expected syntax: "<min>[:<max>]".
            List<String> values = Splitter.on(':').limit(2).trimResults().splitToList(clientVersion);
            if (values.get(0).isEmpty()) {
                throw new IllegalArgumentException("missing minimum version");
            }
            minClientVersion = new DCapDoorInterpreterV3.Version(values.get(0));
            if (values.size() > 1) {
                if (values.get(1).isEmpty()) {
                    throw new IllegalArgumentException("missing maximum version");
                }
                maxClientVersion = new DCapDoorInterpreterV3.Version(values.get(1));
            }
        } catch (IllegalArgumentException e) {
            // Malformed limits are logged and ignored rather than failing start-up.
            _log.error("Ignoring client version limits: syntax error with '{}': {}",
                    clientVersion, e.getMessage());
        }
    }
    // Treat an empty queue name as "no specific queue".
    ioQueueName = Strings.emptyToNull(ioQueueName);
    doorRestriction = isReadOnly ? Restrictions.readOnly() : Restrictions.none();
    checkStagePermission = new CheckStagePermission(stageConfigurationFilePath);
    checkStagePermission.setAllowAnonymousStaging(allowAnonymousStaging);
    if (isKafkaEnabled) {
        _kafkaProducer = createKafkaProducer();
        // Parameterized logging (was string concatenation, which also lacked a
        // separating space before the hash).
        _log.warn("Creating KafkaProducer {}", _kafkaProducer.hashCode());
    }
}
Use of diskCacheV111.util.CheckStagePermission in the dCache project.
Class AbstractFtpDoorV1, method init().
/**
 * Initialises this FTP door for a newly accepted control connection.
 *
 * <p>Builds the cell stubs, login strategy, restriction, retry policies and
 * stage-permission checker from {@code _settings}, then sends the FTP
 * "220 ... door ready" greeting to the client.  Statement order matters:
 * the greeting is sent last, after all collaborators are wired up.
 *
 * @throws Exception if address resolution or stub creation fails
 */
public void init() throws Exception {
// Performance markers are disabled unless a positive period is configured.
if (_settings.getPerformanceMarkerPeriod() > 0) {
_performanceMarkerPeriod = _settings.getPerformanceMarkerPeriodUnit().toMillis(_settings.getPerformanceMarkerPeriod());
}
_logAbortedTransfers = _settings.logAbortedTransfers();
// Default data channel targets the client's address on the default FTP data port.
_clientDataAddress = new InetSocketAddress(_remoteSocketAddress.getAddress(), DEFAULT_DATA_PORT);
// Fall back to the local host when no internal address is configured.
_internalInetAddress = (_settings.getInternalAddress() == null) ? InetAddress.getLocalHost() : InetAddress.getByName(_settings.getInternalAddress());
_billingStub = _settings.createBillingStub(_cellEndpoint);
if (_settings.isKafkaEnabled()) {
_kafkaProducer = _settings.getKafkaProducer();
}
_poolManagerStub = _settings.createPoolManagerStub(_cellEndpoint, _cellAddress, _poolManagerHandler);
_poolStub = _settings.createPoolStub(_cellEndpoint);
_gPlazmaStub = _settings.createGplazmaStub(_cellEndpoint);
// A read-only door rejects all mutating operations via the restriction.
_doorRestriction = _settings.isReadOnly() ? Restrictions.readOnly() : Restrictions.none();
// Logins are delegated to gPlazma through the remote login strategy.
_loginStrategy = new RemoteLoginStrategy(_gPlazmaStub);
/* Parallelism for mode E transfers.
*/
_parallel = _settings.getDefaultStreamsPerClient();
_origin = new Origin(_remoteSocketAddress.getAddress());
// Reads may be retried with a pause; writes are attempted exactly once.
_readRetryPolicy = maximumTries(_settings.getMaxRetries()).pauseBeforeRetrying(_settings.getRetryWait(), TimeUnit.SECONDS).doNotTimeout();
_writeRetryPolicy = tryOnce().doNotTimeout();
_checkStagePermission = new CheckStagePermission(_settings.getStageConfigurationFilePath());
_checkStagePermission.setAllowAnonymousStaging(_settings.isAnonymousStagingAllowed());
buildClientConnectionHandler();
// Greet the client only after the door is fully wired up.
reply("220 " + _ftpDoorName + " door ready");
// NOTE(review): presumably marks the end of the greeting phase — confirm
// how _isHello is consumed elsewhere in this class.
_isHello = false;
}
Use of diskCacheV111.util.CheckStagePermission in the dCache project.
Class RequestContainerV5, method setStageConfigurationFile().
/**
 * Installs a stage policy decision point backed by the given
 * stage-configuration file, honouring the current anonymous-staging flag.
 *
 * @param path path to the stage configuration file
 */
public void setStageConfigurationFile(String path) {
    CheckStagePermission decisionPoint = new CheckStagePermission(path);
    decisionPoint.setAllowAnonymousStaging(_allowAnonymousStaging);
    _stagePolicyDecisionPoint = decisionPoint;
}
Use of diskCacheV111.util.CheckStagePermission in the dCache project.
Class TestPoolManagerStub, method testPinning().
@Test
public void testPinning() throws Exception {
// Wire a PinRequestProcessor with in-memory/stub collaborators so the
// whole pin flow runs synchronously inside the test.
TestDao dao = new TestDao();
PinRequestProcessor processor = new PinRequestProcessor();
processor.setScheduledExecutor(new TestExecutor());
processor.setExecutor(MoreExecutors.directExecutor());
processor.setDao(dao);
// Pool stub echoes the sticky message back, simulating a successful
// sticky-flag operation on the pool.
processor.setPoolStub(new TestStub(new CellAddressCore("PinManager")) {
public PoolSetStickyMessage messageArrived(PoolSetStickyMessage msg) {
return msg;
}
});
// Pool manager stub always selects POOL1 for reads.
processor.setPoolManagerStub(new TestPoolManagerStub(new CellAddressCore("PinManager")) {
public PoolMgrSelectReadPoolMsg messageArrived(PoolMgrSelectReadPoolMsg msg) {
msg.setPool(POOL1);
return msg;
}
});
processor.setMaxLifetime(-1);
// Null path: presumably no stage configuration, i.e. unrestricted
// staging — TODO confirm CheckStagePermission(null) semantics.
processor.setStagePermission(new CheckStagePermission(null));
// Pool monitor stub resolves any pin-pool selection to POOL1.
processor.setPoolMonitor(new PoolMonitorV5() {
@Override
public PoolSelector getPoolSelector(FileAttributes fileAttributes, ProtocolInfo protocolInfo, String linkGroup, Set<String> excludes) {
return new PoolMonitorV5.PnfsFileLocation(fileAttributes, protocolInfo, linkGroup, excludes) {
@Override
public SelectedPool selectPinPool() {
return new SelectedPool(new PoolInfo(POOL1.getAddress(), new PoolCostInfo(POOL1.getName(), IoQueueManager.DEFAULT_QUEUE), ImmutableMap.of()));
}
};
}
});
// Lower bound for the pin's expiration: now plus the 30 (ms) lifetime
// passed to PinManagerPinMessage below.
Date expiration = new Date(now() + 30);
PinManagerPinMessage message = new PinManagerPinMessage(getAttributes(PNFS_ID1), PROTOCOL_INFO, REQUEST_ID1, 30);
Date start = new Date();
message = processor.messageArrived(message).get();
Date stop = new Date();
// The request must succeed and expire no earlier than requested.
assertEquals(0, message.getReturnCode());
assertFalse(message.getExpirationTime().before(expiration));
// The persisted pin must reflect the request and the selected pool.
Pin pin = dao.get(dao.where().id(message.getPinId()));
assertEquals(PNFS_ID1, pin.getPnfsId());
assertBetween(start, stop, pin.getCreationTime());
assertEquals(message.getExpirationTime(), pin.getExpirationTime());
assertEquals(0, pin.getUid());
assertEquals(0, pin.getGid());
assertEquals(REQUEST_ID1, pin.getRequestId());
assertEquals(POOL1.getName(), pin.getPool());
assertEquals(PINNED, pin.getState());
assertValidSticky(pin.getSticky());
}
Aggregations