Use of com.ibm.stocator.fs.common.exception.ConfigurationParseException in the stocator project (SparkTC). The following snippet is the initiate method of the SwiftAPIClient class.
/**
 * Initializes the Swift client: builds the HTTP connection manager from the
 * Hadoop configuration, resolves the authentication method, creates the JOSS
 * account model and makes sure the target container exists.
 *
 * @param scheme the URI scheme this client was registered under
 * @throws IOException if the JOSS account model cannot be created
 * @throws ConfigurationParseException if mandatory configuration (e.g. the
 *         authentication method) is missing or invalid
 */
@Override
public void initiate(String scheme) throws IOException, ConfigurationParseException {
  cachedSparkOriginated = new HashMap<String, Boolean>();
  cachedSparkJobsStatus = new HashMap<String, Boolean>();
  schemaProvided = scheme;
  Properties props = ConfigurationHandler.initialize(filesystemURI, conf);
  // HTTP connection pool and timeout tuning; every value is overridable via the Hadoop conf.
  connectionConfiguration.setExecutionCount(
      conf.getInt(Constants.EXECUTION_RETRY, ConnectionConfiguration.DEFAULT_EXECUTION_RETRY));
  connectionConfiguration.setMaxPerRoute(
      conf.getInt(Constants.MAX_PER_ROUTE, ConnectionConfiguration.DEFAULT_MAX_PER_ROUTE));
  connectionConfiguration.setMaxTotal(
      conf.getInt(Constants.MAX_TOTAL_CONNECTIONS,
          ConnectionConfiguration.DEFAULT_MAX_TOTAL_CONNECTIONS));
  connectionConfiguration.setReqConnectionRequestTimeout(
      conf.getInt(Constants.REQUEST_CONNECTION_TIMEOUT,
          ConnectionConfiguration.DEFAULT_REQUEST_CONNECTION_TIMEOUT));
  connectionConfiguration.setReqConnectTimeout(
      conf.getInt(Constants.REQUEST_CONNECT_TIMEOUT,
          ConnectionConfiguration.DEFAULT_REQUEST_CONNECT_TIMEOUT));
  connectionConfiguration.setReqSocketTimeout(
      conf.getInt(Constants.REQUEST_SOCKET_TIMEOUT,
          ConnectionConfiguration.DEFAULT_REQUEST_SOCKET_TIMEOUT));
  connectionConfiguration.setSoTimeout(
      conf.getInt(Constants.SOCKET_TIMEOUT, ConnectionConfiguration.DEFAULT_SOCKET_TIMEOUT));
  LOG.trace("{} set connection manager", filesystemURI.toString());
  swiftConnectionManager = new SwiftConnectionManager(connectionConfiguration);
  LOG.trace("{}", connectionConfiguration.toString());
  bufferDir = props.getProperty(BUFFER_DIR_PROPERTY, "");
  nonStreamingUpload = "true".equals(props.getProperty(NON_STREAMING_UPLOAD_PROPERTY, "false"));
  AccountConfig config = new AccountConfig();
  fModeAutomaticDelete = "true".equals(props.getProperty(FMODE_AUTOMATIC_DELETE_PROPERTY, "false"));
  // Block size is configured in MB; convert to bytes. parseLong avoids the
  // needless boxing of Long.valueOf(...).longValue().
  blockSize = Long.parseLong(props.getProperty(SWIFT_BLOCK_SIZE_PROPERTY, "128")) * 1024 * 1024L;
  String authMethod = props.getProperty(SWIFT_AUTH_METHOD_PROPERTY);
  if (authMethod == null) {
    // Fail fast with the declared configuration exception instead of an NPE on
    // the authMethod.equals(...) dispatch below.
    throw new ConfigurationParseException(
        "Swift authentication method is missing. Please provide a valid authentication method");
  }
  // NOTE(review): this mapper is a local that is never used later in this method;
  // looks like dead code — confirm against the full class before removing.
  ObjectMapper mapper = new ObjectMapper();
  mapper.configure(SerializationConfig.Feature.WRAP_ROOT_VALUE, true);
  boolean syncWithServer = conf.getBoolean(Constants.JOSS_SYNC_SERVER_TIME, false);
  if (!syncWithServer) {
    LOG.trace("JOSS: disable sync time with server");
    config.setAllowSynchronizeWithServer(false);
  }
  if (authMethod.equals(PUBLIC_ACCESS)) {
    // Public (unauthenticated) access: extract container name and path from the public URL.
    String publicURL = filesystemURI.toString().replace(schemaProvided, "https");
    publicContainer = true;
    LOG.debug("publicURL: {}", publicURL);
    String accessURL = Utils.extractAccessURL(publicURL, scheme);
    LOG.debug("auth url {}", accessURL);
    config.setAuthUrl(accessURL);
    config.setAuthenticationMethod(AuthenticationMethod.EXTERNAL);
    container = Utils.extractDataRoot(publicURL, accessURL);
    DummyAccessProvider p = new DummyAccessProvider(accessURL);
    config.setAccessProvider(p);
    mJossAccount = new JossAccount(config, null, true, swiftConnectionManager);
    mJossAccount.createDummyAccount();
  } else {
    container = props.getProperty(SWIFT_CONTAINER_PROPERTY);
    String isPubProp = props.getProperty(SWIFT_PUBLIC_PROPERTY, "false");
    usePublicURL = "true".equals(isPubProp);
    LOG.trace("Use public key value is {}. Use public {}", isPubProp, usePublicURL);
    config.setPassword(props.getProperty(SWIFT_PASSWORD_PROPERTY));
    config.setAuthUrl(Utils.getOption(props, SWIFT_AUTH_PROPERTY));
    if (authMethod.equals("keystone")) {
      preferredRegion = props.getProperty(SWIFT_REGION_PROPERTY);
      if (preferredRegion != null) {
        config.setPreferredRegion(preferredRegion);
      }
      config.setAuthenticationMethod(AuthenticationMethod.KEYSTONE);
      config.setUsername(Utils.getOption(props, SWIFT_USERNAME_PROPERTY));
      config.setTenantName(props.getProperty(SWIFT_TENANT_PROPERTY));
    } else if (authMethod.equals(KEYSTONE_V3_AUTH)) {
      // Keystone V3 authenticates through an external access provider.
      preferredRegion = props.getProperty(SWIFT_REGION_PROPERTY, "dallas");
      config.setPreferredRegion(preferredRegion);
      config.setAuthenticationMethod(AuthenticationMethod.EXTERNAL);
      String userId = props.getProperty(SWIFT_USER_ID_PROPERTY);
      String projectId = props.getProperty(SWIFT_PROJECT_ID_PROPERTY);
      PasswordScopeAccessProvider psap = new PasswordScopeAccessProvider(
          userId, config.getPassword(), projectId, config.getAuthUrl(), preferredRegion);
      config.setAccessProvider(psap);
    } else if (authMethod.equals("basic")) {
      config.setAuthenticationMethod(AuthenticationMethod.BASIC);
      config.setUsername(Utils.getOption(props, SWIFT_USERNAME_PROPERTY));
    } else {
      // Default: tempauth. NOTE(review): username and tenant are intentionally
      // crossed here (tenant <- username property, username <- tenant property);
      // this matches the original behavior — confirm against the tempauth setup.
      config.setAuthenticationMethod(AuthenticationMethod.TEMPAUTH);
      config.setTenantName(Utils.getOption(props, SWIFT_USERNAME_PROPERTY));
      config.setUsername(props.getProperty(SWIFT_TENANT_PROPERTY));
    }
    LOG.trace("{}", config.toString());
    mJossAccount = new JossAccount(config, preferredRegion, usePublicURL, swiftConnectionManager);
    try {
      mJossAccount.createAccount();
    } catch (Exception e) {
      throw new IOException("Failed to create an account model."
          + " Please check the provided access credentials."
          + " Verify the validity of the auth url: " + config.getAuthUrl(), e);
    }
  }
  Container containerObj = mJossAccount.getAccount().getContainer(container);
  // Public access is read-only; never attempt to create the container there.
  if (!authMethod.equals(PUBLIC_ACCESS) && !containerObj.exists()) {
    try {
      containerObj.create();
    } catch (AlreadyExistsException e) {
      // Benign race: another process/thread created the container first.
      LOG.debug("Create container failed. {} already exists. ", container);
    }
  }
  objectCache = new SwiftObjectCache(containerObj);
}
Use of com.ibm.stocator.fs.common.exception.ConfigurationParseException in the stocator project (SparkTC). The following snippet is the initiate method of the COSAPIClient class.
/**
 * Initializes the COS (S3-compatible) client: validates credentials, configures
 * the HTTP client (proxy, thread pools, timeouts), creates the AmazonS3 client,
 * optionally auto-creates the target bucket, and sets up upload buffering.
 *
 * @param scheme the URI scheme this client was registered under
 * @throws IOException on I/O failures during initialization
 * @throws ConfigurationParseException if the access key or secret key is missing
 */
@Override
public void initiate(String scheme) throws IOException, ConfigurationParseException {
  mCachedSparkOriginated = new HashMap<String, Boolean>();
  mCachedSparkJobsStatus = new HashMap<String, Boolean>();
  schemaProvided = scheme;
  Properties props = ConfigurationHandler.initialize(filesystemURI, conf, scheme);
  // Set bucket name property
  int cacheSize = conf.getInt(CACHE_SIZE, GUAVA_CACHE_SIZE_DEFAULT);
  memoryCache = MemoryCache.getInstance(cacheSize);
  mBucket = props.getProperty(COS_BUCKET_PROPERTY);
  workingDir = new Path("/user",
      System.getProperty("user.name")).makeQualified(filesystemURI, getWorkingDirectory());
  LOG.trace("Working directory set to {}", workingDir);
  fModeAutomaticDelete =
      "true".equals(props.getProperty(FMODE_AUTOMATIC_DELETE_COS_PROPERTY, "false"));
  mIsV2Signer = "true".equals(props.getProperty(V2_SIGNER_TYPE_COS_PROPERTY, "false"));
  // Define COS client: both credentials are mandatory — fail fast if either is absent.
  String accessKey = props.getProperty(ACCESS_KEY_COS_PROPERTY);
  String secretKey = props.getProperty(SECRET_KEY_COS_PROPERTY);
  if (accessKey == null) {
    throw new ConfigurationParseException("Access KEY is empty. Please provide valid access key");
  }
  if (secretKey == null) {
    throw new ConfigurationParseException("Secret KEY is empty. Please provide valid secret key");
  }
  BasicAWSCredentials creds = new BasicAWSCredentials(accessKey, secretKey);
  ClientConfiguration clientConf = new ClientConfiguration();
  // Transfer thread pools: a bounded pool for regular transfers plus an
  // unbounded one; at least 2 threads are required for multipart uploads.
  int maxThreads = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_THREADS, DEFAULT_MAX_THREADS);
  if (maxThreads < 2) {
    LOG.warn(MAX_THREADS + " must be at least 2: forcing to 2.");
    maxThreads = 2;
  }
  int totalTasks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_TOTAL_TASKS,
      DEFAULT_MAX_TOTAL_TASKS);
  long keepAliveTime = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, KEEPALIVE_TIME,
      DEFAULT_KEEPALIVE_TIME);
  threadPoolExecutor = BlockingThreadPoolExecutorService.newInstance(maxThreads,
      maxThreads + totalTasks, keepAliveTime, TimeUnit.SECONDS, "s3a-transfer-shared");
  unboundedThreadPool = new ThreadPoolExecutor(maxThreads, Integer.MAX_VALUE,
      keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(),
      BlockingThreadPoolExecutorService.newDaemonThreadFactory("s3a-transfer-unbounded"));
  boolean secureConnections = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS,
      SECURE_CONNECTIONS, DEFAULT_SECURE_CONNECTIONS);
  clientConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);
  // Optional proxy configuration; host and port must be consistent.
  String proxyHost = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_HOST, "");
  int proxyPort = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, PROXY_PORT, -1);
  if (!proxyHost.isEmpty()) {
    clientConf.setProxyHost(proxyHost);
    if (proxyPort >= 0) {
      clientConf.setProxyPort(proxyPort);
    } else {
      // No explicit port: fall back to the protocol's default.
      if (secureConnections) {
        LOG.warn("Proxy host set without port. Using HTTPS default 443");
        clientConf.setProxyPort(443);
      } else {
        LOG.warn("Proxy host set without port. Using HTTP default 80");
        clientConf.setProxyPort(80);
      }
    }
    String proxyUsername = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_USERNAME);
    String proxyPassword = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_PASSWORD);
    // Username and password must be provided together or not at all.
    if ((proxyUsername == null) != (proxyPassword == null)) {
      String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD
          + " set without the other.";
      LOG.error(msg);
      throw new IllegalArgumentException(msg);
    }
    clientConf.setProxyUsername(proxyUsername);
    clientConf.setProxyPassword(proxyPassword);
    clientConf.setProxyDomain(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_DOMAIN));
    clientConf.setProxyWorkstation(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS,
        PROXY_WORKSTATION));
    if (LOG.isDebugEnabled()) {
      LOG.debug("Using proxy server {}:{} as user {} on " + "domain {} as workstation {}",
          clientConf.getProxyHost(), clientConf.getProxyPort(),
          String.valueOf(clientConf.getProxyUsername()),
          clientConf.getProxyDomain(), clientConf.getProxyWorkstation());
    }
  } else if (proxyPort >= 0) {
    String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
    LOG.error(msg);
    throw new IllegalArgumentException(msg);
  }
  initConnectionSettings(conf, clientConf);
  if (mIsV2Signer) {
    clientConf.withSignerOverride("S3SignerType");
  }
  mClient = new AmazonS3Client(creds, clientConf);
  final String serviceUrl = props.getProperty(ENDPOINT_URL_COS_PROPERTY);
  if (serviceUrl != null && !serviceUrl.equals(amazonDefaultEndpoint)) {
    mClient.setEndpoint(serviceUrl);
  }
  mClient.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());
  // Set block size property: configured in MB, converted to bytes. parseLong
  // avoids the needless boxing of Long.valueOf(...).longValue().
  String mBlockSizeString = props.getProperty(BLOCK_SIZE_COS_PROPERTY, "128");
  mBlockSize = Long.parseLong(mBlockSizeString) * 1024 * 1024L;
  bufferDirectory = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, BUFFER_DIR);
  bufferDirectoryKey = Utils.getConfigKey(conf, FS_COS, FS_ALT_KEYS, BUFFER_DIR);
  LOG.trace("Buffer directory is set to {} for the key {}", bufferDirectory,
      bufferDirectoryKey);
  boolean autoCreateBucket =
      "true".equalsIgnoreCase((props.getProperty(AUTO_BUCKET_CREATE_COS_PROPERTY, "false")));
  partSize = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
  multiPartThreshold = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, MIN_MULTIPART_THRESHOLD,
      DEFAULT_MIN_MULTIPART_THRESHOLD);
  readAhead = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, READAHEAD_RANGE,
      DEFAULT_READAHEAD_RANGE);
  LOG.debug(READAHEAD_RANGE + ":" + readAhead);
  inputPolicy = COSInputPolicy.getPolicy(
      Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, INPUT_FADVISE, INPUT_FADV_NORMAL));
  initTransferManager();
  maxKeys = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
  flatListingFlag = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FLAT_LISTING,
      DEFAULT_FLAT_LISTING);
  if (autoCreateBucket) {
    try {
      boolean bucketExist = mClient.doesBucketExist(mBucket);
      if (bucketExist) {
        LOG.trace("Bucket {} exists", mBucket);
      } else {
        LOG.trace("Bucket {} doesn't exist; creating it automatically", mBucket);
        String mRegion = props.getProperty(REGION_COS_PROPERTY);
        if (mRegion == null) {
          mClient.createBucket(mBucket);
        } else {
          LOG.trace("Creating bucket {} in region {}", mBucket, mRegion);
          mClient.createBucket(mBucket, mRegion);
        }
      }
    } catch (AmazonServiceException ase) {
      /*
       * we ignore the BucketAlreadyExists exception since multiple processes or threads
       * might try to create the bucket in parallel, therefore it is expected that
       * some will fail to create the bucket
       */
      if (!ase.getErrorCode().equals("BucketAlreadyExists")) {
        LOG.error(ase.getMessage());
        throw (ase);
      }
    } catch (Exception e) {
      LOG.error(e.getMessage());
      throw (e);
    }
  }
  initMultipartUploads(conf);
  enableMultiObjectsDelete = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS,
      ENABLE_MULTI_DELETE, true);
  blockUploadEnabled = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD,
      DEFAULT_FAST_UPLOAD);
  if (blockUploadEnabled) {
    blockOutputBuffer = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD_BUFFER,
        DEFAULT_FAST_UPLOAD_BUFFER);
    partSize = COSUtils.ensureOutputParameterInRange(MULTIPART_SIZE, partSize);
    blockFactory = COSDataBlocks.createFactory(this, blockOutputBuffer);
    blockOutputActiveBlocks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS,
        FAST_UPLOAD_ACTIVE_BLOCKS, DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS);
    LOG.debug("Using COSBlockOutputStream with buffer = {}; block={};"
        + " queue limit={}", blockOutputBuffer, partSize, blockOutputActiveBlocks);
  } else {
    LOG.debug("Using COSOutputStream");
  }
}
Aggregations