Search in sources:

Example 1 with StorageStatisticsProvider

Usage of org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider in the Apache Hadoop project.

From the class FileSystem, method getStatistics:

/**
 * Get (creating lazily on first request) the {@link Statistics} object
 * tracked for a particular FileSystem implementation class.
 *
 * <p>On the first lookup for a given class, a new Statistics instance is
 * created, cached in {@code statisticsTable}, and a lazy provider for the
 * matching {@link FileSystemStorageStatistics} is registered with the
 * global statistics registry under the given scheme.
 *
 * @param scheme URI scheme of the file system; must not be null
 * @param cls the FileSystem subclass to look up
 * @return the statistics object associated with {@code cls}
 * @deprecated use {@link #getGlobalStorageStatistics()}
 */
@Deprecated
public static synchronized Statistics getStatistics(final String scheme, Class<? extends FileSystem> cls) {
    checkArgument(scheme != null, "No statistics is allowed for a file system with null scheme!");
    Statistics cached = statisticsTable.get(cls);
    if (cached != null) {
        return cached;
    }
    // First request for this class: create and cache the per-class
    // Statistics, then register a provider so the global registry can
    // materialize a StorageStatistics view on demand.
    final Statistics created = new Statistics(scheme);
    statisticsTable.put(cls, created);
    GlobalStorageStatistics.INSTANCE.put(scheme, new StorageStatisticsProvider() {

        @Override
        public StorageStatistics provide() {
            return new FileSystemStorageStatistics(scheme, created);
        }
    });
    return created;
}
Also used : StorageStatisticsProvider(org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider)

Example 2 with StorageStatisticsProvider

Usage of org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider in the Apache Hadoop project.

From the class WebHdfsFileSystem, method initialize:

/**
 * Initialize this WebHDFS client from the given URI and configuration:
 * installs the configured user/ACL validation patterns, selects the URL
 * connection factory (OAuth2 or default), resolves NameNode addresses,
 * builds the token service name, chooses a retry/failover policy, and
 * registers the global per-operation statistics provider.
 *
 * @param uri the WebHDFS URI to connect to
 * @param conf configuration supplying client settings
 * @throws IOException if superclass initialization fails
 */
@Override
public synchronized void initialize(URI uri, Configuration conf) throws IOException {
    super.initialize(uri, conf);
    setConf(conf);
    // set user and acl patterns based on configuration file
    UserParam.setUserPattern(conf.get(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
    AclPermissionParam.setAclPermissionPattern(conf.get(HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY, HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
    final boolean oauthEnabled = conf.getBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY, HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_DEFAULT);
    if (oauthEnabled) {
        LOG.debug("Enabling OAuth2 in WebHDFS");
        connectionFactory = URLConnectionFactory.newOAuth2URLConnectionFactory(conf);
    } else {
        LOG.debug("Not enabling OAuth2 in WebHDFS");
        connectionFactory = URLConnectionFactory.newDefaultURLConnectionFactory(conf);
    }
    ugi = UserGroupInformation.getCurrentUser();
    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.nnAddrs = resolveNNAddr();
    final boolean haConfigured = HAUtilClient.isClientFailoverConfigured(conf, this.uri);
    final boolean logicalUri = haConfigured && HAUtilClient.isLogicalUri(conf, this.uri);
    // In non-HA or non-logical URI case, the code needs to call
    // getCanonicalUri() in order to handle the case where no port is
    // specified in the URI
    if (logicalUri) {
        this.tokenServiceName = HAUtilClient.buildTokenServiceForLogicalUri(uri, getScheme());
    } else {
        this.tokenServiceName = SecurityUtil.buildTokenService(getCanonicalUri());
    }
    if (haConfigured) {
        // HA: fail over between NameNodes on network errors, with bounded
        // attempts and exponential sleep between failovers.
        final int maxFailoverAttempts = conf.getInt(HdfsClientConfigKeys.HttpClient.FAILOVER_MAX_ATTEMPTS_KEY, HdfsClientConfigKeys.HttpClient.FAILOVER_MAX_ATTEMPTS_DEFAULT);
        final int maxRetryAttempts = conf.getInt(HdfsClientConfigKeys.HttpClient.RETRY_MAX_ATTEMPTS_KEY, HdfsClientConfigKeys.HttpClient.RETRY_MAX_ATTEMPTS_DEFAULT);
        final int failoverSleepBaseMillis = conf.getInt(HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_BASE_KEY, HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_BASE_DEFAULT);
        final int failoverSleepMaxMillis = conf.getInt(HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_MAX_KEY, HdfsClientConfigKeys.HttpClient.FAILOVER_SLEEPTIME_MAX_DEFAULT);
        this.retryPolicy = RetryPolicies.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis, failoverSleepMaxMillis);
    } else {
        // Non-HA: use the configured default retry policy.
        this.retryPolicy = RetryUtils.getDefaultRetryPolicy(conf, HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_KEY, HdfsClientConfigKeys.HttpClient.RETRY_POLICY_ENABLED_DEFAULT, HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_KEY, HdfsClientConfigKeys.HttpClient.RETRY_POLICY_SPEC_DEFAULT, HdfsConstants.SAFEMODE_EXCEPTION_CLASS_NAME);
    }
    this.workingDir = makeQualified(new Path(getHomeDirectoryString(ugi)));
    this.canRefreshDelegationToken = UserGroupInformation.isSecurityEnabled();
    this.disallowFallbackToInsecureCluster = !conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
    this.initializeRestCsrf(conf);
    this.delegationToken = null;
    // Register (or look up) the global HDFS operation counters; the
    // registry invokes the provider only if no entry exists yet.
    storageStatistics = (DFSOpsCountStatistics) GlobalStorageStatistics.INSTANCE.put(DFSOpsCountStatistics.NAME, new StorageStatisticsProvider() {

        @Override
        public StorageStatistics provide() {
            return new DFSOpsCountStatistics();
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) GlobalStorageStatistics(org.apache.hadoop.fs.GlobalStorageStatistics) StorageStatistics(org.apache.hadoop.fs.StorageStatistics) DFSOpsCountStatistics(org.apache.hadoop.hdfs.DFSOpsCountStatistics) StorageStatisticsProvider(org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider)

Example 3 with StorageStatisticsProvider

Usage of org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider in the Apache Hadoop project.

From the class DistributedFileSystem, method initialize:

/**
 * Initialize this DistributedFileSystem for the given HDFS URI: validates
 * that the URI names a host, reads the home-directory prefix, creates the
 * underlying {@code DFSClient}, and registers the global per-operation
 * statistics provider.
 *
 * @param uri the HDFS URI to connect to; must contain a host
 * @param conf configuration supplying client settings
 * @throws IOException if the URI has no host or superclass initialization fails
 */
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    super.initialize(uri, conf);
    setConf(conf);
    // A host is mandatory: without it there is no NameNode to talk to.
    final String host = uri.getHost();
    if (host == null) {
        throw new IOException("Incomplete HDFS URI, no host: " + uri);
    }
    homeDirPrefix = conf.get(HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
    this.dfs = new DFSClient(uri, conf, statistics);
    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
    this.workingDir = getHomeDirectory();
    // Register (or look up) the global HDFS operation counters; the
    // registry invokes the provider only if no entry exists yet.
    storageStatistics = (DFSOpsCountStatistics) GlobalStorageStatistics.INSTANCE.put(DFSOpsCountStatistics.NAME, new StorageStatisticsProvider() {

        @Override
        public StorageStatistics provide() {
            return new DFSOpsCountStatistics();
        }
    });
}
Also used : GlobalStorageStatistics(org.apache.hadoop.fs.GlobalStorageStatistics) StorageStatistics(org.apache.hadoop.fs.StorageStatistics) IOException(java.io.IOException) StorageStatisticsProvider(org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider)

Aggregations

StorageStatisticsProvider (org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider)3 GlobalStorageStatistics (org.apache.hadoop.fs.GlobalStorageStatistics)2 StorageStatistics (org.apache.hadoop.fs.StorageStatistics)2 IOException (java.io.IOException)1 Path (org.apache.hadoop.fs.Path)1 DFSOpsCountStatistics (org.apache.hadoop.hdfs.DFSOpsCountStatistics)1