Search in sources:

Example 1 with HadoopIgfsWrapper

Use of org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsWrapper in the project ignite by apache.

From the class IgniteHadoopFileSystem, method initialize.

/**
 * Performs one-time file system initialization: validates the IGFS URI, reads the
 * relevant configuration parameters, connects the remote IGFS client via a handshake
 * and sets up the client-side operation logger.
 *
 * @param name URI passed to constructor.
 * @param cfg Configuration passed to constructor.
 * @throws IOException If initialization failed.
 */
@SuppressWarnings("ConstantConditions")
private void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        // Guard against double initialization.
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        // Only the IGFS scheme is accepted here.
        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME +
                "://[name]/[optional_path], actual=" + name + ']');

        uriAuthority = name.getAuthority();

        // Apply the sequential-reads-before-prefetch override only when explicitly configured.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // Replication in Ignite is governed by data cache affinity; the DFS replication
        // factor is only used to force a whole file onto the local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // File write colocation flags.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Resolve the client log directory; it may legitimately be absent.
        String logDirSetting = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File resolvedLogDir = U.resolveIgnitePath(logDirSetting);

        String logDirPath = null;

        if (resolvedLogDir != null)
            logDirPath = resolvedLogDir.getAbsolutePath();

        rmtClient = new HadoopIgfsWrapper(uriAuthority, logDirPath, cfg, LOG, user);

        // Perform the handshake with the remote IGFS node.
        IgfsHandshakeResponse hs = rmtClient.handshake(logDirPath);

        grpBlockSize = hs.blockSize();

        Boolean logEnabledParam = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        // The server-side sampling flag, when present, takes precedence over local config.
        boolean logEnabled = hs.sampling() != null ? hs.sampling() : logEnabledParam;

        if (!logEnabled)
            clientLog = IgfsLogger.disabledLogger();
        else {
            // A resolvable log directory is mandatory when logging is on.
            if (logDirPath == null)
                throw new IOException("Failed to resolve log directory: " + logDirSetting);

            Integer logBatchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, hs.igfsName(), logDirPath, logBatchSize);
        }
    }
    finally {
        leaveBusy();
    }
}
Also used : IgfsHandshakeResponse(org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse) HadoopIgfsWrapper(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsWrapper) IOException(java.io.IOException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) IgfsFile(org.apache.ignite.igfs.IgfsFile) File(java.io.File)

Example 2 with HadoopIgfsWrapper

Use of org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsWrapper in the project ignite by apache.

From the class IgniteHadoopFileSystem, method initialize.

/**
 * {@inheritDoc}
 *
 * Validates the IGFS URI, delegates to the superclass initialization, reads the
 * relevant configuration parameters, connects the remote IGFS client via a handshake,
 * sets up the client-side operation logger, and finally positions the working
 * directory at the current user's home.
 */
@SuppressWarnings("ConstantConditions")
@Override
public void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        // Guard against double initialization.
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        super.initialize(name, cfg);

        setConf(cfg);

        // Only the IGFS scheme is accepted here.
        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME +
                "://[name]/[optional_path], actual=" + name + ']');

        uri = name;

        uriAuthority = uri.getAuthority();

        user = getFsHadoopUser();

        // Apply the sequential-reads-before-prefetch override only when explicitly configured.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // Replication in Ignite is governed by data cache affinity; the DFS replication
        // factor is only used to force a whole file onto the local node.
        dfltReplication = (short) cfg.getInt("dfs.replication", 3);

        // File write colocation flags.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Resolve the client log directory; it may legitimately be absent.
        String logDirSetting = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File resolvedLogDir = U.resolveIgnitePath(logDirSetting);

        String logDirPath = null;

        if (resolvedLogDir != null)
            logDirPath = resolvedLogDir.getAbsolutePath();

        rmtClient = new HadoopIgfsWrapper(uriAuthority, logDirPath, cfg, LOG, user);

        // Perform the handshake with the remote IGFS node.
        IgfsHandshakeResponse hs = rmtClient.handshake(logDirPath);

        igfsGrpBlockSize = hs.blockSize();

        Boolean logEnabledParam = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        // The server-side sampling flag, when present, takes precedence over local config.
        boolean logEnabled = hs.sampling() != null ? hs.sampling() : logEnabledParam;

        if (!logEnabled)
            clientLog = IgfsLogger.disabledLogger();
        else {
            // A resolvable log directory is mandatory when logging is on.
            if (logDirPath == null)
                throw new IOException("Failed to resolve log directory: " + logDirSetting);

            Integer logBatchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, hs.igfsName(), logDirPath, logBatchSize);
        }

        // Working directory defaults to the home directory of the current FS user.
        setWorkingDirectory(null);
    }
    finally {
        leaveBusy();
    }
}
Also used : IgfsHandshakeResponse(org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse) HadoopIgfsWrapper(org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsWrapper) IOException(java.io.IOException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) IgfsFile(org.apache.ignite.igfs.IgfsFile) File(java.io.File)

Aggregations

File (java.io.File)2 IOException (java.io.IOException)2 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)2 IgfsFile (org.apache.ignite.igfs.IgfsFile)2 HadoopIgfsWrapper (org.apache.ignite.internal.processors.hadoop.impl.igfs.HadoopIgfsWrapper)2 IgfsHandshakeResponse (org.apache.ignite.internal.processors.igfs.IgfsHandshakeResponse)2