Example 41 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project flink by apache.

the class YarnClusterClient method shutdownCluster.

/**
	 * Shuts down the Yarn application
	 */
public void shutdownCluster() {
    if (hasBeenShutDown.getAndSet(true)) {
        return;
    }
    if (!isConnected) {
        throw new IllegalStateException("The cluster has not been connected to the ApplicationMaster.");
    }
    try {
        Runtime.getRuntime().removeShutdownHook(clientShutdownHook);
    } catch (IllegalStateException e) {
    // we are already in the shutdown hook
    }
    LOG.info("Sending shutdown request to the Application Master");
    try {
        Future<Object> response = Patterns.ask(applicationClient.get(), new YarnMessages.LocalStopYarnSession(getApplicationStatus(), "Flink YARN Client requested shutdown"), new Timeout(akkaDuration));
        Await.ready(response, akkaDuration);
    } catch (Exception e) {
        LOG.warn("Error while stopping YARN cluster.", e);
    }
    try {
        File propertiesFile = FlinkYarnSessionCli.getYarnPropertiesLocation(flinkConfig);
        if (propertiesFile.isFile()) {
            if (propertiesFile.delete()) {
                LOG.info("Deleted Yarn properties file at {}", propertiesFile.getAbsoluteFile().toString());
            } else {
                LOG.warn("Couldn't delete Yarn properties file at {}", propertiesFile.getAbsoluteFile().toString());
            }
        }
    } catch (Exception e) {
        LOG.warn("Exception while deleting the JobManager address file", e);
    }
    if (sessionFilesDir != null) {
        LOG.info("Deleting files in " + sessionFilesDir);
        try {
            FileSystem shutFS = FileSystem.get(hadoopConfig);
            // delete conf and jar file.
            shutFS.delete(sessionFilesDir, true);
            shutFS.close();
        } catch (IOException e) {
            LOG.error("Could not delete the Flink jar and configuration files in HDFS..", e);
        }
    } else {
        LOG.warn("Session file directory not set. Not deleting session files");
    }
    try {
        pollingRunner.stopRunner();
        pollingRunner.join(1000);
    } catch (InterruptedException e) {
        LOG.warn("Shutdown of the polling runner was interrupted", e);
        Thread.currentThread().interrupt();
    }
    try {
        ApplicationReport appReport = yarnClient.getApplicationReport(appId);
        LOG.info("Application " + appId + " finished with state " + appReport.getYarnApplicationState() + " and final state " + appReport.getFinalApplicationStatus() + " at " + appReport.getFinishTime());
        if (appReport.getYarnApplicationState() == YarnApplicationState.FAILED || appReport.getYarnApplicationState() == YarnApplicationState.KILLED) {
            LOG.warn("Application failed. Diagnostics " + appReport.getDiagnostics());
            LOG.warn("If log aggregation is activated in the Hadoop cluster, we recommend to retrieve " + "the full application log using this command:" + System.lineSeparator() + "\tyarn logs -applicationId " + appReport.getApplicationId() + System.lineSeparator() + "(It sometimes takes a few seconds until the logs are aggregated)");
        }
    } catch (Exception e) {
        LOG.warn("Couldn't get final report", e);
    }
    LOG.info("YARN Client is shutting down");
    // actorRunner is using the yarnClient.
    yarnClient.stop();
    // set null to clearly see if somebody wants to access it afterwards.
    yarnClient = null;
}
Also used : ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) Timeout(akka.util.Timeout) FileSystem(org.apache.hadoop.fs.FileSystem) IOException(java.io.IOException) File(java.io.File) ProgramInvocationException(org.apache.flink.client.program.ProgramInvocationException) YarnException(org.apache.hadoop.yarn.exceptions.YarnException)
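
The sessionFilesDir cleanup above reduces to a small FileSystem pattern: obtain a handle from the Hadoop configuration, delete a directory tree recursively, and close the handle. A minimal standalone sketch of that pattern (the class name and error handling here are illustrative, not part of Flink):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;

public class SessionFilesCleanup {

    public static void deleteRecursively(Configuration conf, Path dir) throws IOException {
        // FileSystem.get resolves the default scheme from fs.defaultFS in the configuration.
        FileSystem fs = FileSystem.get(conf);
        try {
            // The boolean argument requests recursive deletion of the whole directory tree.
            if (!fs.delete(dir, true)) {
                System.err.println("Nothing deleted at " + dir);
            }
        } finally {
            // FileSystem.get returns a cached, shared instance by default; closing it is
            // only safe when no other code in the process still needs it, as in the
            // shutdown path above.
            fs.close();
        }
    }
}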

Example 42 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

the class Configuration method getLocalPath.

/** 
   * Get a local file under a directory named by <i>dirsProp</i> with
   * the given <i>path</i>.  If <i>dirsProp</i> contains multiple directories,
   * then one is chosen based on <i>path</i>'s hash code.  If the selected
   * directory does not exist, an attempt is made to create it.
   * 
   * @param dirsProp the name of the configuration property whose value lists the candidate directories.
   * @param path file-path.
   * @return local file under the directory with the given path.
   */
public Path getLocalPath(String dirsProp, String path) throws IOException {
    String[] dirs = getTrimmedStrings(dirsProp);
    int hashCode = path.hashCode();
    FileSystem fs = FileSystem.getLocal(this);
    for (int i = 0; i < dirs.length; i++) {
        // try each local dir
        int index = ((hashCode + i) & Integer.MAX_VALUE) % dirs.length;
        Path file = new Path(dirs[index], path);
        Path dir = file.getParent();
        if (fs.mkdirs(dir) || fs.exists(dir)) {
            return file;
        }
    }
    LOG.warn("Could not make " + path + " in local directories from " + dirsProp);
    for (int i = 0; i < dirs.length; i++) {
        int index = ((hashCode + i) & Integer.MAX_VALUE) % dirs.length;
        LOG.warn(dirsProp + "[" + index + "]=" + dirs[index]);
    }
    throw new IOException("No valid local directories in property: " + dirsProp);
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) IOException(java.io.IOException)
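
The interesting part of getLocalPath is the probe order: it hashes the relative path, then tries each configured directory starting at the hash-derived index and wrapping around. A standalone sketch of just that arithmetic (class and method names are hypothetical):

import java.util.Arrays;

public class LocalDirChooser {

    // Mirrors the probe order in getLocalPath: start at (hash & MAX_VALUE) % N, then wrap.
    public static int[] probeOrder(String path, int numDirs) {
        int hashCode = path.hashCode();
        int[] order = new int[numDirs];
        for (int i = 0; i < numDirs; i++) {
            // Masking with Integer.MAX_VALUE clears the sign bit so the modulus is
            // non-negative, even when hashCode + i overflows.
            order[i] = ((hashCode + i) & Integer.MAX_VALUE) % numDirs;
        }
        return order;
    }

    public static void main(String[] args) {
        // The same path always maps to the same starting directory, which spreads
        // files across local disks while keeping placement deterministic.
        System.out.println(Arrays.toString(probeOrder("jobcache/task_0001", 3)));
    }
}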

Example 43 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

the class TestKeyProviderFactory method testJksProvider.

@Test
public void testJksProvider() throws Exception {
    Configuration conf = new Configuration();
    final Path jksPath = new Path(testRootDir.toString(), "test.jks");
    final String ourUrl = JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
    File file = new File(testRootDir, "test.jks");
    file.delete();
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl);
    checkSpecificProvider(conf, ourUrl);
    // START : Test flush error by failure injection
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl.replace(JavaKeyStoreProvider.SCHEME_NAME, FailureInjectingJavaKeyStoreProvider.SCHEME_NAME));
    // get a new instance of the provider to ensure it was saved correctly
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
    // inject failure during keystore write
    FailureInjectingJavaKeyStoreProvider fProvider = (FailureInjectingJavaKeyStoreProvider) provider;
    fProvider.setWriteFail(true);
    provider.createKey("key5", new byte[] { 1 }, KeyProvider.options(conf).setBitLength(8));
    assertNotNull(provider.getCurrentKey("key5"));
    try {
        provider.flush();
        Assert.fail("Should not succeed");
    } catch (Exception e) {
    // Ignore
    }
    // Should be reset to pre-flush state
    Assert.assertNull(provider.getCurrentKey("key5"));
    // Un-inject last failure and
    // inject failure during keystore backup
    fProvider.setWriteFail(false);
    fProvider.setBackupFail(true);
    provider.createKey("key6", new byte[] { 1 }, KeyProvider.options(conf).setBitLength(8));
    assertNotNull(provider.getCurrentKey("key6"));
    try {
        provider.flush();
        Assert.fail("Should not succeed");
    } catch (Exception e) {
    // Ignore
    }
    // Should be reset to pre-flush state
    Assert.assertNull(provider.getCurrentKey("key6"));
    // END : Test flush error by failure injection
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, ourUrl.replace(FailureInjectingJavaKeyStoreProvider.SCHEME_NAME, JavaKeyStoreProvider.SCHEME_NAME));
    Path path = ProviderUtils.unnestUri(new URI(ourUrl));
    FileSystem fs = path.getFileSystem(conf);
    FileStatus s = fs.getFileStatus(path);
    assertTrue(s.getPermission().toString().equals("rw-------"));
    assertTrue(file + " should exist", file.isFile());
    // Corrupt file and Check if JKS can reload from _OLD file
    File oldFile = new File(file.getPath() + "_OLD");
    file.renameTo(oldFile);
    file.delete();
    file.createNewFile();
    assertTrue(oldFile.exists());
    provider = KeyProviderFactory.getProviders(conf).get(0);
    assertTrue(file.exists());
    assertTrue(oldFile + "should be deleted", !oldFile.exists());
    verifyAfterReload(file, provider);
    assertTrue(!oldFile.exists());
    // _NEW and current file should not exist together
    File newFile = new File(file.getPath() + "_NEW");
    newFile.createNewFile();
    try {
        provider = KeyProviderFactory.getProviders(conf).get(0);
        Assert.fail("_NEW and current file should not exist together !!");
    } catch (Exception e) {
    // Ignore
    } finally {
        if (newFile.exists()) {
            newFile.delete();
        }
    }
    // Load from _NEW file
    file.renameTo(newFile);
    file.delete();
    try {
        provider = KeyProviderFactory.getProviders(conf).get(0);
        Assert.assertFalse(newFile.exists());
        Assert.assertFalse(oldFile.exists());
    } catch (Exception e) {
        Assert.fail("JKS should load from _NEW file !!");
    // Ignore
    }
    verifyAfterReload(file, provider);
    // _NEW exists but corrupt.. must load from _OLD
    newFile.createNewFile();
    file.renameTo(oldFile);
    file.delete();
    try {
        provider = KeyProviderFactory.getProviders(conf).get(0);
        Assert.assertFalse(newFile.exists());
        Assert.assertFalse(oldFile.exists());
    } catch (Exception e) {
        Assert.fail("JKS should load from _OLD file !!");
    // Ignore
    } finally {
        if (newFile.exists()) {
            newFile.delete();
        }
    }
    verifyAfterReload(file, provider);
    // check permission retention after explicit change
    fs.setPermission(path, new FsPermission("777"));
    checkPermissionRetention(conf, ourUrl, path);
    // Check that an uppercase keyname results in an error
    provider = KeyProviderFactory.getProviders(conf).get(0);
    try {
        provider.createKey("UPPERCASE", KeyProvider.options(conf));
        Assert.fail("Expected failure on creating key name with uppercase " + "characters");
    } catch (IllegalArgumentException e) {
        GenericTestUtils.assertExceptionContains("Uppercase key names", e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) FsPermission(org.apache.hadoop.fs.permission.FsPermission) File(java.io.File) URI(java.net.URI) IOException(java.io.IOException) Test(org.junit.Test)
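
The permission assertion near the end of the test ("rw-------", i.e. owner read/write only) is a common check for keystore files. A minimal standalone version of just that check, with a hypothetical local path:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PermissionCheck {

    public static boolean isOwnerReadWriteOnly(Path path, Configuration conf) throws IOException {
        // path.getFileSystem resolves the FileSystem from the path's scheme (file://, hdfs://, ...).
        FileSystem fs = path.getFileSystem(conf);
        FileStatus status = fs.getFileStatus(path);
        // FsPermission.toString renders rwx triples, e.g. "rw-------" for mode 600.
        return "rw-------".equals(status.getPermission().toString());
    }

    public static void main(String[] args) throws Exception {
        // Hypothetical keystore location; the test above derives it from the provider URI instead.
        Path jks = new Path("file:///tmp/test.jks");
        System.out.println(isOwnerReadWriteOnly(jks, new Configuration()));
    }
}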

Example 44 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project hadoop by apache.

the class TestKeyProviderFactory method checkPermissionRetention.

public void checkPermissionRetention(Configuration conf, String ourUrl, Path path) throws Exception {
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
    // let's add a new key and flush and check that permissions are still set to 777
    byte[] key = new byte[16];
    for (int i = 0; i < key.length; ++i) {
        key[i] = (byte) i;
    }
    // create a new key
    try {
        provider.createKey("key5", key, KeyProvider.options(conf));
    } catch (Exception e) {
        e.printStackTrace();
        throw e;
    }
    provider.flush();
    // get a new instance of the provider to ensure it was saved correctly
    provider = KeyProviderFactory.getProviders(conf).get(0);
    assertArrayEquals(key, provider.getCurrentKey("key5").getMaterial());
    FileSystem fs = path.getFileSystem(conf);
    FileStatus s = fs.getFileStatus(path);
    assertTrue("Permissions should have been retained from the preexisting keystore.", s.getPermission().toString().equals("rwxrwxrwx"));
}
Also used : FileStatus(org.apache.hadoop.fs.FileStatus) FileSystem(org.apache.hadoop.fs.FileSystem) IOException(java.io.IOException)
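
For reference, the 777 permission this method expects to find retained is set (in the calling test above) with FileSystem.setPermission and an FsPermission parsed from an octal string. An isolated sketch of that call, with a hypothetical path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class SetKeystorePermission {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical placeholder; the test resolves the real keystore path from the provider URI.
        Path path = new Path("file:///tmp/test.jks");
        FileSystem fs = path.getFileSystem(conf);
        // FsPermission accepts an octal-style string; "777" becomes rwxrwxrwx.
        fs.setPermission(path, new FsPermission("777"));
    }
}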

Example 45 with FileSystem

use of org.apache.hadoop.fs.FileSystem in project flink by apache.

the class HadoopIOFormatsITCase method preSubmit.

@Override
protected void preSubmit() throws Exception {
    resultPath = new String[] { getTempDirPath("result0"), getTempDirPath("result1") };
    File sequenceFile = createAndRegisterTempFile("seqFile");
    sequenceFileInPath = sequenceFile.toURI().toString();
    // Create a sequence file
    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    FileSystem fs = FileSystem.get(URI.create(sequenceFile.getAbsolutePath()), conf);
    Path path = new Path(sequenceFile.getAbsolutePath());
    //  ------------------ Long / Text Key Value pair: ------------
    int kvCount = 4;
    LongWritable key = new LongWritable();
    Text value = new Text();
    SequenceFile.Writer writer = null;
    try {
        writer = SequenceFile.createWriter(fs, conf, path, key.getClass(), value.getClass());
        for (int i = 0; i < kvCount; i++) {
            if (i == 1) {
                // write key = 1 a bit more often.
                for (int a = 0; a < 15; a++) {
                    key.set(i);
                    value.set(i + " - somestring");
                    writer.append(key, value);
                }
            }
            key.set(i);
            value.set(i + " - somestring");
            writer.append(key, value);
        }
    } finally {
        IOUtils.closeStream(writer);
    }
    //  ------------------ Null / Long Key Value pair: ------------
    File sequenceFileNull = createAndRegisterTempFile("seqFileNullKey");
    sequenceFileInPathNull = sequenceFileNull.toURI().toString();
    path = new Path(sequenceFileInPathNull);
    LongWritable value1 = new LongWritable();
    SequenceFile.Writer writer1 = null;
    try {
        writer1 = SequenceFile.createWriter(fs, conf, path, NullWritable.class, value1.getClass());
        for (int i = 0; i < kvCount; i++) {
            value1.set(i);
            writer1.append(NullWritable.get(), value1);
        }
    } finally {
        IOUtils.closeStream(writer1);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.flink.configuration.Configuration) Text(org.apache.hadoop.io.Text) NullWritable(org.apache.hadoop.io.NullWritable) SequenceFile(org.apache.hadoop.io.SequenceFile) FileSystem(org.apache.hadoop.fs.FileSystem) LongWritable(org.apache.hadoop.io.LongWritable) File(java.io.File)
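
To inspect what preSubmit wrote, the first sequence file can be read back with SequenceFile.Reader using the same writable types. A minimal sketch (the file path is a hypothetical placeholder; the test derives its paths from registered temp files):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class ReadSeqFile {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical placeholder path.
        Path path = new Path("file:///tmp/seqFile");
        SequenceFile.Reader reader = null;
        try {
            reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(path));
            LongWritable key = new LongWritable();
            Text value = new Text();
            // next(key, value) fills both writables and returns false at end of file.
            while (reader.next(key, value)) {
                System.out.println(key + "\t" + value);
            }
        } finally {
            IOUtils.closeStream(reader);
        }
    }
}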

Aggregations

FileSystem (org.apache.hadoop.fs.FileSystem): 2611
Path (org.apache.hadoop.fs.Path): 2199
Test (org.junit.Test): 1034
Configuration (org.apache.hadoop.conf.Configuration): 890
IOException (java.io.IOException): 757
FileStatus (org.apache.hadoop.fs.FileStatus): 419
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 264
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 227
ArrayList (java.util.ArrayList): 208
File (java.io.File): 181
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 165
JobConf (org.apache.hadoop.mapred.JobConf): 163
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 151
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 145
URI (java.net.URI): 135
SequenceFile (org.apache.hadoop.io.SequenceFile): 118
Text (org.apache.hadoop.io.Text): 112
FileNotFoundException (java.io.FileNotFoundException): 102
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 94
Job (org.apache.hadoop.mapreduce.Job): 81