
Example 81 with DataOutputStream

Use of java.io.DataOutputStream in project hadoop by apache.

From the class TestGlobbedCopyListing, method touchFile.

private static void touchFile(String path) throws Exception {
    FileSystem fileSystem = null;
    DataOutputStream outputStream = null;
    try {
        fileSystem = cluster.getFileSystem();
        // Create (or overwrite) an empty file on the mini-cluster.
        outputStream = fileSystem.create(new Path(path), true, 0);
        recordInExpectedValues(path);
    } finally {
        // Close both resources; with a null log, close errors are silently swallowed.
        IOUtils.cleanup(null, fileSystem, outputStream);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DataOutputStream (java.io.DataOutputStream), FileSystem (org.apache.hadoop.fs.FileSystem)
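
The create-then-null-check-then-cleanup pattern above predates try-with-resources. A minimal sketch of the same touch operation, assuming a FileSystem obtained from a default Configuration instead of the test's mini-cluster (the class and method names here are illustrative, not part of the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TouchFileSketch {
    // Create (or overwrite) an empty file; the stream auto-closes.
    static void touchFile(String path) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        try (FSDataOutputStream out = fs.create(new Path(path), true)) {
            // nothing written: an empty file is enough to "touch" the path
        }
    }
}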

Example 82 with DataOutputStream

Use of java.io.DataOutputStream in project hadoop by apache.

From the class UnmanagedAMLauncher, method launchAM.

public void launchAM(ApplicationAttemptId attemptId) throws IOException, YarnException {
    Credentials credentials = new Credentials();
    Token<AMRMTokenIdentifier> token = rmClient.getAMRMToken(attemptId.getApplicationId());
    // The service field will be empty, but that is fine: we only pass the
    // AMRMToken down to the real AM, which eventually sets the correct
    // service address.
    credentials.addToken(token.getService(), token);
    File tokenFile = File.createTempFile("unmanagedAMRMToken", "", new File(System.getProperty("user.dir")));
    try {
        FileUtil.chmod(tokenFile.getAbsolutePath(), "600");
    } catch (InterruptedException ex) {
        throw new RuntimeException(ex);
    }
    tokenFile.deleteOnExit();
    try (DataOutputStream os = new DataOutputStream(new FileOutputStream(tokenFile, true))) {
        credentials.writeTokenStorageToStream(os);
    }
    Map<String, String> env = System.getenv();
    ArrayList<String> envAMList = new ArrayList<String>();
    boolean setClasspath = false;
    for (Map.Entry<String, String> entry : env.entrySet()) {
        String key = entry.getKey();
        String value = entry.getValue();
        if (key.equals("CLASSPATH")) {
            setClasspath = true;
            if (classpath != null) {
                value = value + File.pathSeparator + classpath;
            }
        }
        envAMList.add(key + "=" + value);
    }
    if (!setClasspath && classpath != null) {
        envAMList.add("CLASSPATH=" + classpath);
    }
    ContainerId containerId = ContainerId.newContainerId(attemptId, 0);
    String hostname = InetAddress.getLocalHost().getHostName();
    envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId);
    envAMList.add(Environment.NM_HOST.name() + "=" + hostname);
    envAMList.add(Environment.NM_HTTP_PORT.name() + "=0");
    envAMList.add(Environment.NM_PORT.name() + "=0");
    // note: the stray space means the LOCAL_DIRS value is " /tmp"
    envAMList.add(Environment.LOCAL_DIRS.name() + "= /tmp");
    envAMList.add(ApplicationConstants.APP_SUBMIT_TIME_ENV + "=" + System.currentTimeMillis());
    envAMList.add(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME + "=" + tokenFile.getAbsolutePath());
    String[] envAM = new String[envAMList.size()];
    Process amProc = Runtime.getRuntime().exec(amCmd, envAMList.toArray(envAM));
    final BufferedReader errReader = new BufferedReader(new InputStreamReader(amProc.getErrorStream(), Charset.forName("UTF-8")));
    final BufferedReader inReader = new BufferedReader(new InputStreamReader(amProc.getInputStream(), Charset.forName("UTF-8")));
    // drain the child's stderr and stdout so its buffers do not fill up
    // and block the process
    Thread errThread = new Thread() {

        @Override
        public void run() {
            try {
                String line = errReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.err.println(line);
                    line = errReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the error stream", ioe);
            }
        }
    };
    Thread outThread = new Thread() {

        @Override
        public void run() {
            try {
                String line = inReader.readLine();
                while ((line != null) && !isInterrupted()) {
                    System.out.println(line);
                    line = inReader.readLine();
                }
            } catch (IOException ioe) {
                LOG.warn("Error reading the out stream", ioe);
            }
        }
    };
    try {
        errThread.start();
        outThread.start();
    } catch (IllegalStateException ise) {
        // ignored: both threads were just created, so start() cannot
        // have been called on them already
    }
    // wait for the process to finish and check the exit code
    try {
        int exitCode = amProc.waitFor();
        LOG.info("AM process exited with value: " + exitCode);
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        amCompleted = true;
    }
    try {
        // make sure that the error thread exits
        // on Windows these threads sometimes get stuck and hang the execution
        // timeout and join later after destroying the process.
        errThread.join();
        outThread.join();
        errReader.close();
        inReader.close();
    } catch (InterruptedException ie) {
        LOG.info("ShellExecutor: Interrupted while reading the error/out stream", ie);
    } catch (IOException ioe) {
        LOG.warn("Error while closing the error/out stream", ioe);
    }
    amProc.destroy();
}
Also used: InputStreamReader (java.io.InputStreamReader), DataOutputStream (java.io.DataOutputStream), ArrayList (java.util.ArrayList), IOException (java.io.IOException), AMRMTokenIdentifier (org.apache.hadoop.yarn.security.AMRMTokenIdentifier), ContainerId (org.apache.hadoop.yarn.api.records.ContainerId), FileOutputStream (java.io.FileOutputStream), BufferedReader (java.io.BufferedReader), File (java.io.File), Map (java.util.Map), Credentials (org.apache.hadoop.security.Credentials)
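
The heart of the token hand-off above is serializing a Credentials object through a DataOutputStream. A minimal sketch of just that step using try-with-resources; the standalone helper, file name, and return value are illustrative assumptions, not part of the launcher:

import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.security.Credentials;

public class TokenFileSketch {
    // Write credentials to a temp file readable only by the owner.
    static File writeTokenFile(Credentials credentials)
            throws IOException, InterruptedException {
        File tokenFile = File.createTempFile("amrmToken", ".bin"); // hypothetical name
        FileUtil.chmod(tokenFile.getAbsolutePath(), "600"); // owner read/write only
        try (DataOutputStream os = new DataOutputStream(new FileOutputStream(tokenFile))) {
            credentials.writeTokenStorageToStream(os);
        }
        return tokenFile;
    }
}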

Example 83 with DataOutputStream

Use of java.io.DataOutputStream in project hadoop by apache.

From the class TestTypedBytesWritable, method testIO.

public void testIO() throws IOException {
    TypedBytesWritable tbw = new TypedBytesWritable();
    tbw.setValue(12345);
    // Serialize into an in-memory buffer...
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutput dout = new DataOutputStream(baos);
    tbw.write(dout);
    // ...then deserialize from the same bytes and compare.
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    DataInput din = new DataInputStream(bais);
    TypedBytesWritable readTbw = new TypedBytesWritable();
    readTbw.readFields(din);
    assertEquals(tbw, readTbw);
}
Also used: DataInput (java.io.DataInput), DataOutput (java.io.DataOutput), ByteArrayInputStream (java.io.ByteArrayInputStream), DataOutputStream (java.io.DataOutputStream), ByteArrayOutputStream (java.io.ByteArrayOutputStream), DataInputStream (java.io.DataInputStream)
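
The test uses the standard in-memory round-trip idiom: serialize through a DataOutputStream backed by a ByteArrayOutputStream, then read the same bytes back through a DataInputStream. A self-contained, JDK-only sketch of the idiom (the values written are chosen arbitrarily):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class RoundTripSketch {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(baos)) {
            out.writeInt(12345);   // write a value...
            out.writeUTF("hello"); // ...and a string
        }
        try (DataInputStream in = new DataInputStream(
                new ByteArrayInputStream(baos.toByteArray()))) {
            // read back in the same order the fields were written
            assert in.readInt() == 12345;
            assert in.readUTF().equals("hello");
        }
    }
}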

Example 84 with DataOutputStream

Use of java.io.DataOutputStream in project hadoop by apache.

From the class TestSymLink, method testSymLink.

@Test(timeout = 120000)
public void testSymLink() throws Exception {
    boolean mayExit = false;
    MiniMRCluster mr = null;
    MiniDFSCluster dfs = null;
    try {
        Configuration conf = new Configuration();
        dfs = new MiniDFSCluster.Builder(conf).build();
        FileSystem fileSys = dfs.getFileSystem();
        String namenode = fileSys.getUri().toString();
        mr = new MiniMRCluster(1, namenode, 3);
        List<String> args = new ArrayList<String>();
        for (Map.Entry<String, String> entry : mr.createJobConf()) {
            args.add("-jobconf");
            args.add(entry.getKey() + "=" + entry.getValue());
        }
        // During tests the default Configuration uses a local mapred cluster,
        // so there is no need to specify -config or -cluster.
        String[] argv = new String[] {
            "-input", INPUT_FILE,
            "-output", OUTPUT_DIR,
            "-mapper", map,
            "-reducer", reduce,
            "-jobconf", "stream.tmpdir=" + System.getProperty("test.build.data", "/tmp"),
            "-jobconf", JobConf.MAPRED_MAP_TASK_JAVA_OPTS + "=" +
                "-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
                "-Dbuild.test=" + System.getProperty("build.test") + " " +
                conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
            "-jobconf", JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS + "=" +
                "-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
                "-Dbuild.test=" + System.getProperty("build.test") + " " +
                conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, conf.get(JobConf.MAPRED_TASK_JAVA_OPTS, "")),
            "-cacheFile", fileSys.getUri() + CACHE_FILE + "#testlink",
            "-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR
        };
        for (String arg : argv) {
            args.add(arg);
        }
        argv = args.toArray(new String[args.size()]);
        fileSys.delete(new Path(OUTPUT_DIR), true);
        DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
        file.writeBytes(mapString);
        file.close();
        file = fileSys.create(new Path(CACHE_FILE));
        file.writeBytes(cacheString);
        file.close();
        job = new StreamJob(argv, mayExit);
        job.go();
        fileSys = dfs.getFileSystem();
        String line = null;
        Path[] fileList = FileUtil.stat2Paths(fileSys.listStatus(new Path(OUTPUT_DIR), new Utils.OutputFileUtils.OutputFilesFilter()));
        for (int i = 0; i < fileList.length; i++) {
            System.out.println(fileList[i].toString());
            BufferedReader bread = new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
            line = bread.readLine();
            System.out.println(line);
        }
        assertEquals(cacheString + "\t", line);
    } finally {
        if (dfs != null) {
            dfs.shutdown();
        }
        if (mr != null) {
            mr.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), InputStreamReader (java.io.InputStreamReader), DataOutputStream (java.io.DataOutputStream), ArrayList (java.util.ArrayList), MiniMRCluster (org.apache.hadoop.mapred.MiniMRCluster), Utils (org.apache.hadoop.mapred.Utils), FileSystem (org.apache.hadoop.fs.FileSystem), BufferedReader (java.io.BufferedReader), Map (java.util.Map), Test (org.junit.Test)
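
Note that fileSys.create() returns an FSDataOutputStream, which extends DataOutputStream, so writeBytes() is available directly on the handle. A minimal sketch of that write step in isolation; the helper name and parameters are illustrative assumptions:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteInputSketch {
    // Write a small text payload to a filesystem path.
    static void writeFile(FileSystem fs, String path, String contents) throws IOException {
        try (FSDataOutputStream out = fs.create(new Path(path))) {
            // writeBytes drops the high byte of each char; fine for ASCII test data
            out.writeBytes(contents);
        }
    }
}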

Example 85 with DataOutputStream

Use of java.io.DataOutputStream in project hadoop by apache.

From the class TypedBytesMapApp, method go.

public void go() throws IOException {
    TypedBytesInput tbinput = new TypedBytesInput(new DataInputStream(System.in));
    TypedBytesOutput tboutput = new TypedBytesOutput(new DataOutputStream(System.out));
    Object key = tbinput.readRaw();
    while (key != null) {
        Object value = tbinput.read();
        for (String part : value.toString().split(find)) {
            // emit the matched part as the key...
            tboutput.write(part);
            // ...and a count of 1 as the value
            tboutput.write(1);
        }
        System.err.println("reporter:counter:UserCounters,InputLines,1");
        key = tbinput.readRaw();
    }
    System.out.flush();
}
Also used: TypedBytesInput (org.apache.hadoop.typedbytes.TypedBytesInput), DataOutputStream (java.io.DataOutputStream), TypedBytesOutput (org.apache.hadoop.typedbytes.TypedBytesOutput), DataInputStream (java.io.DataInputStream)
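
The mapper wraps System.in and System.out in data streams once and then loops until readRaw() returns null at end of input. The same loop shape can be shown with plain JDK streams; the length-prefixed UTF framing below is an illustrative assumption (real typed bytes use a richer type-code format):

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
import java.io.IOException;

public class StreamLoopSketch {
    public static void main(String[] args) throws IOException {
        DataInputStream in = new DataInputStream(System.in);
        DataOutputStream out = new DataOutputStream(System.out);
        try {
            while (true) {
                String record = in.readUTF(); // blocks until a record arrives or EOF
                out.writeUTF(record);         // echo it back downstream
            }
        } catch (EOFException eof) {
            // end of input: fall through and flush what we have
        }
        out.flush();
    }
}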

Aggregations

DataOutputStream (java.io.DataOutputStream): 2968 uses
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1314 uses
IOException (java.io.IOException): 1024 uses
Test (org.junit.Test): 633 uses
DataInputStream (java.io.DataInputStream): 615 uses
FileOutputStream (java.io.FileOutputStream): 427 uses
ByteArrayInputStream (java.io.ByteArrayInputStream): 411 uses
File (java.io.File): 281 uses
BufferedOutputStream (java.io.BufferedOutputStream): 228 uses
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 172 uses
URL (java.net.URL): 149 uses
InputStreamReader (java.io.InputStreamReader): 146 uses
BufferedReader (java.io.BufferedReader): 142 uses
Path (org.apache.hadoop.fs.Path): 137 uses
DataInput (java.io.DataInput): 124 uses
ArrayList (java.util.ArrayList): 122 uses
HttpURLConnection (java.net.HttpURLConnection): 120 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 117 uses
FileInputStream (java.io.FileInputStream): 107 uses
InputStream (java.io.InputStream): 107 uses