Example 51 with StringTokenizer

use of java.util.StringTokenizer in project hadoop by apache.

the class DFSCIOTest method analyzeResult.

private static void analyzeResult(FileSystem fs, int testType, long execTime, String resFileName) throws IOException {
    Path reduceFile;
    if (testType == TEST_TYPE_WRITE)
        reduceFile = new Path(WRITE_DIR, "part-00000");
    else
        reduceFile = new Path(READ_DIR, "part-00000");
    DataInputStream in;
    in = new DataInputStream(fs.open(reduceFile));
    BufferedReader lines;
    lines = new BufferedReader(new InputStreamReader(in));
    long tasks = 0;
    long size = 0;
    long time = 0;
    float rate = 0;
    float sqrate = 0;
    String line;
    while ((line = lines.readLine()) != null) {
        StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
        String attr = tokens.nextToken();
        if (attr.endsWith(":tasks"))
            tasks = Long.parseLong(tokens.nextToken());
        else if (attr.endsWith(":size"))
            size = Long.parseLong(tokens.nextToken());
        else if (attr.endsWith(":time"))
            time = Long.parseLong(tokens.nextToken());
        else if (attr.endsWith(":rate"))
            rate = Float.parseFloat(tokens.nextToken());
        else if (attr.endsWith(":sqrate"))
            sqrate = Float.parseFloat(tokens.nextToken());
    }
    double med = rate / 1000 / tasks;
    double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - med * med));
    String[] resultLines = { "----- DFSCIOTest ----- : " + ((testType == TEST_TYPE_WRITE) ? "write" : (testType == TEST_TYPE_READ) ? "read" : "unknown"), "           Date & time: " + new Date(System.currentTimeMillis()), "       Number of files: " + tasks, "Total MBytes processed: " + size / MEGA, "     Throughput mb/sec: " + size * 1000.0 / (time * MEGA), "Average IO rate mb/sec: " + med, " Std IO rate deviation: " + stdDev, "    Test exec time sec: " + (float) execTime / 1000, "" };
    PrintStream res = new PrintStream(new FileOutputStream(new File(resFileName), true));
    for (int i = 0; i < resultLines.length; i++) {
        LOG.info(resultLines[i]);
        res.println(resultLines[i]);
    }
}
Also used : PrintStream(java.io.PrintStream) InputStreamReader(java.io.InputStreamReader) DataInputStream(java.io.DataInputStream) Date(java.util.Date) StringTokenizer(java.util.StringTokenizer) FileOutputStream(java.io.FileOutputStream) BufferedReader(java.io.BufferedReader) SequenceFile(org.apache.hadoop.io.SequenceFile) File(java.io.File)
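
The parsing loop above can be isolated into a minimal, self-contained sketch. The result lines below are hypothetical and only mimic the "<attr> <value>" layout the test reads from the reduce output file; the delimiter set is the same one used above.

import java.util.StringTokenizer;

public class ResultLineParsing {
    public static void main(String[] args) {
        // Hypothetical result lines in the same "<attr> <value>" shape as the reduce output.
        String[] lines = { "f:tasks\t4", "f:size\t1048576", "f:time\t2000" };
        long tasks = 0, size = 0, time = 0;
        for (String line : lines) {
            // Same delimiters as the test: whitespace characters plus '%'.
            StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
            String attr = tokens.nextToken();
            if (attr.endsWith(":tasks"))
                tasks = Long.parseLong(tokens.nextToken());
            else if (attr.endsWith(":size"))
                size = Long.parseLong(tokens.nextToken());
            else if (attr.endsWith(":time"))
                time = Long.parseLong(tokens.nextToken());
        }
        System.out.println("tasks=" + tasks + " size=" + size + " time=" + time);
    }
}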

Example 52 with StringTokenizer

use of java.util.StringTokenizer in project hadoop by apache.

the class TestJobHistoryParsing method checkHistoryParsing.

private void checkHistoryParsing(final int numMaps, final int numReduces, final int numSuccessfulMaps) throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRJobConfig.USER_NAME, System.getProperty("user.name"));
    long amStartTimeEst = System.currentTimeMillis();
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, MyResolver.class, DNSToSwitchMapping.class);
    RackResolver.init(conf);
    MRApp app = new MRAppWithHistory(numMaps, numReduces, true, this.getClass().getName(), true);
    app.submit(conf);
    Job job = app.getContext().getAllJobs().values().iterator().next();
    JobId jobId = job.getID();
    LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
    app.waitForState(job, JobState.SUCCEEDED);
    // make sure all events are flushed
    app.waitForState(Service.STATE.STOPPED);
    String jobhistoryDir = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
    FileContext fc = null;
    try {
        fc = FileContext.getFileContext(conf);
    } catch (IOException ioe) {
        LOG.info("Can not get FileContext", ioe);
        throw (new Exception("Can not get File Context"));
    }
    if (numMaps == numSuccessfulMaps) {
        String summaryFileName = JobHistoryUtils.getIntermediateSummaryFileName(jobId);
        Path summaryFile = new Path(jobhistoryDir, summaryFileName);
        String jobSummaryString = getJobSummary(fc, summaryFile);
        Assert.assertNotNull(jobSummaryString);
        Assert.assertTrue(jobSummaryString.contains("resourcesPerMap=100"));
        Assert.assertTrue(jobSummaryString.contains("resourcesPerReduce=100"));
        Map<String, String> jobSummaryElements = new HashMap<String, String>();
        StringTokenizer strToken = new StringTokenizer(jobSummaryString, ",");
        while (strToken.hasMoreTokens()) {
            String keypair = strToken.nextToken();
            jobSummaryElements.put(keypair.split("=")[0], keypair.split("=")[1]);
        }
        Assert.assertEquals("JobId does not match", jobId.toString(), jobSummaryElements.get("jobId"));
        Assert.assertEquals("JobName does not match", "test", jobSummaryElements.get("jobName"));
        Assert.assertTrue("submitTime should not be 0", Long.parseLong(jobSummaryElements.get("submitTime")) != 0);
        Assert.assertTrue("launchTime should not be 0", Long.parseLong(jobSummaryElements.get("launchTime")) != 0);
        Assert.assertTrue("firstMapTaskLaunchTime should not be 0", Long.parseLong(jobSummaryElements.get("firstMapTaskLaunchTime")) != 0);
        Assert.assertTrue("firstReduceTaskLaunchTime should not be 0", Long.parseLong(jobSummaryElements.get("firstReduceTaskLaunchTime")) != 0);
        Assert.assertTrue("finishTime should not be 0", Long.parseLong(jobSummaryElements.get("finishTime")) != 0);
        Assert.assertEquals("Mismatch in num map slots", numSuccessfulMaps, Integer.parseInt(jobSummaryElements.get("numMaps")));
        Assert.assertEquals("Mismatch in num reduce slots", numReduces, Integer.parseInt(jobSummaryElements.get("numReduces")));
        Assert.assertEquals("User does not match", System.getProperty("user.name"), jobSummaryElements.get("user"));
        Assert.assertEquals("Queue does not match", "default", jobSummaryElements.get("queue"));
        Assert.assertEquals("Status does not match", "SUCCEEDED", jobSummaryElements.get("status"));
    }
    JobHistory jobHistory = new JobHistory();
    jobHistory.init(conf);
    HistoryFileInfo fileInfo = jobHistory.getJobFileInfo(jobId);
    JobInfo jobInfo;
    long numFinishedMaps;
    synchronized (fileInfo) {
        Path historyFilePath = fileInfo.getHistoryFile();
        FSDataInputStream in = null;
        LOG.info("JobHistoryFile is: " + historyFilePath);
        try {
            in = fc.open(fc.makeQualified(historyFilePath));
        } catch (IOException ioe) {
            LOG.info("Can not open history file: " + historyFilePath, ioe);
            throw (new Exception("Can not open History File"));
        }
        JobHistoryParser parser = new JobHistoryParser(in);
        final EventReader realReader = new EventReader(in);
        EventReader reader = Mockito.mock(EventReader.class);
        if (numMaps == numSuccessfulMaps) {
            reader = realReader;
        } else {
            // Hack!
            final AtomicInteger numFinishedEvents = new AtomicInteger(0);
            Mockito.when(reader.getNextEvent()).thenAnswer(new Answer<HistoryEvent>() {

                public HistoryEvent answer(InvocationOnMock invocation) throws IOException {
                    HistoryEvent event = realReader.getNextEvent();
                    if (event instanceof TaskFinishedEvent) {
                        numFinishedEvents.incrementAndGet();
                    }
                    if (numFinishedEvents.get() <= numSuccessfulMaps) {
                        return event;
                    } else {
                        throw new IOException("test");
                    }
                }
            });
        }
        jobInfo = parser.parse(reader);
        numFinishedMaps = computeFinishedMaps(jobInfo, numMaps, numSuccessfulMaps);
        if (numFinishedMaps != numMaps) {
            Exception parseException = parser.getParseException();
            Assert.assertNotNull("Didn't get expected parse exception", parseException);
        }
    }
    Assert.assertEquals("Incorrect username ", System.getProperty("user.name"), jobInfo.getUsername());
    Assert.assertEquals("Incorrect jobName ", "test", jobInfo.getJobname());
    Assert.assertEquals("Incorrect queuename ", "default", jobInfo.getJobQueueName());
    Assert.assertEquals("incorrect conf path", "test", jobInfo.getJobConfPath());
    Assert.assertEquals("incorrect finishedMap ", numSuccessfulMaps, numFinishedMaps);
    Assert.assertEquals("incorrect finishedReduces ", numReduces, jobInfo.getFinishedReduces());
    Assert.assertEquals("incorrect uberized ", job.isUber(), jobInfo.getUberized());
    Map<TaskID, TaskInfo> allTasks = jobInfo.getAllTasks();
    int totalTasks = allTasks.size();
    Assert.assertEquals("total number of tasks is incorrect  ", (numMaps + numReduces), totalTasks);
    // Verify aminfo
    Assert.assertEquals(1, jobInfo.getAMInfos().size());
    Assert.assertEquals(MRApp.NM_HOST, jobInfo.getAMInfos().get(0).getNodeManagerHost());
    AMInfo amInfo = jobInfo.getAMInfos().get(0);
    Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
    Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
    Assert.assertEquals(1, amInfo.getAppAttemptId().getAttemptId());
    Assert.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId().getApplicationAttemptId());
    Assert.assertTrue(amInfo.getStartTime() <= System.currentTimeMillis() && amInfo.getStartTime() >= amStartTimeEst);
    ContainerId fakeCid = MRApp.newContainerId(-1, -1, -1, -1);
    // Assert at taskAttempt level
    for (TaskInfo taskInfo : allTasks.values()) {
        int taskAttemptCount = taskInfo.getAllTaskAttempts().size();
        Assert.assertEquals("total number of task attempts ", 1, taskAttemptCount);
        TaskAttemptInfo taInfo = taskInfo.getAllTaskAttempts().values().iterator().next();
        Assert.assertNotNull(taInfo.getContainerId());
        // Verify the wrong ctor is not being used. Remove after mrv1 is removed.
        Assert.assertFalse(taInfo.getContainerId().equals(fakeCid));
    }
    // Deep compare Job and JobInfo
    for (Task task : job.getTasks().values()) {
        TaskInfo taskInfo = allTasks.get(TypeConverter.fromYarn(task.getID()));
        Assert.assertNotNull("TaskInfo not found", taskInfo);
        for (TaskAttempt taskAttempt : task.getAttempts().values()) {
            TaskAttemptInfo taskAttemptInfo = taskInfo.getAllTaskAttempts().get(TypeConverter.fromYarn((taskAttempt.getID())));
            Assert.assertNotNull("TaskAttemptInfo not found", taskAttemptInfo);
            Assert.assertEquals("Incorrect shuffle port for task attempt", taskAttempt.getShufflePort(), taskAttemptInfo.getShufflePort());
            if (numMaps == numSuccessfulMaps) {
                Assert.assertEquals(MRApp.NM_HOST, taskAttemptInfo.getHostname());
                Assert.assertEquals(MRApp.NM_PORT, taskAttemptInfo.getPort());
                // Verify rack-name
                Assert.assertEquals("rack-name is incorrect", taskAttemptInfo.getRackname(), RACK_NAME);
            }
        }
    }
    // test output for HistoryViewer
    PrintStream stdps = System.out;
    try {
        System.setOut(new PrintStream(outContent));
        HistoryViewer viewer;
        synchronized (fileInfo) {
            viewer = new HistoryViewer(fc.makeQualified(fileInfo.getHistoryFile()).toString(), conf, true);
        }
        viewer.print();
        for (TaskInfo taskInfo : allTasks.values()) {
            String test = (taskInfo.getTaskStatus() == null ? "" : taskInfo.getTaskStatus()) + " " + taskInfo.getTaskType() + " task list for " + taskInfo.getTaskId().getJobID();
            Assert.assertTrue(outContent.toString().indexOf(test) > 0);
            Assert.assertTrue(outContent.toString().indexOf(taskInfo.getTaskId().toString()) > 0);
        }
    } finally {
        System.setOut(stdps);
    }
}
Also used : MRAppWithHistory(org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory) Task(org.apache.hadoop.mapreduce.v2.app.job.Task) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) TaskInfo(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo) HistoryViewer(org.apache.hadoop.mapreduce.jobhistory.HistoryViewer) JobInfo(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) TaskAttempt(org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt) Job(org.apache.hadoop.mapreduce.v2.app.job.Job) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) MRApp(org.apache.hadoop.mapreduce.v2.app.MRApp) Path(org.apache.hadoop.fs.Path) EventReader(org.apache.hadoop.mapreduce.jobhistory.EventReader) PrintStream(java.io.PrintStream) HistoryFileInfo(org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo) TaskID(org.apache.hadoop.mapreduce.TaskID) IOException(java.io.IOException) HistoryEvent(org.apache.hadoop.mapreduce.jobhistory.HistoryEvent) IOException(java.io.IOException) AMInfo(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.AMInfo) StringTokenizer(java.util.StringTokenizer) TaskFinishedEvent(org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent) JobHistoryParser(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InvocationOnMock(org.mockito.invocation.InvocationOnMock) TaskAttemptInfo(org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FileContext(org.apache.hadoop.fs.FileContext)
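
The job-summary handling in the middle of this test reduces to a small standalone sketch. The summary string below is made up and only imitates the comma-separated key=value layout the assertions rely on.

import java.util.HashMap;
import java.util.Map;
import java.util.StringTokenizer;

public class JobSummaryParsing {
    public static void main(String[] args) {
        // Hypothetical job summary line; the real one is written by the history server.
        String jobSummaryString = "jobId=job_1_0001,jobName=test,queue=default,status=SUCCEEDED";
        Map<String, String> jobSummaryElements = new HashMap<String, String>();
        StringTokenizer strToken = new StringTokenizer(jobSummaryString, ",");
        while (strToken.hasMoreTokens()) {
            // Each comma-separated token is a key=value pair.
            String keypair = strToken.nextToken();
            String[] parts = keypair.split("=", 2);
            jobSummaryElements.put(parts[0], parts[1]);
        }
        System.out.println(jobSummaryElements.get("status")); // SUCCEEDED
    }
}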

Example 53 with StringTokenizer

use of java.util.StringTokenizer in project hadoop by apache.

the class ShellBasedUnixGroupsMapping method resolveFullGroupNames.

/**
   * Split group names into a linked list.
   *
   * @param groupNames a string representing the user's group names
   * @return a linked list of group names
   */
@VisibleForTesting
protected List<String> resolveFullGroupNames(String groupNames) {
    StringTokenizer tokenizer = new StringTokenizer(groupNames, Shell.TOKEN_SEPARATOR_REGEX);
    List<String> groups = new LinkedList<String>();
    while (tokenizer.hasMoreTokens()) {
        groups.add(tokenizer.nextToken());
    }
    return groups;
}
Also used : StringTokenizer(java.util.StringTokenizer) LinkedList(java.util.LinkedList) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
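
A standalone sketch of the same splitting pattern. The group string is invented, and plain whitespace characters are assumed in place of Shell.TOKEN_SEPARATOR_REGEX.

import java.util.LinkedList;
import java.util.List;
import java.util.StringTokenizer;

public class GroupNameSplitting {
    public static void main(String[] args) {
        // Hypothetical output of a group-lookup command; delimiters assumed to be plain whitespace.
        String groupNames = "staff wheel docker";
        List<String> groups = new LinkedList<String>();
        StringTokenizer tokenizer = new StringTokenizer(groupNames, " \t\n\r\f");
        while (tokenizer.hasMoreTokens()) {
            groups.add(tokenizer.nextToken());
        }
        System.out.println(groups); // [staff, wheel, docker]
    }
}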

Example 54 with StringTokenizer

use of java.util.StringTokenizer in project hadoop by apache.

the class WebHdfsFileSystem method removeOffsetParam.

/** Remove offset parameter, if there is any, from the url */
static URL removeOffsetParam(final URL url) throws MalformedURLException {
    String query = url.getQuery();
    if (query == null) {
        return url;
    }
    final String lower = StringUtils.toLowerCase(query);
    if (!lower.startsWith(OFFSET_PARAM_PREFIX) && !lower.contains("&" + OFFSET_PARAM_PREFIX)) {
        return url;
    }
    //rebuild query
    StringBuilder b = null;
    for (final StringTokenizer st = new StringTokenizer(query, "&"); st.hasMoreTokens(); ) {
        final String token = st.nextToken();
        if (!StringUtils.toLowerCase(token).startsWith(OFFSET_PARAM_PREFIX)) {
            if (b == null) {
                b = new StringBuilder("?").append(token);
            } else {
                b.append('&').append(token);
            }
        }
    }
    query = b == null ? "" : b.toString();
    final String urlStr = url.toString();
    return new URL(urlStr.substring(0, urlStr.indexOf('?')) + query);
}
Also used : StringTokenizer(java.util.StringTokenizer) URL(java.net.URL)
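
The same rebuild-the-query technique, sketched against a hypothetical WebHDFS-style URL with a hard-coded "offset=" prefix standing in for OFFSET_PARAM_PREFIX.

import java.net.MalformedURLException;
import java.net.URL;
import java.util.StringTokenizer;

public class RemoveQueryParam {
    public static void main(String[] args) throws MalformedURLException {
        // Hypothetical URL carrying an offset parameter to be stripped.
        URL url = new URL("http://example.com/webhdfs/v1/f?op=OPEN&offset=1024&length=512");
        String query = url.getQuery();
        StringBuilder b = null;
        // Walk the query token by token, keeping everything except the offset parameter.
        for (StringTokenizer st = new StringTokenizer(query, "&"); st.hasMoreTokens(); ) {
            String token = st.nextToken();
            if (!token.toLowerCase().startsWith("offset=")) {
                b = (b == null) ? new StringBuilder("?").append(token) : b.append('&').append(token);
            }
        }
        String urlStr = url.toString();
        String rebuilt = urlStr.substring(0, urlStr.indexOf('?')) + (b == null ? "" : b.toString());
        System.out.println(rebuilt); // http://example.com/webhdfs/v1/f?op=OPEN&length=512
    }
}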

Example 55 with StringTokenizer

use of java.util.StringTokenizer in project sonarqube by SonarSource.

the class FieldChecks method validateUrl.

/**
     * Checks if a field has a valid url. Four optional variables can be
     * specified to configure url validation.
     *
     * <ul>
     *
     * <li>Variable <code>allow2slashes</code> can be set to <code>true</code>
     * or <code>false</code> to control whether two slashes are allowed -
     * default is <code>false</code> (i.e. two slashes are NOT allowed).</li>
     *
     * <li>Variable <code>nofragments</code> can be set to <code>true</code>
     * or <code>false</code> to control whether fragments are allowed -
     * default is <code>false</code> (i.e. fragments ARE allowed).</li>
     *
     * <li>Variable <code>allowallschemes</code> can be set to
     * <code>true</code> or <code>false</code> to control if all schemes are
     * allowed - default is <code>false</code> (i.e. all schemes are NOT
     * allowed).</li>
     *
     * <li>Variable <code>schemes</code> can be set to a comma delimited list
     * of valid schemes. This value is ignored if <code>allowallschemes</code>
     * is set to <code>true</code>. Default schemes allowed are "http",
     * "https" and "ftp" if this variable is not specified.</li>
     *
     * </ul>
     *
     * @param bean      The bean validation is being performed on.
     * @param va        The <code>ValidatorAction</code> that is currently
     *                  being performed.
     * @param field     The <code>Field</code> object associated with the
     *                  current field being validated.
     * @param errors    The <code>ActionMessages</code> object to add errors
     *                  to if any validation errors occur.
     * @param validator The <code>Validator</code> instance, used to access
     *                  other field values.
     * @param request   Current request object.
     * @return True if valid, false otherwise.
     */
public static boolean validateUrl(Object bean, ValidatorAction va, Field field, ActionMessages errors, Validator validator, HttpServletRequest request) {
    String value = null;
    value = evaluateBean(bean, field);
    if (GenericValidator.isBlankOrNull(value)) {
        return true;
    }
    // Get the options and schemes Vars
    String allowallschemesVar = Resources.getVarValue("allowallschemes", field, validator, request, false);
    boolean allowallschemes = "true".equalsIgnoreCase(allowallschemesVar);
    int options = allowallschemes ? UrlValidator.ALLOW_ALL_SCHEMES : 0;
    String allow2slashesVar = Resources.getVarValue("allow2slashes", field, validator, request, false);
    if ("true".equalsIgnoreCase(allow2slashesVar)) {
        options += UrlValidator.ALLOW_2_SLASHES;
    }
    String nofragmentsVar = Resources.getVarValue("nofragments", field, validator, request, false);
    if ("true".equalsIgnoreCase(nofragmentsVar)) {
        options += UrlValidator.NO_FRAGMENTS;
    }
    String schemesVar = allowallschemes ? null : Resources.getVarValue("schemes", field, validator, request, false);
    // No options or schemes - use GenericValidator as default
    if ((options == 0) && (schemesVar == null)) {
        if (GenericValidator.isUrl(value)) {
            return true;
        } else {
            errors.add(field.getKey(), Resources.getActionMessage(validator, request, va, field));
            return false;
        }
    }
    // Parse comma delimited list of schemes into a String[]
    String[] schemes = null;
    if (schemesVar != null) {
        StringTokenizer st = new StringTokenizer(schemesVar, ",");
        schemes = new String[st.countTokens()];
        int i = 0;
        while (st.hasMoreTokens()) {
            schemes[i++] = st.nextToken().trim();
        }
    }
    // Create UrlValidator and validate with options/schemes
    UrlValidator urlValidator = new UrlValidator(schemes, options);
    if (urlValidator.isValid(value)) {
        return true;
    } else {
        errors.add(field.getKey(), Resources.getActionMessage(validator, request, va, field));
        return false;
    }
}
Also used : StringTokenizer(java.util.StringTokenizer) UrlValidator(org.apache.commons.validator.UrlValidator)
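
A minimal sketch of the countTokens() sizing trick used above to turn the comma-delimited schemes variable into a String[]; the input value is hypothetical.

import java.util.Arrays;
import java.util.StringTokenizer;

public class SchemeListParsing {
    public static void main(String[] args) {
        // Hypothetical value of the "schemes" validator variable.
        String schemesVar = "http, https, ftp";
        StringTokenizer st = new StringTokenizer(schemesVar, ",");
        // countTokens() lets us size the array up front before consuming the tokens.
        String[] schemes = new String[st.countTokens()];
        int i = 0;
        while (st.hasMoreTokens()) {
            schemes[i++] = st.nextToken().trim();
        }
        System.out.println(Arrays.toString(schemes)); // [http, https, ftp]
    }
}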

Aggregations

StringTokenizer (java.util.StringTokenizer): 4881
ArrayList (java.util.ArrayList): 1083
IOException (java.io.IOException): 506
File (java.io.File): 392
BufferedReader (java.io.BufferedReader): 380
HashMap (java.util.HashMap): 375
HashSet (java.util.HashSet): 263
FileReader (java.io.FileReader): 224
List (java.util.List): 200
InputStreamReader (java.io.InputStreamReader): 191
Map (java.util.Map): 152
FileInputStream (java.io.FileInputStream): 135
Iterator (java.util.Iterator): 114
Set (java.util.Set): 114
URL (java.net.URL): 108
NoSuchElementException (java.util.NoSuchElementException): 90
Properties (java.util.Properties): 84
InputStream (java.io.InputStream): 83
BufferedWriter (java.io.BufferedWriter): 80
FileNotFoundException (java.io.FileNotFoundException): 78