Search in sources:

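All five examples below exercise org.apache.hadoop.metrics2.lib.MutableCounterLong, a monotonically increasing long counter in the Hadoop metrics2 library. For orientation, a minimal sketch of its core API; the registry and metric names here are illustrative, not taken from the examples:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

MetricsRegistry registry = new MetricsRegistry("demo");
MutableCounterLong numOps = registry.newCounter("numOps", "number of operations", 0L);
// counters only move forward: increment by one or by a delta
numOps.incr();
numOps.incr(41L);
// read the current value
long v = numOps.value();    // 42
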
Example 1 with MutableCounterLong

Use of org.apache.hadoop.metrics2.lib.MutableCounterLong in project hadoop by apache.

From class TestEntityGroupFSTimelineStore, method testPluginRead.

@Test
public void testPluginRead() throws Exception {
    // Verify precondition
    assertEquals(EntityGroupPlugInForTest.class.getName(), store.getConfig().get(YarnConfiguration.TIMELINE_SERVICE_ENTITY_GROUP_PLUGIN_CLASSES));
    List<TimelineEntityGroupPlugin> currPlugins = store.getPlugins();
    for (TimelineEntityGroupPlugin plugin : currPlugins) {
        ClassLoader pluginClassLoader = plugin.getClass().getClassLoader();
        assertTrue("Should set up ApplicationClassLoader", pluginClassLoader instanceof ApplicationClassLoader);
        URL[] paths = ((URLClassLoader) pluginClassLoader).getURLs();
        boolean foundJAR = false;
        for (URL path : paths) {
            if (path.toString().contains(testJar.getAbsolutePath())) {
                foundJAR = true;
            }
        }
        assertTrue("Not found path " + testJar.getAbsolutePath() + " for plugin " + plugin.getClass().getName(), foundJAR);
    }
    // Load data and prepare the timeline store by creating and registering a cache item
    EntityGroupFSTimelineStore.AppLogs appLogs = store.new AppLogs(mainTestAppId, mainTestAppDirPath, AppState.COMPLETED);
    EntityCacheItem cacheItem = new EntityCacheItem(EntityGroupPlugInForTest.getStandardTimelineGroupId(mainTestAppId), config);
    cacheItem.setAppLogs(appLogs);
    store.setCachedLogs(EntityGroupPlugInForTest.getStandardTimelineGroupId(mainTestAppId), cacheItem);
    MutableCounterLong detailLogEntityRead = store.metrics.getGetEntityToDetailOps();
    MutableStat cacheRefresh = store.metrics.getCacheRefresh();
    long numEntityReadBefore = detailLogEntityRead.value();
    long cacheRefreshBefore = cacheRefresh.lastStat().numSamples();
    // Generate TDM
    TimelineDataManager tdm = PluginStoreTestUtils.getTdmWithStore(config, store);
    // Verify single entity read
    TimelineEntity entity3 = tdm.getEntity("type_3", mainTestAppId.toString(), EnumSet.allOf(TimelineReader.Field.class), UserGroupInformation.getLoginUser());
    assertNotNull(entity3);
    assertEquals(entityNew.getStartTime(), entity3.getStartTime());
    // Verify multiple entities read
    NameValuePair primaryFilter = new NameValuePair(EntityGroupPlugInForTest.APP_ID_FILTER_NAME, mainTestAppId.toString());
    TimelineEntities entities = tdm.getEntities("type_3", primaryFilter, null, null, null, null, null, null, EnumSet.allOf(TimelineReader.Field.class), UserGroupInformation.getLoginUser());
    assertEquals(1, entities.getEntities().size());
    for (TimelineEntity entity : entities.getEntities()) {
        assertEquals(entityNew.getStartTime(), entity.getStartTime());
    }
    // Verify metrics
    assertEquals(numEntityReadBefore + 2L, detailLogEntityRead.value());
    assertEquals(cacheRefreshBefore + 1L, cacheRefresh.lastStat().numSamples());
}
Also used: MutableCounterLong (org.apache.hadoop.metrics2.lib.MutableCounterLong), MutableStat (org.apache.hadoop.metrics2.lib.MutableStat), TimelineEntity (org.apache.hadoop.yarn.api.records.timeline.TimelineEntity), TimelineEntities (org.apache.hadoop.yarn.api.records.timeline.TimelineEntities), ApplicationClassLoader (org.apache.hadoop.util.ApplicationClassLoader), URL (java.net.URL), URLClassLoader (java.net.URLClassLoader), Test (org.junit.Test)
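
The test above verifies metrics through both a MutableCounterLong and a MutableStat (cacheRefresh.lastStat().numSamples()). A minimal sketch of that part of the metrics2 API, using an illustrative registry and metric name:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableStat;

// MutableStat aggregates samples; lastStat().numSamples() is how the test
// above counts cache refreshes between two points in time.
MetricsRegistry registry = new MetricsRegistry("demo");
MutableStat cacheRefresh = registry.newStat("cacheRefresh", "cache refresh time", "ops", "ms", true);
// record one 12 ms refresh sample
cacheRefresh.add(12L);
long samples = cacheRefresh.lastStat().numSamples();    // 1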

Example 2 with MutableCounterLong

Use of org.apache.hadoop.metrics2.lib.MutableCounterLong in project hadoop by apache.

From class TestEntityGroupFSTimelineStore, method testCleanLogs.

@Test
public void testCleanLogs() throws Exception {
    // Create test dirs and files
    // Irrelevant file, should not be reclaimed
    String appDirName = mainTestAppId.toString();
    String attemptDirName = ApplicationAttemptId.appAttemptIdStrPrefix + appDirName + "_1";
    Path irrelevantFilePath = new Path(testDoneDirPath, "irrelevant.log");
    FSDataOutputStream stream = fs.create(irrelevantFilePath);
    stream.close();
    // Irrelevant directory, should not be reclaimed
    Path irrelevantDirPath = new Path(testDoneDirPath, "irrelevant");
    fs.mkdirs(irrelevantDirPath);
    Path doneAppHomeDir = new Path(new Path(testDoneDirPath, "0000"), "001");
    // First application, untouched after creation
    Path appDirClean = new Path(doneAppHomeDir, appDirName);
    Path attemptDirClean = new Path(appDirClean, attemptDirName);
    fs.mkdirs(attemptDirClean);
    Path filePath = new Path(attemptDirClean, "test.log");
    stream = fs.create(filePath);
    stream.close();
    // Second application, one file touched after creation
    Path appDirHoldByFile = new Path(doneAppHomeDir, appDirName + "1");
    Path attemptDirHoldByFile = new Path(appDirHoldByFile, attemptDirName);
    fs.mkdirs(attemptDirHoldByFile);
    Path filePathHold = new Path(attemptDirHoldByFile, "test1.log");
    stream = fs.create(filePathHold);
    stream.close();
    // Third application, one dir touched after creation
    Path appDirHoldByDir = new Path(doneAppHomeDir, appDirName + "2");
    Path attemptDirHoldByDir = new Path(appDirHoldByDir, attemptDirName);
    fs.mkdirs(attemptDirHoldByDir);
    Path dirPathHold = new Path(attemptDirHoldByDir, "hold");
    fs.mkdirs(dirPathHold);
    // Fourth application, empty dirs
    Path appDirEmpty = new Path(doneAppHomeDir, appDirName + "3");
    Path attemptDirEmpty = new Path(appDirEmpty, attemptDirName);
    fs.mkdirs(attemptDirEmpty);
    Path dirPathEmpty = new Path(attemptDirEmpty, "empty");
    fs.mkdirs(dirPathEmpty);
    // Should retain all logs after this run
    MutableCounterLong dirsCleaned = store.metrics.getLogsDirsCleaned();
    long before = dirsCleaned.value();
    store.cleanLogs(testDoneDirPath, fs, 10000);
    assertTrue(fs.exists(irrelevantDirPath));
    assertTrue(fs.exists(irrelevantFilePath));
    assertTrue(fs.exists(filePath));
    assertTrue(fs.exists(filePathHold));
    assertTrue(fs.exists(dirPathHold));
    assertTrue(fs.exists(dirPathEmpty));
    // Make sure the created dir is old enough
    Thread.sleep(2000);
    // Touch the second application
    stream = fs.append(filePathHold);
    stream.writeBytes("append");
    stream.close();
    // Touch the third application by creating a new dir
    fs.mkdirs(new Path(dirPathHold, "holdByMe"));
    store.cleanLogs(testDoneDirPath, fs, 1000);
    // Verification after the second cleaner call
    assertTrue(fs.exists(irrelevantDirPath));
    assertTrue(fs.exists(irrelevantFilePath));
    assertTrue(fs.exists(filePathHold));
    assertTrue(fs.exists(dirPathHold));
    assertTrue(fs.exists(doneAppHomeDir));
    // appDirClean and appDirEmpty should be cleaned up
    assertFalse(fs.exists(appDirClean));
    assertFalse(fs.exists(appDirEmpty));
    assertEquals(before + 2L, dirsCleaned.value());
}
Also used: Path (org.apache.hadoop.fs.Path), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), MutableCounterLong (org.apache.hadoop.metrics2.lib.MutableCounterLong), Test (org.junit.Test)
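
The third argument to cleanLogs is, by all appearances in this test, a retention threshold in milliseconds: the first pass with 10000 retains everything because nothing has been idle that long, while the second pass with 1000, issued after the 2000 ms sleep, reclaims only the application directories that were never touched again (appDirClean and appDirEmpty), which is exactly what the dirsCleaned counter records.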

Example 3 with MutableCounterLong

Use of org.apache.hadoop.metrics2.lib.MutableCounterLong in project hadoop by apache.

From class S3AInstrumentation, method streamCounter.

/**
   * Create a counter in the stream map: these counters are not registered
   * in the public metrics.
   * @param name counter name
   * @param desc counter description
   * @return a new counter
   */
protected final MutableCounterLong streamCounter(String name, String desc) {
    MutableCounterLong counter = new MutableCounterLong(Interns.info(name, desc), 0L);
    streamMetrics.put(name, counter);
    return counter;
}
Also used: MutableCounterLong (org.apache.hadoop.metrics2.lib.MutableCounterLong)
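
A short usage sketch of the same pattern; the containing class, the streamMetrics map, and the metric name below are stand-ins for illustration, not S3AInstrumentation's actual fields:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.metrics2.lib.Interns;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

public class StreamCountersSketch {
    // Holds counters that are deliberately kept out of the metrics registry.
    private final Map<String, MutableCounterLong> streamMetrics = new HashMap<>();

    protected final MutableCounterLong streamCounter(String name, String desc) {
        MutableCounterLong counter = new MutableCounterLong(Interns.info(name, desc), 0L);
        streamMetrics.put(name, counter);
        return counter;
    }

    public static void main(String[] args) {
        StreamCountersSketch s = new StreamCountersSketch();
        MutableCounterLong bytesRead = s.streamCounter("stream_bytes_read", "bytes read");
        // stream code increments the counter directly
        bytesRead.incr(4096L);
        System.out.println(bytesRead.value());    // 4096
    }
}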

Example 4 with MutableCounterLong

Use of org.apache.hadoop.metrics2.lib.MutableCounterLong in project hadoop by apache.

From class S3AInstrumentation, method dump.

/**
   * Dump all the metrics to a string.
   * @param prefix prefix before every entry
   * @param separator separator between name and value
   * @param suffix suffix after every entry
   * @param all include all metrics, even those whose values are unchanged
   * @return a string dump of the metrics
   */
public String dump(String prefix, String separator, String suffix, boolean all) {
    MetricStringBuilder metricBuilder = new MetricStringBuilder(null, prefix, separator, suffix);
    registry.snapshot(metricBuilder, all);
    for (Map.Entry<String, MutableCounterLong> entry : streamMetrics.entrySet()) {
        metricBuilder.tuple(entry.getKey(), Long.toString(entry.getValue().value()));
    }
    return metricBuilder.toString();
}
Also used: MetricStringBuilder (org.apache.hadoop.metrics2.MetricStringBuilder), MutableCounterLong (org.apache.hadoop.metrics2.lib.MutableCounterLong), HashMap (java.util.HashMap), Map (java.util.Map)
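
A minimal, self-contained sketch of the same snapshot-plus-tuple pattern against an illustrative registry; the prefix, separator, and metric names are examples, not S3A's real ones:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.metrics2.MetricStringBuilder;
import org.apache.hadoop.metrics2.lib.Interns;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

MetricsRegistry registry = new MetricsRegistry("demo");
MutableCounterLong ops = registry.newCounter("ops", "operations", 0L);
ops.incr();

// unregistered counters, as produced by streamCounter() in Example 3
Map<String, MutableCounterLong> streamMetrics = new HashMap<>();
streamMetrics.put("stream_bytes_read",
    new MutableCounterLong(Interns.info("stream_bytes_read", "bytes read"), 0L));

MetricStringBuilder metricBuilder = new MetricStringBuilder(null, "demo.", " = ", "\n");
// registered metrics come from the registry snapshot...
registry.snapshot(metricBuilder, true);
// ...and the unregistered stream counters are appended by hand
for (Map.Entry<String, MutableCounterLong> entry : streamMetrics.entrySet()) {
    metricBuilder.tuple(entry.getKey(), Long.toString(entry.getValue().value()));
}
System.out.print(metricBuilder.toString());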

Example 5 with MutableCounterLong

Use of org.apache.hadoop.metrics2.lib.MutableCounterLong in project hadoop by apache.

From class TestRPC, method testReaderExceptions.

@Test(timeout = 30000)
public void testReaderExceptions() throws Exception {
    Server server = null;
    TestRpcService proxy = null;
    // The test will attempt to return this exception from a reader, both
    // with and without the connection closing.
    IOException expectedIOE = new TestReaderException("testing123");
    @SuppressWarnings("serial") IOException rseError = new RpcServerException("keepalive", expectedIOE) {

        @Override
        public RpcStatusProto getRpcStatusProto() {
            return RpcStatusProto.ERROR;
        }
    };
    @SuppressWarnings("serial") IOException rseFatal = new RpcServerException("disconnect", expectedIOE) {

        @Override
        public RpcStatusProto getRpcStatusProto() {
            return RpcStatusProto.FATAL;
        }
    };
    try {
        RPC.Builder builder = newServerBuilder(conf).setQueueSizePerHandler(1).setNumHandlers(1).setVerbose(true);
        server = setupTestServer(builder);
        Whitebox.setInternalState(server, "rpcRequestClass", FakeRequestClass.class);
        MutableCounterLong authMetric = (MutableCounterLong) Whitebox.getInternalState(server.getRpcMetrics(), "rpcAuthorizationSuccesses");
        proxy = getClient(addr, conf);
        boolean isDisconnected = true;
        Connection lastConn = null;
        long expectedAuths = 0;
        // fuzz the client.
        for (int i = 0; i < 128; i++) {
            String reqName = "request[" + i + "]";
            int r = ThreadLocalRandom.current().nextInt();
            final boolean doDisconnect = r % 4 == 0;
            LOG.info("TestDisconnect request[" + i + "] " + " shouldConnect=" + isDisconnected + " willDisconnect=" + doDisconnect);
            if (isDisconnected) {
                expectedAuths++;
            }
            try {
                FakeRequestClass.exception = doDisconnect ? rseFatal : rseError;
                proxy.ping(null, newEmptyRequest());
                fail(reqName + " didn't fail");
            } catch (ServiceException e) {
                RemoteException re = (RemoteException) e.getCause();
                assertEquals(reqName, expectedIOE, re.unwrapRemoteException());
            }
            // check authorizations to ensure new connection when expected,
            // then conclusively determine if connections are disconnected
            // correctly.
            assertEquals(reqName, expectedAuths, authMetric.value());
            if (!doDisconnect) {
                // if it wasn't fatal, verify there's only one open connection.
                Connection[] conns = server.getConnections();
                assertEquals(reqName, 1, conns.length);
                // verify whether the connection should have been reused.
                if (isDisconnected) {
                    assertNotSame(reqName, lastConn, conns[0]);
                } else {
                    assertSame(reqName, lastConn, conns[0]);
                }
                lastConn = conns[0];
            } else if (lastConn != null) {
                // avoid race condition in server where connection may not be
                // fully removed yet.  just make sure it's marked for being closed.
                // the open connection checks above ensure correct behavior.
                assertTrue(reqName, lastConn.shouldClose());
            }
            isDisconnected = doDisconnect;
        }
    } finally {
        stop(server, proxy);
    }
}
Also used: MutableCounterLong (org.apache.hadoop.metrics2.lib.MutableCounterLong), Connection (org.apache.hadoop.ipc.Server.Connection), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), ServiceException (com.google.protobuf.ServiceException), Test (org.junit.Test)
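
Note that the test obtains rpcAuthorizationSuccesses through Whitebox reflection, presumably because RpcMetrics does not expose a public getter for that internal counter; once in hand, it is read like any other MutableCounterLong through value().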

Aggregations

MutableCounterLong (org.apache.hadoop.metrics2.lib.MutableCounterLong): 7
Test (org.junit.Test): 5
TimelineEntities (org.apache.hadoop.yarn.api.records.timeline.TimelineEntities): 2
TimelineEntity (org.apache.hadoop.yarn.api.records.timeline.TimelineEntity): 2
ServiceException (com.google.protobuf.ServiceException): 1
IOException (java.io.IOException): 1
InterruptedIOException (java.io.InterruptedIOException): 1
URL (java.net.URL): 1
URLClassLoader (java.net.URLClassLoader): 1
HashMap (java.util.HashMap): 1
Map (java.util.Map): 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1
Path (org.apache.hadoop.fs.Path): 1
Connection (org.apache.hadoop.ipc.Server.Connection): 1
MetricStringBuilder (org.apache.hadoop.metrics2.MetricStringBuilder): 1
MutableStat (org.apache.hadoop.metrics2.lib.MutableStat): 1
ApplicationClassLoader (org.apache.hadoop.util.ApplicationClassLoader): 1