Use of java.util.HashSet in project hadoop by Apache.
From the class TestStatsDMetrics, method testPutMetrics2:
@Test(timeout = 3000)
public void testPutMetrics2() throws IOException {
  StatsDSink sink = new StatsDSink();
  List<MetricsTag> tags = new ArrayList<MetricsTag>();
  tags.add(new MetricsTag(MsInfo.Hostname, null));
  tags.add(new MetricsTag(MsInfo.Context, "jvm"));
  tags.add(new MetricsTag(MsInfo.ProcessName, "process"));
  Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
  metrics.add(makeMetric("foo1", 1, MetricType.COUNTER));
  metrics.add(makeMetric("foo2", 2, MetricType.GAUGE));
  MetricsRecord record =
      new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
  try (DatagramSocket sock = new DatagramSocket()) {
    sock.setReceiveBufferSize(8192);
    final StatsDSink.StatsD mockStatsD =
        new StatsD(sock.getLocalAddress().getHostName(), sock.getLocalPort());
    Whitebox.setInternalState(sink, "statsd", mockStatsD);
    final DatagramPacket p = new DatagramPacket(new byte[8192], 8192);
    sink.putMetrics(record);
    sock.receive(p);
    String result =
        new String(p.getData(), 0, p.getLength(), Charset.forName("UTF-8"));
    assertTrue("Received data did not match data sent",
        result.equals("process.jvm.Context.foo1:1|c")
            || result.equals("process.jvm.Context.foo2:2|g"));
  } finally {
    sink.close();
  }
}
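The makeMetric helper is not part of the snippet. A plausible sketch, assuming a Mockito mock of AbstractMetric whose name(), value(), and type() accessors are the ones StatsDSink reads (the real helper in TestStatsDMetrics may differ):

private AbstractMetric makeMetric(String name, Number value, MetricType type) {
  // Hypothetical reconstruction: stub only the accessors the sink reads.
  AbstractMetric metric = mock(AbstractMetric.class);
  when(metric.name()).thenReturn(name);
  when(metric.value()).thenReturn(value);
  when(metric.type()).thenReturn(type);
  return metric;
}

Because the metrics sit in a HashSet, either datagram may arrive first, which is why the assertion accepts both renderings.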
Use of java.util.HashSet in project hadoop by Apache.
From the class TestGraphiteMetrics, method testPutMetrics:
@Test
public void testPutMetrics() throws IOException {
  GraphiteSink sink = new GraphiteSink();
  List<MetricsTag> tags = new ArrayList<MetricsTag>();
  tags.add(new MetricsTag(MsInfo.Context, "all"));
  tags.add(new MetricsTag(MsInfo.Hostname, "host"));
  Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
  metrics.add(makeMetric("foo1", 1.25));
  metrics.add(makeMetric("foo2", 2.25));
  MetricsRecord record =
      new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
  ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
  final GraphiteSink.Graphite mockGraphite = makeGraphite();
  Whitebox.setInternalState(sink, "graphite", mockGraphite);
  sink.putMetrics(record);
  verify(mockGraphite).write(argument.capture());
  // HashSet iteration order is unspecified, so accept either metric order.
  String result = argument.getValue();
  assertTrue(result.equals(
      "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n"
          + "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n")
      || result.equals(
          "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n"
              + "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n"));
}
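Neither makeMetric(String, double) nor makeGraphite() appears in the snippet. A plausible sketch of both helpers, again assuming Mockito mocks (the exact bodies in TestGraphiteMetrics may differ):

private AbstractMetric makeMetric(String name, Number value) {
  // Hypothetical reconstruction of the two-argument overload.
  AbstractMetric metric = mock(AbstractMetric.class);
  when(metric.name()).thenReturn(name);
  when(metric.value()).thenReturn(value);
  return metric;
}

private GraphiteSink.Graphite makeGraphite() {
  // Reports itself connected so putMetrics() writes without reconnecting.
  GraphiteSink.Graphite mockGraphite = mock(GraphiteSink.Graphite.class);
  when(mockGraphite.isConnected()).thenReturn(true);
  return mockGraphite;
}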
Use of java.util.HashSet in project hadoop by Apache.
From the class TestGraphiteMetrics, method testFailureAndPutMetrics:
@Test
public void testFailureAndPutMetrics() throws IOException {
  GraphiteSink sink = new GraphiteSink();
  List<MetricsTag> tags = new ArrayList<MetricsTag>();
  tags.add(new MetricsTag(MsInfo.Context, "all"));
  tags.add(new MetricsTag(MsInfo.Hostname, "host"));
  Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
  metrics.add(makeMetric("foo1", 1.25));
  metrics.add(makeMetric("foo2", 2.25));
  MetricsRecord record =
      new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
  final GraphiteSink.Graphite mockGraphite = makeGraphite();
  Whitebox.setInternalState(sink, "graphite", mockGraphite);
  // Throw an exception on the first write; the sink should close the connection.
  doThrow(new IOException("IO exception")).when(mockGraphite).write(anyString());
  sink.putMetrics(record);
  verify(mockGraphite).write(anyString());
  verify(mockGraphite).close();
  // Reset the mock and try again; reporting the connection as closed
  // forces the sink to reconnect before the second write.
  reset(mockGraphite);
  when(mockGraphite.isConnected()).thenReturn(false);
  ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
  sink.putMetrics(record);
  verify(mockGraphite).write(argument.capture());
  String result = argument.getValue();
  assertTrue(result.equals(
      "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n"
          + "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n")
      || result.equals(
          "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n"
              + "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n"));
}
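The fail-then-recover choreography above is a reusable Mockito pattern: doThrow() arms a void method to fail, and reset() wipes both stubbing and recorded invocations, which is why isConnected() must be re-stubbed afterwards. A self-contained sketch against a hypothetical LineWriter interface, defined only for this illustration:

import static org.mockito.Mockito.*;

import java.io.Closeable;
import java.io.IOException;

public class FailThenRecoverExample {
  interface LineWriter extends Closeable {
    void write(String line) throws IOException;
  }

  public static void main(String[] args) throws IOException {
    LineWriter writer = mock(LineWriter.class);
    // First phase: every write fails.
    doThrow(new IOException("boom")).when(writer).write(anyString());
    try {
      writer.write("first");
    } catch (IOException expected) {
      writer.close(); // a sink would close its connection here
    }
    verify(writer).write(anyString());
    verify(writer).close();
    // reset() discards both stubbing and recorded calls, so anything the
    // code under test still depends on must be stubbed again.
    reset(writer);
    writer.write("second"); // no longer throws
    verify(writer).write("second");
  }
}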
Use of java.util.HashSet in project hadoop by Apache.
From the class TestMiniKdc, method testKerberosLogin:
@Test
public void testKerberosLogin() throws Exception {
  MiniKdc kdc = getKdc();
  File workDir = getWorkDir();
  LoginContext loginContext = null;
  try {
    String principal = "foo";
    File keytab = new File(workDir, "foo.keytab");
    kdc.createPrincipal(keytab, principal);
    Set<Principal> principals = new HashSet<Principal>();
    principals.add(new KerberosPrincipal(principal));
    // client login
    Subject subject = new Subject(false, principals,
        new HashSet<Object>(), new HashSet<Object>());
    loginContext = new LoginContext("", subject, null,
        KerberosConfiguration.createClientConfig(principal, keytab));
    loginContext.login();
    subject = loginContext.getSubject();
    Assert.assertEquals(1, subject.getPrincipals().size());
    Assert.assertEquals(KerberosPrincipal.class,
        subject.getPrincipals().iterator().next().getClass());
    Assert.assertEquals(principal + "@" + kdc.getRealm(),
        subject.getPrincipals().iterator().next().getName());
    loginContext.logout();
    // server login
    subject = new Subject(false, principals,
        new HashSet<Object>(), new HashSet<Object>());
    loginContext = new LoginContext("", subject, null,
        KerberosConfiguration.createServerConfig(principal, keytab));
    loginContext.login();
    subject = loginContext.getSubject();
    Assert.assertEquals(1, subject.getPrincipals().size());
    Assert.assertEquals(KerberosPrincipal.class,
        subject.getPrincipals().iterator().next().getClass());
    Assert.assertEquals(principal + "@" + kdc.getRealm(),
        subject.getPrincipals().iterator().next().getName());
    loginContext.logout();
  } finally {
    if (loginContext != null) {
      loginContext.logout();
    }
  }
}
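KerberosConfiguration is a test helper not shown here. A hedged sketch of what createClientConfig and createServerConfig plausibly return: a JAAS Configuration driving a Krb5LoginModule keytab login, with the client/server split expressed through the standard isInitiator option. The option names below are the standard Krb5LoginModule ones, but the real helper's exact set may differ:

import java.io.File;
import java.util.HashMap;
import java.util.Map;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;

class KerberosConfiguration extends Configuration {
  private final String principal;
  private final String keytab;
  private final boolean isInitiator; // true for client login, false for server

  private KerberosConfiguration(String principal, File keytab, boolean client) {
    this.principal = principal;
    this.keytab = keytab.getAbsolutePath();
    this.isInitiator = client;
  }

  static Configuration createClientConfig(String principal, File keytab) {
    return new KerberosConfiguration(principal, keytab, true);
  }

  static Configuration createServerConfig(String principal, File keytab) {
    return new KerberosConfiguration(principal, keytab, false);
  }

  @Override
  public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
    Map<String, String> options = new HashMap<String, String>();
    options.put("principal", principal);
    options.put("useKeyTab", "true");
    options.put("keyTab", keytab);
    options.put("storeKey", "true");
    options.put("doNotPrompt", "true");
    options.put("isInitiator", Boolean.toString(isInitiator));
    return new AppConfigurationEntry[] { new AppConfigurationEntry(
        "com.sun.security.auth.module.Krb5LoginModule", // Sun/Oracle JVMs
        AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, options) };
  }
}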
Use of java.util.HashSet in project hadoop by Apache.
From the class DatasetVolumeChecker, method checkVolume:
/**
 * Check a single volume asynchronously, returning a {@link ListenableFuture}
 * that can be used to retrieve the final result.
 *
 * If the volume cannot be referenced then it is already closed and
 * cannot be checked. No error is propagated to the callback.
 *
 * @param volume the volume that is to be checked.
 * @param callback callback to be invoked when the volume check completes.
 * @return true if the check was scheduled and the callback will be invoked.
 *         false otherwise.
 */
public boolean checkVolume(final FsVolumeSpi volume, Callback callback) {
  if (volume == null) {
    LOG.debug("Cannot schedule check on null volume");
    return false;
  }
  FsVolumeReference volumeReference;
  try {
    volumeReference = volume.obtainReference();
  } catch (ClosedChannelException e) {
    // The volume has already been closed.
    return false;
  }
  Optional<ListenableFuture<VolumeCheckResult>> olf =
      delegateChecker.schedule(volume, IGNORED_CONTEXT);
  if (olf.isPresent()) {
    numVolumeChecks.incrementAndGet();
    Futures.addCallback(olf.get(),
        new ResultHandler(volumeReference, new HashSet<>(), new HashSet<>(),
            new AtomicLong(1), callback));
    return true;
  } else {
    IOUtils.cleanup(null, volumeReference);
  }
  return false;
}
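A hedged usage sketch follows. The two-set Callback signature is an assumption inferred from the healthy/failed sets handed to ResultHandler, and checker and volume stand in for a live DatasetVolumeChecker and a final FsVolumeSpi reference:

boolean scheduled = checker.checkVolume(volume, new DatasetVolumeChecker.Callback() {
  @Override
  public void call(Set<FsVolumeSpi> healthyVolumes,
                   Set<FsVolumeSpi> failedVolumes) {
    if (failedVolumes.contains(volume)) {
      LOG.warn("Volume {} failed its health check", volume);
    }
  }
});
if (!scheduled) {
  // A null volume, an already-closed volume, or a check the delegate
  // checker declined to schedule (e.g. one run too recently) lands here.
  LOG.debug("Volume check was not scheduled for {}", volume);
}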