Use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache.
The class CountersBlock, method render().
@Override
protected void render(Block html) {
if (job == null) {
html.p()._("Sorry, no counters for nonexistent", $(JOB_ID, "job"))._();
return;
}
if (!$(TASK_ID).isEmpty() && task == null) {
html.p()._("Sorry, no counters for nonexistent", $(TASK_ID, "task"))._();
return;
}
if (total == null || total.getGroupNames() == null || total.countCounters() == 0) {
String type = $(TASK_ID);
if (type == null || type.isEmpty()) {
type = $(JOB_ID, "the job");
}
html.p()._("Sorry it looks like ", type, " has no counters.")._();
return;
}
String urlBase;
String urlId;
if (task != null) {
urlBase = "singletaskcounter";
urlId = MRApps.toString(task.getID());
} else {
urlBase = "singlejobcounter";
urlId = MRApps.toString(job.getID());
}
int numGroups = 0;
TBODY<TABLE<DIV<Hamlet>>> tbody = html.div(_INFO_WRAP).table("#counters").
    thead().
      tr().
        th(".group.ui-state-default", "Counter Group").
        th(".ui-state-default", "Counters")._()._().
    tbody();
for (CounterGroup g : total) {
CounterGroup mg = map == null ? null : map.getGroup(g.getName());
CounterGroup rg = reduce == null ? null : reduce.getGroup(g.getName());
++numGroups;
// This is mostly for demonstration :) Typically we'd introduce
// a CounterGroup block to reduce the verbosity. OTOH, this
// serves as an indicator of where we are in the tag hierarchy.
TR<THEAD<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>>> groupHeadRow = tbody.
    tr().
      th().$title(g.getName()).$class("ui-state-default").
        _(fixGroupDisplayName(g.getDisplayName()))._().
      td().$class(C_TABLE).
        table(".dt-counters").$id(job.getID() + "." + g.getName()).
          thead().
            tr().th(".name", "Name");
if (map != null) {
groupHeadRow.th("Map").th("Reduce");
}
// Ditto
TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>> group = groupHeadRow.th(map == null ? "Value" : "Total")._()._().tbody();
for (Counter counter : g) {
// Ditto
TR<TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>>> groupRow = group.tr();
if (task == null && mg == null && rg == null) {
groupRow.td().$title(counter.getName())._(counter.getDisplayName())._();
} else {
groupRow.td().$title(counter.getName()).a(url(urlBase, urlId, g.getName(), counter.getName()), counter.getDisplayName())._();
}
if (map != null) {
Counter mc = mg == null ? null : mg.findCounter(counter.getName());
Counter rc = rg == null ? null : rg.findCounter(counter.getName());
groupRow.td(mc == null ? "0" : String.format("%,d", mc.getValue())).td(rc == null ? "0" : String.format("%,d", rc.getValue()));
}
groupRow.td(String.format("%,d", counter.getValue()))._();
}
group._()._()._()._();
}
tbody._()._()._();
}
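The render() method above walks a Counters object group by group and counter by counter to build the HTML table. A minimal sketch of the same traversal without the Hamlet layer, assuming the Counters instance comes from a finished job (the class and method names here are illustrative, not part of the project):

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;

public class CounterDump {
  public static void dump(Counters total) {
    if (total == null || total.countCounters() == 0) {
      System.out.println("No counters.");
      return;
    }
    // Counters is Iterable<CounterGroup>, and each CounterGroup is Iterable<Counter>.
    for (CounterGroup group : total) {
      System.out.println(group.getDisplayName());
      for (Counter counter : group) {
        System.out.printf("  %s = %,d%n", counter.getDisplayName(), counter.getValue());
      }
    }
  }
}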
Use of org.apache.hadoop.mapreduce.Counter in project hadoop by apache.
The class SingleCounterBlock, method populateMembers().
private void populateMembers(AppContext ctx) {
JobId jobID = null;
TaskId taskID = null;
String tid = $(TASK_ID);
if ($(TITLE).contains("MAPS")) {
counterType = TaskType.MAP;
} else if ($(TITLE).contains("REDUCES")) {
counterType = TaskType.REDUCE;
} else {
counterType = null;
}
if (!tid.isEmpty()) {
taskID = MRApps.toTaskID(tid);
jobID = taskID.getJobId();
} else {
String jid = $(JOB_ID);
if (!jid.isEmpty()) {
jobID = MRApps.toJobID(jid);
}
}
if (jobID == null) {
return;
}
job = ctx.getJob(jobID);
if (job == null) {
return;
}
if (taskID != null) {
task = job.getTask(taskID);
if (task == null) {
return;
}
for (Map.Entry<TaskAttemptId, TaskAttempt> entry : task.getAttempts().entrySet()) {
long value = 0;
Counters counters = entry.getValue().getCounters();
CounterGroup group = (counters != null) ? counters.getGroup($(COUNTER_GROUP)) : null;
if (group != null) {
Counter c = group.findCounter($(COUNTER_NAME));
if (c != null) {
value = c.getValue();
}
}
values.put(MRApps.toString(entry.getKey()), value);
}
return;
}
// Get all types of counters
Map<TaskId, Task> tasks = job.getTasks();
for (Map.Entry<TaskId, Task> entry : tasks.entrySet()) {
long value = 0;
Counters counters = entry.getValue().getCounters();
CounterGroup group = (counters != null) ? counters.getGroup($(COUNTER_GROUP)) : null;
if (group != null) {
Counter c = group.findCounter($(COUNTER_NAME));
if (c != null) {
value = c.getValue();
}
}
if (counterType == null || counterType == entry.getValue().getType()) {
values.put(MRApps.toString(entry.getKey()), value);
}
}
}
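populateMembers() repeats the same defensive lookup of a single named counter, first per task attempt and then per task. A minimal sketch of that lookup pattern pulled into a helper, assuming only the mapreduce Counters API (the group and counter names in the usage comment are hypothetical examples):

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.CounterGroup;
import org.apache.hadoop.mapreduce.Counters;

final class CounterLookup {
  // Returns the counter's value, or 0 when any step of the lookup yields null,
  // mirroring the null checks in populateMembers() above.
  static long valueOf(Counters counters, String groupName, String counterName) {
    if (counters == null) {
      return 0L;
    }
    CounterGroup group = counters.getGroup(groupName);
    if (group == null) {
      return 0L;
    }
    Counter c = group.findCounter(counterName);
    return c == null ? 0L : c.getValue();
  }
}

// Example call (hypothetical names):
// long mapInputs = CounterLookup.valueOf(counters,
//     "org.apache.hadoop.mapreduce.TaskCounter", "MAP_INPUT_RECORDS");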
Use of org.apache.hadoop.mapreduce.Counter in project h2o-2 by h2oai.
The class h2omapper, method run2().
private int run2(Context context) throws IOException, InterruptedException {
Configuration conf = context.getConfiguration();
String mapredTaskId = conf.get("mapred.task.id");
Text textId = new Text(mapredTaskId);
emitLogHeader(context, mapredTaskId);
Log.POST(10, "After emitLogHeader");
Counter counter = context.getCounter(H2O_MAPPER_COUNTER.HADOOP_COUNTER_HEARTBEAT);
Thread counterThread = new CounterThread(context, counter);
counterThread.start();
String mapredLocalDir = conf.get("mapred.local.dir");
String ice_root;
if (mapredLocalDir.contains(",")) {
ice_root = mapredLocalDir.split(",")[0];
} else {
ice_root = mapredLocalDir;
}
String jobtrackerName = conf.get(H2O_JOBTRACKERNAME_KEY);
context.write(textId, new Text("mapred.local.dir is " + ice_root));
String driverIp = conf.get(H2O_DRIVER_IP_KEY);
String driverPortString = conf.get(H2O_DRIVER_PORT_KEY);
String network = conf.get(H2O_NETWORK_KEY);
String manyColsString = conf.get(H2O_MANYCOLS_KEY);
String chunkBytesString = conf.get(H2O_CHUNKBITS_KEY);
String dataMaxFactorLevelsString = conf.get(H2O_DATAMAXFACTORLEVELS_KEY);
String nthreadsString = conf.get(H2O_NTHREADS_KEY);
String basePortString = conf.get(H2O_BASE_PORT_KEY);
String betaString = conf.get(H2O_BETA_KEY);
String randomUdpDropString = conf.get(H2O_RANDOM_UDP_DROP_KEY);
String licenseData = conf.get(H2O_LICENSE_DATA_KEY);
String hadoopVersion = conf.get(H2O_HADOOP_VERSION);
String gaOptOut = conf.get(H2O_GA_OPTOUT);
ServerSocket ss = new ServerSocket();
InetSocketAddress sa = new InetSocketAddress("127.0.0.1", 0);
ss.bind(sa);
String localPortString = Integer.toString(ss.getLocalPort());
List<String> argsList = new ArrayList<String>();
// Options used by H2O.
argsList.add("-ice_root");
argsList.add(ice_root);
argsList.add("-name");
argsList.add(jobtrackerName);
argsList.add("-hdfs_skip");
if (network != null) {
if (network.length() > 0) {
argsList.add("-network");
argsList.add(network);
}
}
if (nthreadsString != null) {
if (nthreadsString.length() > 0) {
argsList.add("-nthreads");
int nthreads = Integer.parseInt(nthreadsString);
argsList.add(Integer.toString(nthreads));
}
}
if (basePortString != null) {
if (basePortString.length() > 0) {
argsList.add("-baseport");
int basePort = Integer.parseInt(basePortString);
argsList.add(Integer.toString(basePort));
}
}
if (dataMaxFactorLevelsString != null) {
if (dataMaxFactorLevelsString.length() > 0) {
argsList.add("-data_max_factor_levels");
int dataMaxFactorLevels = Integer.parseInt(dataMaxFactorLevelsString);
argsList.add(Integer.toString(dataMaxFactorLevels));
}
}
if (manyColsString != null) {
if (manyColsString.length() > 0) {
argsList.add("-many_cols");
}
}
if (chunkBytesString != null) {
if (chunkBytesString.length() > 0) {
argsList.add("-chunk_bytes");
int chunkBytes = Integer.parseInt(chunkBytesString);
argsList.add(Integer.toString(chunkBytes));
}
}
if (betaString != null) {
if (betaString.length() > 0) {
argsList.add(betaString);
}
}
if (randomUdpDropString != null) {
if (randomUdpDropString.length() > 0) {
argsList.add(randomUdpDropString);
}
}
if (licenseData != null) {
if (licenseData.length() > 0) {
Log.POST(100, "Before writing license file");
Log.POST(101, ice_root);
File f = new File(ice_root);
boolean b = f.exists();
Log.POST(102, b ? "exists" : "does not exist");
if (!b) {
Log.POST(103, "before mkdirs()");
f.mkdirs();
Log.POST(104, "after mkdirs()");
}
String fileName = ice_root + File.separator + "h2o_license.txt";
PrintWriter out = new PrintWriter(fileName);
out.print(licenseData);
out.close();
argsList.add("-license");
argsList.add(fileName);
}
}
if (hadoopVersion != null) {
argsList.add("-ga_hadoop_ver");
argsList.add(hadoopVersion);
}
if (gaOptOut != null)
argsList.add(gaOptOut);
// Options passed through to UserMain for configuring the EmbeddedH2OConfig.
argsList.add("-driverip");
argsList.add(driverIp);
argsList.add("-driverport");
argsList.add(driverPortString);
argsList.add("-mapperport");
argsList.add(localPortString);
context.write(textId, new Text("before water.Boot.main()"));
String[] args = argsList.toArray(new String[0]);
try {
Log.POST(11, "Before boot");
water.Boot.main(UserMain.class, args);
Log.POST(12, "After boot");
} catch (Exception e) {
Log.POST(13, "Exception in boot");
Log.POST(13, "");
context.write(textId, new Text("exception in water.Boot.main()"));
String s = e.getMessage();
if (s == null) {
s = "(null exception message)";
}
Log.POST(13, s);
Log.POST(13, "");
context.write(textId, new Text(s));
s = e.toString();
if (s == null) {
s = "(null exception toString)";
}
Log.POST(13, s);
Log.POST(13, "");
context.write(textId, new Text(s));
StackTraceElement[] els = e.getStackTrace();
for (int i = 0; i < els.length; i++) {
StackTraceElement el = els[i];
s = el.toString();
Log.POST(13, s);
context.write(textId, new Text(" " + s));
}
} finally {
Log.POST(14, "Top of finally");
context.write(textId, new Text("after water.Boot.main()"));
}
Log.POST(15, "Waiting for exit");
// EmbeddedH2OConfig will send a one-byte exit status to this socket.
Socket sock = ss.accept();
System.out.println("Wait for exit woke up from accept");
byte[] b = new byte[1];
InputStream is = sock.getInputStream();
int expectedBytes = 1;
int receivedBytes = 0;
while (receivedBytes < expectedBytes) {
int n = is.read(b, receivedBytes, expectedBytes - receivedBytes);
System.out.println("is.read returned " + n);
if (n < 0) {
System.exit(112);
}
receivedBytes += n;
}
int exitStatus = (int) b[0];
System.out.println("Received exitStatus " + exitStatus);
return exitStatus;
}
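run2() obtains a heartbeat counter from the task context and hands it to a CounterThread whose implementation is not shown here. A hedged sketch of what such a thread might look like, assuming its job is simply to keep bumping the counter while the embedded H2O node runs so the long-lived task shows activity (the class name and interval are assumptions, not the project's actual code):

import org.apache.hadoop.mapreduce.Counter;

class HeartbeatThread extends Thread {
  private final Counter counter;
  private final long intervalMillis;

  HeartbeatThread(Counter counter, long intervalMillis) {
    this.counter = counter;
    this.intervalMillis = intervalMillis;
    setDaemon(true); // do not keep the JVM alive after the mapper returns
  }

  @Override
  public void run() {
    try {
      while (!Thread.currentThread().isInterrupted()) {
        counter.increment(1); // periodic counter updates show the task is still alive
        Thread.sleep(intervalMillis);
      }
    } catch (InterruptedException ignored) {
      // exit quietly when the mapper shuts the thread down
    }
  }
}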
Use of org.apache.hadoop.mapreduce.Counter in project Gaffer by gchq.
The class SampleDataAndCreateSplitsFileTool, method run().
@Override
public int run(final String[] strings) throws OperationException {
try {
LOGGER.info("Creating job using SampleDataForSplitPointsJobFactory");
job = new SampleDataForSplitPointsJobFactory().createJob(operation, store);
} catch (final IOException e) {
LOGGER.error("Failed to create Hadoop job: {}", e.getMessage());
throw new OperationException("Failed to create the Hadoop job: " + e.getMessage(), e);
}
try {
LOGGER.info("Running SampleDataForSplitPoints job (job name is {})", job.getJobName());
job.waitForCompletion(true);
} catch (final IOException | InterruptedException | ClassNotFoundException e) {
LOGGER.error("Exception running job: {}", e.getMessage());
throw new OperationException("Error while waiting for job to complete: " + e.getMessage(), e);
}
try {
if (!job.isSuccessful()) {
LOGGER.error("Job was not successful (job name is {})", job.getJobName());
throw new OperationException("Error running job");
}
} catch (final IOException e) {
LOGGER.error("Exception running job: {}", e.getMessage());
throw new OperationException("Error running job" + e.getMessage(), e);
}
// Find the number of records output
// NB In the following line use mapred.Task.Counter.REDUCE_OUTPUT_RECORDS rather than
// mapreduce.TaskCounter.REDUCE_OUTPUT_RECORDS as this is more compatible with earlier
// versions of Hadoop.
Counter counter;
try {
counter = job.getCounters().findCounter(Task.Counter.REDUCE_OUTPUT_RECORDS);
LOGGER.info("Number of records output = {}", counter);
} catch (final IOException e) {
LOGGER.error("Failed to get counter org.apache.hadoop.mapred.Task.Counter.REDUCE_OUTPUT_RECORDS from job: {}", e.getMessage());
throw new OperationException("Failed to get counter: " + Task.Counter.REDUCE_OUTPUT_RECORDS, e);
}
int numberTabletServers;
try {
numberTabletServers = store.getConnection().instanceOperations().getTabletServers().size();
LOGGER.info("Number of tablet servers is {}", numberTabletServers);
} catch (final StoreException e) {
LOGGER.error("Exception thrown getting number of tablet servers: {}", e.getMessage());
throw new OperationException(e.getMessage(), e);
}
long outputEveryNthRecord = counter.getValue() / (numberTabletServers - 1);
final Path resultsFile = new Path(operation.getOutputPath(), "part-r-00000");
LOGGER.info("Will output every {}-th record from {}", outputEveryNthRecord, resultsFile);
// Read through resulting file, pick out the split points and write to file.
final Configuration conf = getConf();
final FileSystem fs;
try {
fs = FileSystem.get(conf);
} catch (final IOException e) {
LOGGER.error("Exception getting filesystem: {}", e.getMessage());
throw new OperationException("Failed to get filesystem from configuration: " + e.getMessage(), e);
}
LOGGER.info("Writing splits to {}", operation.getResultingSplitsFilePath());
final Key key = new Key();
final Value value = new Value();
long count = 0;
int numberSplitPointsOutput = 0;
try (final SequenceFile.Reader reader = new SequenceFile.Reader(fs, resultsFile, conf);
final PrintStream splitsWriter = new PrintStream(new BufferedOutputStream(fs.create(new Path(operation.getResultingSplitsFilePath()), true)), false, CommonConstants.UTF_8)) {
while (reader.next(key, value) && numberSplitPointsOutput < numberTabletServers - 1) {
count++;
if (count % outputEveryNthRecord == 0) {
LOGGER.debug("Outputting split point number {} ({})", numberSplitPointsOutput, Base64.encodeBase64(key.getRow().getBytes()));
numberSplitPointsOutput++;
splitsWriter.println(new String(Base64.encodeBase64(key.getRow().getBytes()), CommonConstants.UTF_8));
}
}
LOGGER.info("Total number of records read was {}", count);
} catch (final IOException e) {
LOGGER.error("Exception reading results file and outputting split points: {}", e.getMessage());
throw new OperationException(e.getMessage(), e);
}
try {
fs.delete(resultsFile, true);
LOGGER.info("Deleted the results file {}", resultsFile);
} catch (final IOException e) {
LOGGER.error("Failed to delete the results file {}", resultsFile);
throw new OperationException("Failed to delete the results file: " + e.getMessage(), e);
}
return SUCCESS_RESPONSE;
}
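The comment above explains that the deprecated mapred.Task.Counter enum is used for compatibility with earlier Hadoop versions. On newer releases the same value can be read through the mapreduce API; a minimal sketch, assuming a completed org.apache.hadoop.mapreduce.Job (the helper class name is illustrative):

import java.io.IOException;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskCounter;

final class ReduceOutputRecords {
  // Reads the framework's REDUCE_OUTPUT_RECORDS counter from a finished job.
  static long get(Job job) throws IOException {
    Counter counter = job.getCounters().findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS);
    return counter.getValue();
  }
}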
Use of org.apache.hadoop.mapreduce.Counter in project ignite by apache.
The class HadoopClientProtocolSelfTest, method testJobCounters().
/**
* Tests job counters retrieval.
*
* @throws Exception If failed.
*/
public void testJobCounters() throws Exception {
IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);
igfs.mkdirs(new IgfsPath(PATH_INPUT));
try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
bw.write("alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n" + "alpha\n" + "beta\n" + "gamma\n");
}
Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);
final Job job = Job.getInstance(conf);
try {
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(IntWritable.class);
job.setMapperClass(TestCountingMapper.class);
job.setReducerClass(TestCountingReducer.class);
job.setCombinerClass(TestCountingCombiner.class);
FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@" + PATH_INPUT));
FileOutputFormat.setOutputPath(job, new Path("igfs://" + igfsName + "@" + PATH_OUTPUT));
job.submit();
final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);
assertEquals(0, cntr.getValue());
cntr.increment(10);
assertEquals(10, cntr.getValue());
// Transferring to map phase.
setupLockFile.delete();
// Transferring to reduce phase.
mapLockFile.delete();
job.waitForCompletion(false);
assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());
final Counters counters = job.getCounters();
assertNotNull("counters cannot be null", counters);
assertEquals("wrong counters count", 3, counters.countCounters());
assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
} catch (Throwable t) {
log.error("Unexpected exception", t);
} finally {
job.getCluster().close();
}
}
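The test above increments a custom TestCounter from the client side and then checks the aggregated totals once the job completes. A hedged sketch of how such an enum-backed counter is typically declared and incremented inside a mapper (the enum and mapper names here are illustrative, not the test's actual TestCountingMapper):

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class CountingMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
  // Custom counters are usually plain enums; Hadoop keys them by enum class and constant name.
  public enum ExampleCounter { LINES }

  private static final IntWritable ONE = new IntWritable(1);

  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    // getCounter(Enum) resolves the per-task counter; the framework aggregates
    // per-task values into the totals read back via job.getCounters().findCounter(...).
    context.getCounter(ExampleCounter.LINES).increment(1);
    context.write(value, ONE);
  }
}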