use of org.openstack4j.model.identity.v3.Project in project camel by apache.
the class ProjectProducerTest method updateTest.
@Test
public void updateTest() throws Exception {
final String id = "myID";
msg.setHeader(OpenstackConstants.OPERATION, OpenstackConstants.UPDATE);
final String newName = "newName";
when(testOSproject.getId()).thenReturn(id);
when(testOSproject.getName()).thenReturn(newName);
when(testOSproject.getDescription()).thenReturn("desc");
when(projectService.update(any(Project.class))).thenReturn(testOSproject);
msg.setBody(testOSproject);
producer.process(exchange);
ArgumentCaptor<Project> captor = ArgumentCaptor.forClass(Project.class);
verify(projectService).update(captor.capture());
assertEqualsProject(testOSproject, captor.getValue());
assertNotNull(captor.getValue().getId());
assertEquals(newName, msg.getBody(Project.class).getName());
}
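The fields used above (msg, exchange, producer, projectService, testOSproject) and the assertEqualsProject helper come from the enclosing test class and are not part of this snippet. A plausible shape of that helper, shown only as a hedged sketch (the actual Camel test may compare different fields):
private void assertEqualsProject(Project expected, Project actual) {
    // compare the fields the producer is expected to propagate (JUnit's org.junit.Assert.assertEquals)
    assertEquals(expected.getName(), actual.getName());
    assertEquals(expected.getDescription(), actual.getDescription());
    assertEquals(expected.getDomainId(), actual.getDomainId());
}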
use of org.openstack4j.model.identity.v3.Project in project wildfly-camel by wildfly-extras.
the class OpenstackIntegrationTest method createKeystoneProject.
@Test
public void createKeystoneProject() throws Exception {
CamelContext camelContext = Mockito.mock(CamelContext.class);
when(camelContext.getHeadersMapFactory()).thenReturn(new DefaultHeadersMapFactory());
Message msg = new DefaultMessage(camelContext);
Exchange exchange = Mockito.mock(Exchange.class);
when(exchange.getIn()).thenReturn(msg);
msg.setHeader(OpenstackConstants.OPERATION, OpenstackConstants.CREATE);
msg.setHeader(OpenstackConstants.NAME, dummyProject.getName());
msg.setHeader(KeystoneConstants.DESCRIPTION, dummyProject.getDescription());
msg.setHeader(KeystoneConstants.DOMAIN_ID, dummyProject.getDomainId());
msg.setHeader(KeystoneConstants.PARENT_ID, dummyProject.getParentId());
KeystoneEndpoint endpoint = Mockito.mock(KeystoneEndpoint.class);
Producer producer = new ProjectProducer(endpoint, client);
producer.process(exchange);
ArgumentCaptor<Project> captor = ArgumentCaptor.forClass(Project.class);
verify(projectService).create(captor.capture());
assertEqualsProject(dummyProject, captor.getValue());
}
use of ini.trakem2.Project in project TrakEM2 by trakem2.
the class Compare method compareAllToAll.
/**
* Gets pipes for all open projects (or for the given projects, when not null) and generates a matrix of dissimilarities, which is passed on to the Worker thread and, if desired, also written to a file.
*
* @param to_file Whether to pop up a save dialog and write the resulting matrix to a file. In any case the results are stored in the worker's result, which can be retrieved like:
* <pre>
* Bureaucrat bu = Compare.compareAllToAll(true, null, null);
* Object[] result = (Object[]) bu.getWorker().getResult();
* float[][] scores = (float[][]) result[0];
* ArrayList&lt;Compare.Chain&gt; chains = (ArrayList&lt;Compare.Chain&gt;) result[1];
* </pre>
*/
public static Bureaucrat compareAllToAll(final boolean to_file, final String regex, final String[] ignore, final Project[] projects, final boolean crop, final boolean from_end, final int max_n_elements, final String outgroup) {
// gather all open projects
final Project[] p = null == projects ? Project.getProjects().toArray(new Project[0]) : projects;
final Worker worker = new Worker("Comparing all to all") {
@Override
public void run() {
startedWorking();
try {
final CATAParameters cp = new CATAParameters();
if (!cp.setup(to_file, regex, false, false)) {
finishedWorking();
return;
}
String filename = null, dir = null;
if (to_file) {
final SaveDialog sd = new SaveDialog("Save matrix", OpenDialog.getDefaultDirectory(), null, ".csv");
filename = sd.getFileName();
if (null == filename) {
finishedWorking();
return;
}
dir = sd.getDirectory().replace('\\', '/');
if (!dir.endsWith("/"))
dir += "/";
}
Object[] ob = gatherChains(p, cp, ignore);
final ArrayList<Chain> chains = (ArrayList<Chain>) ob[0];
// to keep track of each project's chains
final ArrayList[] p_chains = (ArrayList[]) ob[1];
ob = null;
if (null == chains) {
finishedWorking();
return;
}
final int n_chains = chains.size();
// crop chains if desired
if (crop) {
for (final Chain chain : chains) {
if (from_end) {
final int start = chain.vs.length() - max_n_elements;
if (start > 0) {
chain.vs = chain.vs.substring(start, chain.vs.length());
// BEFORE making it relative
chain.vs.resample(cp.delta, cp.with_source);
}
} else {
if (max_n_elements < chain.vs.length()) {
chain.vs = chain.vs.substring(0, max_n_elements);
// BEFORE making it relative
chain.vs.resample(cp.delta, cp.with_source);
}
}
}
}
// compare all to all
final VectorString3D[] vs = new VectorString3D[n_chains];
for (int i = 0; i < n_chains; i++) vs[i] = chains.get(i).vs;
final float[][] scores = Compare.scoreAllToAll(vs, cp.distance_type, cp.delta, cp.skip_ends, cp.max_mut, cp.min_chunk, cp.direct, cp.substring_matching, this);
if (null == scores) {
finishedWorking();
return;
}
// store matrix and chains into the worker
this.result = new Object[] { scores, chains };
// write to file
if (!to_file) {
finishedWorking();
return;
}
final File f = new File(dir + filename);
// encoding in Latin-1 (so that Mac OS X does not mess with it)
final OutputStreamWriter dos = new OutputStreamWriter(new BufferedOutputStream(new FileOutputStream(f)), "8859_1");
// Normalize matrix to largest value of 1.0
if (cp.normalize) {
float max = 0;
for (int i = 0; i < scores.length; i++) {
// traverse only half the matrix: it's mirrored
for (int j = i; j < scores[0].length; j++) {
if (scores[i][j] > max)
max = scores[i][j];
}
}
for (int i = 0; i < scores.length; i++) {
for (int j = i; j < scores[0].length; j++) {
scores[i][j] = scores[j][i] /= max;
}
}
}
// write chain titles, with project prefix
if (cp.format.equals(cp.formats[0])) {
// as csv:
try {
final StringBuffer[] titles = new StringBuffer[n_chains];
int next = 0;
for (int i = 0; i < p.length; i++) {
final String prefix = Utils.getCharacter(i + 1);
// empty upper left corner
dos.write("\"\"");
for (final Chain chain : (ArrayList<Chain>) p_chains[i]) {
dos.write(",");
titles[next] = new StringBuffer().append('\"').append(prefix).append(' ').append(chain.getCellTitle()).append('\"');
dos.write(titles[next].toString());
next++;
}
}
dos.write("\n");
for (int i = 0; i < n_chains; i++) {
final StringBuffer line = new StringBuffer();
line.append(titles[i]);
for (int j = 0; j < n_chains; j++) line.append(',').append(scores[i][j]);
line.append('\n');
dos.write(line.toString());
}
dos.flush();
} catch (final Exception e) {
e.printStackTrace();
}
} else if (cp.format.equals(cp.formats[1])) {
// as XML:
try {
final StringBuffer sb = new StringBuffer("<?xml version=\"1.0\"?>\n<!DOCTYPE ggobidata SYSTEM \"ggobi.dtd\">\n");
sb.append("<ggobidata count=\"2\">\n");
sb.append("<data name=\"Pipe Chains\">\n");
sb.append("<description />\n");
// ggobi: what a crappy XML parser it has
sb.append("<variables count=\"0\">\n</variables>\n");
sb.append("<records count=\"").append(chains.size()).append("\" glyph=\"fr 1\" color=\"3\">\n");
int next = 0;
for (int i = 0; i < p.length; i++) {
final String prefix = Utils.getCharacter(i + 1);
final String color = new StringBuffer("color=\"").append(i + 1).append('\"').toString();
for (final Chain chain : (ArrayList<Chain>) p_chains[i]) {
sb.append("<record id=\"").append(next + 1).append("\" label=\"").append(prefix).append(' ').append(chain.getCellTitle()).append("\" ").append(color).append("></record>\n");
next++;
}
}
sb.append("</records>\n</data>\n");
sb.append("<data name=\"distances\">\n");
sb.append("<description />\n");
sb.append("<variables count=\"1\">\n<realvariable name=\"D\" />\n</variables>\n");
sb.append("<records count=\"").append(n_chains * (n_chains - 1)).append("\" glyph=\"fr 1\" color=\"0\">\n");
for (int i = 0; i < n_chains; i++) {
for (int j = 0; j < n_chains; j++) {
if (i == j)
continue;
sb.append("<record source=\"").append(i + 1).append("\" destination=\"").append(j + 1).append("\">").append(scores[i][j]).append("</record>\n");
}
}
sb.append("</records>\n</data>\n");
sb.append("</ggobidata>");
dos.write(sb.toString());
dos.flush();
} catch (final Exception e) {
e.printStackTrace();
}
} else if (cp.format.equals(cp.formats[2])) {
// as Phylip .dis
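// Illustrative layout of the resulting .dis file (made-up values): the first line holds the
// number of entries, then one row per chain with its short id padded to 12 characters,
// followed by the distances, wrapped every 7 values:
//   3
//   A            0.0 0.4 0.7
//   B            0.4 0.0 0.5
//   C            0.7 0.5 0.0
// A companion ".tags" file, written below, maps each short id back to the full lineage name.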
try {
// collect different projects
final ArrayList<Project> projects = new ArrayList<Project>();
for (final Chain chain : chains) {
final Project p = chain.getRoot().getProject();
if (!projects.contains(p))
projects.add(p);
}
final HashSet names = new HashSet();
final StringBuffer sb = new StringBuffer();
sb.append(scores.length).append('\n');
dos.write(sb.toString());
// unique ids, since phylip cannot handle long names
final AtomicInteger ids = new AtomicInteger(0);
final File ftags = new File(dir + filename + ".tags");
// encoding in Latin-1 (so that Mac OS X does not mess with it)
final OutputStreamWriter dostags = new OutputStreamWriter(new BufferedOutputStream(new FileOutputStream(ftags)), "8859_1");
for (int i = 0; i < scores.length; i++) {
sb.setLength(0);
// String title = chains.get(i).getShortCellTitle().replace(' ', '_').replace('\t', '_').replace('[', '-').replace(']', '-');
final int id = ids.incrementAndGet();
final String sid = Utils.getCharacter(id);
String name = chains.get(i).getShortCellTitle();
// If sid.length() > 10 chars, trouble!
if (sid.length() > 10) {
Utils.log2("Ignoring " + name + " : id longer than 10 chars: " + id);
continue;
}
final int k = 1;
// Prepend a project char identifier to the name
String project_name = "";
if (projects.size() > 1) {
project_name = Utils.getCharacter(projects.indexOf(chains.get(i).getRoot().getProject()) + 1).toLowerCase();
name = project_name + name;
}
dostags.write(new StringBuilder().append(sid).append('\t').append(name).append('\n').toString());
if (null != outgroup && -1 != name.indexOf(outgroup)) {
Utils.logAll("Outgroup 0-based index is " + id + ", with id " + sid + ", with name " + name);
}
//
final int len = 12;
sb.append(sid);
// pad with spaces up to len
for (int j = len - sid.length(); j > 0; j--) sb.append(' ');
int count = 0;
for (int j = 0; j < scores[0].length; j++) {
sb.append(' ').append(scores[i][j]);
count++;
if (7 == count && j < scores[0].length - 1) {
sb.append('\n');
count = 0;
while (++count < len) sb.append(' ');
sb.append(' ');
count = 0;
}
}
sb.append('\n');
dos.write(sb.toString());
}
dos.flush();
dostags.flush();
dostags.close();
} catch (final Exception e) {
e.printStackTrace();
}
}
dos.close();
} catch (final Exception e) {
e.printStackTrace();
} finally {
finishedWorking();
}
}
};
return Bureaucrat.createAndStart(worker, p);
}
use of ini.trakem2.Project in project TrakEM2 by trakem2.
the class Compare method reliabilityAnalysis.
public static final Bureaucrat reliabilityAnalysis(final String[] ignore, final boolean output_arff, final boolean weka_classify, final boolean show_dialog, final double delta, final double wi, final double wd, final double wm) {
// gather all open projects
final Project[] p = Project.getProjects().toArray(new Project[0]);
final Worker worker = new Worker("Reliability by name") {
@Override
public void run() {
startedWorking();
try {
final CATAParameters cp = new CATAParameters();
cp.delta = delta;
if (show_dialog && !cp.setup(false, null, false, false)) {
finishedWorking();
return;
}
Object[] ob = gatherChains(p, cp, ignore);
final ArrayList<Chain> chains = (ArrayList<Chain>) ob[0];
// to keep track of each project's chains
final ArrayList[] p_chains = (ArrayList[]) ob[1];
ob = null;
if (null == chains) {
finishedWorking();
return;
}
// For each pipe in a brain:
// - score against all other brains in which that pipe name exists,
// - record the score position within that brain.
//
final ExecutorService exec = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
// for each individual lineage:
final TreeMap<String, ArrayList<Integer>> indices = new TreeMap<String, ArrayList<Integer>>();
final ArrayList<CITuple> cin = new ArrayList<CITuple>();
// for each family:
final TreeMap<String, ArrayList<Integer>> indices_f = new TreeMap<String, ArrayList<Integer>>();
final ArrayList<CITuple> cin_f = new ArrayList<CITuple>();
final ArrayList<Future> fus = new ArrayList<Future>();
// For neural network analysis:
final StringBuilder arff = output_arff ? new StringBuilder("@RELATION Lineages\n\n") : null;
if (output_arff) {
arff.append("@ATTRIBUTE APD NUMERIC\n");
arff.append("@ATTRIBUTE CPD NUMERIC\n");
arff.append("@ATTRIBUTE STD NUMERIC\n");
arff.append("@ATTRIBUTE MPD NUMERIC\n");
arff.append("@ATTRIBUTE PM NUMERIC\n");
arff.append("@ATTRIBUTE LEV NUMERIC\n");
arff.append("@ATTRIBUTE SIM NUMERIC\n");
arff.append("@ATTRIBUTE PRX NUMERIC\n");
arff.append("@ATTRIBUTE PRM NUMERIC\n");
// length ratio: len(query) / len(ref)
arff.append("@ATTRIBUTE LR NUMERIC\n");
arff.append("@ATTRIBUTE TR NUMERIC\n");
arff.append("@ATTRIBUTE CLASS {false,true}\n");
arff.append("\n@DATA\n");
}
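// Illustrative @DATA row appended later for each compared pair (made-up values): the eleven
// numeric attributes declared above, in order, followed by the boolean CLASS label:
//   12.3,45.6,7.8,9.1,0.25,14.0,0.82,10.5,11.2,0.97,1.1,true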
// Count the number of times the decision tree says a match is good, versus the number of times it should be good
// observed
final AtomicInteger obs_good = new AtomicInteger(0);
// observed wrong
final AtomicInteger obs_wrong = new AtomicInteger(0);
// expected
final AtomicInteger exp_good = new AtomicInteger(0);
final AtomicInteger exp_bad = new AtomicInteger(0);
final AtomicInteger obs_bad_classified_good_ones = new AtomicInteger(0);
final AtomicInteger obs_well_classified_bad_ones = new AtomicInteger(0);
// incremented by one when a lineage to compare is not found at all in the brain that serves as reference
final AtomicInteger not_found = new AtomicInteger(0);
final AtomicInteger already_classified = new AtomicInteger(0);
Method classify_ = null;
if (weka_classify) {
try {
classify_ = Class.forName("lineage.LineageClassifier").getDeclaredMethod("classify", new Class[] { double[].class });
} catch (final Exception e) {
IJError.print(e);
}
}
final Method classify = classify_;
// All ordered pairs of projects: (i,j) and (j,i) are both visited, even though the pipe pairwise comparison itself is symmetric.
for (int _i = 0; _i < p_chains.length; _i++) {
final int i = _i;
Utils.log2("Project " + p[i] + " has " + p_chains[i].size() + " chains.");
for (int _j = 0; _j < p_chains.length; _j++) {
final int j = _j;
// skip same project (would have a score of zero, identical.)
if (i == j)
continue;
final String[] titles_j = new String[p_chains[j].size()];
int next = 0;
for (final Chain cj : (ArrayList<Chain>) p_chains[j]) {
final String t = cj.getCellTitle();
titles_j[next++] = t.substring(0, t.indexOf(' '));
}
// families:
final TreeSet<String> ts_families = new TreeSet<String>();
for (int f = 0; f < titles_j.length; f++) {
// extract family name from title: read the first continuous string of capital letters
final String title = titles_j[f];
int u = 0;
for (; u < title.length(); u++) {
if (!Character.isUpperCase(title.charAt(u)))
break;
}
ts_families.add(title.substring(0, u));
}
final ArrayList<String> families = new ArrayList<String>(ts_families);
fus.add(exec.submit(new Callable() {
@Override
public Object call() {
// All chains of one project to all chains of the other:
for (final Chain chain : (ArrayList<Chain>) p_chains[i]) {
final VectorString3D vs1 = chain.vs;
// Prepare title
String title = chain.getCellTitle();
title = title.substring(0, title.indexOf(' '));
// check if the other project j contains a chain of name chain.getCellTitle() up to the space.
int title_index = -1;
for (int k = 0; k < titles_j.length; k++) {
if (title.equals(titles_j[k])) {
title_index = k;
break;
}
}
if (-1 == title_index) {
Utils.log2(title + " not found in project " + p[j]);
if (weka_classify)
not_found.incrementAndGet();
continue;
}
// should be there:
if (weka_classify) {
exp_good.incrementAndGet();
exp_bad.addAndGet(titles_j.length - 1);
}
final ArrayList<ChainMatch> list = new ArrayList<ChainMatch>();
// extract family name from title: read the first continuous string of capital letters
int u = 0;
for (; u < title.length(); u++) {
if (!Character.isUpperCase(title.charAt(u)))
break;
}
final String family_name = title.substring(0, u);
String last_classify = null;
int g = 0;
for (final Chain cj : (ArrayList<Chain>) p_chains[j]) {
final VectorString3D vs2 = cj.vs;
final Object[] ob = findBestMatch(vs1, vs2, cp.delta, cp.skip_ends, cp.max_mut, cp.min_chunk, cp.distance_type, cp.direct, cp.substring_matching, wi, wd, wm);
final Editions ed = (Editions) ob[0];
final double[] stats = ed.getStatistics(cp.skip_ends, cp.max_mut, cp.min_chunk, cp.score_mut_only);
final ChainMatch cm = new ChainMatch(cj, null, ed, stats, score(ed.getSimilarity(), ed.getDistance(), stats[3], Compare.W));
cm.title = titles_j[g];
list.add(cm);
g++;
if (weka_classify) {
// from decision tree: is it good?
final double[] param = new double[11];
for (int p = 0; p < stats.length; p++) param[p] = stats[p];
try {
if (((Boolean) classify.invoke(null, param)).booleanValue()) {
if (null != last_classify) {
Utils.log2("ALREADY CLASSIFIED " + title + " as " + last_classify + " (now: " + cm.title + " )");
already_classified.incrementAndGet();
}
last_classify = cm.title;
if (title.equals(cm.title)) {
obs_good.incrementAndGet();
} else {
Utils.log2("WRONG CLASSIFICATION of " + title + " as " + cm.title);
obs_wrong.incrementAndGet();
}
} else {
if (title.equals(cm.title)) {
obs_bad_classified_good_ones.incrementAndGet();
} else {
obs_well_classified_bad_ones.incrementAndGet();
}
}
} catch (final Exception ee) {
IJError.print(ee);
}
}
}
// sort scores:
Compare.sortMatches(list, cp.distance_type, cp.distance_type_2, cp.min_matches);
if (output_arff) {
// Take top 8 and put them into training set for WEKA in arff format
for (int h = 0; h < 8; h++) {
final ChainMatch cm = list.get(h);
final StringBuilder sb = new StringBuilder();
sb.append(cm.phys_dist).append(',').append(cm.cum_phys_dist).append(',').append(cm.stdDev).append(',').append(cm.median).append(',').append(cm.prop_mut).append(',').append(cm.ed.getDistance()).append(',').append(cm.seq_sim).append(',').append(cm.proximity).append(',').append(cm.proximity_mut).append(',').append(cm.prop_len).append(',').append(cm.tortuosity_ratio).append(',').append(title.equals(cm.title)).append('\n');
// .append('-').append(cm.title.startsWith(family_name)).append('\n');
synchronized (arff) {
arff.append(sb);
}
}
}
// record scoring index
int f = 0;
boolean found_specific = false;
boolean found_family = false;
for (final ChainMatch cm : list) {
// Exact match: for each individual lineage
if (!found_specific && title.equals(cm.title)) {
synchronized (indices) {
ArrayList<Integer> al = indices.get(title);
if (null == al) {
al = new ArrayList<Integer>();
indices.put(title, al);
// so I can keep a list of chains sorted by name
cin.add(new CITuple(title, chain, al));
}
al.add(f);
}
found_specific = true;
}
if (!found_family && cm.title.startsWith(family_name)) {
synchronized (indices_f) {
ArrayList<Integer> al = indices_f.get(family_name);
if (null == al) {
al = new ArrayList<Integer>();
indices_f.put(family_name, al);
cin_f.add(new CITuple(family_name, chain, al));
}
al.add(f);
}
found_family = true;
}
if (found_specific && found_family) {
break;
}
//
f++;
}
if (!found_specific) {
Utils.log2("NOT FOUND any match for " + title + " within a list of size " + list.size() + ", in project " + chain.getRoot().getProject());
}
}
return null;
}
}));
}
}
for (final Future fu : fus) {
try {
fu.get();
} catch (final Exception e) {
IJError.print(e);
}
}
exec.shutdownNow();
if (weka_classify) {
// so stateful ... it's a sin.
try {
Class.forName("lineage.LineageClassifier").getDeclaredMethod("flush", new Class[] {}).invoke(null, new Object[] {});
} catch (final Exception e) {
IJError.print(e);
}
}
// export ARFF for neural network training
if (output_arff) {
Utils.saveToFile(new File(System.getProperty("user.dir") + "/lineages.arff"), arff.toString());
}
// Show the results from indices map
final StringBuilder sb = new StringBuilder();
// scoring index vs count of occurrences
final TreeMap<Integer, Integer> sum = new TreeMap<Integer, Integer>();
// best scoring index of best family member vs count of occurrences
final TreeMap<Integer, Integer> sum_f = new TreeMap<Integer, Integer>();
// scoring index vs count of occurrences, within each family
final TreeMap<String, TreeMap<Integer, Integer>> sum_fw = new TreeMap<String, TreeMap<Integer, Integer>>();
// From collected data, several kinds of results:
// - a list of how well each chain scores: its index position in the sorted list of scores of one to many.
// - a list of how well each chain scores relative to family: the lowest (best) index position of a lineage of the same family in the sorted list of scores.
sb.append("List of scoring indices for each (starting at index 1, aka best possible score):\n");
for (final CITuple ci : cin) {
// sort indices in place
Collections.sort(ci.list);
// count occurrences of each scoring index
// lowest possible index
int last = 0;
int count = 1;
for (final int i : ci.list) {
if (last == i)
count++;
else {
sb.append(ci.title).append(' ').append(last + 1).append(' ').append(count).append('\n');
// reset
last = i;
count = 1;
}
// global count of occurrences
final Integer oi = new Integer(i);
sum.put(oi, (sum.containsKey(oi) ? sum.get(oi) : 0) + 1);
// Same thing but not for all lineages, but only for lineages within a family:
// extract family name from title: read the first continuous string of capital letters
int u = 0;
for (; u < ci.title.length(); u++) {
if (!Character.isUpperCase(ci.title.charAt(u)))
break;
}
final String family_name = ci.title.substring(0, u);
TreeMap<Integer, Integer> sfw = sum_fw.get(family_name);
if (null == sfw) {
sfw = new TreeMap<Integer, Integer>();
sum_fw.put(family_name, sfw);
}
sfw.put(oi, (sfw.containsKey(oi) ? sfw.get(oi) : 0) + 1);
}
if (0 != count)
sb.append(ci.title).append(' ').append(last + 1).append(' ').append(count).append('\n');
// find the very-off ones:
if (last > 6) {
Utils.log2("BAD index " + last + " for chain " + ci.title + " " + ci.chain.getRoot() + " of project " + ci.chain.getRoot().getProject());
}
}
sb.append("===============================\n");
// family score:
for (final CITuple ci : cin_f) {
// sort indices in place
Collections.sort(ci.list);
// count occurrences of each scoring index
// lowest possible index
int last = 0;
int count = 1;
for (final int i : ci.list) {
if (last == i)
count++;
else {
// reset
last = i;
count = 1;
}
// global count of occurrences
final Integer oi = new Integer(i);
sum_f.put(oi, (sum_f.containsKey(oi) ? sum_f.get(oi) : 0) + 1);
}
}
sb.append("===============================\n");
// - a summarizing histogram that collects how many 1st, how many 2nd, etc. in total, normalized to total number of one-to-many matches performed (i.e. the number of scoring indices recorded.)
//
{
sb.append("Global count of index ocurrences:\n");
int total = 0;
int top2 = 0;
int top5 = 0;
for (final Map.Entry<Integer, Integer> e : sum.entrySet()) {
sb.append(e.getKey()).append(' ').append(e.getValue()).append('\n');
total += e.getValue();
if (e.getKey() < 2)
top2 += e.getValue();
if (e.getKey() < 5)
top5 += e.getValue();
}
sb.append("total: ").append(total).append('\n');
sb.append("top1: ").append(sum.get(sum.firstKey()) / (float) total).append('\n');
sb.append("top2: ").append(top2 / (float) total).append('\n');
sb.append("top5: ").append(top5 / (float) total).append('\n');
sb.append("===============================\n");
}
sb.append("Family-wise count of index ocurrences:\n");
for (final Map.Entry<String, TreeMap<Integer, Integer>> fe : sum_fw.entrySet()) {
int total = 0;
int top5 = 0;
for (final Map.Entry<Integer, Integer> e : fe.getValue().entrySet()) {
sb.append(fe.getKey()).append(' ').append(e.getKey()).append(' ').append(e.getValue()).append('\n');
total += e.getValue();
if (e.getKey() < 5)
top5 += e.getValue();
}
sb.append("total: ").append(total).append('\n');
sb.append("top1: ").append(fe.getValue().get(fe.getValue().firstKey()) / (float) total).append('\n');
sb.append("top5: ").append(top5 / (float) total).append('\n');
}
sb.append("===============================\n");
// - the percent of first score being the correct one:
double first = 0;
double first_5 = 0;
double all = 0;
for (final Map.Entry<Integer, Integer> e : sum.entrySet()) {
final int k = e.getKey();
final int a = e.getValue();
all += a;
if (0 == k)
first = a;
if (k < 5)
first_5 += a;
}
// STORE
this.result = new double[] { // Top one ratio
first / all, // Top 5 ratio
first_5 / all };
sb.append("Global count of index occurrences family-wise:\n");
for (final Map.Entry<Integer, Integer> e : sum_f.entrySet()) {
sb.append(e.getKey()).append(' ').append(e.getValue()).append('\n');
}
sb.append("===============================\n");
// - a summarizing histogram of how well each chain scores (4/4, 3/4, 2/4, 1/4, 0/4 only for those that have 4 homologous members.)
// Must consider that there are 5 projects taken in pairs with repetition.
sb.append("A summarizing histogram of how well each chain scores, for those that have 4 homologous members. It's the number of 1st scores (zeroes) versus the total number of scores:\n");
// First, classify them in having 4, 3, 2, 1
// For 5 brains: 5! / (5-2)! = 5 * 4 = 20 --- 5 elements taken in groups of 2, where order matters
// For 4 brains: 4! / (4-2)! = 4 * 3 = 12
// For 3 brains: 3! / (3-2)! = 3 * 2 = 6;
final TreeMap<Integer, ArrayList<String>> hsc = new TreeMap<Integer, ArrayList<String>>();
for (final CITuple ci : cin) {
final int size = ci.list.size();
ArrayList<String> al = hsc.get(size);
if (null == al) {
al = new ArrayList<String>();
hsc.put(size, al);
}
// Count the number of 0s -- top scoring
int count = 0;
for (final Integer i : ci.list) {
if (0 == i)
count++;
else
break;
}
al.add(new StringBuffer(ci.title).append(" =").append(count).append('/').append(ci.list.size()).append('\n').toString());
}
// Then just print:
for (final Map.Entry<Integer, ArrayList<String>> e : hsc.entrySet()) {
sb.append("For ").append(e.getKey()).append(" matches:\n");
for (final String s : e.getValue()) sb.append(s);
}
sb.append("=========================\n");
// Family-wise, count the number of zeros per family:
sb.append("Number of top scoring per family:\n");
final TreeMap<String, String> family_scores = new TreeMap<String, String>();
for (final CITuple ci : cin_f) {
int count = 0;
for (final Integer i : ci.list) {
if (0 == i)
count++;
else
// ci.list is sorted
break;
}
family_scores.put(ci.title, new StringBuilder().append(ci.title).append(" =").append(count).append('/').append(ci.list.size()).append('\n').toString());
}
// Now print sorted by family name:
for (final String s : family_scores.values()) {
sb.append(s);
}
sb.append("=========================\n");
if (weka_classify) {
sb.append("Decision tree:\n");
sb.append("Expected good matches: " + exp_good.get() + "\n");
sb.append("Expected bad matches: " + exp_bad.get() + "\n");
sb.append("Observed good matches: " + obs_good.get() + "\n");
sb.append("Observed bad matches: " + obs_wrong.get() + "\n");
sb.append("Observed well classified bad ones: " + obs_well_classified_bad_ones.get() + "\n");
sb.append("Observed bad classified good ones: " + obs_bad_classified_good_ones.get() + "\n");
sb.append("Not found, so skipped: " + not_found.get() + "\n");
sb.append("Already classified: " + already_classified.get() + "\n");
sb.append("=========================\n");
}
if (output_arff) {
Utils.log(sb.toString());
} else {
Utils.log2(sb.toString());
}
} catch (final Exception e) {
e.printStackTrace();
} finally {
finishedWorking();
}
}
};
return Bureaucrat.createAndStart(worker, p);
}
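A minimal caller sketch for reliabilityAnalysis (the argument values below are illustrative assumptions; the worker stores the top-1 and top-5 ratios computed at the end as its result):
// illustrative values only
Bureaucrat bu = Compare.reliabilityAnalysis(
        null,    // ignore: no name patterns excluded
        true,    // output_arff: write lineages.arff into the working directory
        false,   // weka_classify: skip the decision-tree classification
        true,    // show_dialog: let the user adjust the CATAParameters
        1.0,     // delta: resampling step
        1, 1, 1); // wi, wd, wm: edit-cost weights
double[] ratios = (double[]) bu.getWorker().getResult();
// ratios[0]: fraction of lineages whose homologue scored first; ratios[1]: fraction within the top 5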
use of ini.trakem2.Project in project TrakEM2 by trakem2.
the class Compare method obtainOrigin.
/**
* Generate calibrated origin of coordinates.
*/
public static Object[] obtainOrigin(final Line3D[] axes, final int transform_type, final Vector3d[] o_ref) {
// pipe's axes
final VectorString3D[] vs = new VectorString3D[3];
for (int i = 0; i < 3; i++) vs[i] = axes[i].asVectorString3D();
final Calibration cal = (null != axes[0].getLayerSet() ? axes[0].getLayerSet().getCalibration() : null);
// 1 - calibrate
if (null != cal) {
for (int i = 0; i < 3; i++) vs[i].calibrate(cal);
}
// 2 - resample (done before transforming, but only for aesthetic purposes: it doesn't matter, these axes will never go into the dynamic programming machinery)
double delta = 0;
for (int i = 0; i < 3; i++) delta += vs[i].getAverageDelta();
delta /= 3;
for (int i = 0; i < 3; i++) vs[i].resample(delta);
// return origin vectors for pipe's project
// requires resampled vs
final Vector3d[] o = Compare.createOrigin(vs[0], vs[1], vs[2], transform_type, o_ref);
return new Object[] { vs, o };
}
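The returned Object[] packs the calibrated, resampled axes together with the computed origin vectors. A caller would unpack it along these lines (a sketch; axes, transform_type and o_ref are assumed to be supplied by the caller):
final Object[] pack = Compare.obtainOrigin(axes, transform_type, o_ref);
final VectorString3D[] calibrated_axes = (VectorString3D[]) pack[0]; // calibrated and resampled copies of the three axes
final Vector3d[] origin = (Vector3d[]) pack[1]; // origin vectors for the pipe's project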