Use of structures._ParentDoc in project IR_Base by Linda-sunshine.
From class LDAGibbs4AC, method crossValidation:
public void crossValidation(int k) {
    m_trainSet = new ArrayList<_Doc>();
    m_testSet = new ArrayList<_Doc>();
    double[] perf = null;

    // Collect the parent documents into their own corpus so that folds are
    // drawn over parents; each parent carries its child docs with it.
    _Corpus parentCorpus = new _Corpus();
    ArrayList<_Doc> docs = m_corpus.getCollection();
    ArrayList<_ParentDoc> parentDocs = new ArrayList<_ParentDoc>();
    for (_Doc d : docs) {
        if (d instanceof _ParentDoc) {
            parentCorpus.addDoc(d);
            parentDocs.add((_ParentDoc) d);
        }
    }
    System.out.println("size of parent docs\t" + parentDocs.size());

    parentCorpus.setMasks();
    if (m_randomFold) {
        perf = new double[k];
        parentCorpus.shuffle(k);
        int[] masks = parentCorpus.getMasks();
        for (int i = 0; i < k; i++) {
            // Fold i is the test set; every other fold trains. A parent's
            // children always follow it into the training set.
            for (int j = 0; j < masks.length; j++) {
                if (masks[j] == i) {
                    m_testSet.add(parentDocs.get(j));
                } else {
                    m_trainSet.add(parentDocs.get(j));
                    for (_ChildDoc d : parentDocs.get(j).m_childDocs)
                        m_trainSet.add(d);
                }
            }
            // writeFile(i, m_trainSet, m_testSet);
            System.out.println("Fold number " + i);
            infoWriter.println("Fold number " + i);
            System.out.println("Train Set Size " + m_trainSet.size());
            infoWriter.println("Train Set Size " + m_trainSet.size());
            System.out.println("Test Set Size " + m_testSet.size());
            infoWriter.println("Test Set Size " + m_testSet.size());

            long start = System.currentTimeMillis();
            EM();
            perf[i] = Evaluation(i);
            System.out.format("%s Train/Test finished in %.2f seconds...\n", this.toString(), (System.currentTimeMillis() - start) / 1000.0);
            infoWriter.format("%s Train/Test finished in %.2f seconds...\n", this.toString(), (System.currentTimeMillis() - start) / 1000.0);
            if (i < k - 1) {
                m_trainSet.clear();
                m_testSet.clear();
            }
        }
    }

    // Report mean and standard deviation of the per-fold perplexity. perf is
    // only populated in the random-fold branch above, so guard against null.
    if (perf != null) {
        double mean = Utils.sumOfArray(perf) / k, std = 0;
        for (int i = 0; i < perf.length; i++)
            std += (perf[i] - mean) * (perf[i] - mean);
        std = Math.sqrt(std / k);
        System.out.format("Perplexity %.3f+/-%.3f\n", mean, std);
        infoWriter.format("Perplexity %.3f+/-%.3f\n", mean, std);
    }
}
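For readers unfamiliar with the mask-based split above, here is a minimal, self-contained sketch of the same k-fold assignment, independent of the IR_Base classes. How _Corpus.setMasks() and shuffle(k) actually assign fold ids is an assumption here; the uniform random masks below merely illustrate the mechanism.

import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class KFoldSketch {
    public static void main(String[] args) {
        int k = 5;
        List<String> items = new ArrayList<>();
        for (int n = 0; n < 20; n++) items.add("doc" + n);

        // Stand-in for setMasks()/shuffle(k): give each item a fold id in [0, k).
        int[] masks = new int[items.size()];
        Random rnd = new Random(42);
        for (int j = 0; j < masks.length; j++) masks[j] = rnd.nextInt(k);

        for (int i = 0; i < k; i++) {
            List<String> train = new ArrayList<>(), test = new ArrayList<>();
            for (int j = 0; j < masks.length; j++) {
                if (masks[j] == i) test.add(items.get(j));   // fold i is held out
                else train.add(items.get(j));                // everything else trains
            }
            System.out.format("fold %d: train=%d test=%d%n", i, train.size(), test.size());
        }
    }
}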
Use of structures._ParentDoc in project IR_Base by Linda-sunshine.
From class LDAGibbs4AC, method initTest:
protected void initTest(ArrayList<_Doc> sampleTestSet, _Doc d) {
    _ParentDoc pDoc = (_ParentDoc) d;
    for (_Stn stnObj : pDoc.getSentences()) {
        stnObj.setTopicsVct(number_of_topics);
    }

    // The parent document holds out no words (testLength = 0); inference
    // uses its full content.
    int testLength = 0;
    pDoc.setTopics4GibbsTest(number_of_topics, d_alpha, testLength);
    sampleTestSet.add(pDoc);
    pDoc.createSparseVct4Infer();

    // Each child document holds out a fixed proportion of its words for
    // perplexity evaluation.
    for (_ChildDoc cDoc : pDoc.m_childDocs) {
        testLength = (int) (m_testWord4PerplexityProportion * cDoc.getTotalDocLength());
        cDoc.setTopics4GibbsTest(number_of_topics, d_alpha, testLength);
        sampleTestSet.add(cDoc);
        cDoc.createSparseVct4Infer();
    }
}
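A minimal sketch of holding out a fixed proportion of a token sequence, analogous to what testLength asks setTopics4GibbsTest to do. Which positions are held out (here: the tail of the sequence) is an assumption, not confirmed by the snippet above.

import java.util.Arrays;

public class HoldoutSketch {
    public static void main(String[] args) {
        String[] tokens = {"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"};
        double proportion = 0.3; // stand-in for m_testWord4PerplexityProportion
        int testLength = (int) (proportion * tokens.length);

        // Split into words used for inference and words held out for perplexity.
        String[] infer = Arrays.copyOfRange(tokens, 0, tokens.length - testLength);
        String[] heldOut = Arrays.copyOfRange(tokens, tokens.length - testLength, tokens.length);
        System.out.println("infer:   " + Arrays.toString(infer));
        System.out.println("heldOut: " + Arrays.toString(heldOut));
    }
}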
Use of structures._ParentDoc in project IR_Base by Linda-sunshine.
From class LDAGibbs4AC_test, method printTopKChild4StnWithHybridPro:
protected void printTopKChild4StnWithHybridPro(String filePrefix, int topK) {
    String topKChild4StnFile = filePrefix + "topChild4Stn_hybridPro.txt";
    // Note: topK is not used in the body; all children are printed in ranked order.
    try (PrintWriter pw = new PrintWriter(new File(topKChild4StnFile))) {
        m_LM.generateReferenceModel();
        for (_Doc d : m_trainSet) {
            if (d instanceof _ParentDoc) {
                _ParentDoc pDoc = (_ParentDoc) d;
                pw.println(pDoc.getName() + "\t" + pDoc.getSenetenceSize());
                for (_Stn stnObj : pDoc.getSentences()) {
                    HashMap<String, Double> likelihoodMap = rankChild4StnByHybridPro(stnObj, pDoc);
                    pw.print((stnObj.getIndex() + 1) + "\t");
                    // Children sorted by descending likelihood.
                    for (Map.Entry<String, Double> e : sortHashMap4String(likelihoodMap, true)) {
                        pw.print(e.getKey());
                        pw.print(":" + e.getValue());
                        pw.print("\t");
                    }
                    pw.println();
                }
            }
        }
        // pw is flushed and closed automatically by try-with-resources.
    } catch (Exception e) {
        e.printStackTrace();
    }
}
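sortHashMap4String above is an IR_Base helper whose implementation is not shown here. A minimal stand-in that sorts map entries by value, descending when the flag is true, might look like this:

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SortMapSketch {
    static List<Map.Entry<String, Double>> sortByValue(HashMap<String, Double> map, boolean descending) {
        List<Map.Entry<String, Double>> entries = new ArrayList<>(map.entrySet());
        entries.sort(Map.Entry.comparingByValue()); // ascending by value
        if (descending) Collections.reverse(entries);
        return entries;
    }

    public static void main(String[] args) {
        HashMap<String, Double> m = new HashMap<>();
        m.put("child1", -12.3);
        m.put("child2", -4.6);
        m.put("child3", -8.9);
        // Prints [child2=-4.6, child3=-8.9, child1=-12.3]
        System.out.println(sortByValue(m, true));
    }
}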
Use of structures._ParentDoc in project IR_Base by Linda-sunshine.
From class LDAGibbs4AC_test, method rankChild4StnByHybridPro:
protected HashMap<String, Double> rankChild4StnByHybridPro(_Stn stnObj, _ParentDoc pDoc) {
    HashMap<String, Double> childLikelihoodMap = new HashMap<String, Double>();
    double smoothingMu = m_LM.m_smoothingMu;
    for (_ChildDoc cDoc : pDoc.m_childDocs) {
        double cDocLen = cDoc.getTotalDocLength();
        double stnLogLikelihood = 0;
        // Dirichlet smoothing weight: longer documents rely less on the
        // reference (background) model.
        double alphaDoc = smoothingMu / (smoothingMu + cDocLen);
        _SparseFeature[] fv = cDoc.getSparse();
        _SparseFeature[] sv = stnObj.getFv();
        for (_SparseFeature svWord : sv) {
            int wid = svWord.getIndex();
            double stnVal = svWord.getValue();
            int featureIndex = Utils.indexOf(fv, wid);
            double docVal = 0;
            if (featureIndex != -1)
                docVal = fv[featureIndex].getValue();

            // Language-model likelihood: smoothed mixture of the document's
            // maximum-likelihood estimate and the reference model.
            double LMLikelihood = (1 - alphaDoc) * docVal / cDocLen;
            LMLikelihood += alphaDoc * m_LM.getReferenceProb(wid);

            // Topic-model likelihood: sum over topics of p(w|z=k) * p(z=k|doc).
            double TMLikelihood = 0;
            for (int k = 0; k < number_of_topics; k++) {
                double wordPerTopicLikelihood = (word_topic_sstat[k][wid] / m_sstat[k])
                        * (topicInDocProb(k, cDoc) / (d_alpha * number_of_topics + cDocLen));
                TMLikelihood += wordPerTopicLikelihood;
            }

            double wordLikelihood = m_tau * LMLikelihood + (1 - m_tau) * TMLikelihood;
            stnLogLikelihood += stnVal * Math.log(wordLikelihood);
        }
        // Final score mixes the sentence log-likelihood with the cosine
        // similarity of the sentence and comment topic vectors.
        double cosineSim = computeSimilarity(stnObj.m_topics, cDoc.m_topics);
        stnLogLikelihood = m_tau * stnLogLikelihood + (1 - m_tau) * cosineSim;
        childLikelihoodMap.put(cDoc.getName(), stnLogLikelihood);
    }
    return childLikelihoodMap;
}
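Written out, the scoring in rankChild4StnByHybridPro is the following, for a sentence s and child document (comment) c. Here \tau is m_tau (reused in both mixtures, exactly as in the code), \mu is m_LM.m_smoothingMu, K is number_of_topics, \alpha is d_alpha, and n_{k,w}/n_{k} corresponds to word_topic_sstat[k][wid]/m_sstat[k]; topicInDocProb is kept symbolic since its body is not shown.

p_{\mathrm{LM}}(w \mid c) = (1-\alpha_c)\,\frac{\mathrm{tf}(w,c)}{|c|} + \alpha_c\, p_{\mathrm{ref}}(w), \qquad \alpha_c = \frac{\mu}{\mu + |c|}

p_{\mathrm{TM}}(w \mid c) = \sum_{k=1}^{K} \frac{n_{k,w}}{n_{k}} \cdot \frac{\mathrm{topicInDocProb}(k,c)}{K\alpha + |c|}

\log L(s \mid c) = \sum_{w \in s} \mathrm{tf}(w,s)\,\log\big(\tau\, p_{\mathrm{LM}}(w \mid c) + (1-\tau)\, p_{\mathrm{TM}}(w \mid c)\big)

\mathrm{score}(s,c) = \tau\,\log L(s \mid c) + (1-\tau)\,\cos(\theta_s, \theta_c)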
Use of structures._ParentDoc in project IR_Base by Linda-sunshine.
From class LDAGibbs4AC_test, method rankStn4ChildBySim:
// A comment is treated as a query; the parent's sentences are ranked by topical similarity.
protected HashMap<Integer, Double> rankStn4ChildBySim(_ParentDoc pDoc, _ChildDoc cDoc) {
    HashMap<Integer, Double> stnSimMap = new HashMap<Integer, Double>();
    for (_Stn stnObj : pDoc.getSentences()) {
        // Alternatives kept for reference: cosine similarity, symmetric KL,
        // and the reverse-direction KL.
        // double stnSim = computeSimilarity(cDoc.m_topics, stnObj.m_topics);
        // stnSimMap.put(stnObj.getIndex() + 1, stnSim);
        // double stnKL = Utils.KLsymmetric(cDoc.m_topics, stnObj.m_topics);
        // double stnKL = Utils.klDivergence(stnObj.m_topics, cDoc.m_topics);
        double stnKL = Utils.klDivergence(cDoc.m_topics, stnObj.m_topics);
        // Negate the divergence so that larger values mean more similar.
        stnSimMap.put(stnObj.getIndex() + 1, -stnKL);
    }
    return stnSimMap;
}
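Utils.klDivergence is an IR_Base helper; a minimal KL divergence over two discrete distributions (with a small floor to avoid log 0, an assumption about how the library guards against zeros) could be:

public class KLSketch {
    static double klDivergence(double[] p, double[] q) {
        double eps = 1e-12, kl = 0;
        for (int i = 0; i < p.length; i++) {
            double pi = Math.max(p[i], eps), qi = Math.max(q[i], eps);
            kl += pi * Math.log(pi / qi); // KL(p || q) term for bin i
        }
        return kl;
    }

    public static void main(String[] args) {
        double[] comment = {0.7, 0.2, 0.1};
        double[] sentence = {0.5, 0.3, 0.2};
        // Smaller KL means more similar topic mixtures, hence the negation
        // when it is used as a similarity score in rankStn4ChildBySim.
        System.out.println(klDivergence(comment, sentence));
    }
}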