Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine: class DCMCorrLDA_test, method rankChild4StnByLikelihood.
protected HashMap<String, Double> rankChild4StnByLikelihood(_Stn stnObj, _ParentDoc4DCM pDoc) {
    HashMap<String, Double> likelihoodMap = new HashMap<String, Double>();
    for (_ChildDoc cDoc : pDoc.m_childDocs) {
        double stnLogLikelihood = 0;
        for (_Word w : stnObj.getWords()) {
            // marginalize the word's probability over all topics
            double wordLikelihood = 0;
            int wid = w.getIndex();
            for (int k = 0; k < number_of_topics; k++) {
                wordLikelihood += childTopicInDocProb(k, cDoc, pDoc) * childWordByTopicProb(k, wid, pDoc);
            }
            // accumulate in log space: the sentence likelihood is the product of word likelihoods
            stnLogLikelihood += Math.log(wordLikelihood);
        }
        likelihoodMap.put(cDoc.getName(), stnLogLikelihood);
    }
    return likelihoodMap;
}
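A usage sketch (an assumption, not repository code) for turning the returned map into an explicit ranking; the helper name rankChildren is hypothetical, and stnObj/pDoc stand for any sentence and parent document already in scope:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical helper (not in IR_Base): rank child documents for one sentence,
// best match first, using the map built by rankChild4StnByLikelihood.
protected List<Map.Entry<String, Double>> rankChildren(_Stn stnObj, _ParentDoc4DCM pDoc) {
    HashMap<String, Double> scores = rankChild4StnByLikelihood(stnObj, pDoc);
    List<Map.Entry<String, Double>> ranking = new ArrayList<>(scores.entrySet());
    // higher log-likelihood means a better match, so sort descending
    ranking.sort(Map.Entry.<String, Double>comparingByValue().reversed());
    return ranking;
}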
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine: class DCMCorrLDA_test, method printWordTopicDistribution.
protected void printWordTopicDistribution(_Doc d, File wordTopicDistributionFolder, int k) {
    _ParentDoc4DCM pDoc = (_ParentDoc4DCM) d;
    String wordTopicDistributionFile = pDoc.getName() + ".txt";
    try {
        PrintWriter pw = new PrintWriter(new File(wordTopicDistributionFolder, wordTopicDistributionFile));
        for (int i = 0; i < number_of_topics; i++) {
            // bounded priority queue keeps only the k most probable words for topic i
            MyPriorityQueue<_RankItem> fVector = new MyPriorityQueue<_RankItem>(k);
            for (int v = 0; v < vocabulary_size; v++) {
                String featureName = m_corpus.getFeature(v);
                double wordProb = pDoc.m_wordTopic_prob[i][v];
                _RankItem ri = new _RankItem(featureName, wordProb);
                fVector.add(ri);
            }
            // one line per topic: the topic proportion, then its top-k words
            pw.format("Topic %d(%.5f):\t", i, pDoc.m_topics[i]);
            for (_RankItem it : fVector)
                pw.format("%s(%.5f)\t", it.m_name, m_logSpace ? Math.exp(it.m_value) : it.m_value);
            pw.write("\n");
        }
        pw.flush();
        pw.close();
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    }
}
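MyPriorityQueue is a project-specific container whose body is not shown; the bounded top-k idiom it appears to implement can be sketched with java.util.PriorityQueue alone. The method name topKWords and the plain double[] input are assumptions for illustration:

import java.util.Comparator;
import java.util.PriorityQueue;

// Sketch of bounded top-k selection: a min-heap of size k whose root is the
// smallest retained probability; any larger entry displaces it.
static int[] topKWords(double[] wordProb, int k) {
    PriorityQueue<Integer> heap =
            new PriorityQueue<>(k, Comparator.comparingDouble(v -> wordProb[v]));
    for (int v = 0; v < wordProb.length; v++) {
        heap.offer(v);
        if (heap.size() > k)
            heap.poll(); // evict the current minimum
    }
    // word ids of the k most probable words, in heap order (not sorted)
    return heap.stream().mapToInt(Integer::intValue).toArray();
}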
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine: class DCMLDA4AC, method calculate_M_step.
public void calculate_M_step(int iter, File weightFolder) {
    // collect sufficient statistics from this sweep's topic assignments
    for (_Doc d : m_trainSet) {
        if (d instanceof _ParentDoc4DCM)
            collectParentStats((_ParentDoc4DCM) d);
        else
            collectChildStats((_ChildDoc) d);
    }
    // accumulate smoothed topic-word pseudo-counts across sweeps
    for (int k = 0; k < number_of_topics; k++) {
        for (int v = 0; v < vocabulary_size; v++) {
            m_topic_word_prob[k][v] += word_topic_sstat[k][v] + m_beta[k][v];
        }
    }
    File weightIterFolder = new File(weightFolder, "_" + iter);
    if (!weightIterFolder.exists()) {
        weightIterFolder.mkdir();
    }
    updateParameter(iter, weightIterFolder);
}
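Note that the accumulated m_topic_word_prob entries are pseudo-counts, not yet probabilities. A hedged sketch of the row normalization a later estimation step would typically apply (the repository's own estimation code is not shown here, so this fragment, written in the same class scope, is an assumption):

// Assumption: normalize each topic's accumulated pseudo-counts so every
// row of m_topic_word_prob becomes a proper word distribution (sums to 1).
for (int k = 0; k < number_of_topics; k++) {
    double rowSum = 0;
    for (int v = 0; v < vocabulary_size; v++)
        rowSum += m_topic_word_prob[k][v];
    for (int v = 0; v < vocabulary_size; v++)
        m_topic_word_prob[k][v] /= rowSum;
}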
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine: class DCMLDA4AC, method initialize_probability.
protected void initialize_probability(Collection<_Doc> collection) {
    m_alpha = new double[number_of_topics];
    m_beta = new double[number_of_topics][vocabulary_size];
    m_totalAlpha = 0;
    m_totalBeta = new double[number_of_topics];
    m_topic_word_prob = new double[number_of_topics][vocabulary_size];
    // word_topic_sstat is used below but not allocated here; it is presumably
    // allocated by the parent class before this method runs
    for (_Doc d : collection) {
        if (d instanceof _ParentDoc4DCM) {
            _ParentDoc4DCM pDoc = (_ParentDoc4DCM) d;
            pDoc.setTopics4Gibbs(number_of_topics, 0, vocabulary_size);
            // seed the global topic-word counts with the parent's initial assignments
            for (_Word w : pDoc.getWords()) {
                int wid = w.getIndex();
                int tid = w.getTopic();
                word_topic_sstat[tid][wid]++;
            }
            for (_ChildDoc cDoc : pDoc.m_childDocs) {
                cDoc.setTopics4Gibbs_LDA(number_of_topics, 0);
                // child words update both the parent's local counts and the global counts
                for (_Word w : cDoc.getWords()) {
                    int wid = w.getIndex();
                    int tid = w.getTopic();
                    pDoc.m_wordTopic_stat[tid][wid]++;
                    pDoc.m_topic_stat[tid]++;
                    word_topic_sstat[tid][wid]++;
                }
            }
        }
    }
    initialAlphaBeta();
    imposePrior();
}
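setTopics4Gibbs and setTopics4Gibbs_LDA are project methods whose bodies are not shown; conceptually they give each word a random starting topic before Gibbs sampling. A minimal sketch of that idea, where the method name randomInitTopics and the setter setTopic are hypothetical:

import java.util.Random;

// Sketch (assumption, not IR_Base code): uniform random topic initialization,
// the usual first step of collapsed Gibbs sampling.
void randomInitTopics(_Word[] words, int numberOfTopics, Random rand) {
    for (_Word w : words)
        w.setTopic(rand.nextInt(numberOfTopics)); // hypothetical setter on _Word
}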
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine: class DCMLDA4AC, method calculate_log_likelihood.
protected double calculate_log_likelihood(_ParentDoc4DCM d) {
    double docLogLikelihood = 0.0;
    double parentDocLength = d.getTotalDocLength();
    // Dirichlet-multinomial marginal of the parent's topic proportions
    for (int k = 0; k < number_of_topics; k++) {
        docLogLikelihood += Utils.lgamma(d.m_sstat[k] + m_alpha[k]);
        docLogLikelihood -= Utils.lgamma(m_alpha[k]);
    }
    docLogLikelihood += Utils.lgamma(m_totalAlpha);
    docLogLikelihood -= Utils.lgamma(parentDocLength + m_totalAlpha);
    // Dirichlet-multinomial marginal of each topic's word counts in this parent
    for (int k = 0; k < number_of_topics; k++) {
        for (int v = 0; v < vocabulary_size; v++) {
            docLogLikelihood += Utils.lgamma(d.m_wordTopic_stat[k][v] + m_beta[k][v]);
            docLogLikelihood -= Utils.lgamma(m_beta[k][v]);
        }
        docLogLikelihood += Utils.lgamma(m_totalBeta[k]);
        docLogLikelihood -= Utils.lgamma(d.m_topic_stat[k] + m_totalBeta[k]);
    }
    // and of each child document's topic proportions
    for (_ChildDoc cDoc : d.m_childDocs) {
        int cDocLength = cDoc.getTotalDocLength();
        for (int k = 0; k < number_of_topics; k++) {
            docLogLikelihood += Utils.lgamma(cDoc.m_sstat[k] + m_alpha[k]);
            docLogLikelihood -= Utils.lgamma(m_alpha[k]);
        }
        docLogLikelihood += Utils.lgamma(m_totalAlpha);
        docLogLikelihood -= Utils.lgamma(cDocLength + m_totalAlpha);
    }
    return docLogLikelihood;
}
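Each additive block in this method is the log of a Dirichlet-multinomial (Polya) marginal. For the parent's topic-proportion block, with K topics:

\log p(\mathbf{z}_d \mid \boldsymbol{\alpha})
  = \log\Gamma\Big(\sum_k \alpha_k\Big)
  - \log\Gamma\Big(N_d + \sum_k \alpha_k\Big)
  + \sum_{k=1}^{K} \big[\log\Gamma(n_{d,k} + \alpha_k) - \log\Gamma(\alpha_k)\big]

where N_d = getTotalDocLength(), n_{d,k} = m_sstat[k], and \sum_k \alpha_k = m_totalAlpha. The topic-word blocks and the child-document blocks have the same form, with (m_beta[k][v], m_totalBeta[k], m_wordTopic_stat[k][v], m_topic_stat[k]) and the child counts substituted respectively.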