Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine.
Class weightedCorrespondenceModel, method updateEta4Child:
public void updateEta4Child(_ParentDoc4DCM pDoc) {
    for (_ChildDoc cDoc : pDoc.m_childDocs) {
        _SparseFeature[] fvs = cDoc.getSparse();
        for (int n = 0; n < fvs.length; n++) {
            int wId = fvs[n].getIndex();
            double wVal = fvs[n].getValue();
            // Variational update of phi in log space: combine the parent's and
            // child's topic statistics with the word's topic-word weight.
            for (int k = 0; k < number_of_topics; k++) {
                cDoc.m_phi[n][k] = Utils.digamma(pDoc.m_sstat[k]) + Utils.digamma(cDoc.m_sstat[k]);
                cDoc.m_phi[n][k] += Utils.digamma(pDoc.m_lambda_stat[k][wId]) - Utils.digamma(pDoc.m_lambda_topicStat[k]);
            }
            // Normalize via log-sum-exp for numerical stability.
            double logSum = logSumOfExponentials(cDoc.m_phi[n]);
            if (Double.isInfinite(logSum)) {
                System.out.println("infinite");
                System.out.println("this doc\t" + cDoc.getName() + "\tthis word has a totally biased probability assignment\t" + m_corpus.getFeature(wId));
            }
            if (Double.isNaN(logSum)) {
                System.out.println("nan");
                for (int k = 0; k < number_of_topics; k++)
                    System.out.println("cDoc.m_phi\t" + cDoc.m_phi[n][k]);
            }
            // Exponentiate back; floor terms that would underflow exp().
            double phiSum = 0;
            for (int k = 0; k < number_of_topics; k++) {
                if ((cDoc.m_phi[n][k] - logSum) < -200)
                    cDoc.m_phi[n][k] = 1e-20;
                else
                    cDoc.m_phi[n][k] = Math.exp(cDoc.m_phi[n][k] - logSum);
                phiSum += cDoc.m_phi[n][k];
            }
            // Loose sanity check: flag phi vectors that are clearly unnormalized.
            if (Math.abs(phiSum - 1) > 1) {
                System.out.println("phiSum for comment\t" + phiSum);
                for (int k = 0; k < number_of_topics; k++)
                    System.out.println("m_phi\t" + cDoc.m_phi[n][k]);
            }
            if (Double.isNaN(phiSum)) {
                for (int k = 0; k < number_of_topics; k++) {
                    System.out.println("pDoc.m_sstat[k]\t" + pDoc.m_sstat[k]);
                    System.out.println("cDoc.m_sstat[k]\t" + cDoc.m_sstat[k]);
                    System.out.println("pDoc.m_lambda_stat[k][wId]\t" + pDoc.m_lambda_stat[k][wId]);
                    System.out.println("pDoc.m_lambda_topicStat[k]\t" + pDoc.m_lambda_topicStat[k]);
                    System.out.println("cDoc.m_phi[n][k]\t" + cDoc.m_phi[n][k]);
                }
            }
        }
    }
}
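The helper logSumOfExponentials is called above but not shown in this snippet. A minimal sketch of a numerically stable log-sum-exp, assuming the helper takes the unnormalized log values and returns log(sum_k exp(x_k)); the actual IR_Base implementation may differ:

// Hypothetical stand-in for the logSumOfExponentials helper referenced above.
// The max-shift keeps Math.exp() from overflowing for large log values.
public static double logSumOfExponentials(double[] xs) {
    double max = Double.NEGATIVE_INFINITY;
    for (double x : xs)
        if (x > max) max = x;
    if (Double.isInfinite(max))
        return max; // all entries -inf (or one +inf) propagate through
    double sum = 0;
    for (double x : xs)
        sum += Math.exp(x - max);
    return max + Math.log(sum);
}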
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine.
Class DCMCorrLDA, method initialAlphaBeta:
protected void initialAlphaBeta() {
    double parentDocNum = 0;
    double childDocNum = 0;
    Arrays.fill(m_sstat, 0);
    Arrays.fill(m_alphaAuxilary, 0);
    for (int k = 0; k < number_of_topics; k++) {
        Arrays.fill(topic_term_probabilty[k], 0);
        Arrays.fill(word_topic_sstat[k], 0);
    }
    // Accumulate normalized topic proportions (parent and child) and
    // normalized topic-word proportions over the training set.
    for (_Doc d : m_trainSet) {
        if (d instanceof _ParentDoc4DCM) {
            _ParentDoc4DCM pDoc = (_ParentDoc4DCM) d;
            for (int k = 0; k < number_of_topics; k++) {
                double tempProb = pDoc.m_sstat[k] / pDoc.getTotalDocLength();
                m_sstat[k] += tempProb;
                if (pDoc.m_sstat[k] == 0)
                    continue;
                for (int v = 0; v < vocabulary_size; v++) {
                    tempProb = pDoc.m_wordTopic_stat[k][v] / pDoc.m_topic_stat[k];
                    topic_term_probabilty[k][v] += tempProb;
                }
            }
            parentDocNum += 1;
            for (_ChildDoc cDoc : pDoc.m_childDocs) {
                for (int k = 0; k < number_of_topics; k++) {
                    double tempProb = cDoc.m_sstat[k] / cDoc.getTotalDocLength();
                    m_alphaAuxilary[k] += tempProb;
                }
                childDocNum += 1;
            }
        }
    }
    // Average the accumulated proportions over the respective document counts.
    for (int k = 0; k < number_of_topics; k++) {
        m_sstat[k] /= parentDocNum;
        m_alphaAuxilary[k] /= childDocNum;
        for (int v = 0; v < vocabulary_size; v++)
            topic_term_probabilty[k][v] /= (parentDocNum + childDocNum);
    }
    // Seed the Dirichlet priors from these averages; d_beta acts as smoothing.
    for (int k = 0; k < number_of_topics; k++) {
        m_alpha[k] = m_sstat[k];
        m_alpha_c[k] = m_alphaAuxilary[k];
        for (int v = 0; v < vocabulary_size; v++)
            m_beta[k][v] = topic_term_probabilty[k][v] + d_beta;
    }
    m_totalAlpha = Utils.sumOfArray(m_alpha);
    m_totalAlpha_c = Utils.sumOfArray(m_alpha_c);
    for (int k = 0; k < number_of_topics; k++)
        m_totalBeta[k] = Utils.sumOfArray(m_beta[k]);
}
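The initialization above is a moment-matching heuristic: each prior is seeded with the empirical mean of the corresponding normalized counts. The same idea in isolation, as a toy sketch (the names matchAlpha, docTopicCounts, and docLengths are illustrative, not from IR_Base):

// Illustrative moment matching: average per-document topic proportions
// into an initial Dirichlet parameter vector.
static double[] matchAlpha(double[][] docTopicCounts, double[] docLengths) {
    int numTopics = docTopicCounts[0].length;
    double[] alpha = new double[numTopics];
    for (int d = 0; d < docTopicCounts.length; d++)
        for (int k = 0; k < numTopics; k++)
            alpha[k] += docTopicCounts[d][k] / docLengths[d];
    for (int k = 0; k < numTopics; k++)
        alpha[k] /= docTopicCounts.length; // mean proportion across documents
    return alpha;
}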
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine.
Class DCMCorrLDA, method calculate_log_likelihood:
protected double calculate_log_likelihood(_ParentDoc4DCM d) {
    double docLogLikelihood = 0;
    int docID = d.getID();
    double parentDocLength = d.getTotalDocLength();
    // Dirichlet-multinomial term for the parent's topic proportions.
    for (int k = 0; k < number_of_topics; k++) {
        docLogLikelihood += Utils.lgamma(d.m_sstat[k] + m_alpha[k]);
        docLogLikelihood -= Utils.lgamma(m_alpha[k]);
    }
    docLogLikelihood += Utils.lgamma(m_totalAlpha);
    docLogLikelihood -= Utils.lgamma(parentDocLength + m_totalAlpha);
    // Dirichlet-multinomial term for each topic's word distribution.
    for (int k = 0; k < number_of_topics; k++) {
        for (int v = 0; v < vocabulary_size; v++) {
            docLogLikelihood += Utils.lgamma(d.m_wordTopic_stat[k][v] + m_beta[k][v]);
            docLogLikelihood -= Utils.lgamma(m_beta[k][v]);
        }
        docLogLikelihood += Utils.lgamma(m_totalBeta[k]);
        docLogLikelihood -= Utils.lgamma(d.m_topic_stat[k] + m_totalBeta[k]);
    }
    // Child documents: the Dirichlet prior mixes alpha_c with the parent's
    // topic proportions, scaled by the child's mu.
    for (_ChildDoc cDoc : d.m_childDocs) {
        double muDp = cDoc.getMu() / parentDocLength;
        // Normalization terms use lgamma (not digamma), and the second
        // term is subtracted, matching the parent-side terms above.
        docLogLikelihood += Utils.lgamma(m_totalAlpha_c + cDoc.getMu());
        docLogLikelihood -= Utils.lgamma(m_totalAlpha_c + cDoc.getMu() + cDoc.getTotalDocLength());
        for (int k = 0; k < number_of_topics; k++) {
            double term = Utils.lgamma(m_alpha_c[k] + muDp * d.m_sstat[k] + cDoc.m_sstat[k]);
            term -= Utils.lgamma(m_alpha_c[k] + muDp * d.m_sstat[k]);
            docLogLikelihood += term;
        }
    }
    return docLogLikelihood;
}
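Each block in this method is an instance of the Dirichlet-multinomial (DCM) marginal: log P(n | alpha) = lgamma(A) - lgamma(A + N) + sum_k [lgamma(alpha_k + n_k) - lgamma(alpha_k)], with A = sum_k alpha_k and N = sum_k n_k. A self-contained sketch of that formula using Apache Commons Math (an assumption for portability; IR_Base's own Utils.lgamma would serve the same role):

import org.apache.commons.math3.special.Gamma;

// Log marginal likelihood of count vector n under a Dirichlet(alpha) prior:
// lgamma(A) - lgamma(A + N) + sum_k [lgamma(alpha[k] + n[k]) - lgamma(alpha[k])]
static double dcmLogLikelihood(double[] n, double[] alpha) {
    double A = 0, N = 0, ll = 0;
    for (int k = 0; k < n.length; k++) {
        A += alpha[k];
        N += n[k];
        ll += Gamma.logGamma(alpha[k] + n[k]) - Gamma.logGamma(alpha[k]);
    }
    ll += Gamma.logGamma(A) - Gamma.logGamma(A + N);
    return ll;
}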
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine.
Class DCMCorrLDA, method initialize_probability:
protected void initialize_probability(Collection<_Doc> collection) {
    m_alpha_c = new double[number_of_topics];
    m_alphaAuxilary = new double[number_of_topics];
    m_alpha = new double[number_of_topics];
    m_beta = new double[number_of_topics][vocabulary_size];
    m_totalAlpha = 0;
    m_totalAlpha_c = 0;
    m_totalBeta = new double[number_of_topics];
    m_topic_word_prob = new double[number_of_topics][vocabulary_size];
    for (_Doc d : collection) {
        if (d instanceof _ParentDoc4DCM) {
            _ParentDoc4DCM pDoc = (_ParentDoc4DCM) d;
            pDoc.setTopics4Gibbs(number_of_topics, 0, vocabulary_size);
            for (_Stn stnObj : d.getSentences())
                stnObj.setTopicsVct(number_of_topics);
            for (_ChildDoc cDoc : pDoc.m_childDocs) {
                cDoc.setTopics4Gibbs_LDA(number_of_topics, 0);
                // Fold the child's initial word-topic assignments into the
                // parent's topic-word and topic count tables.
                for (_Word w : cDoc.getWords()) {
                    int wid = w.getIndex();
                    int tid = w.getTopic();
                    pDoc.m_wordTopic_stat[tid][wid]++;
                    pDoc.m_topic_stat[tid]++;
                }
                computeMu4Doc(cDoc);
            }
        }
    }
    initialAlphaBeta();
    imposePrior();
}
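setTopics4Gibbs_LDA is defined on _ChildDoc and not shown in this snippet. As a rough sketch of what a random Gibbs initialization of this kind typically does (field and parameter names below are illustrative, not the actual _ChildDoc internals):

import java.util.Random;

// Illustrative random initialization for collapsed Gibbs sampling:
// assign each word a uniformly random topic and build the count tables.
static void initTopicsForGibbs(int[] wordIds, int numTopics, Random rand,
                               int[] topicCounts, int[][] wordTopicCounts) {
    for (int wid : wordIds) {
        int tid = rand.nextInt(numTopics);
        topicCounts[tid]++;          // document-level topic count
        wordTopicCounts[tid][wid]++; // topic-word count
    }
}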
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine.
Class DCMCorrLDA, method sampleInParentDoc:
protected void sampleInParentDoc(_Doc d) {
    _ParentDoc4DCM pDoc = (_ParentDoc4DCM) d;
    int wid, tid;
    double normalizedProb;
    for (_Word w : pDoc.getWords()) {
        tid = w.getTopic();
        wid = w.getIndex();
        // Remove the word's current assignment from the counts.
        pDoc.m_sstat[tid]--;
        pDoc.m_topic_stat[tid]--;
        pDoc.m_wordTopic_stat[tid][wid]--;
        // Compute the unnormalized conditional for every topic.
        normalizedProb = 0;
        for (tid = 0; tid < number_of_topics; tid++) {
            double pWordTopic = parentWordByTopicProb(tid, wid, pDoc);
            double pTopicPDoc = parentTopicInDocProb(tid, pDoc);
            double pTopicCDoc = parentChildInfluenceProb(tid, pDoc);
            m_topicProbCache[tid] = pWordTopic * pTopicPDoc * pTopicCDoc;
            normalizedProb += m_topicProbCache[tid];
        }
        // Inverse-CDF (roulette-wheel) draw from the cached weights.
        normalizedProb *= m_rand.nextDouble();
        for (tid = 0; tid < number_of_topics; tid++) {
            normalizedProb -= m_topicProbCache[tid];
            if (normalizedProb <= 0)
                break;
        }
        if (tid == number_of_topics)
            tid--;
        // Record the new assignment and restore the counts.
        w.setTopic(tid);
        pDoc.m_sstat[tid]++;
        pDoc.m_topic_stat[tid]++;
        pDoc.m_wordTopic_stat[tid][wid]++;
    }
}
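The draw in the middle of this method is standard inverse-CDF sampling over unnormalized weights. Isolated, the pattern looks like this (a generic sketch, not IR_Base code):

import java.util.Random;

// Draw an index i with probability weights[i] / sum(weights);
// the weights need not be normalized, only non-negative.
static int sampleIndex(double[] weights, Random rand) {
    double total = 0;
    for (double w : weights)
        total += w;
    double u = total * rand.nextDouble();
    for (int i = 0; i < weights.length; i++) {
        u -= weights[i];
        if (u <= 0)
            return i;
    }
    return weights.length - 1; // guard against floating-point round-off
}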