Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine: class DCMLDA4AC_test, method printWordTopicDistribution.
protected void printWordTopicDistribution(_Doc d, File wordTopicDistributionFolder, int k) {
	_ParentDoc4DCM pDoc = (_ParentDoc4DCM) d;
	String wordTopicDistributionFile = pDoc.getName() + ".txt";
	try {
		PrintWriter pw = new PrintWriter(new File(wordTopicDistributionFolder, wordTopicDistributionFile));
		for (int i = 0; i < number_of_topics; i++) {
			// keep only the k highest-probability words for topic i
			MyPriorityQueue<_RankItem> fVector = new MyPriorityQueue<_RankItem>(k);
			for (int v = 0; v < vocabulary_size; v++) {
				String featureName = m_corpus.getFeature(v);
				double wordProb = pDoc.m_wordTopic_prob[i][v];
				_RankItem ri = new _RankItem(featureName, wordProb);
				fVector.add(ri);
			}
			pw.format("Topic %d(%.5f):\t", i, d.m_topics[i]);
			for (_RankItem it : fVector)
				// convert back from log space if the model stores log probabilities
				pw.format("%s(%.5f)\t", it.m_name, m_logSpace ? Math.exp(it.m_value) : it.m_value);
			pw.write("\n");
		}
		pw.flush();
		pw.close();
	} catch (FileNotFoundException e) {
		e.printStackTrace();
	}
}
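For context, a minimal driver sketch for this printer: it dumps the top ten words per topic for every parent document in the corpus. The output path, the top-k value of 10, and m_corpus.getCollection() are illustrative assumptions, not taken from the source.

// hypothetical driver; getCollection() and the output path are assumptions
File folder = new File("./output/wordTopicDistribution");
folder.mkdirs(); // the PrintWriter above throws FileNotFoundException if the folder is missing
for (_Doc doc : m_corpus.getCollection()) {
	if (doc instanceof _ParentDoc4DCM)
		printWordTopicDistribution(doc, folder, 10);
}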
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine: class weightedCorrespondenceModel, method updateZeta4Child.
public void updateZeta4Child(_ParentDoc4DCM pDoc) {
	double totalGamma4Parent = Utils.sumOfArray(pDoc.m_sstat);
	for (_ChildDoc cDoc : pDoc.m_childDocs) {
		double totalPi4Child = Utils.sumOfArray(cDoc.m_sstat);
		double gammaPiInnerProd = Utils.dotProduct(pDoc.m_sstat, cDoc.m_sstat);
		cDoc.m_zeta = gammaPiInnerProd / (totalGamma4Parent * totalPi4Child);
	}
}
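Written out, the closed-form update the loop implements is, with gamma = pDoc.m_sstat and pi_d = cDoc.m_sstat:

\[
\zeta_d \;=\; \frac{\sum_k \gamma_k \, \pi_{d,k}}{\left(\sum_k \gamma_k\right)\left(\sum_k \pi_{d,k}\right)}
\]

that is, the inner product of the parent's and child's variational topic weights, normalized by both totals.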
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine: class weightedCorrespondenceModel, method gammaFuncGradientVal.
public double gammaFuncGradientVal(_ParentDoc4DCM pDoc, double[] gamma, double[] gammaGradient) {
	Arrays.fill(gammaGradient, 0);
	double funcVal = 0;
	// optimize in log space: gamma holds log-parameters, expGamma the actual Dirichlet parameters
	double[] expGamma = new double[number_of_topics];
	double expGammaSum = 0;
	for (int k = 0; k < number_of_topics; k++) {
		expGamma[k] = Math.exp(gamma[k]);
		expGammaSum += expGamma[k];
	}
	funcVal -= Utils.lgamma(expGammaSum);
	// gradient terms shared by all k, accumulated once and subtracted at the end
	double constantGradient = 0;
	for (int k = 0; k < number_of_topics; k++)
		constantGradient += (m_alpha[k] - expGamma[k]) * Utils.trigamma(expGammaSum);
	for (int k = 0; k < number_of_topics; k++) {
		gammaGradient[k] = (m_alpha[k] - expGamma[k]) * Utils.trigamma(expGamma[k]);
		funcVal += (m_alpha[k] - expGamma[k]) * (Utils.digamma(expGamma[k]) - Utils.digamma(expGammaSum));
		funcVal += Utils.lgamma(expGamma[k]);
	}
	// contribution from the parent document's words
	_SparseFeature[] fvs = pDoc.getSparse();
	for (int n = 0; n < fvs.length; n++) {
		double wVal = fvs[n].getValue();
		for (int k = 0; k < number_of_topics; k++) {
			funcVal += pDoc.m_phi[n][k] * wVal * (Utils.digamma(expGamma[k]) - Utils.digamma(expGammaSum));
			gammaGradient[k] += pDoc.m_phi[n][k] * wVal * Utils.trigamma(expGamma[k]);
			constantGradient += pDoc.m_phi[n][k] * wVal * Utils.trigamma(expGammaSum);
		}
	}
	// contribution from each child document's words
	for (_ChildDoc cDoc : pDoc.m_childDocs) {
		double piSum = Utils.sumOfArray(cDoc.m_sstat);
		_SparseFeature[] cDocFvs = cDoc.getSparse();
		for (int n = 0; n < cDocFvs.length; n++) {
			double wVal = cDocFvs[n].getValue();
			for (int k = 0; k < number_of_topics; k++) {
				funcVal += cDoc.m_phi[n][k] * wVal * (Utils.digamma(expGamma[k]) - Utils.digamma(expGammaSum));
				funcVal -= cDoc.m_phi[n][k] * wVal * Utils.dotProduct(cDoc.m_sstat, expGamma) / (piSum * expGammaSum * cDoc.m_zeta);
				gammaGradient[k] += cDoc.m_phi[n][k] * wVal * Utils.trigamma(expGamma[k]);
				constantGradient += cDoc.m_phi[n][k] * wVal * Utils.trigamma(expGammaSum);
				double temp = cDoc.m_sstat[k] * expGammaSum - Utils.dotProduct(cDoc.m_sstat, expGamma);
				gammaGradient[k] -= cDoc.m_phi[n][k] * wVal * temp / (piSum * expGammaSum * expGammaSum * cDoc.m_zeta);
			}
		}
	}
	for (int k = 0; k < number_of_topics; k++) {
		gammaGradient[k] -= constantGradient;
		// chain rule for the log-space parameterization: df/dgamma_k = (df/dexpGamma_k) * expGamma_k
		gammaGradient[k] *= expGamma[k];
		// negate objective and gradient so an off-the-shelf minimizer maximizes the bound
		gammaGradient[k] = -gammaGradient[k];
	}
	return -funcVal;
}
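Because both the objective and the gradient are negated before being handed to a minimizer, the sign conventions are easy to get wrong; a finite-difference probe is a cheap sanity check. A minimal sketch, assuming a parent document pDoc whose m_phi, child statistics, and zeta values are already populated; the probe point and step size are arbitrary:

// hypothetical gradient check; pDoc is assumed to be fully initialized
double[] gamma = new double[number_of_topics];
Arrays.fill(gamma, 0.1); // arbitrary probe point in log space
double[] grad = new double[number_of_topics];
double f0 = gammaFuncGradientVal(pDoc, gamma, grad);
double eps = 1e-5;
for (int k = 0; k < number_of_topics; k++) {
	gamma[k] += eps;
	double f1 = gammaFuncGradientVal(pDoc, gamma, new double[number_of_topics]);
	gamma[k] -= eps;
	// the analytic and numeric derivatives should agree to a few decimal places
	System.out.format("k=%d analytic=%.6f numeric=%.6f%n", k, grad[k], (f1 - f0) / eps);
}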
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine: class weightedCorrespondenceModel, method calculate_E_step.
@Override
public double calculate_E_step(_Doc d) {
	// the variational E-step is defined on parent documents only
	if (d instanceof _ChildDoc)
		return 0;
	_ParentDoc4DCM pDoc = (_ParentDoc4DCM) d;
	double last = 1;
	if (m_varConverge > 0)
		last = calculate_log_likelihood(pDoc);
	double current = last, converge;
	int iter = 0;
	do {
		// one coordinate-ascent sweep over all variational parameters
		updateEta4Parent(pDoc);
		updateGamma4Parent(pDoc);
		updatePi4Child(pDoc);
		updateZeta4Child(pDoc);
		updateEta4Child(pDoc);
		updateLambda(pDoc);
		if (m_varConverge > 0) {
			current = calculate_log_likelihood(pDoc);
			converge = Math.abs((current - last) / last);
			last = current;
			if (converge < m_varConverge)
				break;
		}
	} while (++iter < m_varMaxIter);
	collectStats(pDoc);
	return current;
}
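The stopping rule is the usual relative-change test on the per-document variational log-likelihood:

\[
\left| \frac{\mathcal{L}^{(t)} - \mathcal{L}^{(t-1)}}{\mathcal{L}^{(t-1)}} \right| < \texttt{m\_varConverge}
\]

capped at m_varMaxIter sweeps; when m_varConverge <= 0 the likelihood is never evaluated and the loop simply runs all m_varMaxIter iterations.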
Use of structures._ParentDoc4DCM in project IR_Base by Linda-sunshine: class weightedCorrespondenceModel, method updateLambda.
public void updateLambda(_ParentDoc4DCM pDoc) {
	_SparseFeature[] fvs = pDoc.getSparse();
	// debug accumulators (see the commented print at the end)
	int totalWord = 0;
	double totalLambda = 0;
	// reset lambda to the Dirichlet prior before accumulating expected counts
	for (int k = 0; k < number_of_topics; k++)
		for (int v = 0; v < vocabulary_size; v++)
			pDoc.m_lambda_stat[k][v] = m_beta[k][v];
	// expected topic-word counts from the parent document
	for (int n = 0; n < fvs.length; n++) {
		int wID = fvs[n].getIndex();
		double wVal = fvs[n].getValue();
		totalWord += wVal;
		// phi[n] should be a normalized distribution over topics; this is a (very loose) sanity check
		double phiSum = Utils.sumOfArray(pDoc.m_phi[n]);
		if (Math.abs(phiSum - 1) > 1)
			System.out.println("unequal to 1\t" + n + "\t" + phiSum);
		for (int k = 0; k < number_of_topics; k++) {
			pDoc.m_lambda_stat[k][wID] += wVal * pDoc.m_phi[n][k];
			if (Double.isNaN(pDoc.m_lambda_stat[k][wID]))
				System.out.println("nan error article\t" + n + " " + wID);
		}
	}
	// expected topic-word counts from every child document
	for (_ChildDoc cDoc : pDoc.m_childDocs) {
		_SparseFeature[] cFvs = cDoc.getSparse();
		for (int n = 0; n < cFvs.length; n++) {
			int wID = cFvs[n].getIndex();
			double wVal = cFvs[n].getValue();
			totalWord += wVal;
			double phiSum = Utils.sumOfArray(cDoc.m_phi[n]);
			if (Math.abs(phiSum - 1) > 1) {
				System.out.println("unequal to 1\t" + n + "\t" + phiSum);
				for (int k = 0; k < number_of_topics; k++)
					System.out.println("\t\t" + cDoc.m_phi[n][k]);
			}
			for (int k = 0; k < number_of_topics; k++) {
				pDoc.m_lambda_stat[k][wID] += wVal * cDoc.m_phi[n][k];
				if (Double.isNaN(pDoc.m_lambda_stat[k][wID]))
					System.out.println("nan error comment\t" + n + " " + wID);
			}
		}
	}
	// per-topic totals, used when normalizing expectations elsewhere
	for (int k = 0; k < number_of_topics; k++) {
		pDoc.m_lambda_topicStat[k] = Utils.sumOfArray(pDoc.m_lambda_stat[k]);
		totalLambda += pDoc.m_lambda_topicStat[k];
	}
	// System.out.println("total Words in this doc\t" + pDoc.getName() + "\t" + totalWord + "\t" + totalLambda);
}
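In formula form, the accumulation above is the standard variational Dirichlet update for the topic-word parameters, pooling the parent article with all of its child comments; with c_n the count (wVal) of word w_n:

\[
\lambda_{k,v} \;=\; \beta_{k,v}
\;+\; \sum_{n \in \text{parent}} c_n \, \phi_{n,k} \, \mathbb{1}[w_n = v]
\;+\; \sum_{d \in \text{children}} \sum_{n \in d} c^{(d)}_n \, \phi^{(d)}_{n,k} \, \mathbb{1}[w^{(d)}_n = v],
\qquad
\texttt{m\_lambda\_topicStat}[k] \;=\; \sum_v \lambda_{k,v}
\]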