Use of structures._SparseFeature in project IR_Base by Linda-sunshine: class weightedCorrespondenceModel, method gammaFuncGradientVal.
// Evaluates the variational objective and its gradient with respect to gamma
// for one parent document.  gamma is optimized in log-space: expGamma =
// exp(gamma) are the actual Dirichlet variational parameters, so the chain
// rule multiplies each gradient component by expGamma[k] at the end.  Both
// the function value and the gradient are negated before returning,
// presumably because the caller runs a minimizer — TODO confirm against the
// calling optimizer.
public double gammaFuncGradientVal(_ParentDoc4DCM pDoc, double[] gamma, double[] gammaGradient) {
Arrays.fill(gammaGradient, 0);
double funcVal = 0;
// expGamma[k] = exp(gamma[k]); expGammaSum is their total.
double[] expGamma = new double[number_of_topics];
double expGammaSum = 0;
for (int k = 0; k < number_of_topics; k++) {
expGamma[k] = Math.exp(gamma[k]);
expGammaSum += expGamma[k];
}
funcVal -= Utils.lgamma(expGammaSum);
// Terms proportional to trigamma(expGammaSum) are the same for every k, so
// they are accumulated once here and subtracted from all gradient
// components in the final loop.
double constantGradient = 0;
for (int k = 0; k < number_of_topics; k++) {
constantGradient += (m_alpha[k] - expGamma[k]) * Utils.trigamma(expGammaSum);
}
// Dirichlet prior contribution: (alpha_k - expGamma_k) weighted digamma
// differences plus the log-gamma normalizer of each component.
for (int k = 0; k < number_of_topics; k++) {
gammaGradient[k] = (m_alpha[k] - expGamma[k]) * Utils.trigamma(expGamma[k]);
funcVal += (m_alpha[k] - expGamma[k]) * (Utils.digamma(expGamma[k]) - Utils.digamma(expGammaSum));
funcVal += Utils.lgamma(expGamma[k]);
}
// Expected log-likelihood contribution from the parent document's words,
// weighted by the word-level topic assignments pDoc.m_phi[n][k].
_SparseFeature[] fvs = pDoc.getSparse();
for (int n = 0; n < fvs.length; n++) {
int wID = fvs[n].getIndex();
double wVal = fvs[n].getValue();
for (int k = 0; k < number_of_topics; k++) {
funcVal += pDoc.m_phi[n][k] * wVal * (Utils.digamma(expGamma[k]) - Utils.digamma(expGammaSum));
gammaGradient[k] += pDoc.m_phi[n][k] * wVal * Utils.trigamma(expGamma[k]);
constantGradient += pDoc.m_phi[n][k] * wVal * Utils.trigamma(expGammaSum);
// gammaGradient[k] -= pDoc.m_phi[n][k]*wVal*Utils.trigamma(expGammaSum);
}
}
// Child-document contribution; the extra term divided by cDoc.m_zeta comes
// from the zeta-based Taylor bound on the normalizer (quotient rule gives
// the "temp" expression below).
for (_ChildDoc cDoc : pDoc.m_childDocs) {
double piSum = Utils.sumOfArray(cDoc.m_sstat);
_SparseFeature[] cDocFvs = cDoc.getSparse();
for (int n = 0; n < cDocFvs.length; n++) {
int wID = cDocFvs[n].getIndex();
double wVal = cDocFvs[n].getValue();
for (int k = 0; k < number_of_topics; k++) {
funcVal += cDoc.m_phi[n][k] * wVal * (Utils.digamma(expGamma[k]) - Utils.digamma(expGammaSum));
funcVal -= cDoc.m_phi[n][k] * wVal * (Utils.dotProduct(cDoc.m_sstat, expGamma)) / (piSum * expGammaSum * cDoc.m_zeta);
gammaGradient[k] += cDoc.m_phi[n][k] * wVal * Utils.trigamma(expGamma[k]);
constantGradient += cDoc.m_phi[n][k] * wVal * Utils.trigamma(expGammaSum);
// gammaGradient[k] -= cDoc.m_phi[n][k]*wVal*Utils.trigamma(expGammaSum);
// Derivative of dot(sstat, expGamma) / expGammaSum w.r.t. expGamma[k].
double temp = cDoc.m_sstat[k] * expGammaSum - Utils.dotProduct(cDoc.m_sstat, expGamma);
gammaGradient[k] -= cDoc.m_phi[n][k] * wVal * temp / (piSum * expGammaSum * expGammaSum * cDoc.m_zeta);
}
}
}
// Apply the shared trigamma(expGammaSum) term, the chain-rule factor
// expGamma[k] (d expGamma / d gamma), and the final negation.
for (int k = 0; k < number_of_topics; k++) {
gammaGradient[k] -= constantGradient;
gammaGradient[k] *= expGamma[k];
gammaGradient[k] = 0 - gammaGradient[k];
}
return -funcVal;
}
Use of structures._SparseFeature in project IR_Base by Linda-sunshine: class weightedCorrespondenceModel, method updateLambda.
/**
 * Re-estimates the variational word statistics lambda for one parent
 * document: each {@code pDoc.m_lambda_stat[k][v]} is reset to the prior
 * {@code m_beta[k][v]} and then incremented by the phi-weighted term
 * frequencies of the parent document and of every child document.  The
 * per-topic row sums are cached in {@code pDoc.m_lambda_topicStat}.
 */
public void updateLambda(_ParentDoc4DCM pDoc) {
    _SparseFeature[] fvs = pDoc.getSparse();
    // Term weights are doubles; accumulate in a double instead of relying on
    // the silent lossy narrowing that "int totalWord; totalWord += wVal"
    // performed via compound assignment.
    double totalWord = 0;
    double totalLambda = 0;
    // Reset lambda to the topic-word prior before adding expected counts.
    for (int k = 0; k < number_of_topics; k++) {
        for (int v = 0; v < vocabulary_size; v++) {
            pDoc.m_lambda_stat[k][v] = m_beta[k][v];
        }
    }
    // Parent document: add wVal * phi[n][k] for each sparse term n.
    for (int n = 0; n < fvs.length; n++) {
        int wID = fvs[n].getIndex();
        double wVal = fvs[n].getValue();
        totalWord += wVal;
        double phiSum = Utils.sumOfArray(pDoc.m_phi[n]);
        // NOTE(review): a tolerance of 1 only catches grossly unnormalized
        // phi; a tighter epsilon may be intended — confirm upstream.
        if (Math.abs(phiSum - 1) > 1) {
            // Separate n and phiSum with a tab; the original concatenation
            // fused the two numbers into one unreadable token.
            System.out.println("inequal to 1\t" + n + "\t" + phiSum);
        }
        for (int k = 0; k < number_of_topics; k++) {
            pDoc.m_lambda_stat[k][wID] += wVal * pDoc.m_phi[n][k];
            if (Double.isNaN(pDoc.m_lambda_stat[k][wID])) {
                System.out.println("nan error article\t" + n + " " + wID);
            }
        }
    }
    // Child documents: same accumulation using each child's own phi.
    for (_ChildDoc cDoc : pDoc.m_childDocs) {
        _SparseFeature[] cFvs = cDoc.getSparse();
        for (int n = 0; n < cFvs.length; n++) {
            int wID = cFvs[n].getIndex();
            double wVal = cFvs[n].getValue();
            totalWord += wVal;
            double phiSum = Utils.sumOfArray(cDoc.m_phi[n]);
            if (Math.abs(phiSum - 1) > 1) {
                System.out.println("inequal to 1\t" + n + "\t" + phiSum);
                for (int k = 0; k < number_of_topics; k++) {
                    System.out.println("\t\t" + cDoc.m_phi[n][k]);
                }
            }
            for (int k = 0; k < number_of_topics; k++) {
                pDoc.m_lambda_stat[k][wID] += wVal * cDoc.m_phi[n][k];
                if (Double.isNaN(pDoc.m_lambda_stat[k][wID])) {
                    System.out.println("nan error comment\t" + n + " " + wID);
                }
            }
        }
    }
    // Cache per-topic totals; totalLambda/totalWord feed only the debug line.
    for (int k = 0; k < number_of_topics; k++) {
        pDoc.m_lambda_topicStat[k] = Utils.sumOfArray(pDoc.m_lambda_stat[k]);
        totalLambda += pDoc.m_lambda_topicStat[k];
    }
    // System.out.println("total Words in this doc\t"+pDoc.getName()+"\t"+totalWord+"\t"+totalLambda);
}
Use of structures._SparseFeature in project IR_Base by Linda-sunshine: class weightedCorrespondenceModel, method updateEta4Child.
// Updates the word-level topic assignments phi for every child document of
// pDoc.  Each phi[n][k] is first filled with an unnormalized log score
// combining the parent and child topic statistics with the topic-word
// lambda statistics, then normalized in log-space via log-sum-exp, with a
// hard floor of 1e-20 to avoid exp() underflow.  Diagnostic prints fire on
// infinite/NaN normalizers and grossly unnormalized results.
public void updateEta4Child(_ParentDoc4DCM pDoc) {
for (_ChildDoc cDoc : pDoc.m_childDocs) {
_SparseFeature[] fvs = cDoc.getSparse();
for (int n = 0; n < fvs.length; n++) {
int wId = fvs[n].getIndex();
double wVal = fvs[n].getValue();
// Unnormalized log phi: digamma terms from the parent and child topic
// proportions plus the expected log probability of word wId under
// topic k (lambda_stat over its topic total).
for (int k = 0; k < number_of_topics; k++) {
cDoc.m_phi[n][k] = Utils.digamma(pDoc.m_sstat[k]) + Utils.digamma(cDoc.m_sstat[k]);
cDoc.m_phi[n][k] += Utils.digamma(pDoc.m_lambda_stat[k][wId]) - Utils.digamma(pDoc.m_lambda_topicStat[k]);
}
// logSumOfExponentials is defined elsewhere in this class — presumably
// a numerically stable log-sum-exp; verify against its definition.
double logSum = logSumOfExponentials(cDoc.m_phi[n]);
if (Double.isInfinite(logSum)) {
System.out.println("infinite");
System.out.println("this doc\t" + cDoc.getName() + "\t" + "this word has a total biased probability assignment\t" + m_corpus.getFeature(wId));
}
if (Double.isNaN(logSum)) {
System.out.println("nan");
for (int k = 0; k < number_of_topics; k++) System.out.println("cDoc.m_phi\t" + cDoc.m_phi[n][k]);
}
// Normalize: phi[n][k] = exp(log phi - logSum), floored at 1e-20 when
// the exponent would underflow (below -200).
double phiSum = 0;
for (int k = 0; k < number_of_topics; k++) {
if ((cDoc.m_phi[n][k] - logSum) < -200) {
cDoc.m_phi[n][k] = 1e-20;
} else {
cDoc.m_phi[n][k] = Math.exp(cDoc.m_phi[n][k] - logSum);
}
phiSum += cDoc.m_phi[n][k];
}
// NOTE(review): tolerance of 1 only catches wildly wrong sums; phiSum
// should be ~1 after normalization — confirm whether a tighter check
// was intended.
if (Math.abs(phiSum - 1) > 1) {
System.out.println("phiSum for comment\t" + phiSum);
for (int k = 0; k < number_of_topics; k++) System.out.println("m_phi\t" + cDoc.m_phi[n][k]);
}
// Dump every input statistic when the normalized sum is NaN.
if (Double.isNaN(phiSum)) {
for (int k = 0; k < number_of_topics; k++) {
System.out.println("pDoc.m_sstat[k]\t" + pDoc.m_sstat[k]);
System.out.println("cDoc.m_sstat[k]\t" + cDoc.m_sstat[k]);
System.out.println("pDoc.m_lambda_stat[k][wId]\t" + pDoc.m_lambda_stat[k][wId]);
System.out.println("pDoc.m_lambda_topicStat[k]\t" + pDoc.m_lambda_topicStat[k]);
System.out.println("cDoc.m_phi[n][k]\t" + cDoc.m_phi[n][k]);
}
}
}
}
}
Use of structures._SparseFeature in project IR_Base by Linda-sunshine: class languageModelBaseLine, method generateReferenceModelWithXVal.
// Builds a corpus-level unigram reference model in m_wordSstat.  Parent
// documents contribute their raw sparse term frequencies; every other
// document is treated as a _ChildDoc (unchecked cast — assumes the corpus
// contains only parent and child docs; TODO confirm) and contributes its
// per-word counts from m_wordXStat instead.  Counts are then normalized in
// place into probabilities.
// NOTE(review): m_wordSstat is not cleared here, so calling this twice
// would mix probabilities with new counts — verify callers reset it.
protected void generateReferenceModelWithXVal() {
m_allWordFrequencyWithXVal = 0;
for (_Doc d : m_corpus.getCollection()) {
if (d instanceof _ParentDoc) {
// Parent docs: accumulate raw term frequencies from the sparse vector.
for (_SparseFeature fv : d.getSparse()) {
int wid = fv.getIndex();
double val = fv.getValue();
m_allWordFrequencyWithXVal += val;
if (m_wordSstat.containsKey(wid)) {
double oldVal = m_wordSstat.get(wid);
m_wordSstat.put(wid, oldVal + val);
} else {
m_wordSstat.put(wid, val);
}
}
} else {
// Child docs: use the x-value word statistics (m_wordXStat); a word
// with no entry contributes 0.
double docLenWithXVal = 0;
for (_Word w : d.getWords()) {
// double xProportion = w.getXProb();
int wid = w.getIndex();
double val = 0;
if (((_ChildDoc) d).m_wordXStat.containsKey(wid)) {
val = ((_ChildDoc) d).m_wordXStat.get(wid);
}
docLenWithXVal += val;
m_allWordFrequencyWithXVal += val;
if (m_wordSstat.containsKey(wid)) {
double oldVal = m_wordSstat.get(wid);
m_wordSstat.put(wid, oldVal + val);
} else {
m_wordSstat.put(wid, val);
}
}
// Cache the child's effective length under the x-value statistics.
((_ChildDoc) d).setChildDocLenWithXVal(docLenWithXVal);
}
}
// Normalize counts into probabilities in place.
for (int wid : m_wordSstat.keySet()) {
double val = m_wordSstat.get(wid);
double prob = val / m_allWordFrequencyWithXVal;
m_wordSstat.put(wid, prob);
}
}
Use of structures._SparseFeature in project IR_Base by Linda-sunshine: class languageModelBaseLine, method rankChild4StnByLanguageModel.
/**
 * Scores every child document of {@code pDoc} against the sentence
 * {@code stnObj} using a Dirichlet-smoothed (mu = 1000) unigram language
 * model, and returns a map from child-document name to the sentence's
 * log-likelihood under that child's model.
 */
protected HashMap<String, Double> rankChild4StnByLanguageModel(_Stn stnObj, _ParentDoc pDoc) {
    double mu = 1000;  // Dirichlet smoothing mass
    HashMap<String, Double> likelihoodByChild = new HashMap<String, Double>();
    for (_ChildDoc child : pDoc.m_childDocs) {
        int childLen = child.getTotalDocLength();
        // Mixing weight toward the collection-level reference model grows as
        // the child document shrinks.
        double alpha = mu / (mu + childLen);
        _SparseFeature[] childFv = child.getSparse();
        _SparseFeature[] stnFv = stnObj.getFv();
        double logLikelihood = 0;
        for (_SparseFeature stnWord : stnFv) {
            int wid = stnWord.getIndex();
            // Term frequency of this sentence word in the child doc (0 if absent).
            int pos = Utils.indexOf(childFv, wid);
            double tf = (pos == -1) ? 0 : childFv[pos].getValue();
            // Linear interpolation of the document MLE with the reference model.
            double p = (1 - alpha) * tf / (childLen) + alpha * getReferenceProb(wid);
            logLikelihood += stnWord.getValue() * Math.log(p);
        }
        likelihoodByChild.put(child.getName(), logLikelihood);
    }
    return likelihoodByChild;
}
Aggregations