Use of structures._ChildDoc in project IR_Base by Linda-sunshine.
From the class weightedCorrespondenceModel, method updateEta4Child:
public void updateEta4Child(_ParentDoc4DCM pDoc) {
    for (_ChildDoc cDoc : pDoc.m_childDocs) {
        _SparseFeature[] fvs = cDoc.getSparse();
        for (int n = 0; n < fvs.length; n++) {
            int wId = fvs[n].getIndex();
            double wVal = fvs[n].getValue(); // note: not used in this update
            // Accumulate the variational update in log space: expected log topic
            // proportions from parent and child, plus the expected log
            // topic-word probability from the parent's lambda statistics.
            for (int k = 0; k < number_of_topics; k++) {
                cDoc.m_phi[n][k] = Utils.digamma(pDoc.m_sstat[k]) + Utils.digamma(cDoc.m_sstat[k]);
                cDoc.m_phi[n][k] += Utils.digamma(pDoc.m_lambda_stat[k][wId]) - Utils.digamma(pDoc.m_lambda_topicStat[k]);
            }
            // Normalize via log-sum-exp to avoid overflow when exponentiating.
            double logSum = logSumOfExponentials(cDoc.m_phi[n]);
            if (Double.isInfinite(logSum)) {
                System.out.println("infinite");
                System.out.println("this doc\t" + cDoc.getName() + "\tthis word has a totally biased probability assignment\t" + m_corpus.getFeature(wId));
            }
            if (Double.isNaN(logSum)) {
                System.out.println("nan");
                for (int k = 0; k < number_of_topics; k++)
                    System.out.println("cDoc.m_phi\t" + cDoc.m_phi[n][k]);
            }
            double phiSum = 0;
            for (int k = 0; k < number_of_topics; k++) {
                // Floor terms that would underflow exp(); exp(-200) is far below 1e-20.
                if ((cDoc.m_phi[n][k] - logSum) < -200) {
                    cDoc.m_phi[n][k] = 1e-20;
                } else {
                    cDoc.m_phi[n][k] = Math.exp(cDoc.m_phi[n][k] - logSum);
                }
                phiSum += cDoc.m_phi[n][k];
            }
            // Loose sanity check: only flags gross normalization failures.
            if (Math.abs(phiSum - 1) > 1) {
                System.out.println("phiSum for comment\t" + phiSum);
                for (int k = 0; k < number_of_topics; k++)
                    System.out.println("m_phi\t" + cDoc.m_phi[n][k]);
            }
            if (Double.isNaN(phiSum)) {
                // Dump every statistic feeding the update to localize the NaN.
                for (int k = 0; k < number_of_topics; k++) {
                    System.out.println("pDoc.m_sstat[k]\t" + pDoc.m_sstat[k]);
                    System.out.println("cDoc.m_sstat[k]\t" + cDoc.m_sstat[k]);
                    System.out.println("pDoc.m_lambda_stat[k][wId]\t" + pDoc.m_lambda_stat[k][wId]);
                    System.out.println("pDoc.m_lambda_topicStat[k]\t" + pDoc.m_lambda_topicStat[k]);
                    System.out.println("cDoc.m_phi[n][k]\t" + cDoc.m_phi[n][k]);
                }
            }
        }
    }
}
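The numerical core of this update is the log-sum-exp trick: phi is accumulated in log space and normalized by subtracting logSumOfExponentials before exponentiating. A minimal self-contained sketch of that helper follows; the actual Utils implementation in IR_Base may differ in details.

// Computes log(sum_k exp(xs[k])) stably by factoring out the maximum,
// so no intermediate exp() call can overflow.
public static double logSumOfExponentials(double[] xs) {
    double max = Double.NEGATIVE_INFINITY;
    for (double x : xs)
        max = Math.max(max, x);
    if (Double.isInfinite(max)) // all entries are -infinity: log(0) = -infinity
        return max;
    double sum = 0;
    for (double x : xs)
        sum += Math.exp(x - max); // each term is in (0, 1]
    return max + Math.log(sum);
}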
Use of structures._ChildDoc in project IR_Base by Linda-sunshine.
From the class languageModelBaseLine, method generateReferenceModelWithXVal:
protected void generateReferenceModelWithXVal() {
    m_allWordFrequencyWithXVal = 0;
    for (_Doc d : m_corpus.getCollection()) {
        if (d instanceof _ParentDoc) {
            // Parent documents contribute their raw sparse counts.
            for (_SparseFeature fv : d.getSparse()) {
                int wid = fv.getIndex();
                double val = fv.getValue();
                m_allWordFrequencyWithXVal += val;
                if (m_wordSstat.containsKey(wid)) {
                    double oldVal = m_wordSstat.get(wid);
                    m_wordSstat.put(wid, oldVal + val);
                } else {
                    m_wordSstat.put(wid, val);
                }
            }
        } else {
            // Child documents contribute only the counts recorded in m_wordXStat.
            double docLenWithXVal = 0;
            for (_Word w : d.getWords()) {
                int wid = w.getIndex();
                double val = 0;
                if (((_ChildDoc) d).m_wordXStat.containsKey(wid)) {
                    val = ((_ChildDoc) d).m_wordXStat.get(wid);
                }
                docLenWithXVal += val;
                m_allWordFrequencyWithXVal += val;
                if (m_wordSstat.containsKey(wid)) {
                    double oldVal = m_wordSstat.get(wid);
                    m_wordSstat.put(wid, oldVal + val);
                } else {
                    m_wordSstat.put(wid, val);
                }
            }
            ((_ChildDoc) d).setChildDocLenWithXVal(docLenWithXVal);
        }
    }
    // Convert raw frequencies into a reference (background) distribution.
    for (int wid : m_wordSstat.keySet()) {
        double val = m_wordSstat.get(wid);
        m_wordSstat.put(wid, val / m_allWordFrequencyWithXVal);
    }
}
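The containsKey/get/put sequence above is the classic count-then-normalize pattern for building a background language model. Since Java 8 the same accumulation can be written with Map.merge; a sketch of the idiom in isolation, with illustrative names not taken from IR_Base:

import java.util.HashMap;
import java.util.Map;

// Accumulate sparse counts, then normalize them into probabilities in place.
static Map<Integer, Double> toDistribution(int[] wids, double[] vals) {
    Map<Integer, Double> stat = new HashMap<Integer, Double>();
    double total = 0;
    for (int i = 0; i < wids.length; i++) {
        stat.merge(wids[i], vals[i], Double::sum); // insert-or-add in one call
        total += vals[i];
    }
    final double sum = total;
    stat.replaceAll((wid, val) -> val / sum); // frequency -> probability
    return stat;
}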
Use of structures._ChildDoc in project IR_Base by Linda-sunshine.
From the class languageModelBaseLine, method rankChild4StnByLanguageModel:
protected HashMap<String, Double> rankChild4StnByLanguageModel(_Stn stnObj, _ParentDoc pDoc) {
    HashMap<String, Double> childLikelihoodMap = new HashMap<String, Double>();
    double smoothingMu = 1000;
    for (_ChildDoc cDoc : pDoc.m_childDocs) {
        int cDocLen = cDoc.getTotalDocLength();
        _SparseFeature[] fv = cDoc.getSparse();
        double stnLogLikelihood = 0;
        // Dirichlet-prior smoothing weight: longer documents trust their own
        // counts more, shorter ones fall back on the reference model.
        double alphaDoc = smoothingMu / (smoothingMu + cDocLen);
        _SparseFeature[] sv = stnObj.getFv();
        for (_SparseFeature svWord : sv) {
            int wid = svWord.getIndex();
            double stnVal = svWord.getValue();
            int featureIndex = Utils.indexOf(fv, wid);
            double docVal = (featureIndex != -1) ? fv[featureIndex].getValue() : 0;
            // Mix the document MLE with the reference (collection) probability.
            double smoothingProb = (1 - alphaDoc) * docVal / cDocLen;
            smoothingProb += alphaDoc * getReferenceProb(wid);
            stnLogLikelihood += stnVal * Math.log(smoothingProb);
        }
        childLikelihoodMap.put(cDoc.getName(), stnLogLikelihood);
    }
    return childLikelihoodMap;
}
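The two-term mixture above is Dirichlet-prior smoothing in disguise: with alpha = mu / (mu + |d|), the score (1 - alpha) * c(w,d)/|d| + alpha * p(w|C) simplifies to (c(w,d) + mu * p(w|C)) / (|d| + mu). A standalone sketch of the smoothed estimate, where refProb plays the role of getReferenceProb(wid) above:

// Dirichlet-smoothed unigram probability of a word in a document:
// p(w|d) = (c(w,d) + mu * p(w|C)) / (|d| + mu),
// algebraically identical to the alphaDoc mixture used in the method above.
static double dirichletSmoothedProb(double docCount, double docLen, double refProb, double mu) {
    return (docCount + mu * refProb) / (docLen + mu);
}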
Use of structures._ChildDoc in project IR_Base by Linda-sunshine.
From the class outputFile, method outputFiles:
public static void outputFiles(String filePrefix, _Corpus c) {
    try {
        String selectedSentencesinParentFile = filePrefix + "/selected_Stn.txt";
        String selectedCommentsFile = filePrefix + "/selected_Comments.txt";
        String sctmFormatParentFile = filePrefix + "/abagf.AT.txt";
        String sctmFormatChildFile = filePrefix + "/cbagf.AT.txt";
        String sctmWordFile = filePrefix + "/words.AT.txt";
        String stnLengthFile = filePrefix + "/selected_StnLength.txt";
        String shortStnFile = filePrefix + "/selected_ShortStn.txt";
        String longStnFile = filePrefix + "/selected_LongStn.txt";
        // Write the vocabulary, one feature per line.
        if (c.getFeatureSize() != 0) {
            PrintWriter wordPW = new PrintWriter(new File(sctmWordFile));
            for (int i = 0; i < c.getFeatureSize(); i++) {
                wordPW.println(c.getFeature(i));
            }
            wordPW.flush();
            wordPW.close();
        }
        PrintWriter stnLengthPW = new PrintWriter(new File(stnLengthFile));
        PrintWriter shortParentPW = new PrintWriter(new File(shortStnFile));
        PrintWriter longParentPW = new PrintWriter(new File(longStnFile));
        PrintWriter parentPW = new PrintWriter(new File(selectedSentencesinParentFile));
        PrintWriter childPW = new PrintWriter(new File(selectedCommentsFile));
        PrintWriter sctmParentPW = new PrintWriter(new File(sctmFormatParentFile));
        PrintWriter sctmChildPW = new PrintWriter(new File(sctmFormatChildFile));
        int totalParentNum = 0;
        TreeMap<Integer, _ParentDoc> parentMap = new TreeMap<Integer, _ParentDoc>();
        int totalStnNum = 0;
        ArrayList<_Doc> m_trainSet = c.getCollection();
        ArrayList<Integer> parentNameList = new ArrayList<Integer>();
        // Index parent documents by their numeric name.
        for (_Doc d : m_trainSet) {
            if (d instanceof _ParentDoc) {
                totalParentNum += 1;
                String parentName = d.getName();
                parentMap.put(Integer.parseInt(parentName), (_ParentDoc) d);
                parentNameList.add(Integer.parseInt(parentName));
            }
        }
        ArrayList<Double> parentDocLenList = new ArrayList<Double>();
        ArrayList<Double> childDocLenList = new ArrayList<Double>();
        double parentDocLenSum = 0;
        double childDocLenSum = 0;
        for (int parentID : parentMap.keySet()) {
            _ParentDoc parentObj = parentMap.get(parentID);
            double parentDocLen = parentObj.getTotalDocLength();
            parentDocLenSum += parentDocLen;
            parentDocLenList.add(parentDocLen);
            for (_ChildDoc cDoc : parentObj.m_childDocs) {
                double childDocLen = cDoc.getTotalDocLength();
                childDocLenList.add(childDocLen);
                childDocLenSum += childDocLen;
            }
            // Count the non-null (selected) sentences and record their lengths.
            _Stn[] sentenceArray = parentObj.getSentences();
            int selectedStn = 0;
            for (int i = 0; i < sentenceArray.length; i++) {
                _Stn stnObj = sentenceArray[i];
                if (stnObj == null)
                    continue;
                selectedStn += 1;
                stnLengthPW.println(stnObj.getLength());
            }
            totalStnNum += selectedStn;
            parentPW.print(parentID + "\t" + selectedStn + "\t");
            shortParentPW.print(parentID + "\t");
            longParentPW.print(parentID + "\t");
            // Split sentence indices into short (< 15 tokens) and long lists.
            for (int i = 0; i < sentenceArray.length; i++) {
                _Stn stnObj = sentenceArray[i];
                if (stnObj == null)
                    continue;
                if (stnObj.getLength() < 15)
                    shortParentPW.print((stnObj.getIndex() + 1) + "\t");
                else
                    longParentPW.print((stnObj.getIndex() + 1) + "\t");
                parentPW.print((stnObj.getIndex() + 1) + "\t");
            }
            parentPW.println();
            longParentPW.println();
            shortParentPW.println();
        }
        System.out.println("longest child\t" + Collections.max(childDocLenList));
        System.out.println("shortest child\t" + Collections.min(childDocLenList));
        System.out.println("parent doc len\t" + parentDocLenSum / parentDocLenList.size());
        System.out.println("child doc len\t" + childDocLenSum / childDocLenList.size());
        parentPW.flush();
        parentPW.close();
        stnLengthPW.flush();
        stnLengthPW.close();
        shortParentPW.flush();
        shortParentPW.close();
        longParentPW.flush();
        longParentPW.close();
        sctmParentPW.println(totalParentNum);
        sctmChildPW.println(totalParentNum);
        System.out.println("stnNum\t" + totalStnNum);
        for (int parentID : parentMap.keySet()) {
            _ParentDoc d = parentMap.get(parentID);
            _Stn[] sentenceArray = d.getSentences();
            int selectedStn = 0;
            for (int i = 0; i < sentenceArray.length; i++) {
                if (sentenceArray[i] != null)
                    selectedStn += 1;
            }
            sctmParentPW.println(selectedStn);
            // SCTM format: sentence length, then each word id repeated count times.
            for (int i = 0; i < sentenceArray.length; i++) {
                _Stn stnObj = sentenceArray[i];
                if (stnObj == null)
                    continue;
                _SparseFeature[] sv = stnObj.getFv();
                sctmParentPW.print((int) stnObj.getLength() + "\t");
                for (int j = 0; j < sv.length; j++) {
                    int index = sv[j].getIndex();
                    double value = sv[j].getValue();
                    for (int v = 0; v < value; v++)
                        sctmParentPW.print(index + "\t");
                }
                sctmParentPW.println();
            }
            ArrayList<_ChildDoc> childDocs = d.m_childDocs;
            sctmChildPW.println(childDocs.size());
            String parentName = d.getName();
            // Order children by the numeric suffix of "<parentName>_<childID>".
            TreeMap<Integer, _ChildDoc> childMap = new TreeMap<Integer, _ChildDoc>();
            for (_ChildDoc cDoc : childDocs) {
                String childName = cDoc.getName();
                int childID = Integer.parseInt(childName.replace(parentName + "_", ""));
                childMap.put(childID, cDoc);
            }
            childPW.print(parentName + "\t");
            for (int t : childMap.keySet()) {
                _ChildDoc cDoc = childMap.get(t);
                sctmChildPW.print((int) cDoc.getTotalDocLength() + "\t");
                childPW.print(cDoc.getName() + "\t");
                _SparseFeature[] fv = cDoc.getSparse();
                for (int j = 0; j < fv.length; j++) {
                    int index = fv[j].getIndex();
                    double value = fv[j].getValue();
                    for (int v = 0; v < value; v++) {
                        sctmChildPW.print(index + "\t");
                    }
                }
                sctmChildPW.println();
            }
            childPW.println();
        }
        sctmParentPW.flush();
        sctmParentPW.close();
        sctmChildPW.flush();
        sctmChildPW.close();
        childPW.flush();
        childPW.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
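Both SCTM writers expand a sparse count vector back into an explicit token list by printing each word id value-many times. A minimal sketch of that expansion on its own; this helper is hypothetical, not part of the outputFile class:

// Expand a sparse (index, count) vector into the flat token list the SCTM
// files expect, e.g. {(5, 2.0), (9, 1.0)} -> "5\t5\t9\t".
static String expandToTokens(_SparseFeature[] fvs) {
    StringBuilder sb = new StringBuilder();
    for (_SparseFeature fv : fvs)
        for (int v = 0; v < (int) fv.getValue(); v++)
            sb.append(fv.getIndex()).append('\t');
    return sb.toString();
}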
Use of structures._ChildDoc in project IR_Base by Linda-sunshine.
From the class ACCTM, method computeMu4Doc:
protected void computeMu4Doc(_ChildDoc d) {
    _ParentDoc tempParent = d.m_parentDoc;
    // Set the child's mu to the cosine similarity between the parent's
    // and the child's sparse feature vectors.
    double mu = Utils.cosine(tempParent.getSparse(), d.getSparse());
    d.setMu(mu);
}
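Utils.cosine measures how similar the child document is to its parent in term space. A hedged sketch of cosine similarity over sparse vectors, assuming index-sorted _SparseFeature arrays; the actual Utils implementation in IR_Base may differ:

// Cosine similarity of two sparse vectors whose features are sorted by index.
static double cosine(_SparseFeature[] a, _SparseFeature[] b) {
    double dot = 0, normA = 0, normB = 0;
    int i = 0, j = 0;
    // Merge-style walk over the two sorted index lists to compute the dot product.
    while (i < a.length && j < b.length) {
        if (a[i].getIndex() == b[j].getIndex())
            dot += a[i++].getValue() * b[j++].getValue();
        else if (a[i].getIndex() < b[j].getIndex())
            i++;
        else
            j++;
    }
    for (_SparseFeature fv : a) normA += fv.getValue() * fv.getValue();
    for (_SparseFeature fv : b) normB += fv.getValue() * fv.getValue();
    return (normA == 0 || normB == 0) ? 0 : dot / (Math.sqrt(normA) * Math.sqrt(normB));
}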