Use of structures._ParentDoc in project IR_Base by Linda-sunshine.
The class languageModelBaseLine, method rankChild4StnByLanguageModel.
protected HashMap<String, Double> rankChild4StnByLanguageModel(_Stn stnObj, _ParentDoc pDoc) {
    HashMap<String, Double> childLikelihoodMap = new HashMap<String, Double>();
    double smoothingMu = 1000;
    for (_ChildDoc cDoc : pDoc.m_childDocs) {
        int cDocLen = cDoc.getTotalDocLength();
        _SparseFeature[] fv = cDoc.getSparse();
        double stnLogLikelihood = 0;
        // Dirichlet-prior smoothing weight for this child document
        double alphaDoc = smoothingMu / (smoothingMu + cDocLen);
        _SparseFeature[] sv = stnObj.getFv();
        for (_SparseFeature svWord : sv) {
            double featureLikelihood = 0;
            int wid = svWord.getIndex();
            double stnVal = svWord.getValue();
            int featureIndex = Utils.indexOf(fv, wid);
            double docVal = 0;
            if (featureIndex != -1) {
                docVal = fv[featureIndex].getValue();
            }
            // interpolate the document term frequency with the reference (collection) probability
            double smoothingProb = (1 - alphaDoc) * docVal / cDocLen;
            smoothingProb += alphaDoc * getReferenceProb(wid);
            featureLikelihood = Math.log(smoothingProb);
            stnLogLikelihood += stnVal * featureLikelihood;
        }
        childLikelihoodMap.put(cDoc.getName(), stnLogLikelihood);
    }
    return childLikelihoodMap;
}
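Read as a formula, the loop above scores a sentence against each child comment with a Dirichlet-prior smoothed unigram language model. The restatement below is a sketch inferred from the code; the symbols s, c, tf, and p(w|ref) are introduced here only for illustration, and μ = 1000 is the smoothingMu constant above.

\log p(s \mid c) = \sum_{w \in s} \mathrm{tf}(w, s)\,\log\!\Big[(1-\alpha_c)\,\frac{\mathrm{tf}(w, c)}{|c|} + \alpha_c\, p(w \mid \mathrm{ref})\Big], \qquad \alpha_c = \frac{\mu}{\mu + |c|},

where |c| is the total length of the child document and p(w|ref) corresponds to getReferenceProb(wid).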
Use of structures._ParentDoc in project IR_Base by Linda-sunshine.
The class outputFile, method outputFiles.
public static void outputFiles(String filePrefix, _Corpus c) {
    try {
        String selectedSentencesinParentFile = filePrefix + "/selected_Stn.txt";
        String selectedCommentsFile = filePrefix + "/selected_Comments.txt";
        String sctmFormatParentFile = filePrefix + "/abagf.AT.txt";
        String sctmFormatChildFile = filePrefix + "/cbagf.AT.txt";
        String sctmWordFile = filePrefix + "/words.AT.txt";
        String stnLengthFile = filePrefix + "/selected_StnLength.txt";
        String shortStnFile = filePrefix + "/selected_ShortStn.txt";
        String longStnFile = filePrefix + "/selected_LongStn.txt";

        // dump the vocabulary, one word per line
        if (c.getFeatureSize() != 0) {
            PrintWriter wordPW = new PrintWriter(new File(sctmWordFile));
            for (int i = 0; i < c.getFeatureSize(); i++) {
                String wordName = c.getFeature(i);
                wordPW.println(wordName);
            }
            wordPW.flush();
            wordPW.close();
        }

        PrintWriter stnLengthPW = new PrintWriter(new File(stnLengthFile));
        PrintWriter shortParentPW = new PrintWriter(new File(shortStnFile));
        PrintWriter longParentPW = new PrintWriter(new File(longStnFile));
        PrintWriter parentPW = new PrintWriter(new File(selectedSentencesinParentFile));
        PrintWriter childPW = new PrintWriter(new File(selectedCommentsFile));
        PrintWriter sctmParentPW = new PrintWriter(new File(sctmFormatParentFile));
        PrintWriter sctmChildPW = new PrintWriter(new File(sctmFormatChildFile));

        int totalParentNum = 0;
        TreeMap<Integer, _ParentDoc> parentMap = new TreeMap<Integer, _ParentDoc>();
        int totalStnNum = 0;
        ArrayList<_Doc> m_trainSet = c.getCollection();
        ArrayList<Integer> parentNameList = new ArrayList<Integer>();

        // collect all parent documents, keyed by their numeric name
        for (_Doc d : m_trainSet) {
            if (d instanceof _ParentDoc) {
                totalParentNum += 1;
                String parentName = d.getName();
                parentMap.put(Integer.parseInt(parentName), (_ParentDoc) d);
                parentNameList.add(Integer.parseInt(parentName));
            }
        }

        ArrayList<Double> parentDocLenList = new ArrayList<Double>();
        ArrayList<Double> childDocLenList = new ArrayList<Double>();
        double parentDocLenSum = 0;
        double childDocLenSum = 0;

        for (int parentID : parentMap.keySet()) {
            _ParentDoc parentObj = parentMap.get(parentID);
            double parentDocLen = parentObj.getTotalDocLength();
            parentDocLenSum += parentDocLen;
            parentDocLenList.add(parentDocLen);

            for (_ChildDoc cDoc : parentObj.m_childDocs) {
                double childDocLen = cDoc.getTotalDocLength();
                childDocLenList.add(childDocLen);
                childDocLenSum += childDocLen;
            }

            _Stn[] sentenceArray = parentObj.getSentences();
            int selectedStn = 0;
            for (int i = 0; i < sentenceArray.length; i++) {
                _Stn stnObj = sentenceArray[i];
                if (stnObj == null)
                    continue;
                selectedStn += 1;
                stnLengthPW.println(stnObj.getLength());
            }
            totalStnNum += selectedStn;

            parentPW.print(parentID + "\t" + selectedStn + "\t");
            shortParentPW.print(parentID + "\t");
            longParentPW.print(parentID + "\t");
            // sentences shorter than 15 tokens go to the "short" file, the rest to the "long" file
            for (int i = 0; i < sentenceArray.length; i++) {
                _Stn stnObj = sentenceArray[i];
                if (stnObj == null)
                    continue;
                if (stnObj.getLength() < 15)
                    shortParentPW.print((stnObj.getIndex() + 1) + "\t");
                else
                    longParentPW.print((stnObj.getIndex() + 1) + "\t");
                parentPW.print((stnObj.getIndex() + 1) + "\t");
            }
            parentPW.println();
            longParentPW.println();
            shortParentPW.println();
        }

        System.out.println("longest child\t" + Collections.max(childDocLenList));
        System.out.println("shortest child\t" + Collections.min(childDocLenList));
        System.out.println("parent doc len\t" + parentDocLenSum / parentDocLenList.size());
        System.out.println("child doc len\t" + childDocLenSum / childDocLenList.size());

        parentPW.flush();
        parentPW.close();
        stnLengthPW.flush();
        stnLengthPW.close();
        shortParentPW.flush();
        shortParentPW.close();
        longParentPW.flush();
        longParentPW.close();

        sctmParentPW.println(totalParentNum);
        sctmChildPW.println(totalParentNum);
        System.out.println("stnNum\t" + totalStnNum);

        for (int parentID : parentMap.keySet()) {
            _ParentDoc d = parentMap.get(parentID);
            _Stn[] sentenceArray = d.getSentences();
            int selectedStn = 0;
            for (int i = 0; i < sentenceArray.length; i++) {
                _Stn stnObj = sentenceArray[i];
                if (stnObj == null)
                    continue;
                selectedStn += 1;
            }
            sctmParentPW.println(selectedStn);

            // write each sentence as its length followed by one word index per token occurrence
            for (int i = 0; i < sentenceArray.length; i++) {
                _Stn stnObj = sentenceArray[i];
                if (stnObj == null)
                    continue;
                _SparseFeature[] sv = stnObj.getFv();
                sctmParentPW.print((int) stnObj.getLength() + "\t");
                for (int j = 0; j < sv.length; j++) {
                    int index = sv[j].getIndex();
                    double value = sv[j].getValue();
                    for (int v = 0; v < value; v++)
                        sctmParentPW.print(index + "\t");
                }
                sctmParentPW.println();
            }

            ArrayList<_ChildDoc> childDocs = d.m_childDocs;
            sctmChildPW.println(childDocs.size());
            String parentName = d.getName();

            // order the child comments by their numeric suffix ("<parentName>_<childID>")
            TreeMap<Integer, _ChildDoc> childMap = new TreeMap<Integer, _ChildDoc>();
            for (_ChildDoc cDoc : childDocs) {
                String childName = cDoc.getName();
                int childID = Integer.parseInt(childName.replace(parentName + "_", ""));
                childMap.put(childID, cDoc);
            }

            childPW.print(parentName + "\t");
            for (int t : childMap.keySet()) {
                _ChildDoc cDoc = childMap.get(t);
                sctmChildPW.print((int) cDoc.getTotalDocLength() + "\t");
                childPW.print(cDoc.getName() + "\t");
                _SparseFeature[] fv = cDoc.getSparse();
                for (int j = 0; j < fv.length; j++) {
                    int index = fv[j].getIndex();
                    double value = fv[j].getValue();
                    for (int v = 0; v < value; v++) {
                        sctmChildPW.print(index + "\t");
                    }
                }
                sctmChildPW.println();
            }
            childPW.println();
        }

        sctmParentPW.flush();
        sctmParentPW.close();
        sctmChildPW.flush();
        sctmChildPW.close();
        childPW.flush();
        childPW.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
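The nested loops above expand each sparse feature vector into the SCTM bag-of-words line format: a length, then one word index per token occurrence, tab-separated. Below is a minimal, self-contained sketch of that line format under that reading of the code; the class SctmLineSketch, the helper toSctmLine, and the Map-based representation are illustrative and not part of IR_Base.

import java.util.Map;
import java.util.TreeMap;

public class SctmLineSketch {
    // Build "<#tokens>\t<idx>\t<idx>\t..." with each word index repeated once per occurrence.
    static String toSctmLine(Map<Integer, Integer> wordCounts) {
        int total = 0;
        for (int count : wordCounts.values())
            total += count;
        StringBuilder sb = new StringBuilder();
        sb.append(total);
        for (Map.Entry<Integer, Integer> e : wordCounts.entrySet())
            for (int v = 0; v < e.getValue(); v++)
                sb.append('\t').append(e.getKey());
        return sb.toString();
    }

    public static void main(String[] args) {
        Map<Integer, Integer> counts = new TreeMap<Integer, Integer>();
        counts.put(3, 2);   // word index 3 occurs twice
        counts.put(17, 1);  // word index 17 occurs once
        // prints the token count 3, then index 3 twice and index 17 once, tab-separated
        System.out.println(toSctmLine(counts));
    }
}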
Use of structures._ParentDoc in project IR_Base by Linda-sunshine.
The class ACCTM, method sampleInParentDoc.
protected void sampleInParentDoc(_Doc d) {
    _ParentDoc pDoc = (_ParentDoc) d;
    int wid, tid;
    double normalizedProb;

    for (_Word w : pDoc.getWords()) {
        wid = w.getIndex();
        tid = w.getTopic();

        // remove the current token from the sufficient statistics
        pDoc.m_sstat[tid]--;
        if (m_collectCorpusStats) {
            word_topic_sstat[tid][wid]--;
            m_sstat[tid]--;
        }

        // compute the unnormalized sampling distribution over topics
        normalizedProb = 0;
        for (tid = 0; tid < number_of_topics; tid++) {
            double pWordTopic = parentWordByTopicProb(tid, wid);
            double pTopicPDoc = parentTopicInDocProb(tid, pDoc);
            double pTopicCDoc = parentChildInfluenceProb(tid, pDoc);
            m_topicProbCache[tid] = pWordTopic * pTopicPDoc * pTopicCDoc;
            normalizedProb += m_topicProbCache[tid];
        }

        // draw a topic by inverting the cumulative distribution
        normalizedProb *= m_rand.nextDouble();
        for (tid = 0; tid < number_of_topics; tid++) {
            normalizedProb -= m_topicProbCache[tid];
            if (normalizedProb <= 0)
                break;
        }
        if (tid == number_of_topics)
            tid--;

        // add the token back with its newly sampled topic
        w.setTopic(tid);
        pDoc.m_sstat[tid]++;
        if (m_collectCorpusStats) {
            word_topic_sstat[tid][wid]++;
            m_sstat[tid]++;
        }
    }
}
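The topic for each word above is drawn by roulette-wheel sampling over the unnormalized probabilities cached in m_topicProbCache: scale a uniform draw by the total mass, then walk the cumulative sum until it is exhausted. A minimal standalone sketch of that step; the class and method names are illustrative and not part of ACCTM.

import java.util.Random;

public class RouletteSketch {
    // Draw an index i with probability proportional to weights[i]; weights need not be normalized.
    static int sample(double[] weights, Random rand) {
        double sum = 0;
        for (double w : weights)
            sum += w;
        double u = rand.nextDouble() * sum;
        for (int i = 0; i < weights.length; i++) {
            u -= weights[i];
            if (u <= 0)
                return i;
        }
        return weights.length - 1; // guard against floating-point round-off
    }

    public static void main(String[] args) {
        double[] weights = {0.2, 0.5, 0.3};
        System.out.println(sample(weights, new Random(42)));
    }
}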
Use of structures._ParentDoc in project IR_Base by Linda-sunshine.
The class ACCTM, method computeMu4Doc.
protected void computeMu4Doc(_ChildDoc d) {
    _ParentDoc tempParent = d.m_parentDoc;
    // the child-specific mu is the cosine similarity between parent and child bag-of-words vectors
    double mu = Utils.cosine(tempParent.getSparse(), d.getSparse());
    d.setMu(mu);
}
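computeMu4Doc sets each comment's mu to the cosine similarity between the parent's and the child's sparse vectors. A minimal sketch of cosine similarity over sparse vectors represented as index-to-value maps; the representation and names are illustrative and not IR_Base's Utils.cosine.

import java.util.HashMap;
import java.util.Map;

public class CosineSketch {
    // cos(a, b) = (a . b) / (||a|| * ||b||), iterating only over stored entries
    static double cosine(Map<Integer, Double> a, Map<Integer, Double> b) {
        double dot = 0, normA = 0, normB = 0;
        for (Map.Entry<Integer, Double> e : a.entrySet()) {
            normA += e.getValue() * e.getValue();
            Double bv = b.get(e.getKey());
            if (bv != null)
                dot += e.getValue() * bv;
        }
        for (double v : b.values())
            normB += v * v;
        if (normA == 0 || normB == 0)
            return 0;
        return dot / (Math.sqrt(normA) * Math.sqrt(normB));
    }

    public static void main(String[] args) {
        Map<Integer, Double> parent = new HashMap<Integer, Double>();
        Map<Integer, Double> child = new HashMap<Integer, Double>();
        parent.put(1, 2.0); parent.put(4, 1.0);
        child.put(1, 1.0);  child.put(7, 3.0);
        System.out.println(cosine(parent, child)); // ~0.283
    }
}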
Use of structures._ParentDoc in project IR_Base by Linda-sunshine.
The class ACCTM, method parentChildInfluenceProb.
protected double parentChildInfluenceProb(int tid, _ParentDoc pDoc) {
    double term = 1.0;
    // topic 0 serves as the reference topic, so its ratio is 1
    if (tid == 0)
        return term;

    double topicSum = Utils.sumOfArray(pDoc.m_sstat);
    for (_ChildDoc cDoc : pDoc.m_childDocs) {
        double muDp = cDoc.getMu() / topicSum;
        term *= gammaFuncRatio((int) cDoc.m_sstat[tid], muDp, d_alpha + pDoc.m_sstat[tid] * muDp)
                / gammaFuncRatio((int) cDoc.m_sstat[0], muDp, d_alpha + pDoc.m_sstat[0] * muDp);
    }
    return term;
}
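parentChildInfluenceProb multiplies ratios of gamma-function terms, which can over- or underflow if each gamma value is computed directly. As one hedged illustration (not necessarily how the project's gammaFuncRatio is implemented, whose signature differs), a ratio Gamma(a + n) / Gamma(a) with a non-negative integer n can be accumulated stably in log space:

public class GammaRatioSketch {
    // Gamma(a + n) / Gamma(a) = prod_{i=0}^{n-1} (a + i) for integer n >= 0,
    // accumulated as a sum of logs to avoid overflow.
    static double gammaRatio(double a, int n) {
        double logRatio = 0;
        for (int i = 0; i < n; i++)
            logRatio += Math.log(a + i);
        return Math.exp(logRatio);
    }

    public static void main(String[] args) {
        // Gamma(2.5 + 3) / Gamma(2.5) = 2.5 * 3.5 * 4.5 = 39.375
        System.out.println(gammaRatio(2.5, 3));
    }
}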