Use of structures._ChildDoc4BaseWithPhi_Hard in project IR_Base by Linda-sunshine.
In class ParentChildAnalyzer, method loadChildDoc:
public void loadChildDoc(String fileName) {
    if (fileName == null || fileName.isEmpty())
        return;

    JSONObject json = LoadJSON(fileName);
    String content = Utils.getJSONValue(json, "content");
    String name = Utils.getJSONValue(json, "name");
    String parent = Utils.getJSONValue(json, "parent");
    String title = Utils.getJSONValue(json, "title"); // parsed but not used below

    // Alternative child-document representations (disabled):
    // _ChildDoc4BaseWithPhi d = new _ChildDoc4BaseWithPhi(m_corpus.getSize(), name, "", content, 0);
    // _ChildDoc4BaseWithPhi_Hard d = new _ChildDoc4BaseWithPhi_Hard(m_corpus.getSize(), name, "", content, 0);
    // _ChildDoc4ChildPhi d = new _ChildDoc4ChildPhi(m_corpus.getSize(), name, "", content, 0);
    // _ChildDoc4TwoPhi d = new _ChildDoc4TwoPhi(m_corpus.getSize(), name, "", content, 0);
    // _ChildDoc4ThreePhi d = new _ChildDoc4ThreePhi(m_corpus.getSize(), name, "", content, 0);
    // _ChildDoc4OneTopicProportion d = new _ChildDoc4OneTopicProportion(m_corpus.getSize(), name, "", content, 0);
    _ChildDoc d = new _ChildDoc(m_corpus.getSize(), name, "", content, 0);

    // a child is attached only if its parent document was loaded first
    if (parentHashMap.containsKey(parent)) {
        if (AnalyzeDoc(d)) { // this is a valid child document
            _ParentDoc pDoc = parentHashMap.get(parent);
            d.setParentDoc(pDoc);
            pDoc.addChildDoc(d);
        } else {
            // System.err.format("filtering comments %s!\n", parent);
        }
    } else {
        // System.err.format("[Warning]Missing parent document %s!\n", parent);
    }
}
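Because loadChildDoc attaches a child only when its parent is already in parentHashMap, parents must be registered before children. A minimal driver sketch; loadParentDoc and the directory layout are assumptions mirroring loadChildDoc, not confirmed by this snippet:

import java.io.File;

public class CorpusLoaderDemo {
    // Hypothetical helper: register every parent first, then every child,
    // so no child is silently dropped for a missing parent.
    public static void load(ParentChildAnalyzer analyzer, String parentDir, String childDir) {
        File[] parents = new File(parentDir).listFiles();
        File[] children = new File(childDir).listFiles();
        if (parents == null || children == null)
            return; // directory missing or unreadable
        for (File f : parents)
            analyzer.loadParentDoc(f.getAbsolutePath()); // assumed counterpart of loadChildDoc
        for (File f : children)
            analyzer.loadChildDoc(f.getAbsolutePath());
    }
}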
Use of structures._ChildDoc4BaseWithPhi_Hard in project IR_Base by Linda-sunshine.
In class ACCTM_CHard, method initialize_probability:
@Override
protected void initialize_probability(Collection<_Doc> collection) {
    createSpace();

    // seed the topic-word sufficient statistics with the Dirichlet prior d_beta
    for (int i = 0; i < number_of_topics; i++)
        Arrays.fill(word_topic_sstat[i], d_beta);
    Arrays.fill(m_sstat, d_beta * vocabulary_size);

    for (_Doc d : collection) {
        if (d instanceof _ParentDoc) {
            d.setTopics4Gibbs(number_of_topics, 0);
            for (_Stn stnObj : d.getSentences())
                stnObj.setTopic(number_of_topics);
        } else if (d instanceof _ChildDoc4BaseWithPhi) {
            // the cast assumes every phi-based child in this model is the _Hard variant
            ((_ChildDoc4BaseWithPhi_Hard) d).createXSpace(number_of_topics, m_gamma.length, vocabulary_size, d_beta);
            ((_ChildDoc4BaseWithPhi_Hard) d).setTopics4Gibbs(number_of_topics, 0);
            computeMu4Doc((_ChildDoc) d);
        }

        if (d instanceof _ParentDoc) {
            for (_Word w : d.getWords()) {
                word_topic_sstat[w.getTopic()][w.getIndex()]++;
                m_sstat[w.getTopic()]++;
            }
        } else if (d instanceof _ChildDoc4BaseWithPhi) {
            for (_Word w : d.getWords()) {
                int xid = w.getX();
                int tid = w.getTopic();
                int wid = w.getIndex();
                // only words assigned to the shared topics (x == 0) update the
                // global statistics; x == 1 words are modeled by the child's local phi
                if (xid == 0) {
                    word_topic_sstat[tid][wid]++;
                    m_sstat[tid]++;
                }
            }
        }
    }

    imposePrior();
    m_statisticsNormalized = false;
}
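Pre-filling word_topic_sstat with d_beta and m_sstat with d_beta * vocabulary_size bakes the Dirichlet prior directly into the counts, so the topic-word estimate recovered later reduces to the standard smoothed form (standard collapsed-Gibbs algebra, not quoted from this file):

\hat{\phi}_{k,w} = \frac{n_{k,w} + \beta}{n_k + V\beta}

where n_{k,w} counts occurrences of word w assigned to topic k among shared (x = 0) words, n_k = \sum_w n_{k,w}, V is vocabulary_size, and \beta is d_beta.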
Use of structures._ChildDoc4BaseWithPhi_Hard in project IR_Base by Linda-sunshine.
In class ACCTM_CHard, method initTest:
@Override
protected void initTest(ArrayList<_Doc> sampleTestSet, _Doc d) {
    _ParentDoc pDoc = (_ParentDoc) d;
    for (_Stn stnObj : pDoc.getSentences())
        stnObj.setTopicsVct(number_of_topics);

    // the parent's held-out length is computed, then deliberately reset to 0:
    // no parent words are held out for perplexity
    int testLength = (int) (m_testWord4PerplexityProportion * d.getTotalDocLength());
    testLength = 0;
    pDoc.setTopics4GibbsTest(number_of_topics, 0, testLength);
    sampleTestSet.add(pDoc);
    pDoc.createSparseVct4Infer();

    for (_ChildDoc cDoc : pDoc.m_childDocs) {
        // hold out a proportion of each child document's words for evaluation
        testLength = (int) (m_testWord4PerplexityProportion * cDoc.getTotalDocLength());
        ((_ChildDoc4BaseWithPhi_Hard) cDoc).createXSpace(number_of_topics, m_gamma.length, vocabulary_size, d_beta);
        ((_ChildDoc4BaseWithPhi_Hard) cDoc).setTopics4GibbsTest(number_of_topics, 0, testLength);
        sampleTestSet.add(cDoc);
        cDoc.createSparseVct4Infer();
        computeTestMu4Doc(cDoc);
    }
}
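To make the held-out split concrete, a toy calculation (the proportion 0.2 is illustrative, not taken from the source):

public class HeldOutSplitDemo {
    public static void main(String[] args) {
        double proportion = 0.2;  // illustrative m_testWord4PerplexityProportion
        int childDocLength = 150; // toy child-document length
        int testLength = (int) (proportion * childDocLength);
        System.out.println(testLength); // 30 child words held out for perplexity
        // The parent's testLength is reset to 0 above, so every parent word
        // is used for inference and none is scored.
    }
}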
Use of structures._ChildDoc4BaseWithPhi_Hard in project IR_Base by Linda-sunshine.
In class ACCTM_CHard, method cal_logLikelihood_partial4Child:
@Override
protected double cal_logLikelihood_partial4Child(_Doc d) {
    _ChildDoc4BaseWithPhi_Hard cDoc = (_ChildDoc4BaseWithPhi_Hard) d;
    double docLogLikelihood = 0.0;
    double gammaLen = Utils.sumOfArray(m_gamma);
    double cDocXSum = Utils.sumOfArray(cDoc.m_xSstat);

    for (_Word w : cDoc.getTestWords()) {
        int wid = w.getIndex();
        double wordLogLikelihood = 0;

        if (Utils.indexOf(cDoc.m_parentDoc.getSparse(), wid) != -1) {
            // the word also appears in the parent: it must come from the
            // shared topics (hard constraint, x forced to 0)
            for (int k = 0; k < number_of_topics; k++) {
                double wordPerTopicLikelihood = childWordByTopicProb(k, wid) * childTopicInDocProb(k, cDoc);
                wordLogLikelihood += wordPerTopicLikelihood;
            }
        } else {
            // otherwise mix the shared topics (x == 0) with the child's
            // local word distribution (x == 1)
            for (int k = 0; k < number_of_topics; k++) {
                double wordPerTopicLikelihood = childWordByTopicProb(k, wid) * childTopicInDocProb(k, cDoc)
                        * childXInDocProb(0, cDoc) / (cDocXSum + gammaLen);
                wordLogLikelihood += wordPerTopicLikelihood;
            }
            double wordPerTopicLikelihood = childLocalWordByTopicProb(wid, cDoc)
                    * childXInDocProb(1, cDoc) / (cDocXSum + gammaLen);
            wordLogLikelihood += wordPerTopicLikelihood;
        }

        // guard against log(0) for numerically tiny likelihoods
        if (Math.abs(wordLogLikelihood) < 1e-10) {
            System.out.println("wordLoglikelihood\t" + wordLogLikelihood);
            wordLogLikelihood += 1e-10;
        }
        wordLogLikelihood = Math.log(wordLogLikelihood);
        docLogLikelihood += wordLogLikelihood;
    }
    return docLogLikelihood;
}
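As I read the code, the per-word marginal being accumulated is the mixture below, where \pi_x abbreviates the smoothed switch probability childXInDocProb(x, cDoc) / (cDocXSum + gammaLen):

p(w \mid d_c) =
\begin{cases}
\sum_{k} p(w \mid k)\, p(k \mid d_c) & \text{if } w \text{ occurs in the parent (hard: } x = 0), \\
\pi_0 \sum_{k} p(w \mid k)\, p(k \mid d_c) + \pi_1\, p_{\mathrm{local}}(w \mid d_c) & \text{otherwise,}
\end{cases}

with p(w \mid k) given by childWordByTopicProb, p(k \mid d_c) by childTopicInDocProb, and p_{\mathrm{local}} by childLocalWordByTopicProb. The deterministic switch in the first branch is presumably the "hard" constraint the class name refers to.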