Use of structures._SparseFeature in project IR_Base by Linda-sunshine.
The class HTMM, method accPhiStat.
// accumulate word-topic sufficient statistics from the topic-assignment probabilities
void accPhiStat(_Doc d) {
    double prob;
    for (int t = 0; t < d.getSenetenceSize(); t++) {
        _Stn s = d.getSentence(t);
        for (_SparseFeature f : s.getFv()) {
            int wid = f.getIndex();
            // word frequency in this sentence
            double v = f.getValue();
            for (int i = 0; i < this.number_of_topics; i++) {
                // sum the probability mass assigned to topic i across the 'constant' blocks of p_dwzpsi
                prob = this.p_dwzpsi[t][i];
                for (int j = 1; j < this.constant; j++)
                    prob += this.p_dwzpsi[t][i + j * this.number_of_topics];
                this.word_topic_sstat[i][wid] += v * prob;
            }
        }
    }
}
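The inner loop marginalizes the assignment probabilities: within sentence t, the total mass on topic i is the sum of p_dwzpsi[t][i + j * number_of_topics] over the constant blocks. A minimal standalone sketch of that marginalization with hypothetical toy numbers (the array layout is assumed from the loop above):

public class TopicMarginalSketch {
    public static void main(String[] args) {
        int numTopics = 2, constant = 2; // hypothetical sizes
        // p[i + j * numTopics] holds the probability of topic i in block j
        double[] p = { 0.10, 0.30, 0.40, 0.20 };
        for (int i = 0; i < numTopics; i++) {
            double prob = p[i];
            for (int j = 1; j < constant; j++)
                prob += p[i + j * numTopics];
            System.out.printf("topic %d mass = %.2f%n", i, prob); // prints 0.50 for both topics
        }
    }
}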
Use of structures._SparseFeature in project IR_Base by Linda-sunshine.
The class HTMM, method docSummary.
public void docSummary(String[] productList) {
    for (String prodID : productList) {
        for (int i = 0; i < this.number_of_topics; i++) {
            // keep the top three sentences per topic per product
            MyPriorityQueue<_RankItem> stnQueue = new MyPriorityQueue<_RankItem>(3);
            for (_Doc d : m_trainSet) {
                if (d.getItemID().equalsIgnoreCase(prodID)) {
                    for (int j = 0; j < d.getSenetenceSize(); j++) {
                        _Stn sentence = d.getSentence(j);
                        // score: document-level topic weight plus word-level topic affinity,
                        // averaged over the sentence length
                        double prob = d.m_topics[i];
                        for (_SparseFeature f : sentence.getFv())
                            prob += f.getValue() * topic_term_probabilty[i][f.getIndex()];
                        prob /= sentence.getLength();
                        stnQueue.add(new _RankItem(sentence.getRawSentence(), prob));
                    }
                }
            }
            System.out.format("Product: %s, Topic: %d\n", prodID, i);
            summaryWriter.format("Product: %s, Topic: %d\n", prodID, i);
            for (_RankItem it : stnQueue) {
                System.out.format("%s\t%.3f\n", it.m_name, it.m_value);
                summaryWriter.format("%s\t%.3f\n", it.m_name, it.m_value);
            }
        }
    }
    summaryWriter.flush();
    summaryWriter.close();
}
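MyPriorityQueue<_RankItem> above is the project's bounded queue that keeps only the highest-scoring items; the constructor argument 3 is its capacity. A minimal stand-in sketch using java.util.PriorityQueue (the class and its methods here are hypothetical, not the project's API):

import java.util.Comparator;
import java.util.PriorityQueue;

// Minimal bounded top-k container: keeps the k highest-scoring sentences.
class TopKSentences {
    private final int k;
    // min-heap ordered by score, so the lowest-scoring entry is evicted first
    private final PriorityQueue<Object[]> heap =
            new PriorityQueue<>(Comparator.comparingDouble((Object[] e) -> (double) e[1]));

    TopKSentences(int k) {
        this.k = k;
    }

    void add(String sentence, double score) {
        heap.offer(new Object[] { sentence, score });
        if (heap.size() > k)
            heap.poll(); // drop the lowest-scoring entry
    }

    void print() {
        for (Object[] e : heap)
            System.out.printf("%s\t%.3f%n", e[0], (double) e[1]);
    }
}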
Use of structures._SparseFeature in project IR_Base by Linda-sunshine.
The class outputFile, method outputFiles.
public static void outputFiles(String filePrefix, _Corpus c) {
    try {
        String selectedSentencesinParentFile = filePrefix + "/selected_Stn.txt";
        String selectedCommentsFile = filePrefix + "/selected_Comments.txt";
        String sctmFormatParentFile = filePrefix + "/abagf.AT.txt";
        String sctmFormatChildFile = filePrefix + "/cbagf.AT.txt";
        String sctmWordFile = filePrefix + "/words.AT.txt";
        String stnLengthFile = filePrefix + "/selected_StnLength.txt";
        String shortStnFile = filePrefix + "/selected_ShortStn.txt";
        String longStnFile = filePrefix + "/selected_LongStn.txt";
        // write out the vocabulary, one word per line
        if (c.getFeatureSize() != 0) {
            PrintWriter wordPW = new PrintWriter(new File(sctmWordFile));
            for (int i = 0; i < c.getFeatureSize(); i++) {
                String wordName = c.getFeature(i);
                wordPW.println(wordName);
            }
            wordPW.flush();
            wordPW.close();
        }
        PrintWriter stnLengthPW = new PrintWriter(new File(stnLengthFile));
        PrintWriter shortParentPW = new PrintWriter(new File(shortStnFile));
        PrintWriter longParentPW = new PrintWriter(new File(longStnFile));
        PrintWriter parentPW = new PrintWriter(new File(selectedSentencesinParentFile));
        PrintWriter childPW = new PrintWriter(new File(selectedCommentsFile));
        PrintWriter sctmParentPW = new PrintWriter(new File(sctmFormatParentFile));
        PrintWriter sctmChildPW = new PrintWriter(new File(sctmFormatChildFile));
        int totoalParentNum = 0;
        TreeMap<Integer, _ParentDoc> parentMap = new TreeMap<Integer, _ParentDoc>();
        int totalStnNum = 0;
        ArrayList<_Doc> m_trainSet = c.getCollection();
        ArrayList<Integer> parentNameList = new ArrayList<Integer>();
        // collect parent documents, keyed by their numeric names
        for (_Doc d : m_trainSet) {
            if (d instanceof _ParentDoc) {
                totoalParentNum += 1;
                String parentName = d.getName();
                parentMap.put(Integer.parseInt(parentName), (_ParentDoc) d);
                parentNameList.add(Integer.parseInt(parentName));
            }
        }
        ArrayList<Double> parentDocLenList = new ArrayList<Double>();
        ArrayList<Double> childDocLenList = new ArrayList<Double>();
        double parentDocLenSum = 0;
        double childDocLenSum = 0;
        for (int parentID : parentMap.keySet()) {
            _ParentDoc parentObj = parentMap.get(parentID);
            double parentDocLen = parentObj.getTotalDocLength();
            parentDocLenSum += parentDocLen;
            parentDocLenList.add(parentDocLen);
            for (_ChildDoc cDoc : parentObj.m_childDocs) {
                double childDocLen = cDoc.getTotalDocLength();
                childDocLenList.add(childDocLen);
                childDocLenSum += childDocLen;
            }
            // count and record the lengths of the selected (non-null) sentences
            _Stn[] sentenceArray = parentObj.getSentences();
            int selectedStn = 0;
            for (int i = 0; i < sentenceArray.length; i++) {
                _Stn stnObj = sentenceArray[i];
                if (stnObj == null)
                    continue;
                selectedStn += 1;
                stnLengthPW.println(stnObj.getLength());
            }
            totalStnNum += selectedStn;
            parentPW.print(parentID + "\t" + selectedStn + "\t");
            shortParentPW.print(parentID + "\t");
            longParentPW.print(parentID + "\t");
            // sentence indices with length < 15 go to the short-sentence file, the rest to the long one
            for (int i = 0; i < sentenceArray.length; i++) {
                _Stn stnObj = sentenceArray[i];
                if (stnObj == null)
                    continue;
                if (stnObj.getLength() < 15)
                    shortParentPW.print((stnObj.getIndex() + 1) + "\t");
                else
                    longParentPW.print((stnObj.getIndex() + 1) + "\t");
                parentPW.print((stnObj.getIndex() + 1) + "\t");
            }
            parentPW.println();
            longParentPW.println();
            shortParentPW.println();
        }
        System.out.println("longest child\t" + Collections.max(childDocLenList));
        System.out.println("shortest child\t" + Collections.min(childDocLenList));
        System.out.println("parent doc len\t" + parentDocLenSum / parentDocLenList.size());
        System.out.println("child doc len\t" + childDocLenSum / childDocLenList.size());
        parentPW.flush();
        parentPW.close();
        stnLengthPW.flush();
        stnLengthPW.close();
        shortParentPW.flush();
        shortParentPW.close();
        longParentPW.flush();
        longParentPW.close();
        sctmParentPW.println(totoalParentNum);
        sctmChildPW.println(totoalParentNum);
        System.out.println("stnNum" + totalStnNum);
        // second pass: emit the SCTM-format files (word index repeated once per occurrence)
        for (int parentID : parentMap.keySet()) {
            _ParentDoc d = parentMap.get(parentID);
            _Stn[] sentenceArray = d.getSentences();
            int selectedStn = 0;
            for (int i = 0; i < sentenceArray.length; i++) {
                _Stn stnObj = sentenceArray[i];
                if (stnObj == null)
                    continue;
                selectedStn += 1;
            }
            sctmParentPW.println(selectedStn);
            for (int i = 0; i < sentenceArray.length; i++) {
                _Stn stnObj = sentenceArray[i];
                if (stnObj == null)
                    continue;
                _SparseFeature[] sv = stnObj.getFv();
                // each sentence line: its length, then every word index repeated by its count
                sctmParentPW.print((int) stnObj.getLength() + "\t");
                for (int j = 0; j < sv.length; j++) {
                    int index = sv[j].getIndex();
                    double value = sv[j].getValue();
                    for (int v = 0; v < value; v++)
                        sctmParentPW.print(index + "\t");
                }
                sctmParentPW.println();
            }
            // group child documents (comments) under their parent by numeric suffix
            ArrayList<_ChildDoc> childDocs = d.m_childDocs;
            sctmChildPW.println(childDocs.size());
            String parentName = d.getName();
            TreeMap<Integer, _ChildDoc> childMap = new TreeMap<Integer, _ChildDoc>();
            for (_ChildDoc cDoc : childDocs) {
                String childName = cDoc.getName();
                int childID = Integer.parseInt(childName.replace(parentName + "_", ""));
                childMap.put(childID, cDoc);
            }
            childPW.print(parentName + "\t");
            for (int t : childMap.keySet()) {
                _ChildDoc cDoc = childMap.get(t);
                sctmChildPW.print((int) cDoc.getTotalDocLength() + "\t");
                childPW.print(cDoc.getName() + "\t");
                _SparseFeature[] fv = cDoc.getSparse();
                for (int j = 0; j < fv.length; j++) {
                    int index = fv[j].getIndex();
                    double value = fv[j].getValue();
                    for (int v = 0; v < value; v++)
                        sctmChildPW.print(index + "\t");
                }
                sctmChildPW.println();
            }
            childPW.println();
        }
        sctmParentPW.flush();
        sctmParentPW.close();
        sctmChildPW.flush();
        sctmChildPW.close();
        childPW.flush();
        childPW.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
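The SCTM files written above encode each sentence or comment as a flat list of word indices, with every index repeated once per occurrence. A minimal standalone sketch of that expansion (the int[][] {index, count} layout is a hypothetical stand-in for _SparseFeature):

// Sketch of the SCTM line format: every word index is repeated once per occurrence.
class SctmLineSketch {
    static String expand(int[][] sparse) {
        StringBuilder sb = new StringBuilder();
        for (int[] entry : sparse) {
            int index = entry[0], count = entry[1];
            for (int v = 0; v < count; v++)
                sb.append(index).append('\t');
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        // word 4 appears twice, word 9 once -> "4\t4\t9\t"
        System.out.println(expand(new int[][] { { 4, 2 }, { 9, 1 } }));
    }
}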
Use of structures._SparseFeature in project IR_Base by Linda-sunshine.
The class Utils, method sumOfFeaturesL2.
// L2 norm of a sparse vector: sqrt(sum of fsValue * fsValue);
// dividing each fsValue by this norm yields the L2-normalized vector
public static double sumOfFeaturesL2(_SparseFeature[] fs) {
    if (fs == null)
        return 0;
    double sum = 0;
    for (_SparseFeature feature : fs) {
        double value = feature.getValue();
        sum += value * value;
    }
    return Math.sqrt(sum);
}
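As the comment notes, the returned value is the denominator for L2 normalization. A minimal sketch of that normalization on a plain double[] of feature values (the zero-norm guard is an added assumption, not taken from the project):

// Minimal sketch: L2-normalize an array of feature values
// using the norm computed as in sumOfFeaturesL2.
static double[] l2Normalize(double[] values) {
    double sum = 0;
    for (double v : values)
        sum += v * v;
    double norm = Math.sqrt(sum);
    if (norm == 0)
        return values.clone(); // avoid division by zero (added assumption)
    double[] out = new double[values.length];
    for (int i = 0; i < values.length; i++)
        out[i] = values[i] / norm;
    return out;
}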
Use of structures._SparseFeature in project IR_Base by Linda-sunshine.
The class Utils, method RBF.
public static double RBF(_SparseFeature[] spVct1, _SparseFeature[] spVct2, double delta) {
    if (spVct1.length == 0 || spVct2.length == 0)
        return 0;
    else {
        double similarity = 0;
        int p1 = 0, p2 = 0;
        // two-pointer merge over the index-sorted sparse vectors;
        // only indices present in both vectors contribute to the squared distance
        while (p1 < spVct1.length && p2 < spVct2.length) {
            _SparseFeature t1 = spVct1[p1];
            _SparseFeature t2 = spVct2[p2];
            if (t1.getIndex() == t2.getIndex()) {
                similarity += (t1.getValue() - t2.getValue()) * (t1.getValue() - t2.getValue());
                p1++;
                p2++;
            } else if (t1.getIndex() > t2.getIndex())
                p2++;
            else
                p1++;
        }
        return Math.exp(-similarity / delta);
    }
}
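A hypothetical worked example: for spVct1 = {(1, 2.0), (3, 1.0)}, spVct2 = {(1, 1.0), (2, 5.0)} and delta = 2.0, only the shared index 1 contributes (2.0 - 1.0)^2 = 1.0; indices 2 and 3 are skipped as the pointers advance, so the method returns exp(-1.0 / 2.0) ≈ 0.607. Note that features appearing in only one of the two vectors do not add to the squared distance.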