Use of structures._PerformanceStat in project IR_Base by Linda-sunshine.
Class asyncCoRegLR, method train().
// online training over each individual user
@Override
public double train() {
    double gNorm, gNormOld = Double.MAX_VALUE;
    int updateCount = 0;
    int predL, trueL;
    _Review doc;
    _PerformanceStat perfStat;
    _CoRegLRAdaptStruct user;

    initLBFGS();
    init();
    for (int t = 0; t < m_userOrder.length; t++) {
        user = (_CoRegLRAdaptStruct) m_userList.get(m_userOrder[t]);
        if (user.hasNextAdaptationIns()) {
            // test the latest model on the newest instance before adapting
            if (m_testmode != TestMode.TM_batch && (doc = user.getLatestTestIns()) != null) {
                perfStat = user.getPerfStat();
                predL = predict(doc, user);
                trueL = doc.getYLabel();
                perfStat.addOnePredResult(predL, trueL);
            }
            // in batch mode we do not accumulate performance during adaptation

            // prepare to adapt: reset the shared gradient buffer
            Arrays.fill(m_g, 0);
            calculateGradients(user);
            gNorm = gradientTest(user);
            if (m_displayLv == 1) {
                if (gNorm < gNormOld)
                    System.out.print("o"); // gradient norm decreased
                else
                    System.out.print("x"); // gradient norm increased
            }

            // one gradient-descent step for this user
            gradientDescent(user, m_initStepSize, 1.0);
            gNormOld = gNorm;

            if (m_displayLv > 0 && ++updateCount % 100 == 0)
                System.out.println();
        }
    }

    if (m_displayLv > 0)
        System.out.println();

    setPersonalizedModel();
    // we do not evaluate the function value
    return 0;
}
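In both snippets, perfStat.addOnePredResult(predL, trueL) records one (prediction, ground-truth) pair as the model is tested online. The internals of _PerformanceStat are not shown on this page; the following is a minimal stand-alone sketch, assuming the class is essentially a confusion-matrix accumulator (the name SimplePerfStat and its getters are illustrative, not IR_Base API):

// Minimal sketch of a confusion-matrix accumulator in the spirit of
// _PerformanceStat; the real class in IR_Base is not shown here, so
// everything below is an assumption for illustration only.
public class SimplePerfStat {
    private final int[][] m_table; // m_table[trueL][predL]

    public SimplePerfStat(int classNo) {
        m_table = new int[classNo][classNo];
    }

    // record one online prediction against its ground-truth label
    public void addOnePredResult(int predL, int trueL) {
        m_table[trueL][predL]++;
    }

    // fraction of instances predicted as class c that are truly c
    public double getPrecision(int c) {
        int predicted = 0;
        for (int[] row : m_table)
            predicted += row[c];
        return predicted == 0 ? 0 : (double) m_table[c][c] / predicted;
    }

    // fraction of true class-c instances that are predicted as c
    public double getRecall(int c) {
        int actual = 0;
        for (int v : m_table[c])
            actual += v;
        return actual == 0 ? 0 : (double) m_table[c][c] / actual;
    }
}

With such an accumulator, the per-user statistics collected during online adaptation can later be aggregated to report per-class precision and recall.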
Use of structures._PerformanceStat in project IR_Base by Linda-sunshine.
Class asyncMTRegLR, method train().
@Override
public double train() {
    double gNorm, gNormOld = Double.MAX_VALUE;
    int predL, trueL;
    _Review doc;
    _AdaptStruct user;
    _PerformanceStat perfStat;
    double val;

    initLBFGS();
    init();
    try {
        m_writer = new PrintWriter(new File("train_online_MTRegLR.txt"));
        for (int i = 0; i < m_userList.size(); i++) {
            user = m_userList.get(i);
            while (user.hasNextAdaptationIns()) {
                // test the latest model on the newest instance before adapting
                if (m_testmode != TestMode.TM_batch && (doc = user.getLatestTestIns()) != null) {
                    perfStat = user.getPerfStat();
                    val = logit(doc.getSparse(), user);
                    predL = predict(doc, user);
                    trueL = doc.getYLabel();
                    perfStat.addOnePredResult(predL, trueL);
                    m_writer.format("%s\t%d\t%.4f\t%d\t%d\n", user.getUserID(), doc.getID(), val, predL, trueL);
                }
                // in batch mode we do not accumulate performance during adaptation

                // one gradient-descent step for this user
                gradientDescent(user, m_initStepSize, 1.0);

                // check the gradient only when debugging
                if (m_displayLv > 0) {
                    gNorm = gradientTest();
                    if (m_displayLv == 1) {
                        if (gNorm < gNormOld)
                            System.out.print("o"); // gradient norm decreased
                        else
                            System.out.print("x"); // gradient norm increased
                    }
                    gNormOld = gNorm;
                }
            }
            m_writer.flush();
            if (m_displayLv == 1)
                System.out.println();
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
    setPersonalizedModel();
    // we do not evaluate the function value
    return 0;
}
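Here logit(doc.getSparse(), user) scores the review before prediction, and gradientDescent(user, m_initStepSize, 1.0) takes one online update step; neither body appears on this page. The sketch below shows what a single logistic-regression update could look like, using dense features for brevity (the class and method names, the weight layout with the bias at index 0, and the dense representation are all assumptions, not the IR_Base implementation):

// Hypothetical single online update under logistic loss; this mirrors the
// logit()/gradientDescent() calls above but is NOT the IR_Base code.
public class OnlineLRStep {
    // sigmoid of the linear score: bias w[0] plus w[1..] dot x
    static double logit(double[] w, double[] x) {
        double sum = w[0];
        for (int i = 0; i < x.length; i++)
            sum += w[i + 1] * x[i];
        return 1.0 / (1.0 + Math.exp(-sum));
    }

    // one stochastic gradient-descent step on a single labeled instance;
    // (p - y) is the gradient of the log loss w.r.t. the linear score
    static void onlineStep(double[] w, double[] x, int trueL, double stepSize) {
        double err = logit(w, x) - trueL;
        w[0] -= stepSize * err; // bias update
        for (int i = 0; i < x.length; i++)
            w[i + 1] -= stepSize * err * x[i];
    }
}

Under this reading, the third argument of gradientDescent(user, m_initStepSize, 1.0) would act as a step-size scaling factor, with m_initStepSize playing the role of stepSize above.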