Usage of structures._PerformanceStat in project IR_Base by Linda-sunshine.
From class ModelAdaptation, method test().
// Evaluates every user's personalized model on its held-out reviews, in parallel.
// Phase 1: one worker thread per CPU core; worker k strides over m_userList
// (indices k, k+numOfCores, k+2*numOfCores, ...) and fills each user's
// _PerformanceStat with prediction results, then computes per-user P/R/F1.
// Phase 2 (main thread, after join): aggregates per-user F1 into macro
// averages (stored in m_perf) and a micro-averaged confusion matrix
// (m_microStat), printing both.
// Returns 0 — results are reported via m_perf and stdout, not the return value.
@Override
public double test() {
int numberOfCores = Runtime.getRuntime().availableProcessors();
ArrayList<Thread> threads = new ArrayList<Thread>();
for (int k = 0; k < numberOfCores; ++k) {
// Anonymous Thread subclass; initialize(...) injects this worker's stride
// parameters and returns the same Thread instance so it can be added inline.
threads.add((new Thread() {
int core, numOfCores;
@Override
public void run() {
_AdaptStruct user;
_PerformanceStat userPerfStat;
try {
// Stride partition: worker `core` handles every numOfCores-th user.
for (int i = 0; i + core < m_userList.size(); i += numOfCores) {
user = m_userList.get(i + core);
// Skip users that cannot be evaluated under the current test mode.
// NOTE(review): the last clause relies on && binding tighter than ||,
// so TM_hybrid users are skipped only when BOTH adaptation and test
// data are absent — confirm this precedence is intended.
if (// no testing data
(m_testmode == TestMode.TM_batch && user.getTestSize() < 1) || // no adaptation data
(m_testmode == TestMode.TM_online && user.getAdaptationSize() < 1) || // no testing and adaptation data
(m_testmode == TestMode.TM_hybrid && user.getAdaptationSize() < 1) && user.getTestSize() < 1)
continue;
userPerfStat = user.getPerfStat();
if (m_testmode == TestMode.TM_batch || m_testmode == TestMode.TM_hybrid) {
// record prediction results
for (_Review r : user.getReviews()) {
if (r.getType() != rType.TEST)
continue;
int trueL = r.getYLabel();
// evoke user's own model
int predL = user.predict(r);
r.setPredictLabel(predL);
userPerfStat.addOnePredResult(predL, trueL);
}
}
// Finalize this user's precision/recall/F1 from the accumulated counts.
userPerfStat.calculatePRF();
}
} catch (Exception ex) {
ex.printStackTrace();
}
}
// Stores the stride parameters; returns `this` so construction,
// configuration, and list insertion happen in one expression.
private Thread initialize(int core, int numOfCores) {
this.core = core;
this.numOfCores = numOfCores;
return this;
}
}).initialize(k, numberOfCores));
threads.get(k).start();
}
// Wait for all workers so per-user stats are complete before aggregation.
for (int k = 0; k < numberOfCores; ++k) {
try {
threads.get(k).join();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
int count = 0;
// macroF1.get(c) collects the per-user F1 scores for class c (only from
// users that actually had instances of class c).
ArrayList<ArrayList<Double>> macroF1 = new ArrayList<ArrayList<Double>>();
// init macroF1
for (int i = 0; i < m_classNo; i++) macroF1.add(new ArrayList<Double>());
_PerformanceStat userPerfStat;
m_microStat.clear();
for (_AdaptStruct user : m_userList) {
// Same skip criterion as in the worker threads above.
if (// no testing data
(m_testmode == TestMode.TM_batch && user.getTestSize() < 1) || // no adaptation data
(m_testmode == TestMode.TM_online && user.getAdaptationSize() < 1) || // no testing and adaptation data
(m_testmode == TestMode.TM_hybrid && user.getAdaptationSize() < 1) && user.getTestSize() < 1)
continue;
userPerfStat = user.getPerfStat();
for (int i = 0; i < m_classNo; i++) {
// Only count users that actually had ground-truth instances of class i.
if (userPerfStat.getTrueClassNo(i) > 0)
macroF1.get(i).add(userPerfStat.getF1(i));
}
m_microStat.accumulateConfusionMat(userPerfStat);
count++;
}
// NOTE(review): assumes binary classification here (indices 0/1) even though
// the loops above are generic over m_classNo — confirm m_classNo >= 2.
System.out.print("neg users: " + macroF1.get(0).size());
System.out.print("\tpos users: " + macroF1.get(1).size() + "\n");
System.out.println(toString());
calcMicroPerfStat();
// macro average and standard deviation.
System.out.println("\nMacro F1:");
for (int i = 0; i < m_classNo; i++) {
double[] avgStd = calcAvgStd(macroF1.get(i));
m_perf[i] = avgStd[0];
System.out.format("Class %d: %.4f+%.4f\t", i, avgStd[0], avgStd[1]);
}
// printPerformance();
return 0;
}
Usage of structures._PerformanceStat in project IR_Base by Linda-sunshine.
From class asyncRegLR, method train().
// this is online training in each individual user
/**
 * Online (streaming) training over each user's adaptation instances.
 * For every adaptation instance the method: (1) unless in batch test mode,
 * scores the user's latest test instance with the current model and records
 * the prediction in the user's {@code _PerformanceStat} (prequential
 * evaluation: test before adapt); (2) resets the shared gradient buffer
 * {@code m_g}, computes this user's gradient, and takes one gradient-descent
 * step with {@code m_initStepSize}.
 * When {@code m_displayLv == 1}, prints "o" if the gradient norm decreased
 * since the previous step and "x" otherwise.
 *
 * @return always 0 — the objective value is not evaluated in online training
 */
@Override
public double train() {
    double gNorm, gNormOld = Double.MAX_VALUE; // fix: removed stray empty statement (lone ";")
    int predL, trueL;
    _Review doc;
    _PerformanceStat perfStat;
    initLBFGS();
    init();
    for (_AdaptStruct user : m_userList) {
        while (user.hasNextAdaptationIns()) {
            // test the latest model before model adaptation;
            // in batch mode we do not accumulate performance during adaptation
            if (m_testmode != TestMode.TM_batch && (doc = user.getLatestTestIns()) != null) {
                perfStat = user.getPerfStat();
                predL = predict(doc, user);
                trueL = doc.getYLabel();
                perfStat.addOnePredResult(predL, trueL);
            }
            // prepare to adapt: reset the shared gradient buffer
            Arrays.fill(m_g, 0);
            calculateGradients(user);
            gNorm = gradientTest();
            if (m_displayLv == 1) {
                if (gNorm < gNormOld)
                    System.out.print("o");
                else
                    System.out.print("x");
            }
            // one gradient-descent step for this user
            gradientDescent(user, m_initStepSize, m_g);
            gNormOld = gNorm;
        }
        if (m_displayLv > 0)
            System.out.println();
    }
    setPersonalizedModel();
    // we do not evaluate function value in online training
    return 0;
}
Usage of structures._PerformanceStat in project IR_Base by Linda-sunshine.
From class asyncCoLinAdapt, method train().
// this is online training in each individual user
/**
 * Online training that visits users in the precomputed order m_userOrder,
 * consuming at most one adaptation instance per visit. Before each adaptation
 * step (except in batch test mode) the user's latest test instance is scored
 * with the current model and recorded in the user's {@code _PerformanceStat}.
 * Then the shared gradient buffer is reset, this user's gradient is computed,
 * and one gradient-descent step is taken. When {@code m_displayLv == 1},
 * prints "o"/"x" depending on whether the gradient norm decreased; every 100
 * updates a newline is emitted when display is enabled.
 *
 * @return always 0 — the objective value is not evaluated in online training
 */
@Override
public double train() {
    double currentNorm;
    double previousNorm = Double.MAX_VALUE;
    int stepsTaken = 0;
    initLBFGS();
    init();
    for (int idx = 0; idx < m_userOrder.length; idx++) {
        _CoLinAdaptStruct current = (_CoLinAdaptStruct) m_userList.get(m_userOrder[idx]);
        if (!current.hasNextAdaptationIns())
            continue; // nothing left to adapt on for this user
        // test the latest model first (skipped in batch mode, where we do not
        // accumulate performance during adaptation)
        _Review latest;
        if (m_testmode != TestMode.TM_batch && (latest = current.getLatestTestIns()) != null) {
            _PerformanceStat stat = current.getPerfStat();
            stat.addOnePredResult(predict(latest, current), latest.getYLabel());
        }
        // prepare to adapt: reset the shared gradient buffer
        Arrays.fill(m_g, 0);
        calculateGradients(current);
        currentNorm = gradientTest(current);
        if (m_displayLv == 1)
            System.out.print(currentNorm < previousNorm ? "o" : "x");
        // one gradient-descent step with the initial step size
        gradientDescent(current, m_initStepSize, 1.0);
        previousNorm = currentNorm;
        if (m_displayLv > 0 && ++stepsTaken % 100 == 0)
            System.out.println();
    }
    if (m_displayLv > 0)
        System.out.println();
    setPersonalizedModel();
    // we do not evaluate function value in online training
    return 0;
}
Usage of structures._PerformanceStat in project IR_Base by Linda-sunshine.
From class asyncLinAdapt, method train().
// this is online training in each individual user
/**
 * Online (streaming) training over each user's adaptation instances.
 * For every adaptation instance: (1) unless in batch test mode, the user's
 * latest test instance is scored with the current model and the prediction is
 * recorded in the user's {@code _PerformanceStat} (test before adapt);
 * (2) the shared gradient buffer {@code m_g} is reset, this user's gradient
 * is computed, and one gradient-descent step is delegated to
 * {@code asyncRegLR.gradientDescent}. When {@code m_displayLv == 1}, prints
 * "o" if the gradient norm decreased since the previous step, "x" otherwise.
 *
 * @return always 0 — the objective value is not evaluated in online training
 */
@Override
public double train() {
    double gNorm, gNormOld = Double.MAX_VALUE; // fix: removed stray empty statement (lone ";")
    int predL, trueL;
    _Review doc;
    _PerformanceStat perfStat;
    _LinAdaptStruct user;
    initLBFGS();
    init();
    for (int i = 0; i < m_userList.size(); i++) {
        user = (_LinAdaptStruct) m_userList.get(i);
        while (user.hasNextAdaptationIns()) {
            // test the latest model before model adaptation;
            // in batch mode we do not accumulate performance during adaptation
            if (m_testmode != TestMode.TM_batch && (doc = user.getLatestTestIns()) != null) {
                perfStat = user.getPerfStat();
                predL = predict(doc, user);
                trueL = doc.getYLabel();
                perfStat.addOnePredResult(predL, trueL);
            }
            // prepare to adapt: reset the shared gradient buffer
            Arrays.fill(m_g, 0);
            calculateGradients(user);
            gNorm = gradientTest();
            if (m_displayLv == 1) {
                if (gNorm < gNormOld)
                    System.out.print("o");
                else
                    System.out.print("x");
            }
            // one gradient-descent step for this user
            asyncRegLR.gradientDescent(user, m_initStepSize, m_g);
            gNormOld = gNorm;
        }
        if (m_displayLv > 0)
            System.out.println();
    }
    setPersonalizedModel();
    // we do not evaluate function value in online training
    return 0;
}
Usage of structures._PerformanceStat in project IR_Base by Linda-sunshine.
From class ModelAdaptation, method printPerformance().
/**
 * Prints one line per user with the class counts and per-class F1 scores
 * from that user's {@code _PerformanceStat}:
 * {@code pos:<n> neg:<n> posF1:<f> negF1:<f>} (tab-separated).
 */
public void printPerformance() {
    for (_AdaptStruct user : m_userList) {
        _PerformanceStat stat = user.getPerfStat();
        System.out.format("pos:%d\tneg:%d\tposF1:%.4f\tnegF1:%.4f\n",
                stat.getTrueClassNo(1), stat.getTrueClassNo(0),
                stat.getF1(1), stat.getF1(0));
    }
}
Aggregations