
Example 1 with DocAnalyzer

Use of Analyzer.DocAnalyzer in project IR_Base by Linda-sunshine, in the main method of the class AmazonReviewMain.

public static void main(String[] args) throws IOException, ParseException {
    /**
     * Set these parameters before running the classifiers.
     */
    // Define the number of classes
    int classNumber = 5;
    // N-gram order (2 = bigram).
    int Ngram = 2;
    // Document length threshold
    int lengthThreshold = 10;
    // "TF", "TFIDF", "BM25", "PLN"
    // The way of calculating the feature value, which can also be "TFIDF", "BM25"
    String featureValue = "TF";
    // Normalization scheme (1 or 2); 0 here, i.e., none.
    int norm = 0;
    // k-fold cross validation.
    int CVFold = 10;
    // "SUP", "SEMI", "FV", "ASPECT"
    String style = "SUP";
    // "NB", "LR", "SVM", "PR"
    // Which classifier to use.
    String classifier = "SVM";
    // "GF", "NB-EM"
    String model = "SVM";
    // Regularization trade-off parameter.
    double C = 1.0;
    // String modelPath = "./data/Model/";
    // "data/debug/LR.output";
    String debugOutput = null;
    System.out.println("--------------------------------------------------------------------------------------");
    System.out.println("Parameters of this run:" + "\nClassNumber: " + classNumber + "\tNgram: " + Ngram + "\tFeatureValue: " + featureValue + "\tLearning Method: " + style + "\tClassifier: " + classifier + "\nCross validation: " + CVFold);
    // Parameters in feature selection.
    // Feature selection method.
    String featureSelection = "CHI";
    String stopwords = "./data/Model/stopwords.dat";
    // Used in feature selection, the starting point of the features.
    double startProb = 0.5;
    // Used in feature selection, the ending point of the features.
    double endProb = 0.999;
    // Filter out features with DF below minDF or above maxDF (-1 = no upper bound).
    int maxDF = -1, minDF = 1;
    // System.out.println("Feature Seleciton: " + featureSelection + "\tStarting probability: " + startProb + "\tEnding probability:" + endProb);
    /**
     * The parameters used in loading files.
     */
    String folder = "./data/amazon/tablet/small";
    String suffix = ".json";
    // Token model
    String tokenModel = "./data/Model/en-token.bin";
    String pattern = String.format("%dgram_%s", Ngram, featureSelection);
    String fvFile = String.format("data/Features/fv_%s_small.txt", pattern);
    String fvStatFile = String.format("data/Features/fv_stat_%s_small.txt", pattern);
    String vctFile = String.format("data/Fvs/vct_%s_tablet_small.dat", pattern);
    /**
     * Parameters in time series analysis.
     */
    int window = 0;
    System.out.println("Window length: " + window);
    System.out.println("--------------------------------------------------------------------------------------");
    // Loading JSON files.
    DocAnalyzer analyzer = new DocAnalyzer(tokenModel, classNumber, null, Ngram, lengthThreshold);
    analyzer.LoadStopwords(stopwords);
    // Load all the documents as the data set.
    analyzer.LoadDirectory(folder, suffix);
    // Feature selection.
    System.out.println("Performing feature selection, wait...");
    // Select the features.
    analyzer.featureSelection(fvFile, featureSelection, startProb, endProb, maxDF, minDF);
    analyzer.SaveCVStat(fvStatFile);
    /**
     * Create vectors for documents.
     */
    System.out.println("Creating feature vectors, wait...");
    // Rebuild the analyzer with the selected feature file so documents are vectorized against it.
    analyzer = new DocAnalyzer(tokenModel, classNumber, fvFile, Ngram, lengthThreshold);
    // Release document content unless PageRank or debug output needs it; the other classifiers do not.
    analyzer.setReleaseContent(!(classifier.equals("PR") || debugOutput != null));
    // Load all the documents as the data set.
    analyzer.LoadDirectory(folder, suffix);
    analyzer.setFeatureValues(featureValue, norm);
    // analyzer.setTimeFeatures(window);
    _Corpus corpus = analyzer.getCorpus();
    // Execute different classifiers.
    if (style.equals("SUP")) {
        if (classifier.equals("NB")) {
            // Define a new naive bayes with the parameters.
            System.out.println("Start naive bayes, wait...");
            NaiveBayes myNB = new NaiveBayes(corpus);
            // Evaluate with cross validation.
            myNB.crossValidation(CVFold, corpus);
        } else if (classifier.equals("LR")) {
            // Define a new logistic regression with the parameters.
            System.out.println("Start logistic regression, wait...");
            LogisticRegression myLR = new LogisticRegression(corpus, C);
            myLR.setDebugOutput(debugOutput);
            // Evaluate with cross validation.
            myLR.crossValidation(CVFold, corpus);
        // myLR.saveModel(modelPath + "LR.model");
        } else if (classifier.equals("SVM")) {
            System.out.println("Start SVM, wait...");
            SVM mySVM = new SVM(corpus, C);
            mySVM.crossValidation(CVFold, corpus);
        } else if (classifier.equals("PR")) {
            System.out.println("Start PageRank, wait...");
            PageRank myPR = new PageRank(corpus, C, 100, 50, 1e-6);
            myPR.train(corpus.getCollection());
        } else
            System.out.println("Classifier has not developed yet!");
    } else if (style.equals("SEMI")) {
        if (model.equals("GF")) {
            System.out.println("Start Gaussian Field, wait...");
            GaussianFields mySemi = new GaussianFields(corpus, classifier, C);
            mySemi.crossValidation(CVFold, corpus);
        } else if (model.equals("NB-EM")) {
            // corpus.setUnlabeled();
            System.out.println("Start Naive Bayes with EM, wait...");
            NaiveBayesEM myNB = new NaiveBayesEM(corpus);
            // Evaluate with cross validation.
            myNB.crossValidation(CVFold, corpus);
        }
    } else if (style.equals("FV")) {
        corpus.save2File(vctFile);
        System.out.format("Vectors saved to %s...\n", vctFile);
    } else
        System.out.println("Learning paradigm has not developed yet!");
}
Also used: structures._Corpus, Analyzer.DocAnalyzer, Classifier.supervised.NaiveBayes, Classifier.supervised.LogisticRegression, Classifier.supervised.SVM, Classifier.semisupervised.NaiveBayesEM, Classifier.semisupervised.GaussianFields, influence.PageRank
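
The pattern worth noting here is the two-pass use of DocAnalyzer: one pass over the raw documents to select features, then a second pass that vectorizes the documents against the selected feature file. Below is a minimal sketch of that pipeline, assuming the same IR_Base APIs called in the example; the class name, paths, and parameter values are illustrative placeholders.

import Analyzer.DocAnalyzer;
import Classifier.supervised.SVM;
import structures._Corpus;

public class DocAnalyzerPipelineSketch {

    public static void main(String[] args) throws Exception {
        String tokenModel = "./data/Model/en-token.bin";   // OpenNLP token model, as above
        String folder = "./data/amazon/tablet/small";      // JSON review folder
        String fvFile = "data/Features/fv_sketch.txt";     // where selected features are written

        // Pass 1: no feature file yet; scan the corpus and select features by CHI.
        DocAnalyzer selector = new DocAnalyzer(tokenModel, 5, null, 2, 10);
        selector.LoadStopwords("./data/Model/stopwords.dat");
        selector.LoadDirectory(folder, ".json");
        selector.featureSelection(fvFile, "CHI", 0.5, 0.999, -1, 1);

        // Pass 2: reload the documents against the selected features and vectorize.
        DocAnalyzer vectorizer = new DocAnalyzer(tokenModel, 5, fvFile, 2, 10);
        vectorizer.LoadDirectory(folder, ".json");
        vectorizer.setFeatureValues("TF", 0);
        _Corpus corpus = vectorizer.getCorpus();

        // Any of the classifiers above can then be cross-validated, e.g. SVM.
        SVM mySVM = new SVM(corpus, 1.0);
        mySVM.crossValidation(10, corpus);
    }
}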

Example 2 with DocAnalyzer

Use of Analyzer.DocAnalyzer in project IR_Base by Linda-sunshine, in the main method of the class Execution.

public static void main(String[] args) throws IOException, ParseException {
    Parameter param = new Parameter(args);
    System.out.println(param.toString());
    String stnModel = (param.m_model.equals("HTMM") || param.m_model.equals("LRHTMM")) ? param.m_stnModel : null;
    String posModel = (param.m_model.equals("HTMM") || param.m_model.equals("LRHTMM")) ? param.m_posModel : null;
    _Corpus corpus;
    Analyzer analyzer;
    /**
     * Load the data from the vector file.
     */
    if (param.m_fvFile != null && (new File(param.m_fvFile)).exists()) {
        analyzer = new VctAnalyzer(param.m_classNumber, param.m_lengthThreshold, param.m_featureFile);
        // Load all the documents as the data set.
        analyzer.LoadDoc(param.m_fvFile);
        corpus = analyzer.getCorpus();
    } else {
        /**
         * Load the data from text files.
         */
        analyzer = new DocAnalyzer(param.m_tokenModel, stnModel, posModel, param.m_classNumber, param.m_featureFile, param.m_Ngram, param.m_lengthThreshold);
        ((DocAnalyzer) analyzer).setReleaseContent(!param.m_weightScheme.equals("PR"));
        if (param.m_featureFile == null) {
            /**
             * Pre-process the data.
             */
            // Feature selection.
            System.out.println("Performing feature selection, wait...");
            param.m_featureFile = String.format("./data/Features/%s_fv.dat", param.m_featureSelection);
            param.m_featureStat = String.format("./data/Features/%s_fv_stat.dat", param.m_featureSelection);
            System.out.println(param.printFeatureSelectionConfiguration());
            ((DocAnalyzer) analyzer).LoadStopwords(param.m_stopwords);
            // Load all the documents as the data set.
            analyzer.LoadDirectory(param.m_folder, param.m_suffix);
            // Select the features.
            analyzer.featureSelection(param.m_featureFile, param.m_featureSelection, param.m_startProb, param.m_endProb, param.m_maxDF, param.m_minDF);
        }
        // Collect vectors for documents.
        System.out.println("Creating feature vectors, wait...");
        // Load all the documents as the data set.
        analyzer.LoadDirectory(param.m_folder, param.m_suffix);
        analyzer.setFeatureValues(param.m_featureValue, param.m_norm);
        corpus = analyzer.returnCorpus(param.m_featureStat);
    }
    if (param.m_weightScheme.equals("PR")) {
        System.out.println("Creating PageRank instance weighting, wait...");
        PageRank myPR = new PageRank(corpus, param.m_C, 100, 50, 1e-6);
        myPR.train(corpus.getCollection());
    }
    // Execute different classifiers.
    if (param.m_style.equals("SUP")) {
        BaseClassifier model = null;
        if (param.m_model.equals("NB")) {
            // Define a new naive bayes with the parameters.
            System.out.println("Start naive bayes, wait...");
            model = new NaiveBayes(corpus);
        } else if (param.m_model.equals("LR")) {
            // Define a new logistic regression with the parameters.
            System.out.println("Start logistic regression, wait...");
            model = new LogisticRegression(corpus, param.m_C);
        } else if (param.m_model.equals("PR-LR")) {
            // Define a new posterior regularized logistic regression with the parameters.
            System.out.println("Start posterior regularized logistic regression, wait...");
            model = new PRLogisticRegression(corpus, param.m_C);
        } else if (param.m_model.equals("SVM")) {
            // corpus.save2File("data/FVs/fvector.dat");
            System.out.println("Start SVM, wait...");
            model = new SVM(corpus, param.m_C);
        } else {
            System.out.println("Classifier has not been developed yet!");
            System.exit(-1);
        }
        model.setDebugOutput(param.m_debugOutput);
        model.crossValidation(param.m_CVFold, corpus);
    } else if (param.m_style.equals("SEMI")) {
        BaseClassifier model = null;
        if (param.m_model.equals("GF")) {
            System.out.println("Start Gaussian Field by matrix inversion, wait...");
            model = new GaussianFields(corpus, param.m_classifier, param.m_C, param.m_sampleRate, param.m_kUL, param.m_kUU);
        } else if (param.m_model.equals("GF-RW")) {
            System.out.println("Start Gaussian Field by random walk, wait...");
            model = new GaussianFieldsByRandomWalk(corpus, param.m_classifier, param.m_C, param.m_sampleRate, param.m_kUL, param.m_kUU, param.m_alpha, param.m_beta, param.m_converge, param.m_eta, param.m_weightedAvg);
        } else if (param.m_model.equals("GF-RW-ML")) {
            System.out.println("Start Gaussian Field with distance metric learning by random walk, wait...");
            model = new LinearSVMMetricLearning(corpus, param.m_classifier, param.m_C, param.m_sampleRate, param.m_kUL, param.m_kUU, param.m_alpha, param.m_beta, param.m_converge, param.m_eta, param.m_weightedAvg, param.m_bound);
        // ((LinearSVMMetricLearning)model).setMetricLearningMethod(false);
        // ((LinearSVMMetricLearning)model).verification(param.m_CVFold, corpus, param.m_debugOutput);
        } else {
            System.out.println("Classifier has not been developed yet!");
            System.exit(-1);
        }
        model.setDebugOutput(param.m_debugOutput);
        model.crossValidation(param.m_CVFold, corpus);
    } else if (param.m_style.equals("TM")) {
        TopicModel model = null;
        if (param.m_model.equals("2topic")) {
            model = new twoTopic(param.m_maxmIterations, param.m_converge, param.m_beta, corpus, param.m_lambda);
        } else if (param.m_model.equals("pLSA")) {
            if (!param.m_multithread) {
                model = new pLSA(param.m_maxmIterations, param.m_converge, param.m_beta, corpus, param.m_lambda, param.m_numTopics, param.m_alpha);
            } else {
                model = new pLSA_multithread(param.m_maxmIterations, param.m_converge, param.m_beta, corpus, param.m_lambda, param.m_numTopics, param.m_alpha);
            }
            ((pLSA) model).LoadPrior(param.m_priorFile, param.m_gamma);
        } else if (param.m_model.equals("vLDA")) {
            if (!param.m_multithread) {
                model = new LDA_Variational(param.m_maxmIterations, param.m_converge, param.m_beta, corpus, param.m_lambda, param.m_numTopics, param.m_alpha, param.m_maxVarIterations, param.m_varConverge);
            } else {
                model = new LDA_Variational_multithread(param.m_maxmIterations, param.m_converge, param.m_beta, corpus, param.m_lambda, param.m_numTopics, param.m_alpha, param.m_maxVarIterations, param.m_varConverge);
            }
            ((LDA_Variational) model).LoadPrior(param.m_priorFile, param.m_gamma);
        } else if (param.m_model.equals("gLDA")) {
            model = new LDA_Gibbs(param.m_maxmIterations, param.m_converge, param.m_beta, corpus, param.m_lambda, param.m_numTopics, param.m_alpha, param.m_burnIn, param.m_lag);
            ((LDA_Gibbs) model).LoadPrior(param.m_priorFile, param.m_gamma);
        } else if (param.m_model.equals("HTMM")) {
            model = new HTMM(param.m_maxmIterations, param.m_converge, param.m_beta, corpus, param.m_numTopics, param.m_alpha);
        } else if (param.m_model.equals("LRHTMM")) {
            model = new LRHTMM(param.m_maxmIterations, param.m_converge, param.m_beta, corpus, param.m_numTopics, param.m_alpha, param.m_C);
        } else {
            System.out.println("The specified topic model has not been developed yet!");
            System.exit(-1);
        }
        if (param.m_CVFold <= 1) {
            model.EMonCorpus();
            // Print the top 10 words per topic.
            model.printTopWords(10);
        } else
            model.crossValidation(param.m_CVFold);
    } else if (param.m_style.equals("FV")) {
        corpus.save2File(param.m_fvFile);
        System.out.format("Vectors saved to %s...\n", param.m_fvFile);
    } else
        System.out.println("Learning paradigm has not developed yet!");
}
Also used: structures.Parameter, structures._Corpus, java.io.File, Analyzer.Analyzer, Analyzer.DocAnalyzer, Analyzer.VctAnalyzer, Classifier.BaseClassifier, Classifier.supervised.NaiveBayes, Classifier.supervised.LogisticRegression, Classifier.supervised.PRLogisticRegression, Classifier.supervised.SVM, Classifier.semisupervised.GaussianFields, Classifier.semisupervised.GaussianFieldsByRandomWalk, Classifier.metricLearning.LinearSVMMetricLearning, influence.PageRank, topicmodels.TopicModel, topicmodels.twoTopic, topicmodels.pLSA.pLSA, topicmodels.multithreads.pLSA.pLSA_multithread, topicmodels.LDA.LDA_Variational, topicmodels.multithreads.LDA.LDA_Variational_multithread, topicmodels.LDA.LDA_Gibbs, topicmodels.markovmodel.HTMM, topicmodels.markovmodel.LRHTMM
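
The loading logic in Execution reduces to one branch: reuse a pre-built vector file through VctAnalyzer when it exists, otherwise parse the raw documents with DocAnalyzer. Below is a minimal sketch of that branch, assuming the same IR_Base APIs as above and the simpler five-argument DocAnalyzer constructor from Examples 1 and 3 (no sentence or POS models); the class name, paths, and parameter values are placeholders.

import java.io.File;
import Analyzer.Analyzer;
import Analyzer.DocAnalyzer;
import Analyzer.VctAnalyzer;
import structures._Corpus;

public class CorpusLoadingSketch {

    static _Corpus loadCorpus(String fvFile, String featureFile) throws Exception {
        Analyzer analyzer;
        if (fvFile != null && new File(fvFile).exists()) {
            // Fast path: the documents were already vectorized; just read them back.
            analyzer = new VctAnalyzer(2, 10, featureFile);
            analyzer.LoadDoc(fvFile);
        } else {
            // Slow path: tokenize and vectorize the raw documents.
            analyzer = new DocAnalyzer("./data/Model/en-token.bin", 2, featureFile, 1, 10);
            analyzer.LoadDirectory("./data/amazon/tablet/small", ".json");
            analyzer.setFeatureValues("TF", 0);
        }
        return analyzer.getCorpus();
    }
}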

Example 3 with DocAnalyzer

Use of Analyzer.DocAnalyzer in project IR_Base by Linda-sunshine, in the main method of the class MovieReviewMain.

/**
 * Main function.
 */
public static void main(String[] args) throws IOException {
    _Corpus corpus = new _Corpus();
    /**
     * Set these parameters before running the classifiers.
     */
    // Initialize featureSize to zero.
    int featureSize = 0;
    // Define the number of classes in this Naive Bayes.
    int classNumber = 2;
    // N-gram order (1 = unigram).
    int Ngram = 1;
    // Document length threshold
    int lengthThreshold = 5;
    // Feature value scheme: "TF", "TFIDF", or "BM25".
    String featureValue = "TF";
    // Normalization scheme (1 or 2).
    int norm = 1;
    // Which classifier to use.
    String classifier = "SVM";
    System.out.println("--------------------------------------------------------------------------------------");
    System.out.println("Parameters of this run:" + "\nClassNumber: " + classNumber + "\tNgram: " + Ngram + "\tFeatureValue: " + featureValue + "\tClassifier: " + classifier);
    /**
     * The parameters used in loading files.
     */
    String folder = "data/txt_sentoken";
    String suffix = ".txt";
    // Token model.
    String tokenModel = "./data/Model/en-token.bin";
    // String finalLocation = "/Users/lingong/Documents/Lin'sWorkSpace/IR_Base/data/movie/FinalFeatureStat.txt"; //The destination of storing the final features with stats.
    // String featureLocation = "/Users/lingong/Documents/Lin'sWorkSpace/IR_Base/data/movie/SelectedFeatures.txt";
    String finalLocation = "/home/lin/Lin'sWorkSpace/IR_Base/FinalFeatureStat.txt";
    String featureLocation = "/home/lin/Lin'sWorkSpace/IR_Base/SelectedFeatures.txt";
    /**
     * Parameters in feature selection.
     */
    // String providedCV = "";
    String featureSelection = "";
    // Provided CV.
    String providedCV = "Features.txt";
    // String featureSelection = "MI"; //Feature selection method.
    // Used in feature selection, the starting point of the features.
    double startProb = 0.5;
    // Used in feature selection, the ending point of the features.
    double endProb = 1;
    // Filter out features with DF below minDF or above maxDF (-1 = no upper bound).
    int maxDF = -1, minDF = 5;
    System.out.println("Feature Seleciton: " + featureSelection + "\tStarting probability: " + startProb + "\tEnding probability:" + endProb);
    System.out.println("--------------------------------------------------------------------------------------");
    if (providedCV.isEmpty() && featureSelection.isEmpty()) {
        // Case 1: no provided CV, no feature selection.
        System.out.println("Case 1: no provided CV, no feature selection.  Start loading files, wait...");
        DocAnalyzer analyzer = new DocAnalyzer(tokenModel, classNumber, null, Ngram, lengthThreshold);
        // Load all the documents as the data set.
        analyzer.LoadDirectory(folder, suffix);
        analyzer.setFeatureValues(featureValue, norm);
        corpus = analyzer.returnCorpus(finalLocation);
    } else if (!providedCV.isEmpty() && featureSelection.isEmpty()) {
        // Case 2: provided CV, no feature selection.
        System.out.println("Case 2: provided CV, no feature selection. Start loading files, wait...");
        DocAnalyzer analyzer = new DocAnalyzer(tokenModel, classNumber, providedCV, Ngram, lengthThreshold);
        // Load all the documents as the data set.
        analyzer.LoadDirectory(folder, suffix);
        analyzer.setFeatureValues(featureValue, norm);
        corpus = analyzer.returnCorpus(finalLocation);
    } else if (providedCV.isEmpty() && !featureSelection.isEmpty()) {
        // Case 3: no provided CV, feature selection.
        System.out.println("Case 3: no provided CV, feature selection. Start loading files to do feature selection, wait...");
        DocAnalyzer analyzer = new DocAnalyzer(tokenModel, classNumber, null, Ngram, lengthThreshold);
        // Load all the documents as the data set.
        analyzer.LoadDirectory(folder, suffix);
        // Select the features.
        analyzer.featureSelection(featureLocation, featureSelection, startProb, endProb, maxDF, minDF);
        System.out.println("Start loading files, wait...");
        analyzer = new DocAnalyzer(tokenModel, classNumber, featureLocation, Ngram, lengthThreshold);
        analyzer.LoadDirectory(folder, suffix);
        analyzer.setFeatureValues(featureValue, norm);
        corpus = analyzer.returnCorpus(finalLocation);
    } else if (!providedCV.isEmpty() && !featureSelection.isEmpty()) {
        // Case 4: provided CV, feature selection.
        DocAnalyzer analyzer = new DocAnalyzer(tokenModel, classNumber, providedCV, Ngram, lengthThreshold);
        System.out.println("Case 4: provided CV, feature selection. Start loading files to do feature selection, wait...");
        // Load all the documents as the data set.
        analyzer.LoadDirectory(folder, suffix);
        // Select the features.
        analyzer.featureSelection(featureLocation, featureSelection, startProb, endProb, maxDF, minDF);
        System.out.println("Start loading files, wait...");
        analyzer = new DocAnalyzer(tokenModel, classNumber, featureLocation, Ngram, lengthThreshold);
        analyzer.LoadDirectory(folder, suffix);
        analyzer.setFeatureValues(featureValue, norm);
        corpus = analyzer.returnCorpus(finalLocation);
    } else
        System.out.println("The setting fails, please check the parameters!!");
    // Execute different classifiers.
    if (classifier.equals("NB")) {
        // Define a new naive bayes with the parameters.
        System.out.println("Start naive bayes, wait...");
        NaiveBayes myNB = new NaiveBayes(corpus);
        // Use the movie reviews for testing the codes.
        myNB.crossValidation(10, corpus);
    } else if (classifier.equals("LR")) {
        // Regularization weight lambda.
        double lambda = 0;
        // Define a new logistic regression with the parameters.
        System.out.println("Start logistic regression, wait...");
        LogisticRegression myLR = new LogisticRegression(corpus, lambda);
        // Use the movie reviews for testing the codes.
        myLR.crossValidation(10, corpus);
    } else if (classifier.equals("SVM")) {
        // corpus.save2File("data/FVs/fvector.dat");
        // Regularization parameter C (the library default is 1; set to 3 here).
        double C = 3;
        // Stopping tolerance; default value from Lin's implementation (not passed to this SVM constructor).
        double eps = 0.01;
        System.out.println("Start SVM, wait...");
        SVM mySVM = new SVM(corpus, C);
        mySVM.crossValidation(10, corpus);
    } else
        System.out.println("Have not developed yet!:(");
}
Also used: structures._Corpus, Analyzer.DocAnalyzer, Classifier.supervised.NaiveBayes, Classifier.supervised.LogisticRegression, Classifier.supervised.SVM
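
The four cases above differ only in which vocabulary the analyzer starts from: a provided CV, a freshly selected feature set, or none. Below is a sketch condensing them into one helper, assuming the same IR_Base APIs used in the example; the class and method names are hypothetical.

import Analyzer.DocAnalyzer;
import structures._Corpus;

public class VocabularyChoiceSketch {

    static _Corpus buildCorpus(String tokenModel, String folder, String suffix,
            String providedCV, String featureSelection,
            String featureLocation, String finalLocation) throws Exception {
        // Start from the provided CV if there is one, otherwise from no vocabulary.
        String seedCV = providedCV.isEmpty() ? null : providedCV;
        DocAnalyzer analyzer = new DocAnalyzer(tokenModel, 2, seedCV, 1, 5);
        analyzer.LoadDirectory(folder, suffix);

        if (!featureSelection.isEmpty()) {
            // Select features, then reload the documents against the selected set.
            analyzer.featureSelection(featureLocation, featureSelection, 0.5, 1, -1, 5);
            analyzer = new DocAnalyzer(tokenModel, 2, featureLocation, 1, 5);
            analyzer.LoadDirectory(folder, suffix);
        }
        analyzer.setFeatureValues("TF", 1);
        return analyzer.returnCorpus(finalLocation);
    }
}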

Aggregations

DocAnalyzer (Analyzer.DocAnalyzer): 3
LogisticRegression (Classifier.supervised.LogisticRegression): 3
NaiveBayes (Classifier.supervised.NaiveBayes): 3
SVM (Classifier.supervised.SVM): 3
_Corpus (structures._Corpus): 3
GaussianFields (Classifier.semisupervised.GaussianFields): 2
PageRank (influence.PageRank): 2
Analyzer (Analyzer.Analyzer): 1
VctAnalyzer (Analyzer.VctAnalyzer): 1
BaseClassifier (Classifier.BaseClassifier): 1
LinearSVMMetricLearning (Classifier.metricLearning.LinearSVMMetricLearning): 1
GaussianFieldsByRandomWalk (Classifier.semisupervised.GaussianFieldsByRandomWalk): 1
NaiveBayesEM (Classifier.semisupervised.NaiveBayesEM): 1
PRLogisticRegression (Classifier.supervised.PRLogisticRegression): 1
File (java.io.File): 1
Parameter (structures.Parameter): 1
LDA_Gibbs (topicmodels.LDA.LDA_Gibbs): 1
LDA_Variational (topicmodels.LDA.LDA_Variational): 1
TopicModel (topicmodels.TopicModel): 1
HTMM (topicmodels.markovmodel.HTMM): 1