2021-07-29
Author: Internet
Supervised Learning
1. Decision Trees
2. Naive Bayesian Classification
3. Ordinary Least Squares Regression
4. Logistic Regression
5. Support Vector Machine (SVM)
6. Ensemble Methods
Unsupervised Learning
7. Clustering Algorithms
8. Principal Component Analysis (PCA)
9. Singular Value Decomposition (SVD)
10. Independent Component Analysis (ICA)
Perceptron
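The perceptron is a linear binary classifier: it predicts f(x) = sign(w·x + b) and learns with a mistake-driven rule. For a training sample (x_i, y_i) with y_i ∈ {+1, -1}, the sample is misclassified when y_i(w·x_i + b) ≤ 0, which is exactly when the loss -y_i(w·x_i + b) is non-negative. On each mistake the parameters are updated as w ← w + lr·y_i·x_i and b ← b + lr·y_i. The implementation below follows this rule directly: inference() computes w·x + b, loss() computes -y(w·x + b), computeGradient() returns (y·x, y) for a misclassified sample and zero otherwise, and train() applies the update.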
main.cpp
#include <iostream>
#include <vector>
#include "perceptron.h"

int main() {
    // construct the model through the Base interface and run the full pipeline
    // (Base must declare a virtual destructor for this delete to be safe)
    Base* obj = new Perceptron();
    obj->run();
    delete obj;
    return 0;
}
perceptron.cpp
#include "perceptron.h"
using std::string;
using std::vector;
using std::pair;
void Perceptron::getData(const std::string &filename) {
    // load whitespace-separated samples into inData, one vector per line
    std::vector<double> temData;
    double onepoint;
    std::string line;
    inData.clear();
    std::ifstream infile(filename);
    std::cout << "reading ..." << std::endl;
    // using getline as the loop condition avoids the classic !eof() pitfall
    while (std::getline(infile, line)) {
        temData.clear();
        if (line.empty())
            continue;
        std::stringstream stringin(line);
        while (stringin >> onepoint) {
            temData.push_back(onepoint);
        }
        // the last column is the label, so the feature dimension is size - 1
        indim = temData.size() - 1;
        inData.push_back(temData);
    }
    std::cout << "total data is " << inData.size() << std::endl;
}
void Perceptron::splitData(const float& trainTotalRatio) {
    // shuffle before splitting; std::random_shuffle was removed in C++17,
    // so use std::shuffle with an explicit engine instead
    std::mt19937 rng(std::random_device{}());
    std::shuffle(inData.begin(), inData.end(), rng);
    unsigned long size = inData.size();
    auto trainSize = static_cast<unsigned long>(size * trainTotalRatio);
    std::cout << "total data is " << size << ", train data has " << trainSize << std::endl;
    for (unsigned long i = 0; i < size; ++i) {
        if (i < trainSize)
            trainData.push_back(inData[i]);
        else
            testData.push_back(inData[i]);
    }
}
void Perceptron::createFeatureGt() {
    // split each sample into features (all but the last column)
    // and ground truth (the last column)
    for (const auto& data : trainData) {
        std::vector<double> trainf(data.begin(), data.end() - 1);
        trainDataF.push_back(trainf);
        trainDataGT.push_back(data.back());
    }
    for (const auto& data : testData) {
        std::vector<double> testf(data.begin(), data.end() - 1);
        testDataF.push_back(testf);
        testDataGT.push_back(data.back());
    }
}
void Perceptron::initialize(std::vector<double>& init) {
    // parameters must be initialized first; the vector holds w followed by b
    if (init.size() != indim + 1) {
        std::cout << "input dimension should be " + std::to_string(indim + 1) << std::endl;
        throw init.size();
    }
    w.assign(init.begin(), init.end() - 1);
    b = init.back();
}
double Perceptron::inference(const std::vector<double>& inputData) {
    // compute w·x + b, used by both loss() and predict()
    if (inputData.size() != indim) {
        std::cout << "input dimension is incorrect." << std::endl;
        throw inputData.size();
    }
    // std::vector has no operator*, so take the dot product explicitly
    double sum_tem = std::inner_product(inputData.begin(), inputData.end(), w.begin(), 0.0);
    sum_tem += b;
    return sum_tem;
}
double Perceptron::loss(const std::vector<double>& inputData, const double& groundTruth) {
    // perceptron loss -y(w·x + b): non-negative exactly when the sample is misclassified
    double infer = inference(inputData);
    double loss = -1.0 * groundTruth * infer;
    std::cout << "loss is " << loss << std::endl;
    return loss;
}
std::pair<std::vector<double>, double> Perceptron::computeGradient(const std::vector<double>& inputData, const double& groundTruth) {
    // update direction is (y·x, y) for a misclassified sample (loss >= 0), zero otherwise
    double lossVal = loss(inputData, groundTruth);
    std::vector<double> wi;
    double bi;
    if (lossVal >= 0.0) {
        for (auto indata : inputData) {
            wi.push_back(indata * groundTruth);
        }
        bi = groundTruth;
    } else {
        wi.assign(inputData.size(), 0.0);
        bi = 0.0;
    }
    // for readability we return the gradient as a pair (grad_w, grad_b);
    // a single vector holding both would work just as well
    return std::pair<std::vector<double>, double>(wi, bi);
}
void Perceptron::train(const int& step, const float& lr) {
    // 2 feature weights + 1 bias, matching the 2-D sample data below
    std::vector<double> init = {1.0, 1.0, 1.0};
    initialize(init);
    std::size_t count = 0;
    for (int i = 0; i < step; ++i) {
        // cycle through the training set; reset after the last sample,
        // not one before it, so the final sample is also used
        if (count == trainDataF.size())
            count = 0;
        std::vector<double> inputData = trainDataF[count];
        double groundTruth = trainDataGT[count];
        auto grad = computeGradient(inputData, groundTruth);
        auto grad_w = grad.first;
        double grad_b = grad.second;
        // the standard perceptron update: w += lr*y*x, b += lr*y
        for (unsigned long j = 0; j < indim; ++j) {
            w[j] += lr * grad_w[j];
        }
        b += lr * grad_b;
        count++;
    }
}
int Perceptron::predict(const std::vector<double>& inputData) {
    // classify by the sign of w·x + b
    double out = inference(inputData);
    return out >= 0.0 ? 1 : -1;
}
/*perceptrondata.txt
3 4 1
1 1 -1
2 4 1
1 2 -1
1 5 1
2 0.5 -1
1 6 1
1 2.5 -1
0.5 6 1
0 1 -1
2 2.5 1
0.5 1 -1
1 4 1
1.5 1 -1
2.7 1 1
2 3.5 1
0.8 3 -1
0.1 4 -1
*/
void Perceptron::run() {
    // remember to change the sample path to where perceptrondata.txt actually lives
    getData("../data/perceptrondata.txt");
    splitData(0.6);  // split the data and store it in trainData / testData
    createFeatureGt();
    train(200, 1.0); // 200 is the number of steps and 1.0 is the learning rate
    std::vector<std::vector<double>> testData = getTestDataFeature();
    std::vector<double> testGT = getTestGT();
    for (std::size_t i = 0; i < testData.size(); ++i) {
        std::cout << i << std::endl;
        std::cout << "The right class is " << testGT[i] << std::endl;
        int out = predict(testData[i]);
        std::cout << "The predict class is " << out << std::endl;
    }
}
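To build and run the example, the three files just need to be compiled together, e.g. g++ -std=c++11 main.cpp perceptron.cpp -o perceptron (this assumes a model_base.h along the lines of the sketch that follows the header listing below); the path passed to getData() in run() may need adjusting to your directory layout.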
perceptron.h
#ifndef MACHINE_LEARNING_PERCEPTRON_H
#define MACHINE_LEARNING_PERCEPTRON_H

#include <vector>
#include <string>
#include <utility>
#include "model_base.h"

class Perceptron : public Base {
private:
    std::vector<double> w;  // weight vector, one entry per feature
    double b;               // bias term
public:
    virtual void getData(const std::string& filename);
    virtual void run();
    void splitData(const float& trainTotalRatio);
    void createFeatureGt();  // split loaded samples into features and ground truth
    void setDim(const unsigned long& iDim) { indim = iDim; }
    double inference(const std::vector<double>&);
    void initialize(std::vector<double>& init);
    void train(const int& step, const float& lr);
    int predict(const std::vector<double>& inputData);
    double loss(const std::vector<double>& inputData, const double& groundTruth);
    std::pair<std::vector<double>, double> computeGradient(const std::vector<double>& inputData, const double& groundTruth);
    std::vector<std::vector<double>> getTestDataFeature() { return testDataF; }
    std::vector<double> getTestGT() { return testDataGT; }
};

#endif //MACHINE_LEARNING_PERCEPTRON_H
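model_base.h is not shown in the original post. The following is a minimal hypothetical sketch of what Base must provide, inferred purely from the members perceptron.cpp uses (indim, inData, trainData, testData, trainDataF, trainDataGT, testDataF, testDataGT) and from main.cpp deleting through a Base pointer; the author's actual file may differ.
model_base.h (hypothetical sketch)
#ifndef MACHINE_LEARNING_MODEL_BASE_H
#define MACHINE_LEARNING_MODEL_BASE_H

#include <string>
#include <vector>

class Base {
public:
    virtual ~Base() = default;  // needed so `delete obj` through Base* in main() is safe
    virtual void getData(const std::string& filename) = 0;
    virtual void run() = 0;
protected:
    unsigned long indim = 0;                     // feature dimension (columns minus label)
    std::vector<std::vector<double>> inData;     // raw samples, label in the last column
    std::vector<std::vector<double>> trainData;  // training split of inData
    std::vector<std::vector<double>> testData;   // test split of inData
    std::vector<std::vector<double>> trainDataF; // training features
    std::vector<double> trainDataGT;             // training labels
    std::vector<std::vector<double>> testDataF;  // test features
    std::vector<double> testDataGT;              // test labels
};

#endif //MACHINE_LEARNING_MODEL_BASE_H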
K-Nearest Neighbors
Naive Bayes
Decision Tree
Logistic Regression
Support Vector Machine
AdaBoost
GMM
Source: https://blog.csdn.net/seek97/article/details/119206336