机器学习——基础算法(五)

编程入门 行业动态 更新时间:2024-10-28 10:23:04

机器学习——基础算法(五)

机器学习——基础算法(五)

文章目录

  • 机器学习——基础算法(五)
    • 一、XGBoost官网
    • 二、安装XGBoost过程
    • 三、Kaggle简介
    • 四、数据预处理-清洗
    • 五、0-1的one-hot编码
    • 六、用python读取矩阵形式的数据
    • 七、使用logistic,随机森林,XGBoost进行分类

机器学习——基础算法(五)

一、XGBoost官网

 官网:https://xgboost.readthedocs.io/

二、安装XGBoost过程

三、Kaggle简介

Kaggle是一个数据分析的竞赛平台
网址:https://www.kaggle.com/

四、数据预处理-清洗

五、0-1的one-hot编码

六、用python读取矩阵形式的数据


基本的数据读取方法:每一行分别读,一行是一个x样本,按照空格分开,每行的第一个数是y值,后面的依据冒号分成2部分,有值的标出,没值的不看,得到一个稀疏矩阵,然后再转为稠密矩阵。

#!/usr/bin/python
# -*- encoding:utf-8 -*-
"""Wine classification demo.

In wine.data, column 0 is the class label y and the remaining columns are
the features x.  Trains a LogisticRegression baseline and an XGBoost
multi-class model on a 50/50 train/test split and prints both accuracies.
"""
import xgboost as xgb
import numpy as np
from sklearn.model_selection import train_test_split   # cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

if __name__ == "__main__":
    # Exercise: try reading the file with pandas instead.
    # Comma-separated data is readable with numpy once the delimiter is set.
    data = np.loadtxt('C:/Users/forever/Desktop/wine.data', dtype=float, delimiter=',')
    # Split columns: [0, 1) -> y, [1, end) -> x.
    y, x = np.split(data, (1,), axis=1)
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1, test_size=0.5)

    # Logistic regression (L2 penalty) baseline.
    lr = LogisticRegression(penalty='l2')
    lr.fit(x_train, y_train.ravel())
    y_hat = lr.predict(x_test)
    print('Logistic回归正确率:', accuracy_score(y_test, y_hat))

    # XGBoost requires class labels starting at 0, so remap class 3 -> 0.
    # (Relabeling is consistent on train and test, so accuracy is unaffected.)
    y_train[y_train == 3] = 0
    y_test[y_test == 3] = 0
    data_train = xgb.DMatrix(x_train, label=y_train)
    data_test = xgb.DMatrix(x_test, label=y_test)
    watch_list = [(data_test, 'eval'), (data_train, 'train')]
    # 3-way softmax objective.
    params = {'max_depth': 3, 'eta': 1, 'silent': 0, 'objective': 'multi:softmax', 'num_class': 3}
    # Two boosting rounds are enough on this small dataset.
    bst = xgb.train(params, data_train, num_boost_round=2, evals=watch_list)
    y_hat = bst.predict(data_test)
    print('XGBoost正确率:', accuracy_score(y_test, y_hat))

七、使用logistic,随机森林,XGBoost进行分类

#!/usr/bin/python
# -*- encoding:utf-8 -*-
"""Titanic survival prediction.

Cleans the Kaggle Titanic data (sex encoding, median-fare imputation,
random-forest age imputation, one-hot embarkation port), then compares
LogisticRegression, RandomForestClassifier and XGBoost.
"""
import xgboost as xgb
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
import pandas as pd
import csv


def show_accuracy(a, b, tip):
    """Print and return the element-wise match rate (percent) of a vs b."""
    acc = a.ravel() == b.ravel()
    acc_rate = 100 * float(acc.sum()) / a.size
    print('%s正确率:%.3f%%' % (tip, acc_rate))
    return acc_rate


def load_data(file_name, is_train):
    """Read and clean a Titanic CSV.

    Returns (x, y) when is_train, else (x, PassengerId).  The training
    path also uses 'Survived' as an age-imputation feature.
    """
    data = pd.read_csv(file_name)  # data file path
    print('data.describe() = \n', data.describe())

    # Sex: encode female -> 0, male -> 1.
    data['Sex'] = data['Sex'].map({'female': 0, 'male': 1}).astype(int)

    # Fill missing fares with the median fare of the passenger's class.
    if len(data.Fare[data.Fare.isnull()]) > 0:
        fare = np.zeros(3)
        for f in range(0, 3):
            fare[f] = data[data.Pclass == f + 1]['Fare'].dropna().median()
        for f in range(0, 3):  # loop 0 to 2
            data.loc[(data.Fare.isnull()) & (data.Pclass == f + 1), 'Fare'] = fare[f]

    # Age: a simpler alternative is to substitute the mean:
    # mean_age = data['Age'].dropna().mean()
    # data.loc[(data.Age.isnull()), 'Age'] = mean_age
    if is_train:
        # Age: predict missing ages with a random forest regressor.
        print('随机森林预测缺失年龄:--start--')
        data_for_age = data[['Age', 'Survived', 'Fare', 'Parch', 'SibSp', 'Pclass']]
        age_exist = data_for_age.loc[(data.Age.notnull())]   # rows with a known age
        age_null = data_for_age.loc[(data.Age.isnull())]
        x = age_exist.values[:, 1:]
        y = age_exist.values[:, 0]
        rfr = RandomForestRegressor(n_estimators=1000)
        rfr.fit(x, y)
        age_hat = rfr.predict(age_null.values[:, 1:])
        data.loc[(data.Age.isnull()), 'Age'] = age_hat
        print('随机森林预测缺失年龄:--over--')
    else:
        # Test data has no 'Survived' column, so impute age without it.
        print('随机森林预测缺失年龄2:--start--')
        data_for_age = data[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
        age_exist = data_for_age.loc[(data.Age.notnull())]  # rows with a known age
        age_null = data_for_age.loc[(data.Age.isnull())]
        x = age_exist.values[:, 1:]
        y = age_exist.values[:, 0]
        rfr = RandomForestRegressor(n_estimators=1000)
        rfr.fit(x, y)
        age_hat = rfr.predict(age_null.values[:, 1:])
        data.loc[(data.Age.isnull()), 'Age'] = age_hat
        print('随机森林预测缺失年龄2:--over--')

    # Embarkation port: default missing values to 'S', then one-hot encode.
    data.loc[(data.Embarked.isnull()), 'Embarked'] = 'S'
    # data['Embarked'] = data['Embarked'].map({'S': 0, 'C': 1, 'Q': 2, 'U': 0}).astype(int)
    embarked_data = pd.get_dummies(data.Embarked)
    print(embarked_data)
    # Alternative full-name columns:
    # embarked_data = embarked_data.rename(columns={'S': 'Southampton', 'C': 'Cherbourg', 'Q': 'Queenstown', 'U': 'UnknownCity'})
    embarked_data = embarked_data.rename(columns=lambda x: 'Embarked_' + str(x))
    data = pd.concat([data, embarked_data], axis=1)
    print(data.describe())
    data.to_csv('New_Data.csv')

    x = data[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked_C', 'Embarked_Q', 'Embarked_S']]
    # x = data[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]
    y = None
    if 'Survived' in data:
        y = data['Survived']
    x = np.array(x)
    y = np.array(y)
    # Think: what does replicating the data 5x actually change?
    x = np.tile(x, (5, 1))
    y = np.tile(y, (5, ))
    if is_train:
        return x, y
    return x, data['PassengerId']


def write_result(c, c_type):
    """Predict on the test file with classifier c and dump a submission CSV.

    c_type: 1 = LogisticRegression, 2 = RandomForest, 3 = XGBoost booster
    (the latter needs its input wrapped in a DMatrix).
    """
    file_name = 'Titanic.test.csv'
    x, passenger_id = load_data(file_name, False)
    # BUG FIX: original compared the builtin `type` to 3 (always False),
    # so XGBoost predictions never got a DMatrix.
    if c_type == 3:
        x = xgb.DMatrix(x)
    y = c.predict(x)
    # Threshold probabilities at 0.5.
    y[y > 0.5] = 1
    y[~(y > 0.5)] = 0
    # BUG FIX: csv.writer needs a text-mode file with newline='' on
    # Python 3; binary mode ("wb") raises a TypeError.
    with open("Prediction_%d.csv" % c_type, "w", newline="") as predictions_file:
        open_file_object = csv.writer(predictions_file)
        open_file_object.writerow(["PassengerId", "Survived"])
        open_file_object.writerows(zip(passenger_id, y))


if __name__ == "__main__":
    # Load the cleaned x / y training data.
    x, y = load_data('C:/Users/forever/Desktop/Titanic.train.csv', True)
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=1)

    lr = LogisticRegression(penalty='l2')
    lr.fit(x_train, y_train)
    y_hat = lr.predict(x_test)
    lr_acc = accuracy_score(y_test, y_hat)
    # write_result(lr, 1)

    rfc = RandomForestClassifier(n_estimators=100)
    rfc.fit(x_train, y_train)
    y_hat = rfc.predict(x_test)
    rfc_acc = accuracy_score(y_test, y_hat)
    # write_result(rfc, 2)

    # XGBoost
    data_train = xgb.DMatrix(x_train, label=y_train)
    data_test = xgb.DMatrix(x_test, label=y_test)
    watch_list = [(data_test, 'eval'), (data_train, 'train')]
    param = {'max_depth': 6, 'eta': 0.8, 'silent': 1, 'objective': 'binary:logistic'}
    # 'subsample': 1, 'alpha': 0, 'lambda': 0, 'min_child_weight': 1}
    bst = xgb.train(param, data_train, num_boost_round=100, evals=watch_list)
    y_hat = bst.predict(data_test)
    # write_result(bst, 3)
    y_hat[y_hat > 0.5] = 1
    y_hat[~(y_hat > 0.5)] = 0
    xgb_acc = accuracy_score(y_test, y_hat)

    # BUG FIX: accuracy_score returns a fraction in [0, 1]; scale by 100
    # so the '%' suffix is not off by a factor of 100.
    print('Logistic回归:%.3f%%' % (100 * lr_acc))
    print('随机森林:%.3f%%' % (100 * rfc_acc))
    print('XGBoost:%.3f%%' % (100 * xgb_acc))
#!/usr/bin/python
# -*- encoding:utf-8 -*-
"""Iris classification demo.

NOTE: in iris.data the LAST column (index 4) is the class name; the first
four columns are the features.  Trains an XGBoost softmax model, then
compares LogisticRegressionCV and RandomForestClassifier on the same split.
"""
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.model_selection import train_test_split   # cross_validation
from sklearn.linear_model import LogisticRegressionCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score


def iris_type(s):
    """Map a raw iris class name (bytes, as numpy passes it) to 0/1/2."""
    it = {b'Iris-setosa': 0, b'Iris-versicolor': 1, b'Iris-virginica': 2}
    return it[s]


if __name__ == "__main__":
    path = u'C:/Users/forever/Desktop/8.Regression/iris.data'  # data file path
    # Alternative: read with numpy, converting the label column via iris_type:
    # data = np.loadtxt(path, dtype=float, delimiter=',', converters={4: iris_type})
    data = pd.read_csv(path, header=None)  # read the file with pandas
    x, y = data[range(4)], data[4]
    # Encode the string labels as integer codes 0..2.
    y = pd.Categorical(y).codes
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1, test_size=50)

    data_train = xgb.DMatrix(x_train, label=y_train)
    data_test = xgb.DMatrix(x_test, label=y_test)
    watch_list = [(data_test, 'eval'), (data_train, 'train')]
    param = {'max_depth': 2, 'eta': 0.3, 'silent': 1, 'objective': 'multi:softmax', 'num_class': 3}
    bst = xgb.train(param, data_train, num_boost_round=6, evals=watch_list)
    y_hat = bst.predict(data_test)
    result = y_test.reshape(1, -1) == y_hat
    print('正确率:\t', float(np.sum(result)) / len(y_hat))
    print('END.....\n')

    # Logistic regression with 3-fold CV over 10 candidate C values, and a
    # random forest of 30 trees using the Gini criterion.
    models = [('LogisticRegression', LogisticRegressionCV(Cs=10, cv=3)),
              ('RandomForest', RandomForestClassifier(n_estimators=30, criterion='gini'))]
    for name, model in models:
        model.fit(x_train, y_train)
        print(name, '训练集正确率:', accuracy_score(y_train, model.predict(x_train)))
        print(name, '测试集正确率:', accuracy_score(y_test, model.predict(x_test)))

更多推荐

机器学习——基础算法(五)

本文发布于:2024-02-25 07:13:31,感谢您对本站的认可!
本文链接:https://www.elefans.com/category/jswz/34/1698203.html
版权声明:本站内容均来自互联网,仅供演示用,请勿用于商业和其他非法用途。如果侵犯了您的权益请与我们联系,我们将在24小时内删除。
本文标签:算法   机器   基础

发布评论

评论列表 (有 0 条评论)
草根站长

>www.elefans.com

编程频道|电子爱好者 - 技术资讯及电子产品介绍!