This experiment implements item-based collaborative filtering on the MovieLens dataset. For now it only runs on a small sample; the main problem is that the computation uses too much memory, so the code will be optimized and improved later.
Dataset notes: ratings.dat contains users' ratings of movies, one record per line in the format UserID::MovieID::Rating::Timestamp.
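As a quick sanity check of that format, a single record can be split on the '::' separator. This is only a minimal sketch; the sample line is the example record quoted in the docstring of the code below:

# Minimal sketch: parse one UserID::MovieID::Rating::Timestamp record.
line = "1::122::5::838985046"   # example record from the docstring in the code below
user_id, movie_id, rating, timestamp = line.split("::")
print(int(user_id), int(movie_id), float(rating), int(timestamp))   # -> 1 122 5.0 838985046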
Code
import pandas as pd
import numpy as np
import math
import os
import time
import datetime

os.chdir(r'f:\zxx\pthon_work\CF')


def loadData():
    # Read ratings.dat (movies.dat and tags.dat are left commented out for now)
    # mnames = ['movie_id', 'title', 'genres']
    # movies = pd.read_table(r'.\data\movies.dat', sep='::', header=None, names=mnames)
    rnames = ['UserID', 'MovieID', 'Rating', 'Timestamp']
    all_ratings = pd.read_table(r'.\data\ratings.dat', sep='::', header=None,
                                names=rnames, nrows=300000, engine='python')
    # tnames = ['UserID', 'MovieID', 'Tag', 'Timestamp']
    # tags = pd.read_table(r'.\data\tags.dat', sep='::', header=None, names=tnames)
    return all_ratings


# Data exploration on the ratings
def data_alay(ratings):
    """The full ratings file has 10000054 records; example record: 1 122 5 838985046;
    columns: 'UserID', 'MovieID', 'Rating', 'Timestamp'."""
    # Each user rates a given movie only once
    UR = ratings.groupby([ratings['UserID'], ratings['MovieID']])
    return len(UR.size())


# Average rating of every movie (10677 movies in the full dataset)
def avgRating(ratings):
    movies_mean = ratings['Rating'].groupby(ratings['MovieID']).mean()
    # Average rating that all users gave to each movie
    movies_id = movies_mean.index
    movies_avg_rating = movies_mean.values
    return movies_id, movies_avg_rating, movies_mean


# Build the movie similarity "matrix" (conceptually 10677 x 10677),
# stored here as a dict keyed by 'movieA:movieB'
def calculatePC(ratings):
    movies_id, movies_avg_rating, movies_mean = avgRating(ratings)
    pc_dic = {}
    top_movie = len(movies_id)
    for i in range(0, top_movie):
        for j in range(i + 1, top_movie):
            movieAID = movies_id[i]
            movieBID = movies_id[j]
            see_moviesA_user = ratings['UserID'][ratings['MovieID'] == movieAID]
            see_moviesB_user = ratings['UserID'][ratings['MovieID'] == movieBID]
            # Users who rated both movie A and movie B
            join_user = np.intersect1d(see_moviesA_user.values, see_moviesB_user.values)
            movieA_avg = movies_mean[movieAID]
            movieB_avg = movies_mean[movieBID]
            key1 = str(movieAID) + ':' + str(movieBID)
            key2 = str(movieBID) + ':' + str(movieAID)
            value = twoMoviesPC(join_user, movieAID, movieBID, movieA_avg, movieB_avg, ratings)
            pc_dic[key1] = value
            pc_dic[key2] = value
            # print('---the %s, %d,%d:--movie %s--%s--pc is %f' % (key1, movieAID, movieBID, movieAID, movieBID, pc_dic[key1]))
    return pc_dic


# Similarity between movie A and movie B (Pearson):
# pc = sum((A - A_mean) * (B - B_mean)) / sqrt(sum((A - A_mean)^2) * sum((B - B_mean)^2))
def twoMoviesPC(join_user, movieAID, movieBID, movieA_avg, movieB_avg, ratings):
    cent_AB_sum = 0.0   # numerator of the similarity
    centA_sum = 0.0     # part of the denominator
    centB_sum = 0.0     # part of the denominator
    movieAB_pc = 0.0    # similarity between movies A and B
    count = 0
    for u in range(len(join_user)):
        count = count + 1
        # This user's rating of movie A and of movie B
        ratA = ratings['Rating'][(ratings['UserID'] == join_user[u]) & (ratings['MovieID'] == movieAID)].values[0]
        ratB = ratings['Rating'][(ratings['UserID'] == join_user[u]) & (ratings['MovieID'] == movieBID)].values[0]
        cent_AB = (ratA - movieA_avg) * (ratB - movieB_avg)        # mean-centered product
        centA_square = (ratA - movieA_avg) * (ratA - movieA_avg)   # mean-centered square
        centB_square = (ratB - movieB_avg) * (ratB - movieB_avg)   # mean-centered square
        cent_AB_sum = cent_AB_sum + cent_AB
        centA_sum = centA_sum + centA_square
        centB_sum = centB_sum + centB_square
    if centA_sum > 0 and centB_sum > 0:
        movieAB_pc = cent_AB_sum / math.sqrt(centA_sum * centB_sum)
    return movieAB_pc


"""
Predicting which movies user U will be interested in takes three steps:
1) Collect the movies user U watched in the last X days.
2) Remove the movies user U has already seen, then use the watched movies to
   score every remaining movie for user U.
3) Recommend the highest-scoring movies to user U.

Predicting user U's rating for one movie C takes three steps (only this part is done here):
1) Collect the movies user U watched in the last X days.
2) Predict user U's rating for movie C with the weighted mean-centered formula.
"""


# Date handling: subtract 3 days, then convert back to a Unix timestamp
def timePro(last_rat_time, UserU):
    lastDate = datetime.datetime.fromtimestamp(last_rat_time[UserU])   # Unix timestamp -> datetime
    date_sub3 = lastDate + datetime.timedelta(days=-3)                 # go back 3 days
    unix_sub3 = time.mktime(date_sub3.timetuple())                     # datetime -> Unix timestamp
    return unix_sub3


# Use the movies the user rated in the 3 days before their last rating for the prediction
def getHisRat(ratings, last_rat_time, UserUID):
    unix_sub3 = timePro(last_rat_time, UserUID)
    UserU_info = ratings[(ratings['UserID'] == UserUID) & (ratings['Timestamp'] > unix_sub3)]
    return UserU_info


# Predict user U's rating for a given movie (MovieA)
def hadSeenMovieByUser(UserUID, MovieA, ratings, pc_dic, movies_mean):
    pre_rating = 0.0
    last_rat_time = ratings['Timestamp'].groupby([ratings['UserID']]).max()   # each user's most recent rating time
    UserU_info = getHisRat(ratings, last_rat_time, UserUID)                   # movies user U rated recently
    flag = 0      # 1 if user U has already rated MovieA, 0 if it is a new movie for them
    wmv = 0.0     # sum of similarity * (user's rating of mv - mv's mean rating)
    w = 0.0       # sum of similarities
    movie_userU = UserU_info['MovieID'].values   # movies the current user has seen
    if MovieA in movie_userU:
        flag = 1
        pre_rating = UserU_info['Rating'][UserU_info['MovieID'] == MovieA].values[0]
    else:
        for mv in movie_userU:
            key = str(mv) + ':' + str(MovieA)
            # User U's rating of the already-seen movie mv
            rat_U_mv = UserU_info['Rating'][(UserU_info['MovieID'] == mv) & (UserU_info['UserID'] == UserUID)].values[0]
            wmv = wmv + pc_dic[key] * (rat_U_mv - movies_mean[mv])   # similarity * mean-centered rating
            w = w + pc_dic[key]                                      # similarity between the seen movie and the new movie
            # print('---have seen mv %d with new mv %d,%f,%f' % (mv, MovieA, wmv, w))
        pre_rating = movies_mean[MovieA] + wmv / w
    print('-flag:%d---User:%d rating movie:%d with %f score----' % (flag, UserUID, MovieA, pre_rating))
    return pre_rating, flag


if __name__ == '__main__':
    all_ratings = loadData()
    movie_num = 100   # only keep movies with MovieID <= movie_num, otherwise the data is too large
    ratings = all_ratings[all_ratings['MovieID'] <= movie_num]
    movies_id, movies_avg_rating, movies_mean = avgRating(ratings)
    pc_dic = calculatePC(ratings)   # movie similarity "matrix"
    # Prediction
    UserUID = 10      # in this subset, user 10 has only seen movies 4 and 7
    MovieA = 6
    pre_rating, flag = hadSeenMovieByUser(UserUID, MovieA, ratings, pc_dic, movies_mean)

    # ----------------- how the test IDs were picked -----------------
    # Pick a UserUID: look at ratings.head(10) and choose a user ID that appears there, e.g. UserID = 10.
    # Check which movies that user has seen in the current subset, so the "new" movie is not one
    # they have already rated:
    #   ratings[ratings['UserID'] == 10]
    # User 10 has only seen MovieID in (4, 7) here, so any other ID, e.g. 6, can be used for prediction.
Result (flag 0 means movie 6 is new to user 10, so the score is a prediction rather than an existing rating):
-flag:0---User:10 rating movie:6 with 4.115996 score----
Original article: https://blog.csdn.net/zhouwenyuan1015/article/details/72584727