Since the previous post, Z Story: Using Django with GAE Python to crawl the full text of pages from multiple sites in the background, the overall progress is as follows:
1. Added Cron: it tells the program to wake a task every 30 minutes, which then visits the specified blogs and crawls their latest updates
2. Used Google's Datastore to store whatever the crawler brings back on each run -- only new content is stored.
As mentioned last time, this gives a big performance boost: previously the crawler was only woken up after each request, so it took about 17 seconds for the backend to deliver output to the frontend; now it takes under 2 seconds
3. Optimized the crawler
1. cron.yaml: scheduling when each task wakes up
After digging through the docs and asking questions, I finally figured out how Google's cron works -- it is really just Google virtually visiting a URL we specify ourselves, at an interval we specify...
So under Django there is no need to write a standalone Python script at all. In particular, do not write:
if __name__=="__main__":
You only need to configure a URL and put the handler in views.py:
def updatePostsDB(request):
    #deleteAll()
    SiteInfos = []
    SiteInfo = {}
    SiteInfo['PostSite'] = "L2ZStory"
    SiteInfo['feedurl'] = "feed://l2zstory.wordpress.com/feed/"
    SiteInfo['blog_type'] = "wordpress"
    SiteInfos.append(SiteInfo)
    SiteInfo = {}
    SiteInfo['PostSite'] = "YukiLife"
    SiteInfo['feedurl'] = "feed://blog.sina.com.cn/rss/1583902832.xml"
    SiteInfo['blog_type'] = "sina"
    SiteInfos.append(SiteInfo)
    SiteInfo = {}
    SiteInfo['PostSite'] = "ZLife"
    SiteInfo['feedurl'] = "feed://ireallife.wordpress.com/feed/"
    SiteInfo['blog_type'] = "wordpress"
    SiteInfos.append(SiteInfo)
    SiteInfo = {}
    SiteInfo['PostSite'] = "ZLife_Sina"
    SiteInfo['feedurl'] = "feed://blog.sina.com.cn/rss/1650910587.xml"
    SiteInfo['blog_type'] = "sina"
    SiteInfos.append(SiteInfo)
    try:
        for site in SiteInfos:
            feedurl = site['feedurl']
            blog_type = site['blog_type']
            PostSite = site['PostSite']
            PostInfos = getPostInfosFromWeb(feedurl, blog_type)
            recordToDB(PostSite, PostInfos)
        Msg = "Cron Job Done..."
    except Exception, e:
        Msg = str(e)
    return HttpResponse(Msg)
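The view above leans on two helpers that are not shown in this post: getPostInfosFromWeb and recordToDB. As a rough idea of the first one, here is a hedged sketch that dispatches on blog_type: it pulls the RSS feed with urllib and minidom, and for Sina blogs (whose feeds only carry excerpts) swaps the description for the full page text via the getPageContent function listed near the end of this post; recordToDB is sketched in the Datastore section below. Every name and detail here is an assumption, not the app's actual code.

# A hedged sketch of getPostInfosFromWeb -- the real implementation is not shown
# in this post, so every name and detail here is an illustrative assumption.
import urllib
from xml.dom import minidom

def _text(item, tag):
    # First text/CDATA child of the first <tag> element, or "" if missing
    nodes = item.getElementsByTagName(tag)
    if nodes and nodes[0].firstChild is not None:
        return nodes[0].firstChild.data
    return ""

def getPostInfosFromWeb(feedurl, blog_type):
    # urllib does not understand the "feed://" scheme, so swap it for "http://"
    url = feedurl.replace("feed://", "http://")
    xml = urllib.urlopen(url).read()
    dom = minidom.parseString(xml)
    PostInfos = []
    for item in dom.getElementsByTagName("item"):
        post = {}
        post['title'] = _text(item, 'title')
        post['link'] = _text(item, 'link')
        post['date'] = _text(item, 'pubDate')
        post['desc'] = _text(item, 'description')
        if blog_type == "sina":
            # Sina feeds only carry excerpts, so fetch the full article body
            # with the pyquery-based getPageContent shown later in this post
            post['desc'] = getPageContent(post['link'])
        PostInfos.append(post)
    return PostInfos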
cron.yaml has to live at the same level as app.yaml:
cron:
- description: retrieve newest posts
  url: /task_updatePosts/
  schedule: every 30 minutes
In urls.py, you just need to map that URL to updatePostsDB and you're done.
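For reference, a minimal sketch of that urls.py entry, in the old GAE-era Django style and with a placeholder module path (adjust "myapp.views" to wherever updatePostsDB actually lives):

# urls.py -- minimal sketch; "myapp.views" is a placeholder module path
from django.conf.urls.defaults import patterns

urlpatterns = patterns('',
    # Must match the url entry in cron.yaml above
    (r'^task_updatePosts/$', 'myapp.views.updatePostsDB'),
)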
Debugging this cron job can only be described as brutal... On Stack Overflow a great many people ask why their cron won't run... At first I was sweating and completely lost as well... In the end I got it working largely by luck, and the general steps are rather vague, but very down-to-earth:
First, make absolutely sure your program has no syntax errors... Then try visiting that URL by hand; if cron is healthy, the task should get executed at that point. Finally, if all else fails, go read the logs...
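For the manual check, something as small as this will do (localhost:8080 assumes the local dev_appserver; use your deployed app's domain otherwise):

# Hit the cron URL by hand and see what the view returns
import urllib
print urllib.urlopen("http://localhost:8080/task_updatePosts/").read()
# Prints "Cron Job Done..." if updatePostsDB ran without errors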
2. Configuring and using the Datastore -- Using Datastore with Django
My needs here are very simple -- no joins... so I went straight for the bare-bones django-helper.
This models.py is the key piece:
from appengine_django.models import BaseModel
from google.appengine.ext import db

class PostsDB(BaseModel):
    link = db.LinkProperty()
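Only the link field is shown above; as a fuller picture, here is a hedged sketch of how the rest of the model and the recordToDB helper used by the cron view might look. The extra field names and the duplicate check are assumptions, based on what the crawler collects and on the "only store new content" behaviour described at the top of this post.

# A hedged sketch -- field names beyond link, and the dedup logic, are assumptions
from appengine_django.models import BaseModel
from google.appengine.ext import db

class PostsDB(BaseModel):
    link = db.LinkProperty()
    title = db.StringProperty()
    post_site = db.StringProperty()   # which blog the post came from
    date = db.StringProperty()
    description = db.TextProperty()   # full article text

def recordToDB(PostSite, PostInfos):
    for info in PostInfos:
        # "Only store new content": skip any post whose link is already stored
        existing = PostsDB.gql("WHERE link = :1", db.Link(info['link'])).get()
        if existing:
            continue
        post = PostsDB(link=info['link'],
                       title=info['title'],
                       post_site=PostSite,
                       date=info['date'],
                       description=info['desc'])
        post.put()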
py" id="highlighter_816192">
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
|
import urllib
#from BeautifulSoup import BeautifulSoup
from pyquery import PyQuery as pq

def getArticleList(url):
    lstArticles = []
    url_prefix = url[:-6]
    Cnt = 1
    response = urllib.urlopen(url)
    html = response.read()
    d = pq(html)
    try:
        pageCnt = d("ul.SG_pages").find('span')
        pageCnt = int(d(pageCnt).text()[1:-1])
    except:
        pageCnt = 1
    for i in range(1, pageCnt + 1):
        url = url_prefix + str(i) + ".html"
        #print url
        response = urllib.urlopen(url)
        html = response.read()
        d = pq(html)
        title_spans = d(".atc_title").find('a')
        date_spans = d('.atc_tm')
        for j in range(0, len(title_spans)):
            titleObj = title_spans[j]
            dateObj = date_spans[j]
            article = {}
            article['link'] = d(titleObj).attr('href')
            article['title'] = d(titleObj).text()
            article['date'] = d(dateObj).text()
            article['desc'] = getPageContent(article['link'])
            lstArticles.append(article)
    return lstArticles

def getPageContent(url):
    #get Page Content
    response = urllib.urlopen(url)
    html = response.read()
    d = pq(html)
    pageContent = d("div.articalContent").text()
    #print pageContent
    return pageContent

def main():
    url = 'http://blog.sina.com.cn/s/articlelist_1191258123_0_1.html' #Han Han
    url = "http://blog.sina.com.cn/s/articlelist_1225833283_0_1.html" #Gu Du Chuan Ling
    url = "http://blog.sina.com.cn/s/articlelist_1650910587_0_1.html" #Feng
    url = "http://blog.sina.com.cn/s/articlelist_1583902832_0_1.html" #Yuki
    lstArticles = getArticleList(url)
    for article in lstArticles:
        f = open("blogs/" + article['date'] + "_" + article['title'] + ".txt", 'w')
        f.write(article['desc'].encode('utf-8')) # mind the encoding when writing Chinese text
        f.close()
        #print article['desc']

if __name__ == '__main__':
    main()