This article shares the full code for a simple crawler written in Python with tornado, for your reference. The details are as follows.
The code comes from the examples in the official documentation, but it is still a little hard to read for a tornado newcomer, so I added comments to make it easier to understand. The code is as follows:
#!/usr/bin/env python
# coding=utf-8

import time
from datetime import timedelta

try:
    # Python 2
    from HTMLParser import HTMLParser
    from urlparse import urljoin, urldefrag
except ImportError:
    # Python 3
    from html.parser import HTMLParser
    from urllib.parse import urljoin, urldefrag

from tornado import httpclient, gen, ioloop, queues

# The site to crawl
base_url = 'http://www.baidu.com'
# Number of concurrent workers
concurrency = 10


# Fetch the page at `url` and return the links found on it
@gen.coroutine
def get_links_from_url(url):
    try:
        # Issue an asynchronous request for the URL
        response = yield httpclient.AsyncHTTPClient().fetch(url)
        print('fetched %s' % url)

        # If the response body is bytes, decode it first
        html = response.body if isinstance(response.body, str) \
            else response.body.decode(errors='ignore')
        # Build the list of absolute URLs
        urls = [urljoin(url, remove_fragment(new_url))
                for new_url in get_links(html)]
    except Exception as e:
        print('Exception: %s %s' % (e, url))
        # On error, return an empty list
        raise gen.Return([])

    # Return the list of URLs
    raise gen.Return(urls)


def remove_fragment(url):
    # Strip the fragment (anchor) from the URL
    pure_url, frag = urldefrag(url)
    return pure_url


def get_links(html):
    # Extract the URLs from an HTML page
    class URLSeeker(HTMLParser):
        def __init__(self):
            HTMLParser.__init__(self)
            self.urls = []

        def handle_starttag(self, tag, attrs):
            href = dict(attrs).get('href')
            if href and tag == 'a':
                self.urls.append(href)

    url_seeker = URLSeeker()
    url_seeker.feed(html)
    return url_seeker.urls


@gen.coroutine
def main():
    # Create the work queue
    q = queues.Queue()
    # Record the start time
    start = time.time()
    # Two sets: URLs being fetched and URLs already fetched
    fetching, fetched = set(), set()

    @gen.coroutine
    def fetch_url():
        # Take a URL out of the queue
        current_url = yield q.get()
        try:
            # If this URL is already being fetched, skip it
            if current_url in fetching:
                return

            print('fetching %s' % current_url)
            # Otherwise add it to the "fetching" set
            fetching.add(current_url)
            # Follow the links found on the newly fetched page
            urls = yield get_links_from_url(current_url)
            # Put the URL that has finished fetching into the second set
            fetched.add(current_url)

            for new_url in urls:
                # Only follow links beneath the base URL
                if new_url.startswith(base_url):
                    yield q.put(new_url)

        finally:
            # Mark this queue item as done
            q.task_done()

    @gen.coroutine
    def worker():
        while True:
            # Keep the worker running forever
            yield fetch_url()

    # Put the first URL into the queue
    q.put(base_url)

    # Start workers, then wait for the work queue to be empty.
    for _ in range(concurrency):
        worker()
    # Wait until every queued item has been processed
    yield q.join(timeout=timedelta(seconds=300))
    # When the crawl finishes, the two sets should be equal
    assert fetching == fetched
    # Print the elapsed time
    print('Done in %d seconds, fetched %s URLs.' % (
        time.time() - start, len(fetched)))


if __name__ == '__main__':
    io_loop = ioloop.IOLoop.current()
    io_loop.run_sync(main)
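The part of the code above that most often trips up newcomers is how q.put(), q.get(), q.task_done() and q.join() cooperate. Below is a minimal, self-contained sketch of just that pattern. It is not part of the original example: the names queue_demo and consumer are invented for illustration, and it assumes Tornado 4.2 or newer, where tornado.queues is available.

# A minimal sketch of the Queue coordination pattern used by the crawler above.
# The names `queue_demo` and `consumer` are invented for this illustration.
from tornado import gen, ioloop, queues


@gen.coroutine
def queue_demo():
    q = queues.Queue()
    for i in range(3):
        # put() enqueues an item; join() will not resolve until each item
        # has received a matching task_done()
        yield q.put(i)

    @gen.coroutine
    def consumer():
        while True:
            # get() waits until an item is available
            item = yield q.get()
            try:
                print('got %s' % item)
            finally:
                # Tell the queue this item has been fully processed
                q.task_done()

    # Start the consumer "in the background"; calling a coroutine function
    # schedules it on the IOLoop without blocking here
    consumer()

    # Resolves once every put() has been matched by a task_done()
    yield q.join()
    print('all items handled')


if __name__ == '__main__':
    ioloop.IOLoop.current().run_sync(queue_demo)

Because q.join() only resolves after every enqueued item has been marked with task_done(), the crawler calls q.task_done() in a finally block so that even a failed fetch does not leave the queue counter stuck and keep main() waiting.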
That is all for this article. I hope it helps with your learning, and I hope you will continue to support 服务器之家.
Original article: https://blog.csdn.net/wf134/article/details/79900407