Program flow of a website crawler that collects all external links
The example below crawls this site, starting from its article on plotting bar charts in Python, and collects every external link it encounters; you can use it as a reference.
Complete code:
#! /usr/bin/env python
# coding=utf-8
import urllib2
from bs4 import BeautifulSoup
import re
import datetime
import random

pages = set()
random.seed(datetime.datetime.now())

# Retrieves a list of all internal links found on a page
def getInternalLinks(bsObj, includeUrl):
    internalLinks = []
    # Finds all links that begin with a "/" or contain the current domain
    for link in bsObj.findAll("a", href=re.compile("^(/|.*" + includeUrl + ")")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in internalLinks:
                internalLinks.append(link.attrs['href'])
    return internalLinks

# Retrieves a list of all external links found on a page
def getExternalLinks(bsObj, excludeUrl):
    externalLinks = []
    # Finds all links that start with "http" or "www" that do
    # not contain the current URL
    for link in bsObj.findAll("a", href=re.compile("^(http|www)((?!" + excludeUrl + ").)*$")):
        if link.attrs['href'] is not None:
            if link.attrs['href'] not in externalLinks:
                externalLinks.append(link.attrs['href'])
    return externalLinks

# Strips the scheme and splits the address on "/",
# so addressParts[0] is the bare domain
def splitAddress(address):
    addressParts = address.replace("http://", "").split("/")
    return addressParts

def getRandomExternalLink(startingPage):
    html = urllib2.urlopen(startingPage)
    bsObj = BeautifulSoup(html, "html.parser")
    externalLinks = getExternalLinks(bsObj, splitAddress(startingPage)[0])
    if len(externalLinks) == 0:
        # No external links on this page; fall back to a random internal one
        internalLinks = getInternalLinks(bsObj, splitAddress(startingPage)[0])
        return internalLinks[random.randint(0, len(internalLinks) - 1)]
    else:
        return externalLinks[random.randint(0, len(externalLinks) - 1)]

def followExternalOnly(startingSite):
    externalLink = getRandomExternalLink(startingSite)
    print("Random external link is: " + externalLink)
    followExternalOnly(externalLink)

# Collects a list of all external URLs found on the site
allExtLinks = set()
allIntLinks = set()

def getAllExternalLinks(siteUrl):
    html = urllib2.urlopen(siteUrl)
    bsObj = BeautifulSoup(html, "html.parser")
    internalLinks = getInternalLinks(bsObj, splitAddress(siteUrl)[0])
    externalLinks = getExternalLinks(bsObj, splitAddress(siteUrl)[0])
    for link in externalLinks:
        if link not in allExtLinks:
            allExtLinks.add(link)
            print(link)
    for link in internalLinks:
        if link not in allIntLinks:
            print("About to get link:" + link)
            allIntLinks.add(link)
            getAllExternalLinks(link)

getAllExternalLinks("http://www.zyiz.net/article/130968.htm")
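Note that followExternalOnly is defined but never invoked in the listing; only the getAllExternalLinks crawl at the bottom actually runs. To try the random-walk variant, call it yourself (the starting URL here is just the same article used above):

followExternalOnly("http://www.zyiz.net/article/130968.htm")

Be aware that it recurses without a base case, so it keeps hopping from link to link until a page with no usable links or a network error stops it.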
When run, the getAllExternalLinks crawl prints every external link it discovers, one per line, interleaved with an "About to get link:" notice each time it descends into a new internal page.
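Two caveats are worth noting before running the listing on a real site. First, getAllExternalLinks recurses once per internal link with no bound beyond the allIntLinks set, so a large site can exhaust Python's default recursion limit. Second, a single dead link, or an internal href that begins with "/" and is passed to urlopen unresolved, aborts the whole crawl. Below is a minimal sketch of the same walk ported to Python 3 (urllib2 was split into urllib.request), with a depth cap and per-URL error handling; the maxDepth parameter and the inlined link matching are my own additions under those assumptions, not part of the original script.

#!/usr/bin/env python3
# Hedged sketch, not the original author's code: the same external-link
# walk on Python 3, with a depth cap and error handling added.
import re
from urllib.request import urlopen
from urllib.error import URLError
from bs4 import BeautifulSoup

allExtLinks = set()
allIntLinks = set()

def getAllExternalLinks(siteUrl, depth=0, maxDepth=3):
    if depth > maxDepth:  # stop before hitting Python's recursion limit
        return
    try:
        bsObj = BeautifulSoup(urlopen(siteUrl), "html.parser")
    except (URLError, ValueError):  # dead link, or a relative URL urlopen can't fetch
        return
    domain = siteUrl.replace("http://", "").split("/")[0]
    # External links: start with http/www and do not contain our domain
    for link in bsObj.findAll("a", href=re.compile("^(http|www)((?!" + re.escape(domain) + ").)*$")):
        href = link.attrs.get("href")
        if href and href not in allExtLinks:
            allExtLinks.add(href)
            print(href)
    # Internal links: start with "/" or contain our domain
    for link in bsObj.findAll("a", href=re.compile("^(/|.*" + re.escape(domain) + ")")):
        href = link.attrs.get("href")
        if href and href not in allIntLinks:
            allIntLinks.add(href)
            getAllExternalLinks(href, depth + 1, maxDepth)

getAllExternalLinks("http://www.zyiz.net/article/130968.htm")

Capping the depth trades completeness for predictability; for a genuinely full-site crawl you would instead convert the recursion into an explicit queue of pages to visit.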
Summary
That is all of this article's code example for using a Python crawler to gather every external link across a site; I hope it helps. Interested readers can browse this site's other related topics, and if anything here is lacking, please point it out in a comment. Thanks for your support of this site!
Original article: http://blog.csdn.net/qq_16103331/article/details/52690558