把获取到的下载视频的url存放在数组中(也可写入文件中),通过调用迅雷接口,进行自动下载。(请先下载迅雷,并在其设置中心的下载管理中设置为一键下载)
实现代码如下:
# -*- coding: utf-8 -*-
"""Crawl short-video listing pages from k58.com, collect the real download
URLs, and queue them in the Thunder (Xunlei) download manager through its
COM automation interface.

Prerequisites: Thunder installed with "one-click download" enabled in its
settings, plus the third-party packages bs4, requests and pywin32.
"""
import os
import re
import time

import urllib3
import requests
from bs4 import BeautifulSoup
from win32com.client import Dispatch

# The site presents an invalid certificate, so every request runs with
# verify=False; silence the resulting InsecureRequestWarning once, at
# import time, instead of before every single request.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


class DownloadVideo:
    """Scrapes video download links and hands them to Thunder."""

    def __init__(self):
        # One shared session: connection pooling + cookie persistence.
        self.r = requests.session()
        # The site moves domains; resolve the current base URL first.
        self.url = self.get_url()
        self.download_urla = []   # detail-page URLs, one per video
        self.download_urlb = []   # final downloadable file URLs
        # Page 1 of the listing; url_set1() appends pages 2..n.
        self.url_set = ["%s/shipin/list-短视频.html" % self.url]

    def get_url(self):
        """Return the site's current base URL by following the redirect
        chain from the well-known entry domain."""
        a = self.r.get('https://www.k58.com', verify=False)
        return a.url

    def url_set1(self, n):
        """Append listing-page URLs for pages 2..n.

        Page 1 was already added in __init__; n <= 1 is a no-op.  This
        single range() covers the original n == 2 / n >= 3 / else branches.
        """
        for i in range(2, n + 1):
            self.url_set.append("%s/shipin/list-短视频-%d.html" % (self.url, i))

    def download_url1(self):
        """Collect each video's detail-page URL from every listing page."""
        for page_url in self.url_set:
            resp = self.r.get(page_url, verify=False)
            soup = BeautifulSoup(resp.content, "html.parser")
            for item in soup.find_all(class_="shown"):
                hrefs = re.findall(
                    '<a href="(.*?)" rel="external nofollow" ', str(item))
                # Guard: the original did hrefs[0] unconditionally and
                # raised IndexError whenever the markup changed slightly.
                if hrefs:
                    self.download_urla.append(self.url + hrefs[0])

    def download_url2(self):
        """Extract the real file URL from each detail page.

        The link lives in the copy-to-clipboard button's
        data-clipboard-text attribute.
        """
        for page_url in self.download_urla:
            resp = self.r.get(page_url, verify=False)
            soup = BeautifulSoup(resp.content, "html.parser")
            for btn in soup.find_all(
                    class_="form-control input-sm copy_btn app_disable"):
                link = btn["data-clipboard-text"]
                self.download_urlb.append(link)
                # Optionally persist every link to disk as well:
                # self.write_txt(link)

    def thunder_download(self):
        """Queue all collected URLs in Thunder via COM and commit them."""
        try:
            thunder = Dispatch("ThunderAgent.Agent64.1")
            for link in self.download_urlb:
                thunder.AddTask(link)
            thunder.CommitTasks()
            time.sleep(2)  # give Thunder a moment to accept the batch
        except Exception:
            # Was a bare `except:` — that also swallowed KeyboardInterrupt
            # and SystemExit.  COM failure here almost always means Thunder
            # is missing or one-click download is disabled.
            print("请下载迅雷,并在其设置中心的下载管理中设置为一键下载")

    def mkdir(self, path):
        """Create *path* (including parents) if it does not already exist."""
        # Replaces the hand-rolled exists()/makedirs() dance.
        os.makedirs(path, exist_ok=True)

    def write_txt(self, c):
        """Append one link per line to a timestamped file under D:\\AAAAA."""
        self.mkdir(r"D:\AAAAA")
        file_name = time.strftime('%Y%m%d_%H%M%S.txt')
        # Explicit encoding so the (Chinese) URLs round-trip on any locale.
        with open(r"D:\AAAAA\%s" % file_name, 'a', encoding='utf-8') as f:
            f.write(c + "\n")


if __name__ == '__main__':
    d = DownloadVideo()
    d.url_set1(5)  # number of listing pages to crawl
    d.download_url1()
    d.download_url2()
    d.thunder_download()
到此这篇关于python爬虫爬取某网站视频的示例代码的文章就介绍到这了,更多相关python爬虫爬取网站视频内容请搜索服务器之家以前的文章或继续浏览下面的相关文章希望大家以后多多支持服务器之家!
原文链接:https://www.cnblogs.com/badbadboyyx/p/12450695.html