当前位置:   article > 正文

Python 获取动漫番剧 -XXOO_7666.tv

7666.tv

 

前言

没有什么好说的,就是想起来前些年失恋使劲刷番剧缓解自己糟糕的情绪。纪念下。


一、直接上代码

1.搜索入口

  1. # 搜索动漫名称 列表
  2. def get_video_list(name):
  3. # 开启代理
  4. # proxy = {'http': 'http://127.0.0.1:8080', 'https': 'https://127.0.0.1:8080' }
  5. url = 'http://www.7666.tv/search.php?searchword=' + name + '&submit='
  6. headers = {
  7. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
  8. 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
  9. 'Accept-Language': 'zh-CN,zh;q=0.9',
  10. 'Connection': 'keep-alive',
  11. 'Upgrade-Insecure-Requests': '1',
  12. 'Content-Type': 'application/x-www-form-urlencoded',
  13. 'Cookie': ""
  14. }
  15. # url 中文转码
  16. url = url.replace(url.split("/")[-1].split(".")[0], quote(url.split("/")[-1].split(".")[0]))
  17. # 发起请求 , proxies=proxy
  18. response = requests.get(url, headers)
  19. # 解决requests.get 网页中文乱码
  20. response.encoding = response.apparent_encoding
  21. # 得到搜索结果。获取视频信息
  22. html_obj = etree.HTML(response.text)
  23. v_list = html_obj.xpath('//ul[@class="myui-vodlist__media clearfix"]/li')
  24. counter = 0
  25. result = []
  26. result_head = ['序号', '名称', '类型', '时间', '链接', '简介']
  27. result.append(result_head)
  28. for v in v_list:
  29. thumb_a = v.xpath('//div[@class="thumb"]/a[@class="myui-vodlist__thumb img-lg-150 img-xs-100 lazyload"]')[
  30. counter]
  31. # 视频名称
  32. video_name = thumb_a.attrib.get('title')
  33. # 视频头像
  34. video_head = thumb_a.attrib.get('data-original')
  35. # 视频链接
  36. video_url = thumb_a.xpath('@href')[0]
  37. # 视频评分
  38. pattern = re.compile(r'\s+');
  39. thumb_span_g = thumb_a.xpath('//span[@class="pic-tag pic-tag-top"]')[counter]
  40. video_grade = re.sub(pattern, '', str(thumb_span_g.xpath('text()')[0]))
  41. # 视频最近更新
  42. thumb_span_u = thumb_a.xpath('//span[@class="pic-text text-right"]')[counter]
  43. video_update = re.sub(pattern, '', str(thumb_span_u.xpath('text()')[0]))
  44. detail_p = v.xpath('//div[@class="detail"]/p')
  45. # 视频导演
  46. video_director = detail_p[0].xpath('text()')[0]
  47. # 视频主演
  48. video_starring = detail_p[1][1].xpath('text()')[0]
  49. # 视频分类
  50. video_type = detail_p[2].xpath('text()')[0]
  51. # 视频地区
  52. video_address = detail_p[2][2].tail
  53. # 视频年份
  54. video_year = detail_p[2][4].tail
  55. # 视频简介
  56. video_synopsis = v.xpath('//div[@class="detail"]/p[@class="hidden-xs"]/text()')[counter]
  57. video_synopsis = video_synopsis.encode("gbk", 'ignore').decode("gbk", "ignore")
  58. counter = counter + 1
  59. # print(video_name, video_head, video_url, video_grade, video_update)
  60. # print(video_director, video_starring, video_type, video_address, video_year, video_synopsis)
  61. # print('\n')
  62. result.append([counter, video_name, video_type, video_year, video_url, video_synopsis])
  63. return result

2.视频集数

  1. # 查询单个视频信息
  2. def search_video_info(url):
  3. headers = {
  4. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
  5. 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
  6. 'Accept-Language': 'zh-CN,zh;q=0.9',
  7. 'Connection': 'keep-alive',
  8. 'Upgrade-Insecure-Requests': '1',
  9. 'Content-Type': 'application/x-www-form-urlencoded',
  10. 'Cookie': ""
  11. }
  12. # 发起请求
  13. response = requests.get(url, headers)
  14. # 解决requests.get 网页中文乱码
  15. response.encoding = response.apparent_encoding
  16. # 得到搜索结果。获取视频信息
  17. html_obj = etree.HTML(response.text)
  18. v_list = html_obj.xpath('//ul[@class="myui-content__list scrollbar sort-list clearfix"]/li/a')
  19. result = []
  20. result_head = ['序号', '视频集数', '视频集数链接']
  21. result.append(result_head)
  22. counter = 1
  23. for v in v_list:
  24. # 视频集数
  25. video_set = v.xpath('text()')[0]
  26. # 视频集数链接
  27. video_set_url = v.xpath('@href')[0]
  28. vr = [counter, video_set, video_set_url]
  29. result.append(vr)
  30. counter = counter + 1
  31. return result

3.下载TS文件

  1. # TS 流 m3u8
  2. # 获取ts 路径列表
  3. def search_video_ts(url):
  4. result_urls = []
  5. headers = {
  6. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
  7. 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
  8. 'Accept-Language': 'zh-CN,zh;q=0.9',
  9. 'Connection': 'keep-alive',
  10. 'Upgrade-Insecure-Requests': '1',
  11. 'Content-Type': 'application/x-www-form-urlencoded',
  12. 'Cookie': ""
  13. }
  14. # 发起请求
  15. response = requests.get(url, headers)
  16. # 解决requests.get 网页中文乱码
  17. response.encoding = response.apparent_encoding
  18. # 得到搜索结果。获取视频信息
  19. html_obj = etree.HTML(response.text)
  20. # 获取ts流路径
  21. ts_info = html_obj.xpath('//div[@class="embed-responsive embed-responsive-16by9 clearfix"]/script/text()')[0]
  22. pattern = r"now=(.*)"
  23. m = re.findall(pattern, ts_info, re.I)
  24. # 当前视频的m3u8文件(不包含ts地址)
  25. ts_url = str(m).split(";")[0].replace("[", '').replace('"', '').replace("'", '')
  26. # 1.转发地址,发起请求
  27. response = requests.get(ts_url, headers)
  28. # 解决requests.get 网页中文乱码
  29. response.encoding = response.apparent_encoding
  30. # 当前视频的m3u8m3u8文件(包含ts地址)
  31. ts_url_2 = response.text.split("\n")[2]
  32. # 拼接地址
  33. ts_url_2 = ts_url.split("index")[0] + ts_url_2
  34. # 2.ts地址列表,发起请求
  35. response = requests.get(ts_url_2)
  36. # 解决requests.get 网页中文乱码
  37. response.encoding = response.apparent_encoding
  38. # 得到搜索结果。获取视频信息
  39. ts_list = response.text.split("\n")
  40. # https://sina.com-h-sina.com/20180812/8108_9a67fe52/1000k/hls/f9ebcf457c6000.ts
  41. for ts in ts_list:
  42. if (str.find(ts, '#') != -1) == False and len(ts) != 0:
  43. ts_url_3 = ts_url_2.split("index")[0] + ts
  44. result_urls.append(ts_url_3)
  45. return result_urls
  46. def target_handel_download(start, end, name, url_list):
  47. # 开启代理
  48. headers = {
  49. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
  50. 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
  51. 'Accept-Language': 'zh-CN,zh;q=0.9',
  52. 'Connection': 'keep-alive',
  53. 'Upgrade-Insecure-Requests': '1',
  54. 'Content-Type': 'application/x-www-form-urlencoded',
  55. 'Cookie': ""
  56. }
  57. for url in url_list[start:end]:
  58. global count
  59. print('间隔一秒,开始下载ts文件>>>' + url + ', count>>' + str(count))
  60. time.sleep(1)
  61. try:
  62. r = requests.get(url, headers=headers, stream=True)
  63. except Exception as ex:
  64. print('下载ts文件>>>' + url + ' 异常,间隔5秒重新下载, err>>' + ex)
  65. else:
  66. with open(url.split("hls/")[1], "wb") as code:
  67. code.write(r.content)
  68. print("下载进度:%.2f" % (count / len(url_list)))
  69. count = count + 1
  70. # TS 流 m3u8
  71. # 获取ts 路径列表,下载,多线程
  72. def download_video_ts(name, result_list, num_thread=100):
  73. global count
  74. count = 0
  75. # 下载ts文件
  76. file_size = len(result_list)
  77. # 启动多线程写文件
  78. part = file_size // num_thread # 如果不能整除,最后一块应该多几个字节
  79. counter = 0
  80. for i in range(num_thread):
  81. start = part * i
  82. if i == num_thread - 1: # 最后一块
  83. end = file_size
  84. else:
  85. end = start + part
  86. print('start>>' + str(start) + ' end>>' + str(end) + ' 路径name>>' + name + ' ts文件数量>>' + str(len(result_list)))
  87. print('间隔5秒启动多线程写文件, 线程总数>>' + str(num_thread) + ', 当前线程>>' + str(i))
  88. time.sleep(5)
  89. t = threading.Thread(target=target_handel_download,
  90. kwargs={'start': start, 'end': end, 'name': name, 'url_list': result_list})
  91. t.setDaemon(True)
  92. t.start()
  93. counter = counter + 1
  94. # 等待所有线程下载完成
  95. main_thread = threading.current_thread()
  96. # 所有存活的 Thread 对象
  97. for t in threading.enumerate():
  98. if t is main_thread:
  99. continue
  100. t.join()

 4.合并TS文件为mp4格式

  1. # 合并小文件
  2. # copy/b D:\newpython\doutu\sao\ts_files\*.ts d:\fnew.ts
  3. # 在windows系统下面,直接可以使用:copy/b *.ts video.mp4 把所有ts文件合成一个mp4格式文件
  4. def merge_ts_list(result_list, videoNamePy, name):
  5. tmp = []
  6. for file in result_list[0:568]:
  7. tmp.append(file.replace("\n", ""))
  8. # 合并ts文件
  9. # windows cmd 操作
  10. shell_str = 'copy /b *.ts ' + name + '.mp4' + '\n' + 'del ' + videoNamePy + '\*.ts'
  11. return shell_str
  12. # 把合并命令写到文件中|也可直接执行
  13. def wite_to_file(cmdString):
  14. f = open("combined.cmd", 'w', encoding="utf-8")
  15. f.write(cmdString)
  16. f.close()
  17. print('执行cmd命令............', cmdString)
  18. # 解决中文乱码
  19. os.system('chcp 65001')
  20. r = os.system(cmdString)
  21. print(r)

5.全部代码走一波

  1. # encoding=utf-8
  2. # 用python 视频下载器
  3. import requests
  4. import re
  5. import threading
  6. import os
  7. import datetime
  8. import time
  9. import sys
  10. import pinyin.cedict
  11. from lxml import etree
  12. # from lxml import html
  13. from urllib.parse import quote
  14. # etree = html.etree
  15. head_url = 'http://www.7666.tv'
  16. count = 0
  17. # 搜索动漫名称 列表
  18. def get_video_list(name):
  19. # 开启代理
  20. # proxy = {'http': 'http://127.0.0.1:8080', 'https': 'https://127.0.0.1:8080' }
  21. url = 'http://www.7666.tv/search.php?searchword=' + name + '&submit='
  22. headers = {
  23. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
  24. 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
  25. 'Accept-Language': 'zh-CN,zh;q=0.9',
  26. 'Connection': 'keep-alive',
  27. 'Upgrade-Insecure-Requests': '1',
  28. 'Content-Type': 'application/x-www-form-urlencoded',
  29. 'Cookie': ""
  30. }
  31. # url 中文转码
  32. url = url.replace(url.split("/")[-1].split(".")[0], quote(url.split("/")[-1].split(".")[0]))
  33. # 发起请求 , proxies=proxy
  34. response = requests.get(url, headers)
  35. # 解决requests.get 网页中文乱码
  36. response.encoding = response.apparent_encoding
  37. # 得到搜索结果。获取视频信息
  38. html_obj = etree.HTML(response.text)
  39. v_list = html_obj.xpath('//ul[@class="myui-vodlist__media clearfix"]/li')
  40. counter = 0
  41. result = []
  42. result_head = ['序号', '名称', '类型', '时间', '链接', '简介']
  43. result.append(result_head)
  44. for v in v_list:
  45. thumb_a = v.xpath('//div[@class="thumb"]/a[@class="myui-vodlist__thumb img-lg-150 img-xs-100 lazyload"]')[
  46. counter]
  47. # 视频名称
  48. video_name = thumb_a.attrib.get('title')
  49. # 视频头像
  50. video_head = thumb_a.attrib.get('data-original')
  51. # 视频链接
  52. video_url = thumb_a.xpath('@href')[0]
  53. # 视频评分
  54. pattern = re.compile(r'\s+');
  55. thumb_span_g = thumb_a.xpath('//span[@class="pic-tag pic-tag-top"]')[counter]
  56. video_grade = re.sub(pattern, '', str(thumb_span_g.xpath('text()')[0]))
  57. # 视频最近更新
  58. thumb_span_u = thumb_a.xpath('//span[@class="pic-text text-right"]')[counter]
  59. video_update = re.sub(pattern, '', str(thumb_span_u.xpath('text()')[0]))
  60. detail_p = v.xpath('//div[@class="detail"]/p')
  61. # 视频导演
  62. video_director = detail_p[0].xpath('text()')[0]
  63. # 视频主演
  64. video_starring = detail_p[1][1].xpath('text()')[0]
  65. # 视频分类
  66. video_type = detail_p[2].xpath('text()')[0]
  67. # 视频地区
  68. video_address = detail_p[2][2].tail
  69. # 视频年份
  70. video_year = detail_p[2][4].tail
  71. # 视频简介
  72. video_synopsis = v.xpath('//div[@class="detail"]/p[@class="hidden-xs"]/text()')[counter]
  73. video_synopsis = video_synopsis.encode("gbk", 'ignore').decode("gbk", "ignore")
  74. counter = counter + 1
  75. # print(video_name, video_head, video_url, video_grade, video_update)
  76. # print(video_director, video_starring, video_type, video_address, video_year, video_synopsis)
  77. # print('\n')
  78. result.append([counter, video_name, video_type, video_year, video_url, video_synopsis])
  79. return result
  80. # 查询单个视频信息
  81. def search_video_info(url):
  82. headers = {
  83. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
  84. 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
  85. 'Accept-Language': 'zh-CN,zh;q=0.9',
  86. 'Connection': 'keep-alive',
  87. 'Upgrade-Insecure-Requests': '1',
  88. 'Content-Type': 'application/x-www-form-urlencoded',
  89. 'Cookie': ""
  90. }
  91. # 发起请求
  92. response = requests.get(url, headers)
  93. # 解决requests.get 网页中文乱码
  94. response.encoding = response.apparent_encoding
  95. # 得到搜索结果。获取视频信息
  96. html_obj = etree.HTML(response.text)
  97. v_list = html_obj.xpath('//ul[@class="myui-content__list scrollbar sort-list clearfix"]/li/a')
  98. result = []
  99. result_head = ['序号', '视频集数', '视频集数链接']
  100. result.append(result_head)
  101. counter = 1
  102. for v in v_list:
  103. # 视频集数
  104. video_set = v.xpath('text()')[0]
  105. # 视频集数链接
  106. video_set_url = v.xpath('@href')[0]
  107. vr = [counter, video_set, video_set_url]
  108. result.append(vr)
  109. counter = counter + 1
  110. return result
  111. # TS 流 m3u8
  112. # 获取ts 路径列表
  113. def search_video_ts(url):
  114. result_urls = []
  115. headers = {
  116. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
  117. 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
  118. 'Accept-Language': 'zh-CN,zh;q=0.9',
  119. 'Connection': 'keep-alive',
  120. 'Upgrade-Insecure-Requests': '1',
  121. 'Content-Type': 'application/x-www-form-urlencoded',
  122. 'Cookie': ""
  123. }
  124. # 发起请求
  125. response = requests.get(url, headers)
  126. # 解决requests.get 网页中文乱码
  127. response.encoding = response.apparent_encoding
  128. # 得到搜索结果。获取视频信息
  129. html_obj = etree.HTML(response.text)
  130. # 获取ts流路径
  131. ts_info = html_obj.xpath('//div[@class="embed-responsive embed-responsive-16by9 clearfix"]/script/text()')[0]
  132. pattern = r"now=(.*)"
  133. m = re.findall(pattern, ts_info, re.I)
  134. # 当前视频的m3u8文件(不包含ts地址)
  135. ts_url = str(m).split(";")[0].replace("[", '').replace('"', '').replace("'", '')
  136. # 1.转发地址,发起请求
  137. response = requests.get(ts_url, headers)
  138. # 解决requests.get 网页中文乱码
  139. response.encoding = response.apparent_encoding
  140. # 当前视频的m3u8m3u8文件(包含ts地址)
  141. ts_url_2 = response.text.split("\n")[2]
  142. # 拼接地址
  143. ts_url_2 = ts_url.split("index")[0] + ts_url_2
  144. # 2.ts地址列表,发起请求
  145. response = requests.get(ts_url_2)
  146. # 解决requests.get 网页中文乱码
  147. response.encoding = response.apparent_encoding
  148. # 得到搜索结果。获取视频信息
  149. ts_list = response.text.split("\n")
  150. # https://sina.com-h-sina.com/20180812/8108_9a67fe52/1000k/hls/f9ebcf457c6000.ts
  151. for ts in ts_list:
  152. if (str.find(ts, '#') != -1) == False and len(ts) != 0:
  153. ts_url_3 = ts_url_2.split("index")[0] + ts
  154. result_urls.append(ts_url_3)
  155. return result_urls
  156. def target_handel_download(start, end, name, url_list):
  157. # 开启代理
  158. headers = {
  159. 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
  160. 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
  161. 'Accept-Language': 'zh-CN,zh;q=0.9',
  162. 'Connection': 'keep-alive',
  163. 'Upgrade-Insecure-Requests': '1',
  164. 'Content-Type': 'application/x-www-form-urlencoded',
  165. 'Cookie': ""
  166. }
  167. for url in url_list[start:end]:
  168. global count
  169. print('间隔一秒,开始下载ts文件>>>' + url + ', count>>' + str(count))
  170. time.sleep(1)
  171. try:
  172. r = requests.get(url, headers=headers, stream=True)
  173. except Exception as ex:
  174. print('下载ts文件>>>' + url + ' 异常,间隔5秒重新下载, err>>' + ex)
  175. else:
  176. with open(url.split("hls/")[1], "wb") as code:
  177. code.write(r.content)
  178. print("下载进度:%.2f" % (count / len(url_list)))
  179. count = count + 1
  180. # TS 流 m3u8
  181. # 获取ts 路径列表,下载,多线程
  182. def download_video_ts(name, result_list, num_thread=100):
  183. global count
  184. count = 0
  185. # 下载ts文件
  186. file_size = len(result_list)
  187. # 启动多线程写文件
  188. part = file_size // num_thread # 如果不能整除,最后一块应该多几个字节
  189. counter = 0
  190. for i in range(num_thread):
  191. start = part * i
  192. if i == num_thread - 1: # 最后一块
  193. end = file_size
  194. else:
  195. end = start + part
  196. print('start>>' + str(start) + ' end>>' + str(end) + ' 路径name>>' + name + ' ts文件数量>>' + str(len(result_list)))
  197. print('间隔5秒启动多线程写文件, 线程总数>>' + str(num_thread) + ', 当前线程>>' + str(i))
  198. time.sleep(5)
  199. t = threading.Thread(target=target_handel_download,
  200. kwargs={'start': start, 'end': end, 'name': name, 'url_list': result_list})
  201. t.setDaemon(True)
  202. t.start()
  203. counter = counter + 1
  204. # 等待所有线程下载完成
  205. main_thread = threading.current_thread()
  206. # 所有存活的 Thread 对象
  207. for t in threading.enumerate():
  208. if t is main_thread:
  209. continue
  210. t.join()
  211. # 合并小文件
  212. # copy/b D:\newpython\doutu\sao\ts_files\*.ts d:\fnew.ts
  213. # 在windows系统下面,直接可以使用:copy/b *.ts video.mp4 把所有ts文件合成一个mp4格式文件
  214. def merge_ts_list(result_list, videoNamePy, name):
  215. tmp = []
  216. for file in result_list[0:568]:
  217. tmp.append(file.replace("\n", ""))
  218. # 合并ts文件
  219. # windows cmd 操作
  220. shell_str = 'copy /b *.ts ' + name + '.mp4' + '\n' + 'del ' + videoNamePy + '\*.ts'
  221. return shell_str
  222. # 把合并命令写到文件中
  223. def wite_to_file(cmdString):
  224. f = open("combined.cmd", 'w', encoding="utf-8")
  225. f.write(cmdString)
  226. f.close()
  227. print('执行cmd命令............', cmdString)
  228. # 解决中文乱码
  229. os.system('chcp 65001')
  230. r = os.system(cmdString)
  231. print(r)
  232. # 展示视频搜索结果
  233. def show_video_list(result):
  234. print('------------------结果如下--------------------')
  235. for r in result:
  236. print(str(r).replace('[', '').replace(']', '').replace(',', ''))
  237. print('\n')
  238. # 验证是否数字
  239. def is_number(s):
  240. try:
  241. int(s)
  242. return True
  243. except ValueError:
  244. pass
  245. try:
  246. import unicodedata
  247. unicodedata.numeric(s)
  248. return True
  249. except (TypeError, ValueError):
  250. pass
  251. return False
# Download every .ts segment of one episode and emit the merge command.
def download_vodeo_ts(videoName, cwd, content):
    # videoName: title used to name the working directory.
    # cwd: original working directory (not used inside this function).
    # content: episode row [index, label, page path] from search_video_info.
    print('你选择的集数内容:',
          str(content).replace('[', '').replace(']', '').replace(',', ''))
    # Fetch episode details.
    print("正在爬取-{}-视频集数详情".format(str(content[1])))
    # Resolve the episode page into absolute .ts segment URLs.
    result_urls = search_video_ts(head_url + str(content[2]))
    if len(result_urls) < 1:
        print('视频TS-{}-,无内容!'.format(str(content[1])))
    else:
        # Pinyin conversion of the folder name, kept for reference:
        # videoNamePy = sys.path[0] + '\\' + pinyin.get(videoName, format="numerical")
        # NOTE(review): '\\' joining assumes Windows paths — confirm.
        videoNamePy = sys.path[0] + '\\' + videoName
        # Download the TS files, timing the run.
        start = datetime.datetime.now().replace(microsecond=0)
        print('下载 start..................>>', start)
        print('文件名称>>>>>>>', videoNamePy)
        # Segments are written into the per-video directory via chdir;
        # note the process working directory stays changed afterwards.
        if not os.path.exists(videoNamePy):
            os.mkdir(videoNamePy)
            os.chdir(videoNamePy)
        else:
            os.chdir(videoNamePy)
        # 5 worker threads.
        download_video_ts(videoNamePy, result_urls, 5)
        end = datetime.datetime.now().replace(microsecond=0)
        print('下载 end..................>>', end)
        # Build and run the Windows merge (copy /b) command.
        cmd = merge_ts_list(result_urls, videoNamePy, videoNamePy + '\\' + videoName + str(content[1]))
        # Write the merge command to a file and execute it.
        wite_to_file(cmd)
        print(str(content[1]) + "-视频下载完成")
# main: interactive flow — search, pick a video, pick episode(s), download.
def download_vodeo_man():
    cwd = os.getcwd()  # directory the script was started from
    video_list = []
    print("------------------------current working directory------------------" + cwd)
    while True:
        exit_flag = False  # set by the 'all' branch to unwind one level
        videoName = input("请输入动漫名称||输入exit退出:")
        if 'exit' == videoName:
            break
        # Throttle searches to one every 3 seconds.
        print('搜索限制3秒一次.....................')
        time.sleep(3)
        try:
            video_list = get_video_list(videoName)
        except Exception as ex:
            print('搜索异常-{},请重新输入!'.format(ex))
        # Row 0 is the header, so < 2 means no hits.
        if len(video_list) < 2:
            print('没有找到你想要的视频,请重新输入!')
        else:
            show_video_list(video_list)
            while True:
                if exit_flag == True:
                    break
                num = input("请选择你要下载的视频序号||键入t返回上一级:")
                if num == 't':
                    break
                elif is_number(num):
                    if int(num) > len(video_list):
                        print('没有找到你选择的序号,请重新输入:')
                    else:
                        content = video_list[int(num)]
                        video_name = content[1]
                        print('你选择的内容:', str(content).replace('[', '').replace(']', '').replace(',', ''))
                        # Fetch the episode list for the chosen video.
                        print("正在爬取-{}-视频详情".format(str(content[1])))
                        try:
                            result = search_video_info(head_url + str(content[4]))
                        except Exception:
                            print('视频详情异常,返回上一级!')
                            break
                        if len(result) < 2:
                            print('视频-{}-,无内容!'.format(str(content[1])))
                        else:
                            show_video_list(result)
                            while True:
                                num = input("请选择你要下载的视频集数序号||键入all下载所有||键入t返回上一级:")
                                if num == 't':
                                    break
                                if num == 'all':
                                    # Download every episode in sequence.
                                    print('你选择下载所有集数')
                                    number = 1
                                    for content in result:
                                        try:
                                            print('开始下载-{}'.format(video_name+str(content[1])))
                                            download_vodeo_ts(video_name, cwd, result[number])
                                        except Exception as ex:
                                            print('下载-{}-集数异常-{},继续下载!'.format(content[1], ex))
                                        number = number + 1
                                    exit_flag = True
                                    break
                                elif int(num) > len(result):
                                    print('没有找到你选择的请选择你要下载的视频集数序号序号,请重新输入:')
                                else:
                                    content = result[int(num)]
                                    try:
                                        print('开始下载-{}'.format(video_name+str(content[1])))
                                        download_vodeo_ts(video_name, cwd, content)
                                    except Exception as ex:
                                        print('下载-{}-集数异常-{},返回上一级!'.format(content[1], ex))
                                        break
if __name__ == '__main__':
    # Run the interactive downloader when executed as a script.
    download_vodeo_man()
    print("OK")

最后

我以为相爱的两个人分手,至少要有一件轰轰烈烈的大事,比如说第三者,比如说绝症,其实不用,忙碌疲乏,不安就够了。

感谢各位大大的耐心阅读~

声明:本文内容由网友自发贡献,转载请注明出处:【wpsshop博客】
推荐阅读
相关标签
  

闽ICP备14008679号