
Python crawler in practice: a DouYin (抖音) crawler

DouYin crawler

        Sample code
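The script below drives headless Firefox (via Selenium) inside a virtual X display (pyvirtualdisplay/Xvfb) to open a creator's profile page, tells video posts apart from image posts, downloads every work into a directory named after its publish time, and finally zips the whole tree. Note that the hard-coded CSS class names (e6wsjNLL, niBfRBgX, and so on) look like obfuscated build artifacts of the DouYin front end, so they are likely to break whenever the site is redeployed.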

# -*- coding: utf-8 -*-
'''
@Author: zhujj
@Time: 2024/2/29 15:12
If you want to see the detail, please goto :
https://github.com/iszhujj/PythonStudy/blob/master/001%23spider_douyin_&_linux/README.md
'''
import threading, requests, os, zipfile
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from pyvirtualdisplay import Display
from time import sleep
from bs4 import BeautifulSoup
from selenium.common.exceptions import WebDriverException

# Virtual X display so Firefox can run on a server without a GUI
display = Display(visible=0, size=(1980, 1440))
display.start()

firefox_options = Options()
# The writable Options.headless attribute is deprecated/removed in newer
# Selenium releases; pass the flag as an argument instead.
firefox_options.add_argument('--headless')
firefox_options.binary_location = '/home/lighthouse/firefox/firefox'
# Current time as a string, used in file and directory names
def get_current_time():
    now = datetime.now()
    format_time = now.strftime("_%Y-%m-%d__%H-%M-%S-%f__")
    return format_time
# Root path; downloaded works and log files are kept here
ABS_PATH = f'/home/resources/{get_current_time()}'
# Create a directory. dir_name is the work's publish time in the form
# 2024-02-26 16:59, so the spaces and colons must be replaced first.
def create_dir(dir_name):
    dir_name = dir_name.replace(' ', '-').replace(':', '-')
    path = f'{ABS_PATH}/{dir_name}'
    try:
        os.makedirs(path)
    except FileExistsError:
        print(f'Tried to create a directory that already exists ({path})')
    else:
        print(f'Directory created: {path}')
    finally:
        # the path is returned whether or not the directory already existed
        return path
# Download one file: target directory, file name, source URL
def download_works(dir_name, work_name, src):
    response = requests.get(src, stream=True)
    if response.status_code == 200:
        with open(f'{dir_name}/{work_name}', mode='wb') as f:
            for chunk in response.iter_content(1024):
                f.write(chunk)
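# Hypothetical addition, not part of the original script: downloads from
# the media CDN can fail transiently, so a small retry wrapper around
# download_works() may be worth having. A minimal sketch:
def download_with_retry(dir_name, work_name, src, attempts=3):
    for i in range(attempts):
        try:
            download_works(dir_name, work_name, src)
            return True
        except requests.RequestException as e:
            print(f'Download failed ({i + 1}/{attempts}): {e}')
            sleep(1)
    return False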
# Check whether a work has already been downloaded
# (its directory exists and is non-empty)
def test_work_exist(dir_name):
    dir_name = dir_name.replace(' ', '-').replace(':', '-')
    path = f'{ABS_PATH}/{dir_name}'
    if os.path.exists(path) and os.path.isdir(path):
        if os.listdir(path):
            return True
    return False
def get_all_works(target):
    driver = None
    try:
        driver = webdriver.Firefox(options=firefox_options)
        driver.set_page_load_timeout(6)
        # The target creator's profile page
        driver.get(target)
        WebDriverWait(driver, 6).until(EC.presence_of_element_located((By.CLASS_NAME, 'e6wsjNLL')))
        WebDriverWait(driver, 6).until(EC.presence_of_element_located((By.CLASS_NAME, 'niBfRBgX')))
        driver.execute_script('document.querySelector(".wcHSRAj6").scrollIntoView()')
        sleep(1)
        html = BeautifulSoup(driver.page_source, 'lxml')
        driver.quit()
        driver = None  # already closed; avoid quitting it again in the finally block
        # The list of works
        ul = html.find(class_='e6wsjNLL')
        # Each individual work
        lis = ul.findAll(class_='niBfRBgX')
        for li in lis:
            element_a = li.find('a')
            is_pictures = element_a.find(class_='TQTCdYql')
            if (not is_pictures) or (not is_pictures.svg):
                # Video work
                href = f'https://www.douyin.com{element_a["href"]}'
                temp_driver = webdriver.Firefox(options=firefox_options)
                temp_driver.set_page_load_timeout(6)
                temp_driver.get(href)
                WebDriverWait(temp_driver, 6).until(EC.presence_of_element_located((By.CLASS_NAME, 'D8UdT9V8')))
                # Not strictly necessary; the webdriver alone could handle the rest
                html_v = BeautifulSoup(temp_driver.page_source, 'lxml')
                temp_driver.quit()
                # Publish time of this work
                publish_time = html_v.find(class_='D8UdT9V8').string[5:]
                # if test_work_exist(f'{publish_time}_video'):
                #     continue
                video = html_v.find(class_='xg-video-container').video
                source = video.find('source')
                # Create a folder for this work
                path = create_dir(f'{publish_time}_video')
                # Download the work
                download_works(path, f'{get_current_time()}.mp4', f'https:{source["src"]}')
            else:
                # Image-set work
                href = f'https:{element_a["href"]}'
                temp_driver = webdriver.Firefox(options=firefox_options)
                temp_driver.set_page_load_timeout(6)
                temp_driver.get(href)
                WebDriverWait(temp_driver, 6).until(EC.presence_of_element_located((By.CLASS_NAME, 'YWeXsAGK')))
                # Using BeautifulSoup here is optional as well
                html_p = BeautifulSoup(temp_driver.page_source, 'lxml')
                temp_driver.quit()
                publish_time = f'{html_p.find(class_="YWeXsAGK")}'[-23:-7]
                # The list of images
                img_ul = html_p.find(class_='KiGtXxLr')
                imgs = img_ul.findAll('img')
                # if test_work_exist(f'{publish_time}_pictures_{len(imgs)}'):
                #     continue
                path = create_dir(f'{publish_time}_pictures_{len(imgs)}')
                for img in imgs:
                    download_works(path, f'{get_current_time()}.webp', f'{img["src"]}')
        print('##### finish #####')
    except WebDriverException as e:
        print(f'Caught WebDriverException: {e}')
    except Exception as err:
        print('Caught another error at the end of get_all_works')
        print(err)
    finally:
        if driver is not None:
            driver.quit()
        display.stop()
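# The threading import above is never used by the original script. One
# plausible (hypothetical) use would be to run several downloads
# concurrently instead of fetching files one by one:
def download_many(jobs):
    # jobs: a list of (dir_name, work_name, src) tuples
    threads = [threading.Thread(target=download_works, args=job) for job in jobs]
    for t in threads:
        t.start()
    for t in threads:
        t.join()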
# Compress the whole directory tree into the zip archive
def zipdir(path, ziph):
    # ziph is a zipfile.ZipFile object
    for root, dirs, files in os.walk(path):
        for file in files:
            ziph.write(os.path.join(root, file),
                       os.path.relpath(os.path.join(root, file), os.path.join(path, '..')))
def dy_download_all(target_url):
    get_all_works(target_url)
    directory_to_zip = ABS_PATH  # directory to compress
    output_filename = f'{ABS_PATH}.zip'  # name of the output zip file
    with zipfile.ZipFile(output_filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
        zipdir(directory_to_zip, zipf)
    return f'{ABS_PATH}.zip'  # return the path of the archive
if __name__ == '__main__':
    # Simple test
    url = input('Enter the creator profile URL: ')
    path = dy_download_all(url)
    print('Download finished')
    print(f'Archive: {path}')
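To run the script you need Firefox plus a matching geckodriver on the PATH, Xvfb for pyvirtualdisplay, and the Python packages it imports: selenium, requests, beautifulsoup4, lxml, and pyvirtualdisplay. The Firefox binary path and the /home/resources root directory are hard-coded near the top of the script, so adjust them for your own machine.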
