shipinhao_get_url.py

# -*- coding: utf-8 -*-
# @Author: wangkun
# @Time: 2022/9/1
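"""Fetch WeChat Channels (视频号) video URLs via the Windows WeChat client.

Drives WeChat through Appium to open a video shared in the "爬虫群" chat,
lets Charles capture the resulting requests, then parses the capture
files under ./chlsfiles/ for the finder.video.qq.com download URLs and
writes them back to a Feishu sheet.
"""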
import json
import os
import sys
import time

# import atomacos
from appium import webdriver
from selenium.webdriver.common.by import By

sys.path.append(os.getcwd())
from crawler_shipinhao.main.common import Common
from crawler_shipinhao.main.feishu_lib import Feishu
# from main.common import Common


class GetUrl:
    @classmethod
    def click_video(cls, log_type, video_title):
        Common.logger(log_type).info('Launching "WeChat"')
        # Appium/WinAppDriver session against the Windows WeChat client
        desired_caps = {'app': r"C:\Program Files (x86)\Tencent\WeChat\WeChat.exe"}
        driver = webdriver.Remote(
            command_executor='http://127.0.0.1:4723',
            desired_capabilities=desired_caps)
        driver.implicitly_wait(10)
        Common.logger(log_type).info('Clicking the "爬虫群" chat')
        driver.find_element(By.NAME, '爬虫群').click()
        Common.logger(log_type).info('Clicking video: {}', video_title)
        # Opening the video triggers the requests that Charles captures
        driver.find_element(By.NAME, video_title).click()
        time.sleep(5)
        driver.quit()
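
    # get_url() reads Charles capture files exported as JSON into
    # ./chlsfiles/. In those captures, finder.video.qq.com requests with
    # path /251/20302/stodownload carry the video stream and requests with
    # /251/20304/stodownload carry the cover image (see the branching below).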
    @classmethod
    def get_url(cls, log_type):
        try:
            # Directory where Charles stores its capture files
            # charles_file_dir = r"./crawler_kanyikan_recommend/chlsfiles/"
            charles_file_dir = r"./chlsfiles/"
            if len(os.listdir(charles_file_dir)) == 1:
                Common.logger(log_type).info("No chlsfile found, waiting 60s")
                time.sleep(60)
            else:
                # All files in the target directory, sorted so the newest is last
                all_file = sorted(os.listdir(charles_file_dir))
                # Take the newest capture file
                old_file = all_file[-1]
                # Split file name and extension
                new_file = os.path.splitext(old_file)
                # Rename the capture with a .txt suffix so it can be read as text
                os.rename(os.path.join(charles_file_dir, old_file),
                          os.path.join(charles_file_dir, new_file[0] + ".txt"))
                with open(os.path.join(charles_file_dir, new_file[0] + ".txt"),
                          encoding='utf-8-sig', errors='ignore') as f:
                    contents = json.load(f, strict=False)
                video_url_list = []
                cover_url_list = []
                if "finder.video.qq.com" in [text['host'] for text in contents]:
                    for text in contents:
                        if text["host"] == "finder.video.qq.com" and text["path"] == "/251/20302/stodownload":
                            video_url_list.append(text)
                        elif text["host"] == "finder.video.qq.com" and text["path"] == "/251/20304/stodownload":
                            cover_url_list.append(text)
                    video_url = video_url_list[0]['host'] + video_url_list[0]['path'] + '?' + video_url_list[0]['query']
                    cover_url = cover_url_list[0]['host'] + cover_url_list[0]['path'] + '?' + cover_url_list[0]['query']
                    head_url = cover_url
                    return video_url, cover_url, head_url
                else:
                    Common.logger(log_type).info("URL not found, retrying in 10s")
                    time.sleep(10)
                    # Retry recursively and propagate the result
                    return cls.get_url(log_type)
        except Exception as e:
            Common.logger(log_type).exception("get_url exception: {}", e)
            return None
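
    # Feishu sheet layout assumed by write_url(): on sheet 'FSDlBy', row 2
    # column C (index 2) holds the video title and column L (index 11) stays
    # empty until J2:L2 is filled with [head_url, cover_url, video_url].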
    @classmethod
    def write_url(cls, log_type):
        while True:
            # Column L of row 2 stays empty until the URLs have been written
            if Feishu.get_values_batch(log_type, 'shipinhao', 'FSDlBy')[1][11] is None:
                Common.logger(log_type).info('Clicking the shared video')
                # Column C of row 2 holds the video title
                cls.click_video(log_type, Feishu.get_values_batch(log_type, 'shipinhao', 'FSDlBy')[1][2])
                time.sleep(60)
                Common.logger(log_type).info('Fetching video avatar / cover / play URLs')
                urls = cls.get_url(log_type)
                # get_url() returns None on failure; only write back on success
                if urls is not None:
                    Feishu.update_values(log_type, 'shipinhao', '', 'J2:L2',
                                         [[urls[2], urls[1], urls[0]]])
                    Common.logger(log_type).info('Video URL info written to Feishu\n')
                break
            else:
                break
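

# Prerequisites (assumed setup, not checked by this script):
#   - an Appium / WinAppDriver server listening at http://127.0.0.1:4723
#   - the Windows WeChat client installed at the path used in click_video()
#   - Charles running and saving capture files into ./chlsfiles/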
if __name__ == '__main__':
    GetUrl.write_url('recommend')