
update haitunzhufu: switch the entry scripts to haitunzhufu_recommend2 and rewrite list parsing with BeautifulSoup

wangkun committed 1 year ago
commit 9071c52846

+ 1 - 1
haitunzhufu/haitunzhufu_main/run_htzf_dev.py

@@ -6,7 +6,7 @@ import sys
 
 sys.path.append(os.getcwd())
 from common.common import Common
-from haitunzhufu.haitunzhufu_recommend.haitunzhufu_recommend import HTZFRecommend
+from haitunzhufu.haitunzhufu_recommend.haitunzhufu_recommend2 import HTZFRecommend
 
 
 class HTZFMain:

+ 5 - 2
haitunzhufu/haitunzhufu_main/run_htzf_recommend.py

@@ -7,7 +7,7 @@ import sys
 
 sys.path.append(os.getcwd())
 from common.common import Common
-from haitunzhufu.haitunzhufu_recommend.haitunzhufu_recommend import HTZFRecommend
+from haitunzhufu.haitunzhufu_recommend.haitunzhufu_recommend2 import HTZFRecommend
 
 
 class HTZFMain:
@@ -15,7 +15,10 @@ class HTZFMain:
     def main(cls, log_type, crawler, env):
         videos_cnt = 50
         Common.logger(log_type, crawler).info('开始抓取"海豚祝福"')
-        HTZFRecommend.start_wechat(log_type, crawler, videos_cnt, env)
+        HTZFRecommend.start_wechat(log_type=log_type,
+                                   crawler=crawler,
+                                   videos_cnt=videos_cnt,
+                                   env=env)
         Common.logger(log_type, crawler).info("抓取一轮结束")
         Common.del_logs(log_type, crawler)
 

+ 99 - 93
haitunzhufu/haitunzhufu_recommend/haitunzhufu_recommend2.py
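The rewrite below pulls in BeautifulSoup as a new third-party dependency (the PyPI package is beautifulsoup4). A minimal, hedged sketch of guarding the import on hosts that may not have it installed yet:

```python
# bs4 is new in this commit; it is not part of the standard library.
try:
    from bs4 import BeautifulSoup  # noqa: F401  (only checking availability here)
except ImportError as exc:
    raise SystemExit(
        "haitunzhufu_recommend2 requires beautifulsoup4: pip install beautifulsoup4"
    ) from exc
```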

@@ -12,6 +12,7 @@ from hashlib import md5
 from appium import webdriver
 from appium.webdriver.extensions.android.nativekey import AndroidKey
 from appium.webdriver.webdriver import WebDriver
+from bs4 import BeautifulSoup
 from selenium.common import NoSuchElementException
 from selenium.webdriver.common.by import By
 
@@ -24,7 +25,6 @@ from common.scheduling_db import MysqlHelper
 
 class HTZFRecommend:
     platform = "海豚祝福"
-    i = 0
 
     @classmethod
     def today_download_cnt(cls, log_type, crawler, env):
@@ -79,7 +79,11 @@ class HTZFRecommend:
         Common.logger(log_type, crawler).info('打开小程序"海豚祝福"')
         driver.find_elements(By.XPATH, '//*[@text="海豚祝福"]')[-1].click()
         time.sleep(5)
-        cls.get_videoList(log_type, crawler, driver, videos_cnt, env)
+        cls.get_videoList(log_type=log_type,
+                          crawler=crawler,
+                          driver=driver,
+                          videos_cnt=videos_cnt,
+                          env=env)
         time.sleep(1)
         driver.quit()
 
@@ -104,14 +108,21 @@ class HTZFRecommend:
         return len(repeat_video)
 
     @classmethod
-    def get_video_url(cls, driver: WebDriver, video_element):
-        video_element.click()
-        time.sleep(5)
-        video_url_elements = cls.search_elements(driver, '//*[@id="myVideo"]')
-        if video_url_elements:
-            return video_url_elements[0].get_attribute("src")
-        else:
-            return
+    def get_video_url(cls, driver: WebDriver, video_title_element):
+        for i in range(3):
+            cls.search_elements(driver, '//*[@class="list"]')
+            video_title_element[0].click()
+            time.sleep(5)
+            video_url_elements = cls.search_elements(driver, '//*[@id="myVideo"]')
+            if video_url_elements:
+                return video_url_elements[0].get_attribute("src")
+
+    @classmethod
+    def swipe_up(cls, driver: WebDriver):
+        cls.search_elements(driver, '//*[@class="list"]')
+        size = driver.get_window_size()
+        driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.8),
+                     int(size["width"] * 0.5), int(size["height"] * 0.55), 200)
 
     @classmethod
     def get_videoList(cls, log_type, crawler, driver: WebDriver, videos_cnt, env):
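For orientation, a standalone sketch (the window size is made up) of the coordinate math the new swipe_up uses: each swipe moves from 80% down to 55% of the screen height at mid-width, i.e. roughly a quarter-screen scroll, which advances the list by about one card without skipping entries.

```python
# Standalone sketch of swipe_up's geometry; the 1080x2340 window size is hypothetical.
def swipe_coords(size: dict) -> tuple:
    x = int(size["width"] * 0.5)             # stay at mid-width
    start = (x, int(size["height"] * 0.8))   # begin near the bottom of the list
    end = (x, int(size["height"] * 0.55))    # end just above mid-screen
    return start, end


start, end = swipe_coords({"width": 1080, "height": 2340})
print(start, end)  # (540, 1872) (540, 1287) -> a ~25% screen-height scroll per call
```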
@@ -131,98 +142,94 @@ class HTZFRecommend:
                 time.sleep(1)
         cls.search_elements(driver, '//*[@class="nav cur"]')[-1].click()
         Common.logger(log_type, crawler).info('点击"推荐"列表成功\n')
-        index = 0
-        while True:
+
+        # while True:
+        for page in range(200):
+            Common.logger(log_type, crawler).info(f"正在抓取第{page+1}页")
             if cls.search_elements(driver, '//*[@class="list"]') is None:
                 Common.logger(log_type, crawler).info("列表页窗口已销毁\n")
-                cls.i = 0
-                return
-            videoList_elements = cls.search_elements(driver, '//*[@class="img_bf"]')
-            if videoList_elements is None or len(videoList_elements) == 0:
-                cls.i = 0
-                Common.logger(log_type, crawler).warning(f"videoList_elements:{videoList_elements}")
-                return
-            video_list = videoList_elements[index:]
-            Common.logger(log_type, crawler).info(f"video_list: {video_list}")
-            if len(video_list) == 0 or video_list is None:
-                Common.logger(log_type, crawler).info("到底啦~~~~~~~~~~")
-                cls.i = 0
                 return
-            for i, video_element in enumerate(video_list):
+            for i in range(1):
+                cls.swipe_up(driver)
+                time.sleep(0.5)
+
+            page_source = driver.page_source
+            soup = BeautifulSoup(page_source, 'html.parser')
+            soup.prettify()
+
+            video_list_elements = soup.findAll("wx-view", class_="img_bf")
+            Common.logger(log_type, crawler).info(f"第{page+1}页共:{len(video_list_elements)}条视频\n")
+
+            for i, video_element in enumerate(video_list_elements):
                 try:
-                    today_download_cnt = cls.today_download_cnt(log_type, crawler, env)
-                    Common.logger(log_type, crawler).info(f"今日已下载{today_download_cnt}条视频\n")
-                    if today_download_cnt >= videos_cnt:
-                        cls.i = 0
-                        return
-                    if video_element is None:
-                        Common.logger(log_type, crawler).info("没有更多数据啦~\n")
-                        cls.i = 0
+                    today_download = cls.today_download_cnt(log_type, crawler, env)
+                    if today_download >= videos_cnt:
+                        Common.logger(log_type, crawler).info(f"今日已下载视频数:{today_download}")
                         return
-                    cls.i += 1
-                    cls.search_elements(driver, '//*[@class="img_bf"]')
-                    Common.logger(log_type, crawler).info(f"拖动第{cls.i}条视频至屏幕顶部")
-                    time.sleep(3)
-                    driver.execute_script("arguments[0].scrollIntoView({block:'center',inline:'center'})", video_element)
-                    time.sleep(3)
-                    # # cls.search_elements(driver, '//*[@class="img_bf"]')
-                    # video_title = video_element.find_elements(By.XPATH, '//*[@class="title"]')[i+index].text
-                    # plat_cnt_str = video_element.find_elements(By.XPATH, '//*[@class="wan"]')[i+index].text
-                    # play_cnt = int(re.sub(r"\D", "", plat_cnt_str)) * 10000 if "万" in plat_cnt_str else plat_cnt_str
-                    # cover_url = video_element.find_elements(By.XPATH, '//*[@class="img"]')[i+index].get_attribute('src')
-                    # play_icon = video_element.find_elements(By.XPATH, '//*[@class="bf"]')[i+index]
-                    # out_video_id = md5(video_title.encode('utf8')).hexdigest()
-                    # video_dict = {
-                    #     "video_title": video_title,
-                    #     'video_id': out_video_id,
-                    #     "plat_cnt_str": plat_cnt_str,
-                    #     "play_cnt": play_cnt,
-                    #     'comment_cnt': 0,
-                    #     'like_cnt': 0,
-                    #     'share_cnt': 0,
-                    #     'publish_time_stamp': int(time.time()),
-                    #     'publish_time_str': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
-                    #     'user_name': "haitunzhufu",
-                    #     'user_id': "haitunzhufu",
-                    #     "cover_url": cover_url,
-                    #     'avatar_url': cover_url,
-                    #     'session': f"haitunzhufu-{int(time.time())}"
-                    # }
-                    #
-                    # for k, v in video_dict.items():
-                    #     Common.logger(log_type, crawler).info(f"{k}:{v}")
-                    # if video_title is None or cover_url is None:
-                    #     Common.logger(log_type, crawler).info("无效视频\n")
-                    # elif cls.repeat_out_video_id(log_type, crawler, out_video_id, env) != 0:
-                    #     Common.logger(log_type, crawler).info('视频已下载\n')
-                    #     # if cls.i % 4 == 0:
-                    #     #     size = driver.get_window_size()
-                    #     #     driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.8),
-                    #     #                  int(size["width"] * 0.5), int(size["height"] * 0.2), 200)
-                    # else:
-                    #     video_url = cls.get_video_url(driver, play_icon)
-                    #     if video_url is None:
-                    #         Common.logger(log_type, crawler).info("未获取到视频播放地址\n")
-                    #         # driver.press_keycode(AndroidKey.BACK)
-                    #         continue
-                    #     video_dict["video_url"] = video_url
-                    #     Common.logger(log_type, crawler).info(f"video_url:{video_url}")
-                    #     cls.download_publish(log_type, crawler, video_dict, env)
-                    #     driver.press_keycode(AndroidKey.BACK)
-                    #     # if cls.i % 4 == 0:
-                    #     #     size = driver.get_window_size()
-                    #     #     driver.swipe(int(size["width"] * 0.5), int(size["height"] * 0.8),
-                    #     #                  int(size["width"] * 0.5), int(size["height"] * 0.2), 200)
+                    Common.logger(log_type, crawler).info(f"第{i+1}条视频")
+                    video_title = video_element.find("wx-view", class_="title").text
+                    play_str = video_element.find("wx-view", class_="wan").text
+                    play_cnt = int(re.sub(r"\D", "", play_str)) * 10000 if "万" in play_str else play_str
+                    cover_url = video_element.find("wx-image", class_="img")["src"]
+                    out_video_id = md5(video_title.encode('utf8')).hexdigest()
+
+                    video_dict = {
+                        "video_title": video_title,
+                        'video_id': out_video_id,
+                        "plat_cnt_str": play_str,
+                        "play_cnt": play_cnt,
+                        'comment_cnt': 0,
+                        'like_cnt': 0,
+                        'share_cnt': 0,
+                        'publish_time_stamp': int(time.time()),
+                        'publish_time_str': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time()))),
+                        'user_name': "haitunzhufu",
+                        'user_id': "haitunzhufu",
+                        "cover_url": cover_url,
+                        'avatar_url': cover_url,
+                        'session': f"haitunzhufu-{int(time.time())}"
+                    }
+                    for k, v in video_dict.items():
+                        Common.logger(log_type, crawler).info(f"{k}:{v}")
+
+                    if video_title is None or cover_url is None:
+                        Common.logger(log_type, crawler).info("无效视频\n")
+                        cls.swipe_up(driver)
+                        time.sleep(1)
+                    elif cls.repeat_out_video_id(log_type=log_type,
+                                                 crawler=crawler,
+                                                 out_video_id=out_video_id,
+                                                 env=env) != 0:
+                        Common.logger(log_type, crawler).info('视频已下载\n')
+                        cls.swipe_up(driver)
+                        time.sleep(1)
+                    else:
+                        video_title_element = cls.search_elements(driver, f'//*[contains(text(), "{video_title}")]')
+                        if video_title_element is None:
+                            Common.logger(log_type, crawler).warning(f"未找到该视频标题的element:{video_title_element}")
+                            continue
+                        Common.logger(log_type, crawler).info("点击标题,进入视频详情页")
+                        video_url = cls.get_video_url(driver, video_title_element)
+                        if video_url is None:
+                            Common.logger(log_type, crawler).info("未获取到视频播放地址\n")
+                            driver.press_keycode(AndroidKey.BACK)
+                            time.sleep(3)
+                            continue
+                        video_dict['video_url'] = video_url
+                        Common.logger(log_type, crawler).info(f"video_url:{video_url}\n")
+                        cls.download_publish(log_type=log_type,
+                                             crawler=crawler,
+                                             video_dict=video_dict,
+                                             env=env)
+                        driver.press_keycode(AndroidKey.BACK)
+                        time.sleep(3)
                 except Exception as e:
                     Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
-
-            Common.logger(log_type, crawler).info("已抓取完一组,休眠 10 秒\n")
-            time.sleep(10)
-            index = index + len(video_list)
+            Common.logger(log_type, crawler).info('已抓取完一组视频,休眠5秒\n')
+            time.sleep(5)
 
     @classmethod
     def download_publish(cls, log_type, crawler, video_dict, env):
-        # 下载视频
         Common.download_method(log_type=log_type, crawler=crawler, text='video', title=video_dict['video_title'],
                                url=video_dict['video_url'])
         ffmpeg_dict = Common.ffmpeg(log_type, crawler, f"./{crawler}/videos/{video_dict['video_title']}/video.mp4")
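The core of the rewrite above is that list metadata is now read from driver.page_source with BeautifulSoup instead of per-element XPath lookups. A self-contained sketch of that parsing path, using made-up markup that mirrors the wx-view / wx-image structure the crawler expects:

```python
import re
from hashlib import md5

from bs4 import BeautifulSoup

# Hypothetical page_source fragment shaped like the rendered mini-program list.
page_source = """
<wx-view class="img_bf">
  <wx-image class="img" src="https://example.com/cover.jpg"></wx-image>
  <wx-view class="title">生日快乐祝福</wx-view>
  <wx-view class="wan">12万次播放</wx-view>
</wx-view>
"""

soup = BeautifulSoup(page_source, "html.parser")
for card in soup.findAll("wx-view", class_="img_bf"):
    video_title = card.find("wx-view", class_="title").text
    play_str = card.find("wx-view", class_="wan").text
    # "12万次播放" -> strip non-digits -> 12 -> 120000; strings without "万" are kept as-is
    play_cnt = int(re.sub(r"\D", "", play_str)) * 10000 if "万" in play_str else play_str
    cover_url = card.find("wx-image", class_="img")["src"]
    out_video_id = md5(video_title.encode("utf8")).hexdigest()  # stable id derived from the title
    print(video_title, play_cnt, cover_url, out_video_id)
```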
@@ -312,7 +319,6 @@ class HTZFRecommend:
         Common.logger(log_type, crawler).info('视频信息写入数据库成功!\n')
 
 
-
 if __name__ == "__main__":
     HTZFRecommend.start_wechat("recommend", "haitunzhufu", 5, "dev")
     # HTZFRecommend.today_download_cnt("recommend", "haitunzhufu", "dev")
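Since both entry scripts now import from haitunzhufu_recommend2, a quick hedged smoke test (run from the repo root, as the entry scripts assume) can confirm the new module resolves and exposes the methods the runners call:

```python
# Hypothetical check, not part of the commit: verify the module path the runners now use.
import os
import sys

sys.path.append(os.getcwd())  # the entry scripts rely on running from the repo root
from haitunzhufu.haitunzhufu_recommend.haitunzhufu_recommend2 import HTZFRecommend

assert hasattr(HTZFRecommend, "start_wechat")
assert hasattr(HTZFRecommend, "get_videoList")
assert hasattr(HTZFRecommend, "swipe_up")  # helper added in this commit
print("haitunzhufu_recommend2 resolves correctly")
```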