xiaoniangao xiaochengxu

罗俊辉 1 year ago
parent
commit
e70e655ceb

+ 395 - 0
gongzhonghao/gongzhonghao_main/gzh_bug_review.py

@@ -0,0 +1,395 @@
+import json
+import time
+import requests
+import urllib3
+from common.public import task_fun_mq, get_consumer, ack_message
+from common.scheduling_db import MysqlHelper
+from common.common import Common
+import asyncio
+from gongzhonghao.gongzhonghao_author.gongzhonghao_author import GongzhonghaoAuthor
+
+token_d = {
+    "token": "883406306",
+    "cookie": "appmsglist_action_3524986952=card; ua_id=j6t2xNuC0mv6dLVbAAAAAMPRLKj1sVGSlMDwNFJKE3s=; wxuin=93278011749821; mm_lang=zh_CN; pgv_pvid=6815195556; noticeLoginFlag=1; remember_acct=2071735594%40qq.com; rewardsn=; wxtokenkey=777; _clck=3930572231|1|ff1|0; uuid=680bd7f128bf80058bc62dd82ff85c96; rand_info=CAESIBtaIUDyVXWwBRD33d7CafRp3rV5rXK7mcvYCy4Yvnn+; slave_bizuin=3236647229; data_bizuin=3236647229; bizuin=3236647229; data_ticket=Dx0Yxt5o9JJuMyndtyu3+JZBym0Dcjy6QqjPcfp+xwsLHf3Y+L9ZmP+kDX6o4t9r; slave_sid=WjV0MXhZZXlrcG9BTGVOZjBEOUlyUFptMWEyN2JNcXlpeU5kcGIyVm9IZUZOV3J1RElKb29KTDJIRHRYaGZtNnVSbklua1FOdUNsX3NoQWE4RFVKM0lKbDkzU25wblRGTDhDWFJteExtMHBjZGwyanZKOVVCWmE1UmNxT3FaZWNsd0VrVm52eEpLakFocGVz; slave_user=gh_d284c09295eb; xid=675798a4e148cb559bed6bb65681ebf9; _clsk=1a6iklq|1694746372692|2|1|mp.weixin.qq.com/weheat-agent/payload/record"
+}
+
+
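+# Descriptive note: queries the MP platform searchbiz endpoint for the target
+# official account and returns its nickname, fakeid and avatar URL.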
+def get_user_info(token_dict):
+    url = "https://mp.weixin.qq.com/cgi-bin/searchbiz?"
+    headers = {
+        "accept": "*/*",
+        "accept-encoding": "gzip, deflate, br",
+        "accept-language": "zh-CN,zh;q=0.9",
+        "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                   "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                   "&type=77&createType=5&token=1011071554&lang=zh_CN",
+        "sec-ch-ua": '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+        "sec-ch-ua-mobile": "?0",
+        "sec-ch-ua-platform": '"Windows"',
+        "sec-fetch-dest": "empty",
+        "sec-fetch-mode": "cors",
+        "sec-fetch-site": "same-origin",
+        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                      " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+        "x-requested-with": "XMLHttpRequest",
+        "cookie": token_dict["cookie"],
+    }
+    params = {
+        "action": "search_biz",
+        "begin": "0",
+        "count": "5",
+        "query": "生活小妙招小助手",
+        "token": token_dict["token"],
+        "lang": "zh_CN",
+        "f": "json",
+        "ajax": "1",
+    }
+    # proxies = Common.tunnel_proxies()
+    # print(proxies)
+    urllib3.disable_warnings()
+    r = requests.get(url=url, headers=headers, params=params, verify=False)
+    r.close()
+    print(r.json())
+    # if r.json()["base_resp"]["err_msg"] == "invalid session":
+    #     Common.logger(log_type, crawler).warning(
+    #         f"status_code:{r.status_code}, get_fakeid:{r.text}\n"
+    #     )
+    #     # Common.logging(
+    #     #     log_type,
+    #     #     crawler,
+    #     #     env,
+    #     #     f"status_code:{r.status_code}, get_fakeid:{r.text}\n",
+    #     # )
+    #     cls.release_token(log_type, crawler, env, token_dict["token_id"], -2)
+    #     if 20 >= datetime.datetime.now().hour >= 10:
+    #         Feishu.bot(
+    #             log_type,
+    #             crawler,
+    #             f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n过期啦,请扫码更换token\nhttps://mp.weixin.qq.com/",
+    #         )
+    #     time.sleep(60 * 15)
+    #     continue
+    # if r.json()["base_resp"]["err_msg"] == "freq control":
+    #     Common.logger(log_type, crawler).warning(
+    #         f"status_code:{r.status_code}, get_fakeid:{r.text}\n"
+    #     )
+    #     # Common.logging(
+    #     #     log_type,
+    #     #     crawler,
+    #     #     env,
+    #     #     f"status_code:{r.status_code}, get_fakeid:{r.text}\n",
+    #     # )
+    #     cls.release_token(log_type, crawler, env, token_dict["token_id"], -2)
+    #     if 20 >= datetime.datetime.now().hour >= 10:
+    #         Feishu.bot(
+    #             log_type,
+    #             crawler,
+    #             f"{token_dict['title']}\n操作人:{token_dict['operator']}\n更换日期:{token_dict['update_time']} \n频控啦,请扫码更换其他公众号token\nhttps://mp.weixin.qq.com/",
+    #         )
+    #     time.sleep(60 * 15)
+    #     continue
+    # if r.json()["base_resp"]["err_msg"] == "ok" and len(r.json()["list"]) == 0:
+    #     Common.logger(log_type, crawler).warning(
+    #         f"status_code:{r.status_code}, get_fakeid:{r.text}\n"
+    #     )
+    #     # Common.logging(
+    #     #     log_type,
+    #     #     crawler,
+    #     #     env,
+    #     #     f"status_code:{r.status_code}, get_fakeid:{r.text}\n",
+    #     # )
+    #     unbind_msg = task_unbind(
+    #         log_type=log_type,
+    #         crawler=crawler,
+    #         taskid=task_dict["id"],
+    #         uids=str(user_dict["uid"]),
+    #         env=env,
+    #     )
+    #     if unbind_msg == "success":
+    #         if 20 >= datetime.datetime.now().hour >= 10:
+    #             Feishu.bot(
+    #                 log_type,
+    #                 crawler,
+    #                 f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n",
+    #             )
+    #         # Common.logging(
+    #         #     log_type,
+    #         #     crawler,
+    #         #     env,
+    #         #     f"公众号:{user_dict['link']}, 站内昵称:{user_dict['nick_name']}\n抓取异常, 已取消抓取该公众号\n",
+    #         # )
+    #     else:
+    #         Common.logger(log_type, crawler).warning(f"unbind_msg:{unbind_msg}")
+    #         # Common.logging(log_type, crawler, env, f"unbind_msg:{unbind_msg}")
+    #     return None
+    user_info_dict = {
+        "user_name": r.json()["list"][0]["nickname"],
+        "user_id": r.json()["list"][0]["fakeid"],
+        "avatar_url": r.json()["list"][0]["round_head_img"],
+    }
+    return user_info_dict
+
+
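+# Descriptive note: fetches one page of articles for the account via the appmsg
+# list_ex endpoint and prints a parsed video_dict per entry (debug version; the
+# MQ push and filtering logic are left commented out below).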
+def get_videoList(token_dict, user_dict):
+    begin = 0
+    url = "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+    headers = {
+        "accept": "*/*",
+        "accept-encoding": "gzip, deflate, br",
+        "accept-language": "zh-CN,zh;q=0.9",
+        "referer": "https://mp.weixin.qq.com/cgi-bin/appmsg?"
+                   "t=media/appmsg_edit_v2&action=edit&isNew=1"
+                   "&type=77&createType=5&token="
+                   + str(token_dict["token"])
+                   + "&lang=zh_CN",
+        "sec-ch-ua": '" Not A;Brand";v="99", "Chromium";v="100", "Google Chrome";v="100"',
+        "sec-ch-ua-mobile": "?0",
+        "sec-ch-ua-platform": '"Windows"',
+        "sec-fetch-dest": "empty",
+        "sec-fetch-mode": "cors",
+        "sec-fetch-site": "same-origin",
+        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
+                      " (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36",
+        "x-requested-with": "XMLHttpRequest",
+        "cookie": token_dict["cookie"],
+    }
+    params = {
+        "action": "list_ex",
+        "begin": str(begin),
+        "count": "5",
+        "fakeid": user_dict["user_id"],
+        "type": "9",
+        "query": "",
+        "token": str(token_dict["token"]),
+        "lang": "zh_CN",
+        "f": "json",
+        "ajax": "1",
+    }
+    urllib3.disable_warnings()
+    r = requests.get(url=url, headers=headers, params=params, verify=False)
+    print(r.url)
+    r.close()
+    print(r.json())
+    if r.json()["base_resp"]["err_msg"] == "invalid session":
+        time.sleep(60 * 15)
+        print("invalid session")
+    if r.json()["base_resp"]["err_msg"] == "freq control":
+        print("freq control")
+    if (
+            r.json()["base_resp"]["err_msg"] == "invalid args"
+            and r.json()["base_resp"]["ret"] == 200002
+    ):
+        print("invalid args")
+    if "app_msg_list" not in r.json():
+        print("no app_msg_list")
+    if len(r.json()["app_msg_list"]) == 0:
+        print("没有更多视频了\n")
+        return
+    else:
+        begin += 5
+        app_msg_list = r.json()["app_msg_list"]
+        for article in app_msg_list:
+            # try:
+            create_time = article.get("create_time", 0)
+            publish_time_stamp = int(create_time)
+            publish_time_str = time.strftime(
+                "%Y-%m-%d %H:%M:%S", time.localtime(publish_time_stamp)
+            )
+            article_url = article.get("link", "")
+            video_dict = {
+                "video_id": article.get("aid", ""),
+                "video_title": article.get("title", "")
+                .replace(" ", "")
+                .replace('"', "")
+                .replace("'", ""),
+                "publish_time_stamp": publish_time_stamp,
+                "publish_time_str": publish_time_str,
+                "user_name": user_dict["user_name"],
+                "play_cnt": 0,
+                "comment_cnt": 0,
+                "like_cnt": 0,
+                "share_cnt": 0,
+                "user_id": user_dict["user_id"],
+                "avatar_url": user_dict["avatar_url"],
+                "cover_url": article.get("cover", ""),
+                "article_url": article.get("link", ""),
+                # "video_url": cls.get_video_url(article_url, env),
+                "video_url": "url",
+                "session": f"gongzhonghao-author1-{int(time.time())}",
+            }
+            print(video_dict)
+            # for k, v in video_dict.items():
+            #     Common.logger(log_type, crawler).info(f"{k}:{v}")
+            # Common.logging(
+            #     log_type, crawler, env, f"video_dict:{video_dict}"
+            # )
+
+        #         if int(time.time()) - publish_time_stamp > 3600 * 24 * int(
+        #                 rule_dict.get("period", {}).get("max", 1000)
+        #         ):
+        #             Common.logger(log_type, crawler).info(
+        #                 f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n"
+        #             )
+        #             # Common.logging(
+        #             #     log_type,
+        #             #     crawler,
+        #             #     env,
+        #             #     f"发布时间超过{int(rule_dict.get('period', {}).get('max', 1000))}天\n",
+        #             # )
+        #             return
+        #
+        #         if (
+        #                 video_dict["article_url"] == 0
+        #                 or video_dict["video_url"] == 0
+        #         ):
+        #             Common.logger(log_type, crawler).info("文章涉嫌违反相关法律法规和政策\n")
+        #             # Common.logging(log_type, crawler, env, "文章涉嫌违反相关法律法规和政策\n")
+        #         # 标题敏感词过滤
+        #         elif (
+        #                 any(
+        #                     str(word)
+        #                     if str(word) in video_dict["video_title"]
+        #                     else False
+        #                     for word in get_config_from_mysql(
+        #                         log_type=log_type,
+        #                         source=crawler,
+        #                         env=env,
+        #                         text="filter",
+        #                         action="",
+        #                     )
+        #                 )
+        #                 is True
+        #         ):
+        #             Common.logger(log_type, crawler).info("标题已中过滤词\n")
+        #             # Common.logging(log_type, crawler, env, "标题已中过滤词\n")
+        #         # 已下载判断
+        #         elif (
+        #                 cls.repeat_video(
+        #                     log_type, crawler, video_dict["video_id"], env
+        #                 )
+        #                 != 0
+        #         ):
+        #             Common.logger(log_type, crawler).info("视频已下载\n")
+        #             # Common.logging(log_type, crawler, env, "视频已下载\n")
+        #         # 标题相似度
+        #         elif (
+        #                 title_like(
+        #                     log_type,
+        #                     crawler,
+        #                     video_dict["video_title"],
+        #                     cls.platform,
+        #                     env,
+        #                 )
+        #                 is True
+        #         ):
+        #             Common.logger(log_type, crawler).info(
+        #                 f'标题相似度>=80%:{video_dict["video_title"]}\n'
+        #             )
+        #             # Common.logging(
+        #             #     log_type,
+        #             #     crawler,
+        #             #     env,
+        #             #     f'标题相似度>=80%:{video_dict["video_title"]}\n',
+        #             # )
+        #         else:
+        #             video_dict["out_user_id"] = video_dict["user_id"]
+        #             video_dict["platform"] = crawler
+        #             video_dict["strategy"] = log_type
+        #             video_dict["out_video_id"] = video_dict["video_id"]
+        #             video_dict["width"] = 0
+        #             video_dict["height"] = 0
+        #             video_dict["crawler_rule"] = json.dumps(rule_dict)
+        #             video_dict["user_id"] = user_dict["uid"]
+        #             video_dict["publish_time"] = video_dict["publish_time_str"]
+        #             mq.send_msg(video_dict)
+        #     except Exception as e:
+        #         Common.logger(log_type, crawler).error(f"抓取单条视频异常:{e}\n")
+        #         Common.logging(log_type, crawler, env, f"抓取单条视频异常:{e}\n")
+        # Common.logger(log_type, crawler).info("休眠 60 秒\n")
+        # Common.logging(log_type, crawler, env, "休眠 60 秒\n")
+        time.sleep(60)
+
+
+# Split a list into fixed-size chunks
+def chunks(data_list, chunk_size):
+    """
+    :param data_list: the list to split
+    :param chunk_size: length of each sub-list
+    :return: a generator of sub-lists [[], [], [], ...]
+    """
+    for i in range(0, len(data_list), chunk_size):
+        yield data_list[i: i + chunk_size]
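+# Example (illustrative): list(chunks([1, 2, 3, 4, 5], 2)) yields [[1, 2], [3, 4], [5]]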
+
+
+async def get_author_videos(args):
+    await asyncio.sleep(1)
+    print(args['log_type'])
+    await GongzhonghaoAuthor.get_all_videos(
+        log_type=args['log_type'],
+        crawler=args['crawler'],
+        task_dict=args['task_dict'],
+        token_index=args['token_index'],
+        rule_dict=args['rule_dict'],
+        user_list=args['user_list'],
+        env=args['env']
+    )
+
+
+if __name__ == "__main__":
+    mess = {
+        "createTime": 1684500378438,
+        "id": 27,
+        "interval": 86400,
+        "machine": "aliyun",
+        "mode": "author",
+        "operator": "xxl",
+        "rule": "[{\"duration\":{\"min\":20,\"max\":2700}},{\"period\":{\"min\":1,\"max\":2}}]",
+        "source": "gongzhonghao",
+        "spiderName": "run_gzh2_author",
+        "startTime": 1693493854438,
+        "status": 0,
+        "taskName": "公众号_2",
+        "updateTime": 1688572800179
+    }
+
+    # Parse task_dict
+    rule_list = json.loads(mess['rule'])
+    rule_dict = {}
+    for item in rule_list:
+        for key, val in item.items():
+            rule_dict[key] = val
+    mess['rule'] = rule_dict
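+    # For the rule above this flattens to:
+    # rule_dict == {"duration": {"min": 20, "max": 2700}, "period": {"min": 1, "max": 2}}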
+    task_dict = mess
+    # Parse user_list
+    task_id = task_dict["id"]
+    select_user_sql = (
+        f"""select * from crawler_user_v3 where task_id={task_id}"""
+    )
+    user_list = MysqlHelper.get_values(
+        "author", "gongzhonghao", select_user_sql, "prod", action=""
+    )
+    print(len(user_list))
+    user_list = chunks(user_list, 250)
+    print(user_list)
+    for index, i in enumerate(user_list):
+        with open("/Users/luojunhui/cyber/gzh_spider/test_AB/200/user_list_{}.json".format(index + 1), "w", encoding="utf-8") as f:
+            f.write(json.dumps(i, ensure_ascii=False, indent=4))
+    # print(user_list)
+    # loop = asyncio.get_event_loop()
+    # arg_list = []
+    # for index, sub_list in enumerate(user_list):
+    #     arg = {'log_type': "author{}".format(index + 1), 'crawler': "gongzhonghao", 'token_index': index + 1,
+    #            'task_dict': task_dict, 'rule_dict': rule_dict, 'user_list': sub_list, 'env': 'prod'}
+    #     arg_list.append(arg)
+    #
+    # coroutines_list = [get_author_videos(arg) for arg in arg_list]
+    #
+    #
+    # async def test():
+    #     await asyncio.gather(*coroutines_list)
+    # asyncio.run(test())
+
+    user_d = get_user_info(token_d)
+    # print(user_d)
+    # # #
+    # get_videoList(token_d, user_d)

+ 1 - 0
kuaishou/kuaishou_collect/__init__.py

@@ -0,0 +1 @@
+from .functions import KuaiShouSearch, search_url_from_name

+ 151 - 0
kuaishou/kuaishou_collect/functions.py

@@ -0,0 +1,151 @@
+import json
+import time
+
+import requests
+import urllib3
+from requests.adapters import HTTPAdapter
+from common.common import Common
+
+
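+# Descriptive note: wraps the Kuaishou web GraphQL endpoint (visionProfilePhotoList)
+# to page through a profile's photo feed using the pcursor.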
+class KuaiShouSearch:
+    def __init__(self, author_name):
+        self.url = "https://www.kuaishou.com/graphql"
+        self.pcursor = ""
+        self.author_name = author_name
+        self.search_result = []
+
+    def init_payload(self):
+        payload = json.dumps(
+            {
+                "operationName": "visionProfilePhotoList",
+                "variables": {
+                    "userId": self.author_name.replace(
+                        "https://www.kuaishou.com/profile/", ""
+                    ),
+                    "pcursor": self.pcursor,
+                    "page": "profile",
+                },
+                "query": "fragment photoContent on PhotoEntity {\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  commentCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n  __typename\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionProfilePhotoList($pcursor: String, $userId: String, $page: String, $webPageArea: String) {\n  visionProfilePhotoList(pcursor: $pcursor, userId: $userId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    hostName\n    pcursor\n    __typename\n  }\n}\n",
+            }
+        )
+        return payload
+
+    def init_headers(self, did):
+        headers = {
+            "Accept": "*/*",
+            "Content-Type": "application/json",
+            "Origin": "https://www.kuaishou.com",
+            "Cookie": "did={}".format(did),
+            "Content-Length": "1260",
+            "Accept-Language": "zh-CN,zh-Hans;q=0.9",
+            "Host": "www.kuaishou.com",
+            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.6.1 Safari/605.1.15",
+            "Referer": f'https://www.kuaishou.com/profile/{self.author_name.replace("https://www.kuaishou.com/profile/", "")}',
+            "Accept-Encoding": "gzip, deflate, br",
+            "Connection": "keep-alive",
+        }
+        return headers
+
+    def search(self, did):
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3: retry up to 3 times
+        s.mount("http://", HTTPAdapter(max_retries=3))
+        s.mount("https://", HTTPAdapter(max_retries=3))
+        response = s.post(
+            url=self.url,
+            headers=self.init_headers(did),
+            data=self.init_payload(),
+            # proxies=Common.tunnel_proxies(),
+            verify=False,
+            timeout=10,
+        )
+        response.close()
+        # print(json.dumps(response.json(), ensure_ascii=False, indent=4))
+        return response.json()
+
+    def search_pages(self, did):
+        page_result = []
+        self.search_result = self.search(did)
+        # pagination cursor
+        page_result.append(self.search_result)
+        for i in range(5):
+            if self.search_result.get("data", {}).get('visionProfilePhotoList'):
+                self.pcursor = self.search_result['data']['visionProfilePhotoList']['pcursor']
+                self.search_result = self.search(did)
+                page_result.append(self.search_result)
+                time.sleep(5)
+            else:
+                break
+        return page_result
+
+
+def search_url_from_name(keyword, did):
+    ""
+    payload = {
+        "operationName": "graphqlSearchUser",
+        "query": "query graphqlSearchUser($keyword: String, $pcursor: String, $searchSessionId: String) {\n  visionSearchUser(keyword: $keyword, pcursor: $pcursor, searchSessionId: $searchSessionId) {\n    result\n    users {\n      fansCount\n      photoCount\n      isFollowing\n      user_id\n      headurl\n      user_text\n      user_name\n      verified\n      verifiedDetail {\n        description\n        iconType\n        newVerified\n        musicCompany\n        type\n        __typename\n      }\n      __typename\n    }\n    searchSessionId\n    pcursor\n    __typename\n  }\n}\n",
+        "variables": {
+            "keyword": keyword
+        }
+    }
+    headers = {
+        "Content-Type": "application/json",
+        "Cookie": "did={}".format(did),
+        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"
+    }
+    response = requests.post(
+        url="https://www.kuaishou.com/graphql",
+        headers=headers,
+        # proxies=Common.tunnel_proxies(),
+        data=json.dumps(payload)
+    )
+    print(response.json())
+    user_list = response.json()['data']['visionSearchUser']['users']
+    print(user_list)
+    account = user_list[0]
+    # print("user basic info")
+    print(json.dumps(account, ensure_ascii=False, indent=4))
+    return account
+
+
+if __name__ == "__main__":
+    my_did = "web_57fe06bfa96f8fdae46d286e125a5c18"
+    my_did2 = "web_727b05862ce2afa4028018cc79a50257"
+
+    account = search_url_from_name(keyword="知后品牌优选", did=my_did)
+    author_name = account['user_id']
+    # print(account)
+    # ksc = KuaiShouSearch(author_name=author_name)
+    # result = ksc.search_pages(did=my_did)
+    # print(result)
+    # c = 0
+    # f = open("url.txt", "a+", encoding="utf-8")
+    # for obj in result:
+    #     for feed in obj['data']['visionProfilePhotoList']['feeds']:
+    #         c += 1
+    #         print(json.dumps(feed, ensure_ascii=False))
+    #         f.write(json.dumps(feed, ensure_ascii=False) + "\n")
+    # print(c)
+    # f.close()
+    # print(json.dumps(result, ensure_ascii=False, indent=4))
+    # for feed in result['data']['visionProfilePhotoList']['feeds']:
+    #     print(json.dumps(feed, ensure_ascii=False))
+
+    # Read an existing json file to get the user ids
+    # with open("result_2.json", "r", encoding="utf-8") as f:
+    #     my_test_id_dict = json.loads(f.read())
+
+    # Iterate over the id dict
+    # for my_id in my_test_id_dict:
+    #     my_keyword = my_id.replace("@", "")
+    #     account = search_url_from_name(keyword=my_keyword, did=my_did)
+    #     author_name = account['user_id']
+    #     ksc = KuaiShouSearch(author_name=author_name)
+    #     result = ksc.search(did=my_did)
+    #     # print(json.dumps(result, ensure_ascii=False, indent=4))
+    #     for feed in result['data']['visionProfilePhotoList']['feeds']:
+    #         print(json.dumps(feed, ensure_ascii=False))
+        # video_url = feed['photo']['photoUrl']
+        # print("success get kuaishou video_url", video_url)
+

+ 149 - 0
kuaishou/kuaishou_collect/kuaishou_collect.py

@@ -0,0 +1,149 @@
+import json
+import os
+import random
+import time
+
+from appium import webdriver
+
+from appium.webdriver.common.touch_action import TouchAction
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.common.by import By
+
+
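+# Descriptive note: Appium-driven collector that searches a user id in the Kuaishou
+# app, opens the profile and, if a favorites tab exists, scrolls through it while
+# recording author/title pairs.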
+class KuaiShouCollect:
+    # Initialize Appium
+    def __init__(self, user_id, log_type, crawler, env):
+        self.desired_caps = {
+            "platformName": "Android",
+            "deviceName": "AQQB9X3211W00486",
+            "appPackage": "com.smile.gifmaker",
+            "appActivity": "com.yxcorp.gifshow.HomeActivity",
+            "noReset": True,
+            "automationName": "UiAutomator2",
+        }
+        self.driver = webdriver.Remote(
+            "http://localhost:4723/wd/hub", self.desired_caps
+        )
+        self.driver.implicitly_wait(10)
+        self.action = TouchAction(self.driver)
+        if os.path.exists("result.json"):
+            with open("result.json", "r", encoding="utf-8") as f:
+                self.name_info_dict = json.loads(f.read())
+        else:
+            self.name_info_dict = {}
+        self.user_id = user_id
+        self.log_type = log_type
+        self.crawler = crawler
+        self.env = env
+
+    def search_by_id(self):
+        # Search: find the search button and tap it
+        search_button = WebDriverWait(self.driver, 20).until(
+            EC.element_to_be_clickable((By.ID, r'com.smile.gifmaker:id/nasa_featured_default_search_view'))
+        )
+        print("找到了搜索键")
+        if search_button:
+            # action = TouchAction(self.driver)
+            self.action.tap(search_button).perform()
+        else:
+            print("can not find search button")
+            return
+        # Find the search bar and type the keywords
+        search_bar = WebDriverWait(self.driver, 10).until(
+            EC.presence_of_element_located((By.ID, r'com.smile.gifmaker:id/editor'))
+        )
+        if search_bar:
+            search_bar.send_keys(self.user_id)
+            # Tap search
+            self.driver.find_element(By.ID, r"com.smile.gifmaker:id/right_tv").click()
+            print("搜索完成")
+        else:
+            print("fails in input keywords")
+            return
+
+    def get_person_info(self):
+        """
+        Tap the avatar to open the user's profile page.
+        Check whether a favorites list exists; if it does, collect it, otherwise skip, back out, and search the next ID.
+        """
+        # Find the avatar and tap it to open the profile
+        avatar = WebDriverWait(self.driver, 20).until(
+            EC.presence_of_element_located((By.XPATH,
+                                            r'//androidx.recyclerview.widget.RecyclerView[@resource-id="com.smile.gifmaker:id/recycler_view"]/android.view.ViewGroup[2]'))
+        )
+        # self.driver.find_element.click()
+        self.action.tap(avatar).perform()
+        print("进入详情页")
+        # Look at the tabs on the profile page to see whether a favorites tab exists
+        person_tab_list = self.driver.find_elements(
+            By.ID, r"com.smile.gifmaker:id/tab_text"
+        )
+        time.sleep(10)
+        for tab in person_tab_list:
+            print(tab.text)
+            if "收藏" in tab.text:
+                # print(tab.text)
+                self.action.tap(tab).perform()
+                time.sleep(10)
+                first_video = self.driver.find_element(By.XPATH, r'//android.widget.ImageView[@content-desc="作品"]')
+                print("找到了第一条视频")
+                self.action.tap(first_video).perform()
+                self.get_single_video_info()
+                print("开始刷视频")
+                for i in range(50):
+                    try:
+                        print(i)
+                        self.scroll_down()
+                        self.get_single_video_info()
+                    except:
+                        pass
+            else:
+                continue
+
+    def scroll_down(self):
+        """
+        Swipe down to scroll to the next video.
+        """
+        time.sleep(1)
+        width = self.driver.get_window_size()['width']  # screen width
+        height = self.driver.get_window_size()['height']  # screen height
+        # print(width, height)
+        self.action.press(x=int(0.5 * width), y=int(0.75 * height))
+        self.action.wait(ms=random.randint(200, 400))
+        self.action.move_to(x=int(0.5 * width), y=int(0.25 * height))
+        self.action.release()
+        self.action.perform()
+
+    def get_single_video_info(self):
+        try:
+            author_name = self.driver.find_element(By.ID, r'com.smile.gifmaker:id/user_name_text_view').text
+        except:
+            author_name = ""
+        try:
+            title = self.driver.find_element(By.ID, r'com.smile.gifmaker:id/element_caption_label').text
+        except:
+            title = ""
+        if title and author_name:
+            self.name_info_dict[author_name] = title
+
+    def close_spider(self):
+        self.driver.quit()
+        with open("result.json", "w", encoding="utf-8") as f:
+            f.write(json.dumps(self.name_info_dict, ensure_ascii=False, indent=4))
+        return self.name_info_dict
+
+
+if __name__ == "__main__":
+    """
+    When crawling, hitting a video that is currently live-streaming is very slow; this needs optimization.
+    The existing author_ crawler has gone unmaintained for a long time, has issues, and has been broken the whole time.
+    2594305039, 2089610315,
+    """
+
+    id_list = [1396121077, 1811823755, "lxy20003246"]
+    for user_id in id_list:
+        # log_type / crawler / env are placeholder values (assumption) for local debugging
+        ksc = KuaiShouCollect(user_id, log_type="collect", crawler="kuaishou", env="dev")
+        ksc.search_by_id()
+        ksc.get_person_info()
+        ksc.close_spider()

+ 132 - 0
kuaishou/kuaishou_collect/kuaishou_comments.py

@@ -0,0 +1,132 @@
+import time
+
+from appium import webdriver
+from appium.webdriver.common.touch_action import TouchAction
+from selenium.webdriver.common.by import By
+
+
+class KuaiShouComments:
+
+    # Initialize Appium
+    def __init__(self):
+        self.desired_caps = {
+            "platformName": "Android",
+            "deviceName": "AQQB9X3211W00486",
+            "appPackage": "com.smile.gifmaker",
+            "appActivity": "com.yxcorp.gifshow.HomeActivity",
+            "noReset": True,
+            "automationName": "UiAutomator2"
+        }
+        self.driver = webdriver.Remote('http://localhost:4723/wd/hub', self.desired_caps)
+        self.driver.implicitly_wait(10)  # set implicit wait
+
+        # Basic setup: dismiss ads, teen mode, live-stream prompts, etc.
+
+    # Search by keyword to get the video list
+    def search_by_keywords(self, keyword):
+        # Search: find the search button and tap it
+        search_button = self.driver.find_element(By.XPATH, r'//android.widget.ImageView[@content-desc="查找"]')
+        if search_button:
+            search_button.click()
+            time.sleep(2)
+        else:
+            print("can not find search button")
+            return
+        # Find the search bar and type the keywords
+        search_bar = self.driver.find_element(By.ID, r'com.smile.gifmaker:id/editor')
+        if search_bar:
+            search_bar.send_keys(keyword)
+            self.driver.find_element(By.ID, r'com.smile.gifmaker:id/right_tv').click()
+            time.sleep(10)
+        else:
+            print("fails in input keywords")
+            return
+        # Find the video tab in the search results
+        sp_bar = self.driver.find_element(By.XPATH,
+                                          r'//android.widget.HorizontalScrollView[@resource-id="com.smile.gifmaker:id/tabs"]/android.widget.LinearLayout/android.view.View[1]')
+        if sp_bar:
+            sp_bar.click()
+            time.sleep(2)
+        else:
+            return
+        # Tap the first video
+        first_video = self.driver.find_element(By.XPATH,
+                                               r'//android.widget.RelativeLayout[@resource-id="com.smile.gifmaker:id/container"][1]')
+        if first_video:
+            first_video.click()
+        else:
+            return
+
+    # Open the video's comment section
+    def open_comments_area(self):
+        comment_button = self.driver.find_element(By.ID, r'com.smile.gifmaker:id/comment_button')
+        if comment_button:
+            comment_button.click()
+            # Maximize the comment section
+            self.driver.find_element(By.ID, r'com.smile.gifmaker:id/tabs_panel_full').click()
+        else:
+            return
+        # Tap the avatars of commenters; paging is not configured yet (to be added later)
+        people_list = self.driver.find_elements(By.ID, r'com.smile.gifmaker:id/avatar')
+        print(len(people_list))
+        for person_ele in people_list:
+            self.process_comment_person(person_ele)
+
+        # Close the comment section
+        self.driver.find_element(By.ID, r'com.smile.gifmaker:id/tabs_panel_close').click()
+
+    # Open a commenter's profile from their avatar
+    def process_comment_person(self, element):
+        element.click()
+        time.sleep(1)
+        person_bar_list = self.driver.find_elements(By.ID, r'com.smile.gifmaker:id/tab_text')
+        if person_bar_list:
+            for tab in person_bar_list:
+                print(tab.text)
+                if "收藏" in tab.text:
+                    tab.click()
+                    # Get the video list
+                    self.driver.find_element(By.ID, r'com.smile.gifmaker:id/player_cover_container').click()
+                    video_count = int(tab.text.split(" ")[1])
+                    self.find_video_basic_info()
+                    for i in range(10):
+                        time.sleep(4)
+                        self.scroll_down()
+                        try:
+                            self.find_video_basic_info()
+                        except:
+                            pass
+                    self.driver.find_element(By.ID, r'com.smile.gifmaker:id/left_btn').click()
+        else:
+            return
+
+        # Exit the profile page
+        self.driver.find_element(By.ID, r'com.smile.gifmaker:id/left_btn').click()
+        time.sleep(10)
+
+    # Swipe down
+    def scroll_down(self):
+        width = self.driver.get_window_size()['width']
+        height = self.driver.get_window_size()['height']
+        action = TouchAction(self.driver)
+        action.press(x=int(0.5 * width), y=int(0.75 * height))
+        action.wait(ms=300)
+        action.move_to(x=int(0.5 * width), y=int(0.25 * height))
+        action.release()
+        action.perform()
+
+    def find_video_basic_info(self):
+        author_name = self.driver.find_element(By.ID, r'com.smile.gifmaker:id/user_name_text_view').text
+        title = self.driver.find_element(By.ID, r'com.smile.gifmaker:id/element_caption_label').text
+        print(author_name)
+        print(title)
+
+
+if __name__ == "__main__":
+    ksc = KuaiShouComments()
+    ksc.search_by_keywords("黄瓜不能和什么一起吃?")
+    ksc.open_comments_area()
+    for i in range(10):
+        time.sleep(3)
+        ksc.scroll_down()
+        ksc.open_comments_area()

+ 164 - 0
kuaishou/kuaishou_collect/kuaishou_did.py

@@ -0,0 +1,164 @@
+import json
+
+import requests
+import urllib3
+
+from fake_useragent import FakeUserAgent
+from requests.adapters import HTTPAdapter
+
+
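+# Descriptive note: requests the Kuaishou video-search page and pulls the "did"
+# device id (a cookie value starting with "web_") out of the response cookies.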
+def get_did(user_agent):
+    headers = {
+        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
+        "Accept-Encoding": "gzip, deflate, br",
+        "Accept-Language": "zh-CN,zh;q=0.9",
+        "Cache-Control": "max-age=0",
+        "Connection": "keep-alive",
+        "Host": "www.kuaishou.com",
+        "Sec-Ch-Ua": '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+        "Sec-Ch-Ua-Mobile": "?0",
+        "Sec-Ch-Ua-Platform": '"macOS"',
+        "Sec-Fetch-Dest": "document",
+        "Sec-Fetch-Mode": "navigate",
+        "Sec-Fetch-Site": "same-origin",
+        "Sec-Fetch-User": "?1",
+        "Upgrade-Insecure-Requests": "1",
+        "User-Agent": user_agent
+    }
+    response = requests.get('https://www.kuaishou.com/search/video?searchKey=', headers=headers)
+    value_list = response.cookies.values()
+    for v in value_list:
+        if "web_" in v:
+            return v
+    return ""
+
+
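+# Descriptive note: fires a user search with the given did; the response is assumed
+# to carry a captcha redirect URL in data.url, and the query string after "?"
+# (the captcha session) is returned.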
+def get_captchaSession(did, user_agent):
+    payload = {
+        "operationName": "graphqlSearchUser",
+        "variables": {
+            "keyword": "周大爷不服老"
+        },
+        "query": "query graphqlSearchUser($keyword: String, $pcursor: String, $searchSessionId: String) {\n  visionSearchUser(keyword: $keyword, pcursor: $pcursor, searchSessionId: $searchSessionId) {\n    result\n    users {\n      fansCount\n      photoCount\n      isFollowing\n      user_id\n      headurl\n      user_text\n      user_name\n      verified\n      verifiedDetail {\n        description\n        iconType\n        newVerified\n        musicCompany\n        type\n        __typename\n      }\n      __typename\n    }\n    searchSessionId\n    pcursor\n    __typename\n  }\n}\n"
+    }
+    headers = {
+        'Accept': '*/*',
+        'Accept-Encoding': 'gzip, deflate, br',
+        'Accept-Language': 'zh-CN,zh;q=0.9',
+        'Connection': 'keep-alive',
+        'Content-Length': '668',
+        'Content-Type': 'application/json',
+        'Cookie': 'did={}'.format(did),
+        'Host': 'www.kuaishou.com',
+        'Origin': 'https://www.kuaishou.com',
+        # 'Referer': 'https://www.kuaishou.com/search/author?searchKey=%E5%91%A8%E5%A4%A7%E7%88%B7%E4%B8%8D%E6%9C%8D%E8%80%81',
+        'Sec-Ch-Ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+        'Sec-Ch-Ua-Mobile': '?0',
+        'Sec-Ch-Ua-Platform': '"macOS"',
+        'Sec-Fetch-Dest': 'empty',
+        'Sec-Fetch-Mode': 'cors',
+        'Sec-Fetch-Site': 'same-origin',
+        'User-Agent': user_agent
+    }
+    urllib3.disable_warnings()
+    s = requests.session()
+    # max_retries=3: retry up to 3 times
+    s.mount("http://", HTTPAdapter(max_retries=3))
+    s.mount("https://", HTTPAdapter(max_retries=3))
+    response = s.post(
+        url="https://www.kuaishou.com/graphql",
+        headers=headers,
+        data=json.dumps(payload),
+        # proxies=Common.tunnel_proxies(),
+        verify=False,
+        timeout=10,
+    )
+    response.close()
+    # print(response.json())
+    return response.json()['data']['url'].split("?")[-1]
+
+
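+# Descriptive note: posts the captcha session to the sliding-captcha config endpoint
+# and returns the captchaSn from the response.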
+def get_config(did, user_agent, captcha_session):
+    payload = captcha_session
+    headers = {
+        'Accept': 'application/json, text/plain, */*',
+        'Accept-Encoding': 'gzip, deflate, br',
+        'Accept-Language': 'zh-CN,zh;q=0.9',
+        'Connection': 'keep-alive',
+        'Content-Length': '411',
+        'Content-Type': 'application/x-www-form-urlencoded',
+        'Cookie': 'did={}'.format(did),
+        'Host': 'captcha.zt.kuaishou.com',
+        'Origin': 'https://captcha.zt.kuaishou.com',
+        'Referer': 'https://captcha.zt.kuaishou.com/iframe/index.html?{}'.format(captcha_session),
+        'Sec-Ch-Ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+        'Sec-Ch-Ua-Mobile': '?0',
+        'Sec-Ch-Ua-Platform': '"macOS"',
+        'Sec-Fetch-Dest': 'empty',
+        'Sec-Fetch-Mode': 'cors',
+        'Sec-Fetch-Site': 'same-origin',
+        'User-Agent': user_agent
+    }
+    urllib3.disable_warnings()
+    s = requests.session()
+    # max_retries=3: retry up to 3 times
+    s.mount("http://", HTTPAdapter(max_retries=3))
+    s.mount("https://", HTTPAdapter(max_retries=3))
+    response = s.post(
+        url="https://captcha.zt.kuaishou.com/rest/zt/captcha/sliding/config",
+        headers=headers,
+        data=payload,
+        # proxies=Common.tunnel_proxies(),
+        verify=False,
+        timeout=10,
+    )
+    response.close()
+    print(json.dumps(response.json(), ensure_ascii=False, indent=4))
+    return response.json()['captchaSn']
+
+
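+# Descriptive note: submits the (externally computed) encrypted verify parameters to
+# the sliding-captcha verify endpoint; the response body is not used here.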
+def check(encrypted_param, did, user_agent):
+    payload = {
+        "verifyParam": encrypted_param
+    }
+    headers = {
+        'Accept': 'application/json, text/plain, */*',
+        'Accept-Encoding': 'gzip, deflate, br',
+        'Accept-Language': 'zh-CN,zh;q=0.9',
+        'Connection': 'keep-alive',
+        'Content-Length': '9246',
+        'Content-Type': 'application/json',
+        'Cookie': 'did={}'.format(did),
+        'Host': 'captcha.zt.kuaishou.com',
+        'Origin': 'https://captcha.zt.kuaishou.com',
+        'Sec-Ch-Ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+        'Sec-Ch-Ua-Mobile': '?0',
+        'Sec-Ch-Ua-Platform': '"macOS"',
+        'Sec-Fetch-Dest': 'empty',
+        'Sec-Fetch-Mode': 'cors',
+        'Sec-Fetch-Site': 'same-origin',
+        'User-Agent': user_agent
+    }
+    urllib3.disable_warnings()
+    s = requests.session()
+    # max_retries=3: retry up to 3 times
+    s.mount("http://", HTTPAdapter(max_retries=3))
+    s.mount("https://", HTTPAdapter(max_retries=3))
+    response = s.post(
+        url="https://captcha.zt.kuaishou.com/rest/zt/captcha/sliding/kSecretApiVerify",
+        headers=headers,
+        data=json.dumps(payload),
+        verify=False,
+        timeout=10,
+    )
+    response.close()
+
+
+if __name__ == "__main__":
+    fake_agent = FakeUserAgent().random
+    fake_did = get_did(fake_agent)
+    print(fake_did)
+    captcha_session = get_captchaSession(did=fake_did, user_agent=fake_agent)
+    print(captcha_session)
+    captchaSn = get_config(did=fake_did, user_agent=fake_agent, captcha_session=captcha_session)
+    print(captchaSn)

+ 95 - 0
kuaishou/kuaishou_collect/searchfunctions.py

@@ -0,0 +1,95 @@
+import json
+import time
+
+import requests
+import urllib3
+from requests.adapters import HTTPAdapter
+
+
+# Search Kuaishou videos by title and dig out the video download address
+class SearchTitle:
+    def __init__(self, title, did):
+        self.title = title
+        self.did = did
+        self.pcursor = ""
+        self.search_result = []
+
+    # Search request
+    def search_title(self):
+        payload = json.dumps(
+            {
+                "operationName": "visionSearchPhoto",
+                "variables": {
+                    "keyword": self.title,
+                    "pcursor": self.pcursor,
+                    "page": "search"
+                },
+                "query": "fragment photoContent on PhotoEntity {\n  __typename\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  commentCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n}\n\nfragment recoPhotoFragment on recoPhotoEntity {\n  __typename\n  id\n  duration\n  caption\n  originCaption\n  likeCount\n  viewCount\n  commentCount\n  realLikeCount\n  coverUrl\n  photoUrl\n  photoH265Url\n  manifest\n  manifestH265\n  videoResource\n  coverUrls {\n    url\n    __typename\n  }\n  timestamp\n  expTag\n  animatedCoverUrl\n  distance\n  videoRatio\n  liked\n  stereoType\n  profileUserTopPhoto\n  musicBlocked\n}\n\nfragment feedContent on Feed {\n  type\n  author {\n    id\n    name\n    headerUrl\n    following\n    headerUrls {\n      url\n      __typename\n    }\n    __typename\n  }\n  photo {\n    ...photoContent\n    ...recoPhotoFragment\n    __typename\n  }\n  canAddComment\n  llsid\n  status\n  currentPcursor\n  tags {\n    type\n    name\n    __typename\n  }\n  __typename\n}\n\nquery visionSearchPhoto($keyword: String, $pcursor: String, $searchSessionId: String, $page: String, $webPageArea: String) {\n  visionSearchPhoto(keyword: $keyword, pcursor: $pcursor, searchSessionId: $searchSessionId, page: $page, webPageArea: $webPageArea) {\n    result\n    llsid\n    webPageArea\n    feeds {\n      ...feedContent\n      __typename\n    }\n    searchSessionId\n    pcursor\n    aladdinBanner {\n      imgUrl\n      link\n      __typename\n    }\n    __typename\n  }\n}\n"
+            }
+        )
+        headers = {
+            "Accept": "*/*",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Accept-Language": "zh-CN,zh;q=0.9",
+            "Connection": "keep-alive",
+            "Content-Type": "application/json",
+            "Cookie": "did={}".format(did),
+            "Host": "www.kuaishou.com",
+            "Origin": "https://www.kuaishou.com",
+            # "Referer": "https://www.kuaishou.com/search/video?searchKey=%23%E5%8C%BB%E9%99%A2%E8%B6%A3%E4%BA%8B%23%E4%B8%AD%E5%8C%BB%E8%B0%83%E7%90%86%23%E5%8C%BB%E5%AD%A6%E7%9F%A5%E8%AF%86%E7%A7%91%E6%99%AE",
+            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36"
+        }
+        urllib3.disable_warnings()
+        s = requests.session()
+        # max_retries=3: retry up to 3 times
+        s.mount("http://", HTTPAdapter(max_retries=3))
+        s.mount("https://", HTTPAdapter(max_retries=3))
+        response = s.post(
+            url="https://www.kuaishou.com/graphql",
+            headers=headers,
+            data=payload,
+            # proxies=Common.tunnel_proxies(),
+            verify=False,
+            timeout=10,
+        )
+        response.close()
+        return response.json()
+
+    # Paginated search
+    def search_pages(self):
+        result = []
+        self.search_result = self.search_title()
+        if self.search_result:
+            result.append(self.search_result)
+            for i in range(4):
+                self.pcursor = self.search_result['data']['visionSearchPhoto']['pcursor']
+                self.search_result = self.search_title()
+                result.append(self.search_result)
+                time.sleep(2)
+            return result
+        else:
+            return []
+
+
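+# Descriptive note: keeps only the feeds whose author name matches user_name and
+# returns the one with the highest likeCount.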
+def process_item_list(user_name, data_list):
+    result = []
+    for line in data_list:
+        for obj in line['data']['visionSearchPhoto']['feeds']:
+            name = obj['author']['name']
+            if name in user_name:
+                result.append(obj)
+    result = sorted(result, reverse=True, key=lambda x: x['photo']['likeCount'])
+    print(result)
+    return result[0]
+
+
+did = "web_7cf1f7a1dd0e8936fcaeebcd4e0a7061"
+title = "历史会记住! 日本核污水搜 画面,旅游团遭退团,对我们有何影响… 展开"
+
+result = SearchTitle(title, did).search_pages()
+new_r = process_item_list("@主持人颢鑫", result)
+
+
+print(len(new_r))
+print(json.dumps(new_r, ensure_ascii=False, indent=4))
+

+ 0 - 0
xiaoniangao/xiaoniangao_xcx_rec/__init__.py