feat: update logging

zhaohaipeng 1 week ago
parent
commit
f37294098f
1 file changed with 29 additions and 17 deletions

+ 29 - 17
script/channel_automation_provide_job_download.py

@@ -1,6 +1,6 @@
 import json
 
-file = "/Users/zhao/Downloads/da94a214-ddbb-491f-8984-9505b30d43cb.json"
+file = "/Users/zhao/Downloads/0fa4225d-ab4f-46cb-ab56-87ac0d96416f.json"
 log_json_list = []
 with open(file) as f:
     line = f.readline()
@@ -8,27 +8,39 @@ with open(file) as f:
         log_json_list.append(json.loads(line))
         line = f.readline()
 
-print(f"videoId,品类,视频截帧,爬取计划ID,站外视频ID,站外账号ID,结果,分享量,点赞量,分享量/点赞量,视频时长(秒),观众年龄50+占比,观众年龄50+TGI,过滤规则表达式")
+print(f"crawlerMode,videoId,品类,视频截帧,爬取计划ID,站外视频ID,站外账号ID,结果,原因,分享量,点赞量,分享量/点赞量,视频时长(秒),观众年龄50+占比,观众年龄50+TGI,过滤规则表达式")
 
 for log in log_json_list:
+    merge_cate2 = log.get("mergeSecondLevelCate", "")
+    crawler_mode = log.get('crawlerMode', '')
     video_id = log['videoId']
-    crawler_plan_id = log['crawlerPlanId']
-    ext_json = json.loads(log['ext'])
-    merge_cate2_map = ext_json['mergeCate2Map']
-    extra_frame_image_url = ext_json['extraFrameImageUrl']
+    crawler_plan_id = log.get('crawlerPlanId', '')
+    result = log.get('result', False)
+    reason = log.get('reason', 'success')
+    if 'ext' not in log:
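+        # No ext payload: emit only the summary columns, leaving the detail columns empty.
+        # The leading apostrophe before {crawler_plan_id} is presumably there to make
+        # spreadsheet tools treat the ID as text rather than a number.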
+        print(f"{crawler_mode},{video_id},{merge_cate2},,'{crawler_plan_id},,,{result},{reason}")
+        continue
+
+    ext_json = json.loads(log.get('ext', "{}"))
+    extra_frame_image_url = ext_json.get('extraFrameImageUrl', '')
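+    # Apart from the two metadata keys skipped below, every key in ext_json is an
+    # external (channel) content ID.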
     for channel_content_id in ext_json:
         if channel_content_id in ['mergeCate2Map', 'extraFrameImageUrl']:
             continue
         channel_ext_info = ext_json[channel_content_id]
-        channel_account_id = channel_ext_info.get("aweme_info", "{}").get("author", "{}").get("sec_uid", "")
-        result = channel_ext_info['contentDetail'].get('result', False)
-        rule_str = channel_ext_info['rule']
-        rule_context = channel_ext_info['ruleContext']
-        share_cnt = rule_context['shareCnt']
-        video_duration_s = rule_context['videoDuration_s']
-        like_cnt = rule_context['likeCnt']
-        audience_age_50_rate = rule_context['audienceAge50Rate']
-        audience_age_50_tgi = rule_context['audienceAge50TGI']
-        share_div_link = rule_context['shareDivLink']
-        print(f"{video_id},{merge_cate2_map},{extra_frame_image_url},'{crawler_plan_id},'{channel_content_id},{channel_account_id},{result},"
+        if 'ruleContext' not in channel_ext_info:
+            continue
+
+        result = channel_ext_info.get('result', False)
+        rule_str = channel_ext_info.get('rule', "")
+        rule_context = channel_ext_info.get('ruleContext', {})
+
+        # Default to an empty dict (not a tuple) so the .get() calls below cannot
+        # raise AttributeError when contentDetail is missing.
+        content_detail = channel_ext_info.get('contentDetail', {})
+        channel_account_id = content_detail.get('channelAccountId', '')
+        share_cnt = rule_context.get('shareCnt', 0)
+        video_duration_s = rule_context.get('videoDuration_s', 0)
+        like_cnt = rule_context.get('likeCnt', 0)
+        audience_age_50_rate = rule_context.get('audienceAge50Rate', 0)
+        audience_age_50_tgi = rule_context.get('audienceAge50TGI', 0)
+        share_div_link = rule_context.get('shareDivLink', 0)
+        print(f"{crawler_mode},{video_id},{merge_cate2},{extra_frame_image_url},'{crawler_plan_id},'{channel_content_id},{channel_account_id},{result},{reason},"
               f"{share_cnt},{like_cnt},{share_div_link},{video_duration_s},{audience_age_50_rate},{audience_age_50_tgi},{rule_str}")