feishu_form.py

# -*- coding: utf-8 -*-
import json
import os
import sys
import random
import itertools

sys.path.append(os.getcwd())
from collections import defaultdict
from utils.feishu_utils import Feishu


class Material:
    """
    Fetch the fixed subtitle that corresponds to a share name.
    """
    @classmethod
    def get_pzsrt_data(cls, feishu_id, feishu_sheet, video_share_name):
        data = Feishu.get_values_batch(feishu_id, feishu_sheet)
        for row in data[1:]:
            pz_mark = row[0]
            pz_zm = row[1]
            if pz_zm not in (None, '', 'None'):
                if pz_mark == video_share_name:
                    return pz_zm
        # Default subtitle: "Friendly reminder: tap the button below to pass on good luck"
        return "温馨提示:\n点击下方按钮,传递好运"
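
    # A hypothetical usage sketch (the token, sheet ID, and share name below
    # are placeholders, not real values):
    #     srt = Material.get_pzsrt_data("SHEET_TOKEN", "SHEET_ID", "share_name")
    # Returns the matching subtitle, or the default prompt string above.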
  24. """获取去重天数"""
  25. @classmethod
  26. def get_count_restrict(cls, channel):
  27. count_channel = Feishu.get_values_batch("KsoMsyP2ghleM9tzBfmcEEXBnXg", "dQriSJ")
  28. for row in count_channel[1:]:
  29. sheet_channel = row[0]
  30. if sheet_channel == channel:
  31. return row[1]
  32. """
  33. list 重新排序
  34. """
  35. @classmethod
  36. def sort_keyword_data(cls, data):
  37. # 解析 JSON 数据
  38. data = [json.loads(item) for item in data]
  39. # 根据 keyword_name 进行分组
  40. groups = defaultdict(list)
  41. for item in data:
  42. groups[item['keyword_name']].append(item)
  43. # 获取所有唯一的 keyword_name
  44. priority_names = list(groups.keys())
  45. # 对每个分组内的数据按 first_category 进行随机打乱
  46. for name in priority_names:
  47. random.shuffle(groups[name]) # 打乱每个分组内的数据顺序
  48. # 轮流排序每个分组的数据,保持 keyword_name 的顺序
  49. result = []
  50. max_length = max(len(groups[name]) for name in priority_names)
  51. for i in range(max_length):
  52. for name in priority_names:
  53. if i < len(groups[name]):
  54. result.append(groups[name][i])
  55. # 将结果转回 JSON 字符串列表
  56. sorted_list = [json.dumps(item, ensure_ascii=False) for item in result]
  57. return sorted_list
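
    # Illustration of the interleaving (hypothetical data): given keyword_name
    # groups A = [a1, a2, a3] and B = [b1, b2], the round-robin pass yields
    # [a1, b1, a2, b2, a3]; only the order inside each group is randomized.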
  58. """
  59. 获取品类对应负责人任务明细
  60. """
  61. @classmethod
  62. def get_carry_data(cls, fs_id, fs_sheet):
  63. data = Feishu.get_values_batch( fs_id, fs_sheet )
  64. processed_list = []
  65. try:
  66. for row in data[1:]:
  67. channel = row[1] # 渠道
  68. channel_url = row[2] #账号信息
  69. tag = row[3] #标签
  70. pd_id = row[4] #票圈id
  71. latest_count = row[5] # 最新视频 条数
  72. totality_count = row[6] # 任务总条数
  73. video_share = row[7] # 片中分享文案
  74. video_share_ending = row[8] # 片尾分享
  75. voice = row[9] # 片尾音色
  76. crop_tool = row[10] # 画面裁剪
  77. gg_duration = row[11] #广告时间剪裁
  78. ai_title = row[12] # AI标题
  79. hottest_count = row[13] # 最热视频 条数
  80. first_category = row[14] # 一级品类
  81. secondary_category = row[15] # 二级品类(搜索词)/ 负责人(品类)
  82. keyword_sort = row[16] # 排序条件(搜索词条件筛选)
  83. keyword_time = row[17] # 发布时间(搜索词件筛选)
  84. keyword_duration = row[18] # 视频时长(搜索词件筛选)
  85. keyword_name = row[19] # 负责人(搜索词件筛选)
  86. if not channel:
  87. continue
                if ',' in channel_url:
                    channel_url = channel_url.split(',')
                else:
                    channel_url = [channel_url]
                if "搜索" not in channel:  # "搜索" means "search"
                    # Category channels: one "latest videos" task per account
                    for user in channel_url:
                        number_dict = {
                            "task_mark": f"{channel}-{first_category}",
                            "channel": channel,
                            "channel_url": user,
                            "tag": tag,
                            "pd_id": pd_id,
                            "count": latest_count,
                            "totality_count": totality_count,
                            "video_share": video_share,
                            "video_share_ending": video_share_ending,
                            "voice": voice,
                            "crop_tool": crop_tool,
                            "gg_duration": gg_duration,
                            "ai_title": ai_title,
                            "first_category": first_category,
                            "keyword_name": secondary_category
                        }
                        processed_list.append(json.dumps(number_dict, ensure_ascii=False))
                    if hottest_count:
                        # Also emit a "hottest videos" task per account;
                        # the "历史" ("historical") prefix marks it
                        for user in channel_url:
                            number_dict = {
                                "task_mark": f"历史{channel}-{first_category}",
                                "channel": f"历史{channel}",
                                "channel_url": user,
                                "tag": tag,
                                "pd_id": pd_id,
                                "count": hottest_count,
                                "totality_count": totality_count,
                                "video_share": video_share,
                                "video_share_ending": video_share_ending,
                                "voice": voice,
                                "crop_tool": crop_tool,
                                "gg_duration": gg_duration,
                                "ai_title": ai_title,
                                "first_category": first_category,
                                "keyword_name": secondary_category
                            }
                            processed_list.append(json.dumps(number_dict, ensure_ascii=False))
                else:
                    # Search channels: build one task per account per
                    # (sort, publish-time, duration) filter combination
                    keyword_sort_list = keyword_sort.split(',')
                    keyword_duration_list = keyword_duration.split(',')
                    keyword_time_list = keyword_time.split(',')
                    combinations = list(itertools.product(keyword_sort_list, keyword_time_list, keyword_duration_list))
                    for user in channel_url:
                        for combo in combinations:
                            number_dict = {
                                "task_mark": f"{channel}-{first_category}",
                                "channel": channel,
                                "channel_url": user,
                                "tag": tag,
                                "pd_id": pd_id,
                                "count": latest_count,
                                "totality_count": totality_count,
                                "video_share": video_share,
                                "video_share_ending": video_share_ending,
                                "voice": voice,
                                "crop_tool": crop_tool,
                                "gg_duration": gg_duration,
                                "ai_title": ai_title,
                                "first_category": first_category,
                                "secondary_category": secondary_category,
                                "combo": combo,
                                "keyword_name": keyword_name,
                            }
                            processed_list.append(json.dumps(number_dict, ensure_ascii=False))
            processed_list = cls.sort_keyword_data(processed_list)
            return processed_list
        except Exception:
            # On any row error, still sort and return what was collected so far
            processed_list = cls.sort_keyword_data(processed_list)
            return processed_list
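

if __name__ == "__main__":
    # Minimal smoke-test sketch. The spreadsheet token and sheet ID below are
    # placeholders (assumptions), not real Feishu documents.
    tasks = Material.get_carry_data("FS_SPREADSHEET_TOKEN", "FS_SHEET_ID")
    for task in tasks[:3]:
        print(task)  # each item is a JSON task string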