# feishu_form.py
# -*- coding: utf-8 -*-
import json
import os
import sys
import itertools
from collections import defaultdict

sys.path.append(os.getcwd())
from common.sql_help import sqlCollect
from common.feishu_utils import Feishu

class Material:
    """Helpers for reading task and owner configuration from Feishu spreadsheets."""

    @classmethod
    def feishu_list(cls):
        """Get the list of all owners from the summary sheet."""
        summary = Feishu.get_values_batch("summary", "bc154d")
        owner_list = []
        for row in summary[1:]:
            mark = row[0]
            name = row[1]
            feishu_id = row[3]
            feishu_sheet = row[4]
            cookie_sheet = row[5]
            number = {"mark": mark, "name": name, "feishu_id": feishu_id,
                      "feishu_sheet": feishu_sheet, "cookie_sheet": cookie_sheet}
            if mark:
                owner_list.append(number)
            else:
                # Rows are read top-down; stop at the first row without a mark.
                return owner_list
        return owner_list

    @classmethod
    def get_sph_user(cls):
        """Read channel users from the sheet and keep those that pass the sph_channel_user check."""
        data = Feishu.get_values_batch("GPbhsb5vchAN3qtzot6cu1f0n1c", "cc7ef0")
        user_data_list = []
        try:
            for row in data[1:]:
                users = str(row[2])
                if users and users != 'None':
                    if ',' in users:
                        user_list = users.split(',')
                    else:
                        user_list = [users]
                    for user in user_list:
                        status = sqlCollect.sph_channel_user(user)
                        if status:
                            user_data_list.append(user)
                else:
                    # Stop at the first empty cell and return what has been collected so far.
                    return user_data_list
            return user_data_list
        except Exception:
            return user_data_list

    @classmethod
    def sort_keyword_data(cls, data):
        """Re-order the list so tasks of different owners (keyword_name) are interleaved."""
        data = [json.loads(item) for item in data]
        if not data:
            return []
        # Group the tasks by keyword_name.
        groups = defaultdict(list)
        for item in data:
            groups[item['keyword_name']].append(item)
        # Round-robin over all owners: take the i-th task of each group in turn.
        unique_names = list(groups.keys())
        result = []
        max_length = max(len(groups[name]) for name in unique_names)
        for i in range(max_length):
            for name in unique_names:
                if i < len(groups[name]):
                    result.append(groups[name][i])
        # Serialize the result back to JSON strings.
        sorted_list = [json.dumps(item, ensure_ascii=False) for item in result]
        return sorted_list
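
    # Illustration (hypothetical data, not from the original sheet): if the deserialized
    # tasks group into {"A": [A1, A2, A3], "B": [B1]} by keyword_name, the round-robin
    # above yields [A1, B1, A2, A3], so consecutive tasks rotate between owners.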

    @classmethod
    def get_keyword_data(cls, feishu_id, feishu_sheet):
        """Get the keyword-search tasks from the given sheet."""
        data = Feishu.get_values_batch(feishu_id, feishu_sheet)
        processed_list = []
        try:
            for row in data[1:]:
                channel_id = row[1]
                channel_url = str(row[2])
                piaoquan_id = row[3]
                number = row[4]
                video_share = row[5]
                video_ending = row[6]
                voice = row[7]
                crop_tool = row[8]
                gg_duration = row[9]
                title = row[10]
                if not channel_url:
                    continue
                first_category = row[12]  # primary category
                secondary_category = row[13]  # secondary category

                def count_items(item, separator):
                    if item and item not in {'None', ''}:
                        return len(item.split(separator))
                    return 0

                video_id_total = count_items(str(channel_url), ',')
                title_total = count_items(str(title), '/')
                video_ending_total = count_items(str(video_ending), ',')
                values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total,
                          crop_tool, gg_duration, title_total, first_category]
                filtered_values = [str(value) for value in values if value is not None and value != "None"]
                task_mark = "_".join(filtered_values)
                keyword_sort = row[14]      # sort condition
                keyword_time = row[15]      # publish time
                keyword_duration = row[16]  # video duration
                keyword_name = row[17]      # owner
                keyword_sort_list = keyword_sort.split(',')
                keyword_duration_list = keyword_duration.split(',')
                keyword_time_list = keyword_time.split(',')
                # Every combination of sort condition, publish time and duration is one search task.
                combinations = list(itertools.product(keyword_sort_list, keyword_time_list, keyword_duration_list))
                if ',' in channel_url:
                    channel_url = channel_url.split(',')
                else:
                    channel_url = [channel_url]
                for user in channel_url:
                    for combo in combinations:
                        number_dict = {
                            "task_mark": task_mark,
                            "channel_id": channel_id,
                            "channel_url": user,
                            "piaoquan_id": piaoquan_id,
                            "number": number,
                            "title": title,
                            "video_share": video_share,
                            "video_ending": video_ending,
                            "crop_total": crop_tool,
                            "gg_duration_total": gg_duration,
                            "voice": voice,
                            "first_category": first_category,          # primary category
                            "secondary_category": secondary_category,  # secondary category
                            "combo": combo,                            # search condition
                            "keyword_name": keyword_name               # category owner
                        }
                        processed_list.append(json.dumps(number_dict, ensure_ascii=False))
        except Exception:
            # On any parsing error, return whatever was collected so far, re-ordered by owner.
            processed_list = cls.sort_keyword_data(processed_list)
            return processed_list
        processed_list = cls.sort_keyword_data(processed_list)
        return processed_list
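
    # Illustration (hypothetical values, not from the original sheet): a row with
    # keyword_sort "S1,S2", keyword_time "T1" and keyword_duration "D1" expands into two
    # tasks, one per itertools.product combination, with "combo" set to
    # ("S1", "T1", "D1") and ("S2", "T1", "D1") respectively.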

    @classmethod
    def get_task_data(cls, feishu_id, feishu_sheet):
        """Get the task details for the given owner's sheet."""
        data = Feishu.get_values_batch(feishu_id, feishu_sheet)
        processed_list = []
        try:
            for row in data[1:]:
                channel_id = row[1]
                channel_url = str(row[2])
                piaoquan_id = row[3]
                number = row[4]
                video_share = row[5]
                video_ending = row[6]
                voice = row[7]
                crop_tool = row[8]
                gg_duration = row[9]
                title = row[10]
                if not channel_url:
                    continue
                try:
                    ls_number = int(row[11])
                except (TypeError, ValueError):
                    ls_number = None

                def count_items(item, separator):
                    if item and item not in {'None', ''}:
                        return len(item.split(separator))
                    return 0

                video_id_total = count_items(str(channel_url), ',')
                title_total = count_items(str(title), '/')
                video_ending_total = count_items(str(video_ending), ',')
                values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total,
                          crop_tool, gg_duration, title_total]
                filtered_values = [str(value) for value in values if value is not None and value != "None"]
                task_mark = "_".join(filtered_values)
                if piaoquan_id and piaoquan_id not in {'None', ''}:
                    if ',' in channel_url:
                        channel_url = channel_url.split(',')
                    else:
                        channel_url = [channel_url]
                    for user in channel_url:
                        number_dict = {
                            "task_mark": task_mark,
                            "channel_id": channel_id,
                            "channel_url": user,
                            "piaoquan_id": piaoquan_id,
                            "number": number,
                            "title": title,
                            "video_share": video_share,
                            "video_ending": video_ending,
                            "crop_total": crop_tool,
                            "gg_duration_total": gg_duration,
                            "voice": voice
                        }
                        processed_list.append(json.dumps(number_dict, ensure_ascii=False))
                        if channel_id in ("抖音", "快手", "视频号"):
                            # Add an extra "history" task for these channels when a history count is configured.
                            if ls_number:
                                if channel_id == "抖音":
                                    new_channel_id = "抖音历史"
                                elif channel_id == "快手":
                                    new_channel_id = "快手历史"
                                else:
                                    new_channel_id = "视频号历史"
                                values1 = [new_channel_id, video_id_total, piaoquan_id, video_share,
                                           video_ending_total, crop_tool, gg_duration, title_total]
                                filtered_values1 = [str(value) for value in values1 if value is not None and value != "None"]
                                task_mark1 = "_".join(filtered_values1)
                                number_dict = {
                                    "task_mark": task_mark1,
                                    "channel_id": new_channel_id,
                                    "channel_url": user,
                                    "piaoquan_id": piaoquan_id,
                                    "number": ls_number,
                                    "title": title,
                                    "video_share": video_share,
                                    "video_ending": video_ending,
                                    "crop_total": crop_tool,
                                    "gg_duration_total": gg_duration,
                                    "voice": voice
                                }
                                processed_list.append(json.dumps(number_dict, ensure_ascii=False))
                else:
                    # Stop at the first row without a piaoquan_id and return what has been collected.
                    return processed_list
            return processed_list
        except Exception:
            return processed_list

    @classmethod
    def get_pwsrt_data(cls, feishu_id, feishu_sheet, video_ending):
        """Get the matching video ending (片尾) and its SRT subtitle."""
        data = Feishu.get_values_batch(feishu_id, feishu_sheet)
        for row in data[1:]:
            pw_mark = row[0]
            pw_id = row[1]
            pw_srt = row[2]
            if pw_id and pw_id != 'None':
                if pw_mark == video_ending:
                    number = {"pw_id": pw_id, "pw_srt": pw_srt}
                    return number
        return ''

    @classmethod
    def get_pzsrt_data(cls, feishu_id, feishu_sheet, video_share_name):
        """Get the matching fixed subtitle (固定字幕)."""
        data = Feishu.get_values_batch(feishu_id, feishu_sheet)
        for row in data[1:]:
            pz_mark = row[0]
            pz_zm = row[1]
            if pz_zm and pz_zm != 'None':
                if pz_mark == video_share_name:
                    return pz_zm
        return ''

    @classmethod
    def get_cookie_data(cls, feishu_id, cookie_sheet, channel):
        """Get the cookie configured for the given channel."""
        data = Feishu.get_values_batch(feishu_id, cookie_sheet)
        for row in data[1:]:
            channel_mask = row[0]
            cookie = row[1]
            if channel_mask == channel:
                return cookie
        return None  # no cookie configured for this channel
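

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file). It assumes that valid Feishu
    # credentials are configured for common.feishu_utils.Feishu and that the summary
    # sheet ("summary", "bc154d") is reachable.
    owners = Material.feishu_list()
    for owner in owners:
        tasks = Material.get_task_data(owner["feishu_id"], owner["feishu_sheet"])
        print(owner["name"], len(tasks), "tasks")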