feishu_form.py

# -*- coding: utf-8 -*-
import json
import os
import random
import sys
import datetime
import itertools
from collections import defaultdict

from common.sql_help import sqlCollect

sys.path.append(os.getcwd())

from common.feishu_utils import Feishu


class Material():
  13. """
  14. 获取汇总表所有负责人列表
  15. """
  16. @classmethod
  17. def feishu_list(cls):
  18. summary = Feishu.get_values_batch("summary", "bc154d")
  19. list = []
  20. for row in summary[1:]:
  21. mark = row[0]
  22. name = row[1]
  23. feishu_id = row[3]
  24. feishu_sheet = row[4]
  25. cookie_sheet = row[5]
  26. number = {"mark": mark, "name": name, "feishu_id": feishu_id, "feishu_sheet": feishu_sheet, "cookie_sheet": cookie_sheet}
  27. if mark:
  28. list.append(number)
  29. else:
  30. return list
  31. return list

    @classmethod
    def get_sph_user(cls):
        data = Feishu.get_values_batch("GPbhsb5vchAN3qtzot6cu1f0n1c", "cc7ef0")
        user_data_list = []
        try:
            for row in data[1:]:
                users = str(row[2])
                if users and users != 'None':
                    if ',' in users:
                        user_list = users.split(',')
                    else:
                        user_list = [users]
                    for user in user_list:
                        status = sqlCollect.sph_channel_user(user)
                        if status:
                            user_data_list.append(user)
                else:
                    return user_data_list
            return user_data_list
        except Exception:
            return user_data_list
  53. """
  54. list 重新排序
  55. """
  56. @classmethod
  57. def sort_keyword_data(cls, data):
  58. # 解析 JSON 数据
  59. data = [json.loads(item) for item in data]
  60. # 根据 keyword_name 进行分组
  61. groups = defaultdict(list)
  62. for item in data:
  63. groups[item['keyword_name']].append(item)
  64. # 获取所有唯一的 keyword_name
  65. priority_names = list(groups.keys())
  66. # 对每个分组内的数据按 first_category 进行随机打乱
  67. for name in priority_names:
  68. random.shuffle(groups[name]) # 打乱每个分组内的数据顺序
  69. # 轮流排序每个分组的数据,保持 keyword_name 的顺序
  70. result = []
  71. max_length = max(len(groups[name]) for name in priority_names)
  72. for i in range(max_length):
  73. for name in priority_names:
  74. if i < len(groups[name]):
  75. result.append(groups[name][i])
  76. # 将结果转回 JSON 字符串列表
  77. sorted_list = [json.dumps(item, ensure_ascii=False) for item in result]
  78. return sorted_list
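
    # Worked example for sort_keyword_data (hypothetical data): if owner A has
    # tasks [a1, a2, a3] and owner B has [b1, b2] after the shuffle, the
    # round-robin pass yields [a1, b1, a2, b2, a3], so no single keyword_name
    # monopolises the head of the returned list.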
  79. """
  80. 获取搜索任务
  81. """
  82. @classmethod
  83. def get_keyword_data(cls, feishu_id, feishu_sheet):
  84. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  85. processed_list = []
  86. try:
  87. for row in data[1:]:
  88. channel_id = row[1]
  89. channel_url = str(row[2])
  90. piaoquan_id = row[3]
  91. number = row[4]
  92. video_share = row[5]
  93. video_ending = row[6]
  94. voice = row[7]
  95. crop_tool = row[8]
  96. gg_duration = row[9]
  97. title = row[10]
  98. if channel_url == None or channel_url == "" or len(channel_url) == 0:
  99. continue
  100. first_category = row[12] # 一级品类
  101. secondary_category = row[13] # 二级品类
  102. def count_items(item, separator):
  103. if item and item not in {'None', ''}:
  104. return len(item.split(separator))
  105. return 0
  106. video_id_total = count_items(str(channel_url), ',')
  107. title_total = count_items(str(title), '/')
  108. video_ending_total = count_items(str(video_ending), ',')
  109. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool, gg_duration, title_total, first_category]
  110. filtered_values = [str(value) for value in values if value is not None and value != "None"]
  111. task_mark = "_".join(map(str, filtered_values))
  112. keyword_sort = row[14] # 排序条件
  113. keyword_time = row[15] # 发布时间
  114. keyword_duration = row[16] # 视频时长
  115. keyword_name = row[17] # 负责人
  116. keyword_sort_list = keyword_sort.split(',')
  117. keyword_duration_list = keyword_duration.split(',')
  118. keyword_time_list = keyword_time.split(',')
  119. combinations = list(itertools.product(keyword_sort_list, keyword_time_list, keyword_duration_list))
  120. if ',' in channel_url:
  121. channel_url = channel_url.split(',')
  122. else:
  123. channel_url = [channel_url]
  124. for user in channel_url:
  125. for combo in combinations:
  126. number_dict = {
  127. "task_mark": task_mark,
  128. "channel_id": channel_id,
  129. "channel_url": user,
  130. "piaoquan_id": piaoquan_id,
  131. "number": number,
  132. "title": title,
  133. "video_share": video_share,
  134. "video_ending": video_ending,
  135. "crop_total": crop_tool,
  136. "gg_duration_total": gg_duration,
  137. "voice": voice,
  138. "first_category": first_category, # 一级品类
  139. "secondary_category": secondary_category, # 二级品类
  140. "combo": combo, # 搜索条件
  141. "keyword_name": keyword_name # 品类负责人
  142. }
  143. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  144. except:
  145. processed_list = cls.sort_keyword_data(processed_list)
  146. return processed_list
  147. processed_list = cls.sort_keyword_data(processed_list)
  148. return processed_list
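
    # Note on get_keyword_data: each task it emits is a JSON string whose "combo"
    # field is the (sort condition, publish time, duration) tuple produced by
    # itertools.product, so one spreadsheet row fans out into one task per
    # channel_url per combination before sort_keyword_data interleaves them.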
  149. """
  150. 获取品类对应负责人任务明细
  151. """
  152. @classmethod
  153. def get_pl_task_data(cls, feishu_id, feishu_sheet):
  154. data = Feishu.get_values_batch( feishu_id, feishu_sheet )
  155. processed_list = []
  156. try:
  157. for row in data[1:]:
  158. channel_id = row[1]
  159. channel_url = str( row[2] )
  160. piaoquan_id = row[3]
  161. number = row[4]
  162. video_share = row[5]
  163. video_ending = row[6]
  164. voice = row[7]
  165. crop_tool = row[8]
  166. gg_duration = row[9]
  167. title = row[10]
  168. if channel_url == None or channel_url == "" or len( channel_url ) == 0:
  169. continue
  170. try:
  171. ls_number = int( row[11] )
  172. except:
  173. ls_number = None
  174. first_category = row[12]
  175. name = row[13]
  176. def count_items(item, separator):
  177. if item and item not in {'None', ''}:
  178. return len( item.split( separator ) )
  179. return 0
  180. video_id_total = count_items( str( channel_url ), ',' )
  181. title_total = count_items( str( title ), '/' )
  182. video_ending_total = count_items( str( video_ending ), ',' )
  183. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool,
  184. gg_duration, title_total]
  185. filtered_values = [str( value ) for value in values if value is not None and value != "None"]
  186. task_mark = "_".join( map( str, filtered_values ) )
  187. if piaoquan_id and piaoquan_id not in {'None', ''}:
  188. if ',' in channel_url:
  189. channel_url = channel_url.split( ',' )
  190. else:
  191. channel_url = [channel_url]
  192. for user in channel_url:
  193. number_dict = {
  194. "task_mark": task_mark,
  195. "channel_id": channel_id,
  196. "channel_url": user,
  197. "piaoquan_id": piaoquan_id,
  198. "number": number,
  199. "title": title,
  200. "video_share": video_share,
  201. "video_ending": video_ending,
  202. "crop_total": crop_tool,
  203. "gg_duration_total": gg_duration,
  204. "voice": voice,
  205. "first_category": first_category, # 一级品类
  206. "keyword_name":name
  207. }
  208. processed_list.append( json.dumps( number_dict, ensure_ascii=False ) )
  209. if channel_id == "抖音" or channel_id == "快手" or channel_id == "视频号":
  210. if ls_number and ls_number not in {'None', ''}:
  211. if channel_id == "抖音":
  212. new_channel_id = "抖音历史"
  213. if channel_id == "快手":
  214. new_channel_id = "快手历史"
  215. if channel_id == "视频号":
  216. new_channel_id = "视频号历史"
  217. values1 = [new_channel_id, video_id_total, piaoquan_id, video_share, video_ending_total,
  218. crop_tool,
  219. gg_duration, title_total]
  220. filtered_values1 = [str( value ) for value in values1 if
  221. value is not None and value != "None"]
  222. task_mark1 = "_".join( map( str, filtered_values1 ) )
  223. number_dict = {
  224. "task_mark": task_mark1,
  225. "channel_id": new_channel_id,
  226. "channel_url": user,
  227. "piaoquan_id": piaoquan_id,
  228. "number": ls_number,
  229. "title": title,
  230. "video_share": video_share,
  231. "video_ending": video_ending,
  232. "crop_total": crop_tool,
  233. "gg_duration_total": gg_duration,
  234. "voice": voice,
  235. "first_category": first_category, # 一级品类
  236. "keyword_name": name
  237. }
  238. processed_list.append( json.dumps( number_dict, ensure_ascii=False ) )
  239. else:
  240. processed_list = cls.sort_keyword_data(processed_list)
  241. return processed_list
  242. processed_list = cls.sort_keyword_data(processed_list)
  243. return processed_list
  244. except:
  245. processed_list = cls.sort_keyword_data(processed_list)
  246. return processed_list
  247. """
  248. 获取对应负责人任务明细
  249. """
  250. @classmethod
  251. def get_task_data(cls, feishu_id, feishu_sheet):
  252. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  253. processed_list = []
  254. try:
  255. for row in data[1:]:
  256. channel_id = row[1]
  257. channel_url = str(row[2])
  258. piaoquan_id = row[3]
  259. number = row[4]
  260. video_share = row[5]
  261. video_ending = row[6]
  262. voice = row[7]
  263. crop_tool = row[8]
  264. gg_duration = row[9]
  265. title = row[10]
  266. if channel_url == None or channel_url == "" or len(channel_url) == 0:
  267. continue
  268. try:
  269. ls_number = int(row[11])
  270. except:
  271. ls_number = None
  272. def count_items(item, separator):
  273. if item and item not in {'None', ''}:
  274. return len(item.split(separator))
  275. return 0
  276. video_id_total = count_items(str(channel_url), ',')
  277. title_total = count_items(str(title), '/')
  278. video_ending_total = count_items(str(video_ending), ',')
  279. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool, gg_duration, title_total]
  280. filtered_values = [str(value) for value in values if value is not None and value != "None"]
  281. task_mark = "_".join(map(str, filtered_values))
  282. if piaoquan_id and piaoquan_id not in {'None', ''}:
  283. if ',' in channel_url:
  284. channel_url = channel_url.split(',')
  285. else:
  286. channel_url = [channel_url]
  287. for user in channel_url:
  288. number_dict = {
  289. "task_mark": task_mark,
  290. "channel_id": channel_id,
  291. "channel_url": user,
  292. "piaoquan_id": piaoquan_id,
  293. "number": number,
  294. "title": title,
  295. "video_share": video_share,
  296. "video_ending": video_ending,
  297. "crop_total": crop_tool,
  298. "gg_duration_total": gg_duration,
  299. "voice": voice
  300. }
  301. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  302. if channel_id == "抖音" or channel_id == "快手" or channel_id == "视频号":
  303. if ls_number and ls_number not in {'None', ''}:
  304. if channel_id == "抖音":
  305. new_channel_id = "抖音历史"
  306. if channel_id == "快手":
  307. new_channel_id = "快手历史"
  308. if channel_id == "视频号":
  309. new_channel_id = "视频号历史"
  310. values1 = [new_channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool,
  311. gg_duration, title_total]
  312. filtered_values1 = [str(value) for value in values1 if value is not None and value != "None"]
  313. task_mark1 = "_".join(map(str, filtered_values1))
  314. number_dict = {
  315. "task_mark": task_mark1,
  316. "channel_id": new_channel_id,
  317. "channel_url": user,
  318. "piaoquan_id": piaoquan_id,
  319. "number": ls_number,
  320. "title": title,
  321. "video_share": video_share,
  322. "video_ending": video_ending,
  323. "crop_total": crop_tool,
  324. "gg_duration_total": gg_duration,
  325. "voice": voice
  326. }
  327. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  328. else:
  329. return processed_list
  330. return processed_list
  331. except:
  332. return processed_list
  333. """
  334. 获取对应片尾+srt
  335. """
  336. @classmethod
  337. def get_pwsrt_data(cls, feishu_id, feishu_sheet, video_ending):
  338. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  339. for row in data[1:]:
  340. pw_mark = row[0]
  341. pw_id = row[1]
  342. pw_srt = row[2]
  343. if pw_id != 'None' and pw_id != '' and pw_id != None:
  344. if pw_mark == video_ending:
  345. number = {"pw_id": pw_id, "pw_srt": pw_srt}
  346. return number
  347. return ''
  348. """
  349. 获取对应固定字幕
  350. """
  351. @classmethod
  352. def get_pzsrt_data(cls, feishu_id, feishu_sheet, video_share_name):
  353. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  354. for row in data[1:]:
  355. pz_mark = row[0]
  356. pz_zm = row[1]
  357. if pz_zm != 'None' and pz_zm != '' and pz_zm != None:
  358. if pz_mark == video_share_name:
  359. return pz_zm
  360. return ''
  361. """
  362. 获取 cookie 信息
  363. """
  364. @classmethod
  365. def get_cookie_data(cls, feishu_id, cookie_sheet, channel):
  366. data = Feishu.get_values_batch(feishu_id, cookie_sheet)
  367. for row in data[1:]:
  368. channel_mask = row[0]
  369. cookie = row[1]
  370. if channel_mask == channel:
  371. return cookie
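

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the crawler pipeline):
# walk the owner list from the summary sheet and build each owner's task queue.
# Assumes the summary sheet and the per-owner sheets it references are reachable
# with the current Feishu credentials.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    for owner in Material.feishu_list():
        tasks = Material.get_task_data(owner["feishu_id"], owner["feishu_sheet"])
        print(owner["name"], len(tasks))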