feishu_form.py

# -*- coding: utf-8 -*-
import json
import os
import random
import sys
import datetime
import itertools
from collections import defaultdict

# Make sure the project root is importable before pulling in the common modules.
sys.path.append(os.getcwd())

from common.sql_help import sqlCollect
from common.feishu_utils import Feishu


class Material:
  13. """
  14. 获取汇总表所有负责人列表
  15. """
  16. @classmethod
  17. def feishu_list(cls):
  18. summary = Feishu.get_values_batch("summary", "bc154d")
  19. list = []
  20. for row in summary[1:]:
  21. mark = row[0]
  22. name = row[1]
  23. feishu_id = row[3]
  24. feishu_sheet = row[4]
  25. cookie_sheet = row[5]
  26. number = {"mark": mark, "name": name, "feishu_id": feishu_id, "feishu_sheet": feishu_sheet, "cookie_sheet": cookie_sheet}
  27. if mark:
  28. list.append(number)
  29. else:
  30. return list
  31. return list
    @classmethod
    def get_sph_user(cls):
        """Fetch users from the sheet, keeping only those for which sqlCollect.sph_channel_user returns a truthy status."""
        data = Feishu.get_values_batch("GPbhsb5vchAN3qtzot6cu1f0n1c", "cc7ef0")
        user_data_list = []
        try:
            for row in data[1:]:
                users = str(row[2])
                if users and users != 'None':
                    user_list = users.split(',') if ',' in users else [users]
                    for user in user_list:
                        status = sqlCollect.sph_channel_user(user)
                        if status:
                            user_data_list.append(user)
                else:
                    # The first empty cell is treated as the end of the sheet.
                    return user_data_list
            return user_data_list
        except Exception:
            return user_data_list

  53. """
  54. list 重新排序
  55. """
  56. @classmethod
  57. def sort_keyword_data(cls, data):
  58. # 解析 JSON 数据
  59. data = [json.loads(item) for item in data]
  60. # 根据 keyword_name 进行分组
  61. groups = defaultdict(list)
  62. for item in data:
  63. groups[item['keyword_name']].append(item)
  64. # 获取所有唯一的 keyword_name
  65. priority_names = list(groups.keys())
  66. # 对每个分组内的数据按 first_category 进行随机打乱
  67. for name in priority_names:
  68. random.shuffle(groups[name]) # 打乱每个分组内的数据顺序
  69. # 轮流排序每个分组的数据,保持 keyword_name 的顺序
  70. result = []
  71. max_length = max(len(groups[name]) for name in priority_names)
  72. for i in range(max_length):
  73. for name in priority_names:
  74. if i < len(groups[name]):
  75. result.append(groups[name][i])
  76. # 将结果转回 JSON 字符串列表
  77. sorted_list = [json.dumps(item, ensure_ascii=False) for item in result]
  78. return sorted_list
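    # Illustrative note (toy input, not real sheet data): for JSON items whose keyword_name
    # values are ["A", "A", "B"], sort_keyword_data returns them interleaved as A, B, A,
    # with the order inside each keyword_name group randomized by the shuffle above.
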
  79. """
  80. 获取搜索任务
  81. """
  82. @classmethod
  83. def get_keyword_data(cls, feishu_id, feishu_sheet):
  84. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  85. processed_list = []
  86. try:
  87. for row in data[1:]:
  88. channel_id = row[1]
  89. channel_url = str(row[2])
  90. tags = row[3]
  91. piaoquan_id = row[4]
  92. number = row[5]
  93. video_share = row[6]
  94. video_ending = row[7]
  95. voice = row[8]
  96. crop_tool = row[9]
  97. gg_duration = row[10]
  98. title = row[11]
  99. if channel_url == None or channel_url == "" or len(channel_url) == 0:
  100. continue
  101. first_category = row[13] # 一级品类
  102. secondary_category = row[14] # 二级品类
  103. def count_items(item, separator):
  104. if item and item not in {'None', ''}:
  105. return len(item.split(separator))
  106. return 0
  107. video_id_total = count_items(str(channel_url), ',')
  108. title_total = count_items(str(title), '/')
  109. video_ending_total = count_items(str(video_ending), ',')
  110. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool, gg_duration, title_total, first_category]
  111. filtered_values = [str(value) for value in values if value is not None and value != "None"]
  112. task_mark = "_".join(map(str, filtered_values))
  113. keyword_sort = row[15] # 排序条件
  114. keyword_time = row[16] # 发布时间
  115. keyword_duration = row[17] # 视频时长
  116. keyword_name = row[18] # 负责人
  117. keyword_sort_list = keyword_sort.split(',')
  118. keyword_duration_list = keyword_duration.split(',')
  119. keyword_time_list = keyword_time.split(',')
  120. combinations = list(itertools.product(keyword_sort_list, keyword_time_list, keyword_duration_list))
  121. if ',' in channel_url:
  122. channel_url = channel_url.split(',')
  123. else:
  124. channel_url = [channel_url]
  125. for user in channel_url:
  126. for combo in combinations:
  127. number_dict = {
  128. "task_mark": task_mark,
  129. "channel_id": channel_id,
  130. "channel_url": user,
  131. "piaoquan_id": piaoquan_id,
  132. "number": number,
  133. "title": title,
  134. "video_share": video_share,
  135. "video_ending": video_ending,
  136. "crop_total": crop_tool,
  137. "gg_duration_total": gg_duration,
  138. "voice": voice,
  139. "first_category": first_category, # 一级品类
  140. "secondary_category": secondary_category, # 二级品类
  141. "combo": combo, # 搜索条件
  142. "keyword_name": keyword_name, # 品类负责人
  143. "tags": tags
  144. }
  145. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  146. except:
  147. processed_list = cls.sort_keyword_data(processed_list)
  148. return processed_list
  149. processed_list = cls.sort_keyword_data(processed_list)
  150. return processed_list
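    # Illustrative note (hypothetical cell values): a row whose sort column holds two
    # comma-separated conditions, with one publish time and one duration, yields two
    # combinations via itertools.product, so each channel URL in that row produces two tasks.
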
  151. """
  152. 获取品类对应负责人任务明细
  153. """
  154. @classmethod
  155. def get_pl_task_data(cls, feishu_id, feishu_sheet):
  156. data = Feishu.get_values_batch( feishu_id, feishu_sheet )
  157. processed_list = []
  158. try:
  159. for row in data[1:]:
  160. channel_id = row[1]
  161. channel_url = str( row[2] )
  162. tags = row[3]
  163. piaoquan_id = row[4]
  164. number = row[5]
  165. video_share = row[6]
  166. video_ending = row[7]
  167. voice = row[8]
  168. crop_tool = row[9]
  169. gg_duration = row[10]
  170. title = row[11]
  171. if channel_url == None or channel_url == "" or len( channel_url ) == 0:
  172. continue
  173. try:
  174. ls_number = int( row[12] )
  175. except:
  176. ls_number = None
  177. first_category = row[13]
  178. name = row[14]
  179. def count_items(item, separator):
  180. if item and item not in {'None', ''}:
  181. return len( item.split( separator ) )
  182. return 0
  183. video_id_total = count_items( str( channel_url ), ',' )
  184. title_total = count_items( str( title ), '/' )
  185. video_ending_total = count_items( str( video_ending ), ',' )
  186. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool,
  187. gg_duration, title_total]
  188. filtered_values = [str( value ) for value in values if value is not None and value != "None"]
  189. task_mark = "_".join( map( str, filtered_values ) )
  190. if piaoquan_id and piaoquan_id not in {'None', ''}:
  191. if ',' in channel_url:
  192. channel_url = channel_url.split( ',' )
  193. else:
  194. channel_url = [channel_url]
  195. for user in channel_url:
  196. number_dict = {
  197. "task_mark": task_mark,
  198. "channel_id": channel_id,
  199. "channel_url": user,
  200. "piaoquan_id": piaoquan_id,
  201. "number": number,
  202. "title": title,
  203. "video_share": video_share,
  204. "video_ending": video_ending,
  205. "crop_total": crop_tool,
  206. "gg_duration_total": gg_duration,
  207. "voice": voice,
  208. "first_category": first_category, # 一级品类
  209. "keyword_name":name,
  210. "tags": tags
  211. }
  212. processed_list.append( json.dumps( number_dict, ensure_ascii=False ) )
  213. if channel_id == "抖音" or channel_id == "快手" or channel_id == "视频号":
  214. if ls_number and ls_number not in {'None', ''}:
  215. if channel_id == "抖音":
  216. new_channel_id = "抖音历史"
  217. if channel_id == "快手":
  218. new_channel_id = "快手历史"
  219. if channel_id == "视频号":
  220. new_channel_id = "视频号历史"
  221. values1 = [new_channel_id, video_id_total, piaoquan_id, video_share, video_ending_total,
  222. crop_tool,
  223. gg_duration, title_total]
  224. filtered_values1 = [str( value ) for value in values1 if
  225. value is not None and value != "None"]
  226. task_mark1 = "_".join( map( str, filtered_values1 ) )
  227. number_dict = {
  228. "task_mark": task_mark1,
  229. "channel_id": new_channel_id,
  230. "channel_url": user,
  231. "piaoquan_id": piaoquan_id,
  232. "number": ls_number,
  233. "title": title,
  234. "video_share": video_share,
  235. "video_ending": video_ending,
  236. "crop_total": crop_tool,
  237. "gg_duration_total": gg_duration,
  238. "voice": voice,
  239. "first_category": first_category, # 一级品类
  240. "keyword_name": name,
  241. "tags": tags
  242. }
  243. processed_list.append( json.dumps( number_dict, ensure_ascii=False ) )
  244. else:
  245. processed_list = cls.sort_keyword_data(processed_list)
  246. return processed_list
  247. processed_list = cls.sort_keyword_data(processed_list)
  248. return processed_list
  249. except:
  250. processed_list = cls.sort_keyword_data(processed_list)
  251. return processed_list
  252. """
  253. 获取对应负责人任务明细
  254. """
  255. @classmethod
  256. def get_task_data(cls, feishu_id, feishu_sheet):
  257. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  258. processed_list = []
  259. try:
  260. for row in data[1:]:
  261. channel_id = row[1]
  262. channel_url = str(row[2])
  263. tags = row[3]
  264. piaoquan_id = row[4]
  265. number = row[5]
  266. video_share = row[6]
  267. video_ending = row[7]
  268. voice = row[8]
  269. crop_tool = row[9]
  270. gg_duration = row[10]
  271. title = row[11]
  272. if channel_url == None or channel_url == "" or len(channel_url) == 0:
  273. continue
  274. try:
  275. ls_number = int(row[12])
  276. except:
  277. ls_number = None
  278. def count_items(item, separator):
  279. if item and item not in {'None', ''}:
  280. return len(item.split(separator))
  281. return 0
  282. video_id_total = count_items(str(channel_url), ',')
  283. title_total = count_items(str(title), '/')
  284. video_ending_total = count_items(str(video_ending), ',')
  285. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool, gg_duration, title_total]
  286. filtered_values = [str(value) for value in values if value is not None and value != "None"]
  287. task_mark = "_".join(map(str, filtered_values))
  288. if piaoquan_id and piaoquan_id not in {'None', ''}:
  289. if ',' in channel_url:
  290. channel_url = channel_url.split(',')
  291. else:
  292. channel_url = [channel_url]
  293. for user in channel_url:
  294. number_dict = {
  295. "task_mark": task_mark,
  296. "channel_id": channel_id,
  297. "channel_url": user,
  298. "piaoquan_id": piaoquan_id,
  299. "number": number,
  300. "title": title,
  301. "video_share": video_share,
  302. "video_ending": video_ending,
  303. "crop_total": crop_tool,
  304. "gg_duration_total": gg_duration,
  305. "voice": voice,
  306. "tags":tags
  307. }
  308. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  309. if channel_id == "抖音" or channel_id == "快手" or channel_id == "视频号":
  310. if ls_number and ls_number not in {'None', ''}:
  311. if channel_id == "抖音":
  312. new_channel_id = "抖音历史"
  313. if channel_id == "快手":
  314. new_channel_id = "快手历史"
  315. if channel_id == "视频号":
  316. new_channel_id = "视频号历史"
  317. values1 = [new_channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool,
  318. gg_duration, title_total]
  319. filtered_values1 = [str(value) for value in values1 if value is not None and value != "None"]
  320. task_mark1 = "_".join(map(str, filtered_values1))
  321. number_dict = {
  322. "task_mark": task_mark1,
  323. "channel_id": new_channel_id,
  324. "channel_url": user,
  325. "piaoquan_id": piaoquan_id,
  326. "number": ls_number,
  327. "title": title,
  328. "video_share": video_share,
  329. "video_ending": video_ending,
  330. "crop_total": crop_tool,
  331. "gg_duration_total": gg_duration,
  332. "voice": voice,
  333. "tags": tags
  334. }
  335. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  336. else:
  337. return processed_list
  338. return processed_list
  339. except:
  340. return processed_list
  341. """
  342. 获取对应片尾+srt
  343. """
  344. @classmethod
  345. def get_pwsrt_data(cls, feishu_id, feishu_sheet, video_ending):
  346. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  347. for row in data[1:]:
  348. pw_mark = row[0]
  349. pw_id = row[1]
  350. pw_srt = row[2]
  351. if pw_id != 'None' and pw_id != '' and pw_id != None:
  352. if pw_mark == video_ending:
  353. number = {"pw_id": pw_id, "pw_srt": pw_srt}
  354. return number
  355. return ''
  356. """
  357. 获取对应固定字幕
  358. """
  359. @classmethod
  360. def get_pzsrt_data(cls, feishu_id, feishu_sheet, video_share_name):
  361. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  362. for row in data[1:]:
  363. pz_mark = row[0]
  364. pz_zm = row[1]
  365. if pz_zm != 'None' and pz_zm != '' and pz_zm != None:
  366. if pz_mark == video_share_name:
  367. return pz_zm
  368. return ''
  369. """
  370. 获取 cookie 信息
  371. """
  372. @classmethod
  373. def get_cookie_data(cls, feishu_id, cookie_sheet, channel):
  374. data = Feishu.get_values_batch(feishu_id, cookie_sheet)
  375. for row in data[1:]:
  376. channel_mask = row[0]
  377. cookie = row[1]
  378. if channel_mask == channel:
  379. return cookie
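
# Minimal local sketch of the round-robin ordering, using synthetic data instead of a real
# Feishu sheet. The owner names and task marks below are made up for illustration only;
# only sort_keyword_data is exercised, so no Feishu or database access is needed.
if __name__ == "__main__":
    sample_tasks = [
        json.dumps({"keyword_name": "owner_a", "task_mark": "a1"}, ensure_ascii=False),
        json.dumps({"keyword_name": "owner_a", "task_mark": "a2"}, ensure_ascii=False),
        json.dumps({"keyword_name": "owner_b", "task_mark": "b1"}, ensure_ascii=False),
    ]
    # Expected: owner_a and owner_b tasks interleaved, with owner_a's two tasks in random order.
    for task in Material.sort_keyword_data(sample_tasks):
        print(task)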