# feishu_form.py
  1. # -*- coding: utf-8 -*-
  2. import json
  3. import os
  4. import random
  5. import sys
  6. import datetime
  7. import itertools
  8. from collections import defaultdict
  9. from common.sql_help import sqlCollect
  10. sys.path.append(os.getcwd())
  11. from common.feishu_utils import Feishu
  12. class Material():
  13. """
  14. 获取汇总表所有负责人列表
  15. """
  16. @classmethod
  17. def feishu_list(cls):
  18. summary = Feishu.get_values_batch("summary", "bc154d")
  19. list = []
  20. for row in summary[1:]:
  21. mark = row[0]
  22. name = row[1]
  23. feishu_id = row[3]
  24. feishu_sheet = row[4]
  25. cookie_sheet = row[5]
  26. number = {"mark": mark, "name": name, "feishu_id": feishu_id, "feishu_sheet": feishu_sheet, "cookie_sheet": cookie_sheet}
  27. if mark:
  28. list.append(number)
  29. else:
  30. return list
  31. return list
  32. @classmethod
  33. def get_sph_user(cls):
  34. data = Feishu.get_values_batch("GPbhsb5vchAN3qtzot6cu1f0n1c", "cc7ef0")
  35. user_data_list = []
  36. try:
  37. for row in data[1:]:
  38. users = str(row[2])
  39. if users and users != 'None':
  40. if ',' in users:
  41. user_list = users.split(',')
  42. else:
  43. user_list = [users]
  44. for user in user_list:
  45. status = sqlCollect.sph_channel_user(user)
  46. if status:
  47. user_data_list.append(user)
  48. else:
  49. return user_data_list
  50. return user_data_list
  51. except:
  52. return user_data_list
  53. """
  54. list 重新排序
  55. """
  56. @classmethod
  57. def sort_keyword_data(cls, data):
  58. # 解析 JSON 数据
  59. data = [json.loads(item) for item in data]
  60. # 根据 keyword_name 进行分组
  61. groups = defaultdict(list)
  62. for item in data:
  63. groups[item['keyword_name']].append(item)
  64. # 获取所有唯一的 keyword_name
  65. priority_names = list(groups.keys())
  66. # 对每个分组内的数据按 first_category 进行随机打乱
  67. for name in priority_names:
  68. random.shuffle(groups[name]) # 打乱每个分组内的数据顺序
  69. # 轮流排序每个分组的数据,保持 keyword_name 的顺序
  70. result = []
  71. max_length = max(len(groups[name]) for name in priority_names)
  72. for i in range(max_length):
  73. for name in priority_names:
  74. if i < len(groups[name]):
  75. result.append(groups[name][i])
  76. # 将结果转回 JSON 字符串列表
  77. sorted_list = [json.dumps(item, ensure_ascii=False) for item in result]
  78. return sorted_list
  79. """
  80. 获取搜索任务
  81. """
  82. @classmethod
  83. def get_keyword_data(cls, feishu_id, feishu_sheet):
  84. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  85. processed_list = []
  86. try:
  87. for row in data[1:]:
  88. channel_id = row[1]
  89. channel_url = str(row[2])
  90. tags = row[3]
  91. piaoquan_id = row[4]
  92. number = row[5]
  93. limit_number = row[6]
  94. video_share = row[7]
  95. video_ending = row[8]
  96. voice = row[9]
  97. crop_tool = row[10]
  98. gg_duration = row[11]
  99. title = row[12]
  100. if channel_url == None or channel_url == "" or len(channel_url) == 0:
  101. continue
  102. first_category = row[14] # 一级品类
  103. secondary_category = row[15] # 二级品类
  104. def count_items(item, separator):
  105. if item and item not in {'None', ''}:
  106. return len(item.split(separator))
  107. return 0
  108. video_id_total = count_items(str(channel_url), ',')
  109. title_total = count_items(str(title), '/')
  110. video_ending_total = count_items(str(video_ending), ',')
  111. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool, gg_duration, title_total, first_category]
  112. filtered_values = [str(value) for value in values if value is not None and value != "None"]
  113. task_mark = "_".join(map(str, filtered_values))
  114. keyword_sort = row[16] # 排序条件
  115. keyword_time = row[17] # 发布时间
  116. keyword_duration = row[18] # 视频时长
  117. keyword_name = row[19] # 负责人
  118. keyword_sort_list = keyword_sort.split(',')
  119. keyword_duration_list = keyword_duration.split(',')
  120. keyword_time_list = keyword_time.split(',')
  121. combinations = list(itertools.product(keyword_sort_list, keyword_time_list, keyword_duration_list))
  122. if ',' in channel_url:
  123. channel_url = channel_url.split(',')
  124. else:
  125. channel_url = [channel_url]
  126. for user in channel_url:
  127. for combo in combinations:
  128. number_dict = {
  129. "task_mark": task_mark,
  130. "channel_id": channel_id,
  131. "channel_url": user,
  132. "piaoquan_id": piaoquan_id,
  133. "number": number,
  134. "title": title,
  135. "video_share": video_share,
  136. "video_ending": video_ending,
  137. "crop_total": crop_tool,
  138. "gg_duration_total": gg_duration,
  139. "voice": voice,
  140. "first_category": first_category, # 一级品类
  141. "secondary_category": secondary_category, # 二级品类
  142. "combo": combo, # 搜索条件
  143. "keyword_name": keyword_name, # 品类负责人
  144. "tags": tags,
  145. "limit_number":limit_number
  146. }
  147. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  148. except:
  149. processed_list = cls.sort_keyword_data(processed_list)
  150. return processed_list
  151. processed_list = cls.sort_keyword_data(processed_list)
  152. return processed_list
  153. """
  154. 获取品类对应负责人任务明细
  155. """
  156. @classmethod
  157. def get_pl_task_data(cls, feishu_id, feishu_sheet):
  158. data = Feishu.get_values_batch( feishu_id, feishu_sheet )
  159. processed_list = []
  160. try:
  161. for row in data[1:]:
  162. channel_id = row[1]
  163. channel_url = str( row[2] )
  164. tags = row[3]
  165. piaoquan_id = row[4]
  166. number = row[5]
  167. limit_number = row[6]
  168. video_share = row[7]
  169. video_ending = row[8]
  170. voice = row[9]
  171. crop_tool = row[10]
  172. gg_duration = row[11]
  173. title = row[12]
  174. if channel_url == None or channel_url == "" or len( channel_url ) == 0:
  175. continue
  176. try:
  177. ls_number = int( row[13] )
  178. except:
  179. ls_number = None
  180. first_category = row[14]
  181. name = row[15]
  182. def count_items(item, separator):
  183. if item and item not in {'None', ''}:
  184. return len( item.split( separator ) )
  185. return 0
  186. video_id_total = count_items( str( channel_url ), ',' )
  187. title_total = count_items( str( title ), '/' )
  188. video_ending_total = count_items( str( video_ending ), ',' )
  189. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool,
  190. gg_duration, title_total]
  191. filtered_values = [str( value ) for value in values if value is not None and value != "None"]
  192. task_mark = "_".join( map( str, filtered_values ) )
  193. if piaoquan_id and piaoquan_id not in {'None', ''}:
  194. if ',' in channel_url:
  195. channel_url = channel_url.split( ',' )
  196. else:
  197. channel_url = [channel_url]
  198. for user in channel_url:
  199. number_dict = {
  200. "task_mark": task_mark,
  201. "channel_id": channel_id,
  202. "channel_url": user,
  203. "piaoquan_id": piaoquan_id,
  204. "number": number,
  205. "title": title,
  206. "video_share": video_share,
  207. "video_ending": video_ending,
  208. "crop_total": crop_tool,
  209. "gg_duration_total": gg_duration,
  210. "voice": voice,
  211. "first_category": first_category, # 一级品类
  212. "keyword_name":name,
  213. "tags": tags,
  214. "limit_number":limit_number
  215. }
  216. processed_list.append( json.dumps( number_dict, ensure_ascii=False ) )
  217. if channel_id == "抖音" or channel_id == "快手" or channel_id == "视频号":
  218. if ls_number and ls_number not in {'None', ''}:
  219. if channel_id == "抖音":
  220. new_channel_id = "抖音历史"
  221. if channel_id == "快手":
  222. new_channel_id = "快手历史"
  223. if channel_id == "视频号":
  224. new_channel_id = "视频号历史"
  225. # values1 = [new_channel_id, video_id_total, piaoquan_id, video_share, video_ending_total,
  226. # crop_tool,
  227. # gg_duration, title_total]
  228. # filtered_values1 = [str( value ) for value in values1 if
  229. # value is not None and value != "None"]
  230. # task_mark1 = "_".join( map( str, filtered_values1 ) )
  231. number_dict = {
  232. "task_mark": task_mark,
  233. "channel_id": new_channel_id,
  234. "channel_url": user,
  235. "piaoquan_id": piaoquan_id,
  236. "number": ls_number,
  237. "title": title,
  238. "video_share": video_share,
  239. "video_ending": video_ending,
  240. "crop_total": crop_tool,
  241. "gg_duration_total": gg_duration,
  242. "voice": voice,
  243. "first_category": first_category, # 一级品类
  244. "keyword_name": name,
  245. "tags": tags,
  246. "limit_number":limit_number
  247. }
  248. processed_list.append( json.dumps( number_dict, ensure_ascii=False ) )
  249. else:
  250. processed_list = cls.sort_keyword_data(processed_list)
  251. return processed_list
  252. processed_list = cls.sort_keyword_data(processed_list)
  253. return processed_list
  254. except:
  255. processed_list = cls.sort_keyword_data(processed_list)
  256. return processed_list
  257. """
  258. 获取对应负责人任务明细
  259. """
  260. @classmethod
  261. def get_task_data(cls, feishu_id, feishu_sheet):
  262. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  263. processed_list = []
  264. try:
  265. for row in data[1:]:
  266. channel_id = row[1]
  267. channel_url = str(row[2])
  268. tags = row[3]
  269. piaoquan_id = row[4]
  270. number = row[5]
  271. limit_number = row[6]
  272. video_share = row[7]
  273. video_ending = row[8]
  274. voice = row[9]
  275. crop_tool = row[10]
  276. gg_duration = row[11]
  277. title = row[12]
  278. if channel_url == None or channel_url == "" or len(channel_url) == 0:
  279. continue
  280. try:
  281. ls_number = int(row[13])
  282. except:
  283. ls_number = None
  284. def count_items(item, separator):
  285. if item and item not in {'None', ''}:
  286. return len(item.split(separator))
  287. return 0
  288. video_id_total = count_items(str(channel_url), ',')
  289. title_total = count_items(str(title), '/')
  290. video_ending_total = count_items(str(video_ending), ',')
  291. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool, gg_duration, title_total]
  292. filtered_values = [str(value) for value in values if value is not None and value != "None"]
  293. task_mark = "_".join(map(str, filtered_values))
  294. if piaoquan_id and piaoquan_id not in {'None', ''}:
  295. if ',' in channel_url:
  296. channel_url = channel_url.split(',')
  297. else:
  298. channel_url = [channel_url]
  299. for user in channel_url:
  300. number_dict = {
  301. "task_mark": task_mark,
  302. "channel_id": channel_id,
  303. "channel_url": user,
  304. "piaoquan_id": piaoquan_id,
  305. "number": number,
  306. "title": title,
  307. "video_share": video_share,
  308. "video_ending": video_ending,
  309. "crop_total": crop_tool,
  310. "gg_duration_total": gg_duration,
  311. "voice": voice,
  312. "tags":tags,
  313. "limit_number":limit_number
  314. }
  315. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  316. if channel_id == "抖音" or channel_id == "快手" or channel_id == "视频号":
  317. if ls_number and ls_number not in {'None', ''}:
  318. if channel_id == "抖音":
  319. new_channel_id = "抖音历史"
  320. if channel_id == "快手":
  321. new_channel_id = "快手历史"
  322. if channel_id == "视频号":
  323. new_channel_id = "视频号历史"
  324. # values1 = [new_channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool,
  325. # gg_duration, title_total]
  326. # filtered_values1 = [str(value) for value in values1 if value is not None and value != "None"]
  327. # task_mark1 = "_".join(map(str, filtered_values1))
  328. number_dict = {
  329. "task_mark": task_mark,
  330. "channel_id": new_channel_id,
  331. "channel_url": user,
  332. "piaoquan_id": piaoquan_id,
  333. "number": ls_number,
  334. "title": title,
  335. "video_share": video_share,
  336. "video_ending": video_ending,
  337. "crop_total": crop_tool,
  338. "gg_duration_total": gg_duration,
  339. "voice": voice,
  340. "tags": tags,
  341. "limit_number":limit_number
  342. }
  343. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  344. else:
  345. return processed_list
  346. return processed_list
  347. except:
  348. return processed_list
  349. """
  350. 获取对应片尾+srt
  351. """
  352. @classmethod
  353. def get_pwsrt_data(cls, feishu_id, feishu_sheet, video_ending):
  354. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  355. for row in data[1:]:
  356. pw_mark = row[0]
  357. pw_id = row[1]
  358. pw_srt = row[2]
  359. if pw_id != 'None' and pw_id != '' and pw_id != None:
  360. if pw_mark == video_ending:
  361. number = {"pw_id": pw_id, "pw_srt": pw_srt}
  362. return number
  363. return ''
  364. """
  365. 获取对应固定字幕
  366. """
  367. @classmethod
  368. def get_pzsrt_data(cls, feishu_id, feishu_sheet, video_share_name):
  369. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  370. for row in data[1:]:
  371. pz_mark = row[0]
  372. pz_zm = row[1]
  373. if pz_zm != 'None' and pz_zm != '' and pz_zm != None:
  374. if pz_mark == video_share_name:
  375. return pz_zm
  376. return ''
  377. """
  378. 获取 cookie 信息
  379. """
  380. @classmethod
  381. def get_cookie_data(cls, feishu_id, cookie_sheet, channel):
  382. data = Feishu.get_values_batch(feishu_id, cookie_sheet)
  383. for row in data[1:]:
  384. channel_mask = row[0]
  385. cookie = row[1]
  386. if channel_mask == channel:
  387. return cookie