# feishu_form.py
# -*- coding: utf-8 -*-
import json
import os
import random
import sys
import datetime
from common.sql_help import sqlCollect
sys.path.append(os.getcwd())
from common.feishu_utils import Feishu
  10. class Material():
  11. """
  12. 获取汇总表所有负责人列表
  13. """
  14. @classmethod
  15. def feishu_list(cls):
  16. summary = Feishu.get_values_batch("summary", "bc154d")
  17. list = []
  18. for row in summary[1:]:
  19. mark = row[0]
  20. name = row[1]
  21. feishu_id = row[3]
  22. feishu_sheet = row[4]
  23. cookie_sheet = row[5]
  24. number = {"mark": mark, "name": name, "feishu_id": feishu_id, "feishu_sheet": feishu_sheet, "cookie_sheet": cookie_sheet}
  25. if mark:
  26. list.append(number)
  27. else:
  28. return list
  29. return list
  30. @classmethod
  31. def get_sph_user(cls):
  32. data = Feishu.get_values_batch("GPbhsb5vchAN3qtzot6cu1f0n1c", "cc7ef0")
  33. user_data_list = []
  34. try:
  35. for row in data[1:]:
  36. users = str(row[2])
  37. if users and users != 'None':
  38. if ',' in users:
  39. user_list = users.split(',')
  40. else:
  41. user_list = [users]
  42. for user in user_list:
  43. status = sqlCollect.sph_channel_user(user)
  44. if status:
  45. user_data_list.append(user)
  46. else:
  47. return user_data_list
  48. return user_data_list
  49. except:
  50. return user_data_list
  51. """
  52. 获取对应负责人任务明细
  53. """
  54. @classmethod
  55. def get_task_data(cls, feishu_id, feishu_sheet):
  56. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  57. processed_list = []
  58. try:
  59. for row in data[1:]:
  60. channel_id = row[1]
  61. channel_url = str(row[2])
  62. piaoquan_id = row[3]
  63. number = row[4]
  64. video_share = row[5]
  65. video_ending = row[6]
  66. crop_tool = row[7]
  67. gg_duration = row[8]
  68. title = row[9]
  69. if channel_url == None or channel_url == "" or len(channel_url) == 0:
  70. continue
  71. try:
  72. ls_number = int(row[10])
  73. except:
  74. ls_number = None
  75. def count_items(item, separator):
  76. if item and item not in {'None', ''}:
  77. return len(item.split(separator))
  78. return 0
  79. video_id_total = count_items(str(channel_url), ',')
  80. title_total = count_items(str(title), '/')
  81. video_ending_total = count_items(str(video_ending), ',')
  82. values = [channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool, gg_duration, title_total]
  83. filtered_values = [str(value) for value in values if value is not None and value != "None"]
  84. task_mark = "_".join(map(str, filtered_values))
  85. if piaoquan_id and piaoquan_id not in {'None', ''}:
  86. if ',' in channel_url:
  87. channel_url = channel_url.split(',')
  88. else:
  89. channel_url = [channel_url]
  90. for user in channel_url:
  91. number_dict = {
  92. "task_mark": task_mark,
  93. "channel_id": channel_id,
  94. "channel_url": user,
  95. "piaoquan_id": piaoquan_id,
  96. "number": number,
  97. "title": title,
  98. "video_share": video_share,
  99. "video_ending": video_ending,
  100. "crop_total": crop_tool,
  101. "gg_duration_total": gg_duration,
  102. }
  103. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  104. if channel_id == "抖音" or channel_id == "快手" or channel_id == "视频号":
  105. if ls_number and ls_number not in {'None', ''}:
  106. if channel_id == "抖音":
  107. new_channel_id = "抖音历史"
  108. if channel_id == "快手":
  109. new_channel_id = "快手历史"
  110. if channel_id == "视频号":
  111. new_channel_id = "视频号历史"
  112. values1 = [new_channel_id, video_id_total, piaoquan_id, video_share, video_ending_total, crop_tool,
  113. gg_duration, title_total]
  114. filtered_values1 = [str(value) for value in values1 if value is not None and value != "None"]
  115. task_mark1 = "_".join(map(str, filtered_values1))
  116. number_dict = {
  117. "task_mark": task_mark1,
  118. "channel_id": new_channel_id,
  119. "channel_url": user,
  120. "piaoquan_id": piaoquan_id,
  121. "number": ls_number,
  122. "title": title,
  123. "video_share": video_share,
  124. "video_ending": video_ending,
  125. "crop_total": crop_tool,
  126. "gg_duration_total": gg_duration,
  127. }
  128. processed_list.append(json.dumps(number_dict, ensure_ascii=False))
  129. else:
  130. return processed_list
  131. return processed_list
  132. except:
  133. return processed_list
  134. """
  135. 获取对应片尾+srt
  136. """
  137. @classmethod
  138. def get_pwsrt_data(cls, feishu_id, feishu_sheet, video_ending):
  139. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  140. for row in data[1:]:
  141. pw_mark = row[0]
  142. pw_id = row[1]
  143. pw_srt = row[2]
  144. if pw_id != 'None' and pw_id != '' and pw_id != None:
  145. if pw_mark == video_ending:
  146. number = {"pw_id": pw_id, "pw_srt": pw_srt}
  147. return number
  148. return ''
  149. """
  150. 获取对应固定字幕
  151. """
  152. @classmethod
  153. def get_pzsrt_data(cls, feishu_id, feishu_sheet, video_share_name):
  154. data = Feishu.get_values_batch(feishu_id, feishu_sheet)
  155. for row in data[1:]:
  156. pz_mark = row[0]
  157. pz_zm = row[1]
  158. if pz_zm != 'None' and pz_zm != '' and pz_zm != None:
  159. if pz_mark == video_share_name:
  160. return pz_zm
  161. return ''
  162. """
  163. 获取 cookie 信息
  164. """
  165. @classmethod
  166. def get_cookie_data(cls, feishu_id, cookie_sheet, channel):
  167. data = Feishu.get_values_batch(feishu_id, cookie_sheet)
  168. for row in data[1:]:
  169. channel_mask = row[0]
  170. cookie = row[1]
  171. if channel_mask == channel:
  172. return cookie