upLevel.py

  1. """
  2. @author: luojunhui
  3. """
  4. import json
  5. from pandas import DataFrame
  6. from datetime import datetime
  7. from applications import longArticlesMySQL
  8. lam = longArticlesMySQL()
  9. class articleLevelUp(object):
  10. """
  11. 文章晋级
  12. """
    # Chinese display names for the DataFrame columns, matching the SELECT order in getBaseData()
    columns = [
        "位置",          # position
        "粉丝量",        # fan count
        "阅读量",        # view count
        "平均阅读量",    # average view count
        "头条阅读量",    # headline view count
        "头条平均阅读量",  # headline average view count
        "阅读均值倍数",  # read-mean multiple
        "阅读率",        # read rate
        "小程序打开率",  # mini-program open rate
        "T+0裂变率",     # T+0 fission rate
        "标题",          # title
        "链接"           # link
    ]
    # Percentile snapshot of the key metrics for articles in positions 3-8 (used by upLevel38To2)
    statMapThreeToEight = {
        "阅读均值倍数": {
            "mean": 1.1388723507368606,
            "max": 62.50000000000001,
            "min": 0.0,
            "median": 0.8890469416785206,
            "75%": 1.2617516081147946,
            "80%": 1.37797320398902,
            "90%": 1.8733429945338946,
            "95%": 2.6455874825730517,
            "99%": 6.252251764489181
        },
        "阅读率": {
            "mean": 0.0006051220910642054,
            "max": 0.06252537555826228,
            "min": 0.0,
            "median": 0.0002241206067691894,
            "75%": 0.0005117154674215644,
            "80%": 0.0006449975188817015,
            "90%": 0.001255232384471895,
            "95%": 0.002233845658277497,
            "99%": 0.00633843067255787
        },
        "小程序打开率": {
            "mean": 0.062085135696479415,
            "max": 1.0,
            "min": 0.0,
            "median": 0.045454545454545456,
            "75%": 0.08695652173913043,
            "80%": 0.1,
            "90%": 0.14285714285714285,
            "95%": 0.18518518518518517,
            "99%": 0.310463054187192
        },
        "T+0裂变率": {
            "mean": 0.35277482885383377,
            "max": 181.0,
            "min": 0.0,
            "median": 0.0,
            "75%": 0.0,
            "80%": 0.09090909090909091,
            "90%": 0.6666666666666666,
            "95%": 1.5,
            "99%": 6.0
        }
    }
    # Percentile snapshot of the key metrics for articles in position 2 (used by upLevel2To1)
    statMapTwoToOne = {
        "阅读均值倍数": {
            "mean": 1.0242728432910957,
            "max": 4.921632060507756,
            "min": 0.04236315118498048,
            "median": 0.9604958720021857,
            "75%": 1.237352622811623,
            "80%": 1.3131587863024974,
            "90%": 1.5778563945144477,
            "95%": 1.8312064951656155,
            "99%": 2.5125234834603165
        },
        "阅读率": {
            "mean": 0.0073535037464145655,
            "max": 0.05265662356955502,
            "min": 0.00020895172629276676,
            "median": 0.005941952332154309,
            "75%": 0.009324205525316574,
            "80%": 0.010420614811741105,
            "90%": 0.013728137204835086,
            "95%": 0.01704242661483454,
            "99%": 0.02622215995438508
        },
        "小程序打开率": {
            "mean": 0.14893695109764848,
            "max": 2.5,
            "min": 0.0,
            "median": 0.1360318513603185,
            "75%": 0.1875,
            "80%": 0.20230028849345147,
            "90%": 0.25449906489537877,
            "95%": 0.3051369784478383,
            "99%": 0.4016107123469446
        },
        "T+0裂变率": {
            "mean": 0.6465295965706923,
            "max": 12.804878048780488,
            "min": 0.0,
            "median": 0.48770491803278687,
            "75%": 0.8011363636363636,
            "80%": 0.9144722345551121,
            "90%": 1.317362236032163,
            "95%": 1.792137476827772,
            "99%": 3.277849462365585
        }
    }

    @classmethod
    def getBaseData(cls):
        """
        Pull the per-article metrics from datastat_sort_strategy
        :return: DataFrame with the column names defined in cls.columns
        """
        # today = datetime.today().strftime("%Y%m%d")
        sql = f"""
            SELECT
                position, fans, view_count, avg_view_count, first_view_count, first_avg_view_count, read_rate, read_fans_rate, first_read_rate, fission0_first_rate, title, link
            FROM
                datastat_sort_strategy;
        """
        response = lam.select(sql)
        df = DataFrame(response, columns=cls.columns)
        return df

    @classmethod
    def analysisDF(cls, indexList):
        """
        Print the distribution (mean / max / min / median / high percentiles) of each key metric
        for articles whose 位置 (position) is in indexList
        :return: None (prints the stats as JSON)
        """
        DF = cls.getBaseData()
        DF = DF[DF["位置"].isin(indexList)]
        print(len(DF))
        metrics = ["阅读均值倍数", "阅读率", "小程序打开率", "T+0裂变率"]
        detail = {
            metric: {
                "mean": DF[metric].mean(),
                "max": DF[metric].max(),
                "min": DF[metric].min(),
                "median": DF[metric].median(),
                "75%": DF[metric].quantile(0.75),
                "80%": DF[metric].quantile(0.8),
                "90%": DF[metric].quantile(0.9),
                "95%": DF[metric].quantile(0.95),
                "99%": DF[metric].quantile(0.99)
            }
            for metric in metrics
        }
        # default=float so numpy / Decimal scalars returned by pandas serialize cleanly
        print(json.dumps(detail, ensure_ascii=False, indent=4, default=float))
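
    # Note (assumption, not in the original code): the JSON printed above has the same shape as
    # the statMapThreeToEight / statMapTwoToOne snapshots, so those constants can presumably be
    # refreshed by re-running analysisDF per position group and pasting the output back in:
    #
    #     articleLevelUp.analysisDF(indexList=[3, 4, 5, 6, 7, 8])   # positions 3-8
    #     articleLevelUp.analysisDF(indexList=[2])                  # position 2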

    @classmethod
    def upLevel38To2(cls):
        """
        Promotion candidates from positions 3-8: both the T+0 fission rate and the read-mean
        multiple must exceed the 95th percentile of the positions-3-8 snapshot
        :return: filtered DataFrame
        """
        dataThreeToEight = cls.getBaseData()
        dataThreeToEight = dataThreeToEight[dataThreeToEight['位置'].isin([3, 4, 5, 6, 7, 8])]
        filter_data = dataThreeToEight[
            (dataThreeToEight['T+0裂变率'] > cls.statMapThreeToEight['T+0裂变率']['95%'])
            & (dataThreeToEight['阅读均值倍数'] > cls.statMapThreeToEight['阅读均值倍数']['95%'])
        ]
        return filter_data

    @classmethod
    def upLevel2To1(cls):
        """
        Promotion candidates from position 2: both the T+0 fission rate and the read-mean
        multiple must exceed the 90th percentile of the position-2 snapshot
        :return: filtered DataFrame
        """
        dataTwo = cls.getBaseData()
        dataTwo = dataTwo[dataTwo['位置'].isin([2])]
        filter_data = dataTwo[
            (dataTwo['T+0裂变率'] > cls.statMapTwoToOne['T+0裂变率']['90%'])
            & (dataTwo['阅读均值倍数'] > cls.statMapTwoToOne['阅读均值倍数']['90%'])
        ]
        return filter_data


U = articleLevelUp()
f_d = U.upLevel2To1()
for line in list(zip(f_d['标题'], f_d['链接'])):
    print(line[0])
    print(line[1])
    print("\n")