# chinese.py — Chinese text-to-phoneme (G2P) frontend for fish_speech.
  1. import os
  2. import re
  3. import jieba.posseg as psg
  4. from pypinyin import Style, lazy_pinyin
  5. from tn.chinese.normalizer import Normalizer
  6. from fish_speech.text.symbols import punctuation
  7. from fish_speech.text.tone_sandhi import ToneSandhi
  8. current_file_path = os.path.dirname(__file__)
  9. OPENCPOP_DICT_PATH = os.path.join(current_file_path, "opencpop-strict.txt")
  10. pinyin_to_symbol_map = {
  11. line.split("\t")[0]: line.strip().split("\t")[1]
  12. for line in open(OPENCPOP_DICT_PATH).readlines()
  13. }
  14. normalizer = Normalizer()
  15. tone_modifier = ToneSandhi()
  16. def replace_punctuation(text):
  17. text = text.replace("嗯", "恩").replace("呣", "母")
  18. replaced_text = re.sub(r"[^\u4e00-\u9fa5" + "".join(punctuation) + r"]+", "", text)
  19. return replaced_text
  20. def g2p(text):
  21. text = text_normalize(text)
  22. text = replace_punctuation(text)
  23. pattern = r"(?<=[{0}])\s*".format("".join(punctuation))
  24. sentences = [i for i in re.split(pattern, text) if i.strip() != ""]
  25. phones = _g2p(sentences)
  26. return phones
  27. def _get_initials_finals(word):
  28. initials = []
  29. finals = []
  30. orig_initials = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.INITIALS)
  31. orig_finals = lazy_pinyin(
  32. word, neutral_tone_with_five=True, style=Style.FINALS_TONE3
  33. )
  34. for c, v in zip(orig_initials, orig_finals):
  35. initials.append(c)
  36. finals.append(v)
  37. return initials, finals
def _g2p(segments):
    """Core G2P: map punctuation-delimited Chinese segments to phonemes.

    Each segment is POS-tagged with jieba, tone sandhi is applied per word,
    and every syllable's (initial, final) pair is respelled into the
    opencpop-strict symbol inventory via ``pinyin_to_symbol_map``.

    Returns a flat list of phoneme strings; punctuation marks pass through
    as single-element entries.
    """
    phones_list = []
    for seg in segments:
        pinyins = []  # NOTE(review): unused — left in place, candidate for removal
        # Replace all English words in the sentence
        seg = re.sub("[a-zA-Z]+", "", seg)
        seg_cut = psg.lcut(seg)
        initials = []
        finals = []
        # Merge adjacent tokens so sandhi rules see whole lexical units.
        seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
        for word, pos in seg_cut:
            if pos == "eng":
                continue
            sub_initials, sub_finals = _get_initials_finals(word)
            # Apply Mandarin tone sandhi (third-tone chains, 不/一, etc.).
            sub_finals = tone_modifier.modified_tone(word, pos, sub_finals)
            initials.append(sub_initials)
            finals.append(sub_finals)
            # assert len(sub_initials) == len(sub_finals) == len(word)
        # Flatten the per-word lists into flat per-syllable sequences.
        initials = sum(initials, [])
        finals = sum(finals, [])
        #
        for c, v in zip(initials, finals):
            raw_pinyin = c + v
            # NOTE: post process for pypinyin outputs
            # we discriminate i, ii and iii
            if c == v:
                # Initial == final only for punctuation passed through as-is.
                assert c in punctuation
                phone = [c]
            else:
                v_without_tone = v[:-1]
                tone = v[-1]
                pinyin = c + v_without_tone
                assert tone in "12345"
                if c:
                    # Syllable with an initial consonant: rewrite pypinyin's
                    # full finals to the dictionary's contracted spellings.
                    v_rep_map = {
                        "uei": "ui",
                        "iou": "iu",
                        "uen": "un",
                    }
                    if v_without_tone in v_rep_map.keys():
                        pinyin = c + v_rep_map[v_without_tone]
                else:
                    # Zero-initial syllable: restore the conventional
                    # y-/w- spellings expected by the dictionary.
                    pinyin_rep_map = {
                        "ing": "ying",
                        "i": "yi",
                        "in": "yin",
                        "u": "wu",
                    }
                    if pinyin in pinyin_rep_map.keys():
                        pinyin = pinyin_rep_map[pinyin]
                    else:
                        single_rep_map = {
                            "v": "yu",
                            "e": "e",
                            "i": "y",
                            "u": "w",
                        }
                        if pinyin[0] in single_rep_map.keys():
                            pinyin = single_rep_map[pinyin[0]] + pinyin[1:]
                assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
                new_c, new_v = pinyin_to_symbol_map[pinyin].split(" ")
                # Re-attach the tone digit to the mapped final.
                new_v = new_v + tone
                phone = [new_c, new_v]
            phones_list += phone
    return phones_list
  105. def text_normalize(text):
  106. return normalizer.normalize(text)
  107. if __name__ == "__main__":
  108. text = "啊——但是《原神》是由,米哈\游自主,研发的一款全.新开放世界.冒险游戏"
  109. text = "呣呣呣~就是…大人的鼹鼠党吧?"
  110. # text = "你好"
  111. text = text_normalize(text)
  112. print(g2p(text))