diff --git a/models/tts/maskgct/g2p/g2p/__init__.py b/models/tts/maskgct/g2p/g2p/__init__.py
index ca96b67e..6a750c56 100644
--- a/models/tts/maskgct/g2p/g2p/__init__.py
+++ b/models/tts/maskgct/g2p/g2p/__init__.py
@@ -25,7 +25,7 @@ def __init__(self, vacab_path="./models/tts/maskgct/g2p/g2p/vocab.json"):
         self.text_tokenizers = {}
         self.int_text_tokenizers()
 
-        with open(vacab_path, "r") as f:
+        with open(vacab_path, "r", encoding="utf-8") as f:
             json_data = f.read()
         data = json.loads(json_data)
         self.vocab = data["vocab"]
diff --git a/models/tts/maskgct/g2p/g2p_generation.py b/models/tts/maskgct/g2p/g2p_generation.py
index ca3d9841..69f99d20 100644
--- a/models/tts/maskgct/g2p/g2p_generation.py
+++ b/models/tts/maskgct/g2p/g2p_generation.py
@@ -114,7 +114,7 @@ def chn_eng_g2p(text: str):
 
 
 text_tokenizer = PhonemeBpeTokenizer()
-with open("./models/tts/maskgct/g2p/g2p/vocab.json", "r") as f:
+with open("./models/tts/maskgct/g2p/g2p/vocab.json", "r", encoding="utf-8") as f:
     json_data = f.read()
 data = json.loads(json_data)
 vocab = data["vocab"]
diff --git a/models/tts/maskgct/g2p/utils/g2p.py b/models/tts/maskgct/g2p/utils/g2p.py
index f71e0c8d..f586ba57 100644
--- a/models/tts/maskgct/g2p/utils/g2p.py
+++ b/models/tts/maskgct/g2p/utils/g2p.py
@@ -60,7 +60,7 @@
     "de": phonemizer_de,
 }
 
-with open("./models/tts/maskgct/g2p/utils/mls_en.json", "r") as f:
+with open("./models/tts/maskgct/g2p/utils/mls_en.json", "r", encoding="utf-8") as f:
     json_data = f.read()
 token = json.loads(json_data)
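
All three hunks apply the same fix: the JSON vocab files are opened in text mode without an explicit encoding, so Python falls back to the platform's preferred locale encoding, and loading the UTF-8 files can raise a UnicodeDecodeError on systems whose default is not UTF-8 (for example cp1252 on Windows). Below is a minimal sketch of the failure mode and the fix; the file "vocab_demo.json" and its contents are made up for illustration and are not part of the repository.

import json

# Hypothetical stand-in for vocab.json: phoneme entries with non-ASCII (IPA) symbols,
# written explicitly as UTF-8 to mirror how the real vocab files are stored.
sample = {"vocab": ["ʃ", "ŋ", "ɕ", "a", "b"]}
with open("vocab_demo.json", "w", encoding="utf-8") as f:
    json.dump(sample, f, ensure_ascii=False)

# Before the patch: open() without an encoding uses locale.getpreferredencoding(False),
# which is often cp1252 or gbk on Windows, so decoding the IPA bytes may fail there.
try:
    with open("vocab_demo.json", "r") as f:  # platform-dependent default encoding
        json.loads(f.read())
except UnicodeDecodeError as err:
    print("default-encoding read failed:", err)

# After the patch: an explicit UTF-8 read behaves the same on every platform.
with open("vocab_demo.json", "r", encoding="utf-8") as f:
    vocab = json.loads(f.read())["vocab"]
print(vocab)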