@@ -45,55 +45,60 @@ public class BFRecordScreenViewModel: NSObject {
}
}

- /// Segmented speech recognition for a recording
- /// - Parameters:
- /// - sectionIndex: section id
- /// - locationPaths: segmented recordings
- /// - completeHander: <#completeHander description#>
- /// - Returns: description
- public class func batchUploadRecordVoiceMatarialData(beginTime _: CMTime, locationPath: String, completeHander _: @escaping (_ subTitleList: List<PQEditVisionTrackMaterialsModel>?, _ msg: String?) -> Void) {
- BFLog(message: "Recording file upload started--\(locationPath)")
+ // Fetch the speaker voice list data for all categories
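+ // Hypothetical call site (illustration only, not part of this change):
+ //   getAllVoiceCates { voices in
+ //       // `voices` maps each cateName to that category's [PQVoiceModel] list
+ //   }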
+ class func getAllVoiceCates(completeHander: @escaping (_ voices: [String: Any]) -> Void) {
+ BFNetRequestAdaptor.getRequestData(url: PQENVUtil.shared.clipapiapi + listAllCateVoicesUrl, parames: nil, commonParams: commonParams()) { response, _, _, _ in
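+ // Assumed response payload, inferred from the parsing below (not verified against API docs):
+ //   [{ "cateName": String, "cateId": Int, "voiceDatas": [{ "voice", "name", "channel", "appEnable", "azureStyleConfig", ... }] }]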

- if !FileManager.default.fileExists(atPath: documensDirectory + locationPath) {
- BFLog(message: "Upload recording - the recording file does not exist")
- } else {
- let assert = AVURLAsset(url: URL(fileURLWithPath: documensDirectory + locationPath), options: nil)
- let voiceMaterials: PQEditVisionTrackMaterialsModel = PQEditVisionTrackMaterialsModel()
- voiceMaterials.locationPath = locationPath
- voiceMaterials.type = "voice"
- voiceMaterials.duration = assert.duration.seconds * 1_000_000
- BFLog(message: "Upload recording - start uploading the recording")
-// PQVideoEditViewModel.uploadMatarialData(isBatchUpload: false, materialData: voiceMaterials) { _, _, _, _, _, _, matarialInfo, _ in
-// let materialType: String = "\(matarialInfo?["materialType"] ?? "")"
-// let localPath: String = "\(matarialInfo?["localPath"] ?? "")"
-//
-// BFLog(message: "Upload recording - upload response--\(String(describing: matarialInfo))")
-// if matarialInfo != nil, matarialInfo?.keys.contains("localPath") ?? false, materialType == StickerType.VOICE.rawValue && localPath.contains("_noise_") {
-// BFLog(message: "Upload recording - upload succeeded, start converting to subtitles")
-// let materialId: String = "\(matarialInfo?["materialId"] ?? "")"
-// let duration: Float64 = Float64("\(matarialInfo?["duration"] ?? "")") ?? 0
-// PQVideoEditViewModel.transferAudioMaterialToTextData(Int64(materialId) ?? 0, dutation: duration) { _, _, subTitleList, _ in
-// BFLog(message: "Upload recording - subtitle conversion finished: \(subTitleList.count)")
-//
-// if subTitleList.count > 0 {
-//
-// }
-//
-// }
-// }
-// }
- }
+ var voicesDic: [String: Any] = Dictionary()
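+ // The adaptor may hand back nil/NSNull on failure; guard before casting the payload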
+ if response != nil, !(response is NSNull) {
+ for voices in response as! [[String: Any]] {
+ // Collect this category's voices; reset for every category so entries don't carry over
+ var voicesData: [PQVoiceModel] = Array()
+ let cateName = voices["cateName"] as? String ?? ""
+ let cateId = voices["cateId"] as? Int ?? 0
+ BFLog(message: "cateName is \(cateName) cateId is \(cateId)")
+ for voice in voices["voiceDatas"] as? [[String: Any]] ?? [] {
+ if voice.keys.contains("channel"), "\(voice["channel"] ?? "")" == "aliyun", voice.keys.contains("appEnable"), "\(voice["appEnable"] ?? "")" == "1" {
+ let voiceModel = PQVoiceModel()
+ voiceModel.voice = voice["voice"] as? String ?? ""
+ voiceModel.name = voice["name"] as? String ?? ""
+ voiceModel.cateId = voice["cateId"] as? Int ?? 0
+ voiceModel.avatarUrl = voice["avatarUrl"] as? String ?? ""
+ voiceModel.gender = voice["gender"] as? Int ?? 0
+ voiceModel.channel = voice["channel"] as? String ?? ""
+ voiceModel.qualityFlag = voice["qualityFlag"] as? Int ?? 0
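+ // "azureStyleConfig" arrives as a JSON string; parse it into individual speaking-style entries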
+ let azureStyleConfigStr: String = voice["azureStyleConfig"] as? String ?? ""
+ let configArray = jsonStringToArray(azureStyleConfigStr)
+ BFLog(message: "voiceModel.name is \(voiceModel.name)")
+ if configArray?.count ?? 0 > 0 {
+ BFLog(message: "\(voiceModel.name) has speaking-style settings, azureStyleConfig is \(String(describing: voiceModel.azureStyleConfig))")
+ for dic in configArray! {
+ let azureStyleModel = PQAzureStyleModel()
+ azureStyleModel.name = dic["name"] ?? ""
+ azureStyleModel.style = dic["style"] ?? ""
+ azureStyleModel.isSelected = false
+ voiceModel.azureStyleConfig.append(azureStyleModel)
+
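+ // Measure the rendered width of the style name up front (assumed to be used later to size the style label in the UI)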
+ let statusLabelText: String = azureStyleModel.name
+
+ let size = CGSize(width: CGFloat(MAXFLOAT), height: 12)

- // dispatchGroup.notify(queue: DispatchQueue.main) {
- // BFLog(message: "All audio segments have been recognized")
- // let subTitleList: List<PQEditVisionTrackMaterialsModel> = List<PQEditVisionTrackMaterialsModel>.init()
- // BFLog(message: "tempSubTitles == \(String(describing: tempSubTitles.first))")
- // tempSubTitles.forEach { tempItems in
- // tempItems.forEach { item in
- // subTitleList.append(item)
- // }
- // }
- // completeHander(sectionIndex, subTitleList, nil)
- // }
+ let strSize = statusLabelText.boundingRect(with: size, options: .usesLineFragmentOrigin, attributes: [NSAttributedString.Key.font: UIFont.systemFont(ofSize: 16)], context: nil).size
+
+ azureStyleModel.showWith = strSize.width
+ }
+ }
+ voicesData.append(voiceModel)
+ }
+ }
+
+ voicesDic[cateName] = voicesData
+ }
+ completeHander(voicesDic)
+
+ } else {
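+ // No usable response: still invoke the callback, with an empty dictionary, so the caller is not left waiting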
+ completeHander(voicesDic)
+ }
+ }
}
}