wenweiwei 3 years ago
parent
commit
b0b631454d

+ 4 - 0
BFRecordScreenKit/Classes/BFVoiceRecordManager.swift

@@ -43,6 +43,10 @@ class BFVoiceRecordManager: NSObject {
         audioRecorder = BFRecorderManager()
         audioRecorder?.delegate = self
     }
+    
+    deinit {
+        audioRecorder?.delegate = nil
+    }
 
     /// Start recording
     func startRecord() {
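
The new deinit detaches the manager from its recorder before teardown. Below is a minimal sketch of that pattern, assuming a recorder with a weak delegate; the types are hypothetical simplifications, not the BFRecordScreenKit API.

import Foundation

protocol RecorderDelegate: AnyObject {
    func recorderDidFinishRecording()
}

final class Recorder {
    // weak so the recorder never keeps its owner alive
    weak var delegate: RecorderDelegate?
}

final class VoiceRecordManager: NSObject, RecorderDelegate {
    private let recorder = Recorder()

    override init() {
        super.init()
        recorder.delegate = self
    }

    deinit {
        // Mirrors the change above: detach so no callback can reach a
        // manager that is already being deallocated.
        recorder.delegate = nil
    }

    func recorderDidFinishRecording() {}
}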

+ 31 - 18
BFRecordScreenKit/Classes/RecordScreen/Controller/BFRecordScreenController.swift

@@ -75,7 +75,6 @@ public class BFRecordScreenController: BFBaseViewController {
     var recordStartPlayTime: CMTime = .zero
     // playback start time of a specific recording
     var currenStartPlayTime: CMTime = .zero
-//    var recordStartTime: Double = 0 // recording start time
     var pauseTime: Double = 0 // time point when stopped with no activity
 
     var assetPlayer: AVPlayer? // audio player for the original video
@@ -432,7 +431,7 @@ public class BFRecordScreenController: BFBaseViewController {
 
         // Recording progress
         recorderManager?.recorderProgrossHandle = { [weak self] progress in
-//            BFLog(1, message: "curr: recording progress--\(progress) \(self?.recordStartTime ?? 0) \(self?.isRecording ?? false)")
+            BFLog(1, message: "curr: recording progress--\(progress),\(self?.isRecording ?? false)")
             self?.drawProgressIndication(progress: (progress.isNaN || progress.isInfinite) ? 0 : progress)
         }
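
The progress callback now logs only the raw progress and the recording flag, and non-finite values are still zeroed before drawing. A small helper sketching that guard (the function name is hypothetical; the check is the same one used in recorderProgrossHandle above):

// Maps NaN or infinite progress values to 0 before they reach any drawing code.
func sanitizedProgress(_ progress: Float64) -> Float64 {
    (progress.isNaN || progress.isInfinite) ? 0 : progress
}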
 
@@ -479,6 +478,7 @@ public class BFRecordScreenController: BFBaseViewController {
             if let sself = self, let model = voideModel, FileManager.default.fileExists(atPath: model.wavFilePath ?? "") {
                 // Add to the voice array
                 model.endTime = sself.currentAssetProgress.seconds
+                BFLog(message: "录制结束当前录音文件:\(model.wavFilePath ?? "")-\(model.startTime)-\(model.endTime)-\(model.endTime - model.startTime)")
                 // ********** 开始处理冲突的录制部分
                 let newRange = CMTimeRange(start: CMTime(seconds: model.startTime, preferredTimescale: 1000), end: CMTime(seconds: model.endTime, preferredTimescale: 1000))
 
@@ -486,23 +486,44 @@ public class BFRecordScreenController: BFBaseViewController {
                 // Subtitles to be deleted
                 var deletedTitlesTemp = [(PQEditSubTitleModel, Int)]()
                 for (i, m) in sself.itemModels[sself.currItemModelIndex].voiceStickers.enumerated() {
+                    BFLog(message: "录制结束查询时间重合录音文件:\(i)-\(m.wavFilePath ?? "")-\(m.startTime)-\(m.endTime)-\(m.endTime - m.startTime)")
                     let originRange = CMTimeRange(start: CMTime(seconds: m.startTime, preferredTimescale: 1000), end: CMTime(seconds: m.endTime, preferredTimescale: 1000))
-
                     if CMTimeRangeGetIntersection(originRange, otherRange: newRange).duration.seconds > 0 {
                         deletedVoices.append((m, i))
                         deletedTitlesTemp += sself.deleteTitles(voiceModel: m)
                         continue
                     }
                 }
-
+                for (item, index) in deletedVoices {
+                    BFLog(message: "Recording finished, audio file to delete: \(index)-\(item.wavFilePath ?? "")-\(item.startTime)-\(item.endTime)-\(item.endTime - item.startTime)")
+                }
                 // Remove the conflicting audio
-                sself.itemModels[sself.currItemModelIndex].voiceStickers.removeAll { m in
-                    let originRange = CMTimeRange(start: CMTime(seconds: m.startTime, preferredTimescale: 1000), end: CMTime(seconds: m.endTime, preferredTimescale: 1000))
-                    return CMTimeRangeGetIntersection(originRange, otherRange: newRange).duration.seconds > 0
+                deletedVoices.forEach { (m, i) in
+                    sself.itemModels[sself.currItemModelIndex].voiceStickers.removeAll { tempM in
+                        return m.wavFilePath == tempM.wavFilePath
+                    }
                 }
                 BFLog(1, message: "添加录音文件:\(model.startTime) -- \(model.endTime)")
-
                 sself.itemModels[sself.currItemModelIndex].voiceStickers.append(model)
+                // For image material, if some recordings had to be deleted, adjust the remaining recordings' start/end times
+                if sself.itemModels[sself.currItemModelIndex].mediaType == .IMAGE {
+                    if deletedVoices.count > 0 {
+                        // For images, sort first, then recompute the intervals
+                        sself.itemModels[sself.currItemModelIndex].voiceStickers = sself.itemModels[sself.currItemModelIndex].voiceStickers.sorted { voice1, voice2 in
+                            voice1.startTime < voice2.startTime
+                        }
+                        for (index, item) in sself.itemModels[sself.currItemModelIndex].voiceStickers.enumerated() {
+//                            if index > 0, let duration = item.endTime - item.startTime {
+//                                // Note: subtract duration from startTime, or set it equal to the previous recording's endTime
+//                                item.startTime = sself.itemModels[sself.currItemModelIndex].voiceStickers[index - 1].endTime
+//                                item.endTime -= deleteDuration
+//                            }
+                            BFLog(message: "Recording finished, re-sorted audio file: \(index)-\(item.wavFilePath ?? "")-\(item.startTime)-\(item.endTime)-\(item.endTime - item.startTime)")
+                        }
+                    }
+                } else {
+                    sself.itemModels[sself.currItemModelIndex].voiceStickers.append(model)
+                }
 
                 // *********** Handling conflicting recording resources end
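
The conflict handling above comes down to a range-intersection test: an existing recording is dropped when its CMTimeRange overlaps the newly recorded range, and removal is keyed on the wav file path. A self-contained sketch of that logic, using the same preferredTimescale of 1000; VoiceClip is a simplified stand-in for the voice sticker model, not the project's type.

import CoreMedia

struct VoiceClip {
    let wavFilePath: String
    let startTime: Double
    let endTime: Double
}

// Two clips conflict when the intersection of their time ranges has a positive duration.
func conflicts(_ a: VoiceClip, _ b: VoiceClip) -> Bool {
    let rangeA = CMTimeRange(start: CMTime(seconds: a.startTime, preferredTimescale: 1000),
                             end: CMTime(seconds: a.endTime, preferredTimescale: 1000))
    let rangeB = CMTimeRange(start: CMTime(seconds: b.startTime, preferredTimescale: 1000),
                             end: CMTime(seconds: b.endTime, preferredTimescale: 1000))
    return CMTimeRangeGetIntersection(rangeA, otherRange: rangeB).duration.seconds > 0
}

// Collect the clips that overlap the new recording, remove them by file path,
// then append the new clip, mirroring the deletedVoices / removeAll flow above.
func resolve(newClip: VoiceClip, in clips: inout [VoiceClip]) {
    let deleted = clips.filter { conflicts($0, newClip) }
    for d in deleted {
        clips.removeAll { $0.wavFilePath == d.wavFilePath }
    }
    clips.append(newClip)
}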
 
@@ -515,6 +536,7 @@ public class BFRecordScreenController: BFBaseViewController {
                 if sself.itemModels[sself.currItemModelIndex].mediaType == .IMAGE {
                     var duration: Double = 0
                     sself.itemModels[sself.currItemModelIndex].voiceStickers.forEach { temp in
+                        BFLog(message: "录制结束-最终:\(temp.wavFilePath ?? "")-\(temp.startTime)-\(temp.endTime)-\(temp.endTime - temp.startTime)")
                         temp.duration = "\(temp.endTime - temp.startTime)"
                         duration = duration + (temp.endTime - temp.startTime)
                     }
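
For image material, the total duration is recomputed from the voice stickers after the merge, and each sticker's duration string is refreshed at the same time. A reduced sketch of that bookkeeping, with a simplified model class rather than the project's sticker type:

final class Sticker {
    var startTime: Double = 0
    var endTime: Double = 0
    var duration: String = ""
}

// Rewrites each sticker's duration string and returns the summed material duration.
func refreshDurations(_ stickers: [Sticker]) -> Double {
    var total: Double = 0
    for temp in stickers {
        temp.duration = "\(temp.endTime - temp.startTime)"
        total += temp.endTime - temp.startTime
    }
    return total
}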
@@ -540,8 +562,6 @@ public class BFRecordScreenController: BFBaseViewController {
                 }
                 sself.currentPlayRecordIndex = -3 // just finished recording, no need to play it back
                 BFLog(3, message: "Reset playback index-\(#function) = \(sself.currentPlayRecordIndex)")
-//                // Reset the recording start time
-//                sself.recordStartTime = 0
             }
         }
         recorderManager?.cancelRecordHandle = { [weak self] voiceModel in
@@ -559,9 +579,6 @@ public class BFRecordScreenController: BFBaseViewController {
             }
             subtitleCount = self?.itemModels[self?.currItemModelIndex ?? 0].titleStickers.count ?? 0
             BFLog(2, message: "删除\(voiceModel?.wavFilePath ?? "")对应的字幕  后 count\(subtitleCount)")
-
-//            // Reset the recording start time
-//            self?.recordStartTime = 0
             /// Reset progress
             self?.currentAssetProgress = CMTime(seconds: voiceModel?.startTime ?? 0, preferredTimescale: 1000)
             self?.resetCurrentProgress()
@@ -940,10 +957,6 @@ public class BFRecordScreenController: BFBaseViewController {
         recorderManager?.voiceModel = model
         recorderManager?.startRecord()
         recorderManager?.audioRecorder?.startNeoNui(NeoNuiToken ?? "", appid: NeoNuiAPPID ?? "")
-//        if recordStartTime <= 0 {
-//            recordStartTime = currentAssetProgress.seconds
-//        }
-
         isRecording = true
 
         if !avatarView.isHidden {
@@ -1904,7 +1917,7 @@ public extension BFRecordScreenController {
     /// Handle recording for image material
     func imageRecordProgress(isRecord: Bool = false, progress: Float64) {
         if isRecord {
-            currentAssetProgress = CMTime(seconds: itemModels[currItemModelIndex].materialDuraion + progress, preferredTimescale: 1000)
+            currentAssetProgress = CMTime(seconds: (recorderManager?.voiceModel?.startTime ?? 0) + progress, preferredTimescale: 1000)
         } else {
             currentAssetProgress = CMTime(seconds: recordStartPlayTime.seconds + progress, preferredTimescale: 1000)
         }
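
The last hunk changes how the playhead is derived while recording over an image: it is now anchored to the active recording's own startTime rather than the accumulated materialDuraion. A minimal sketch of the corrected calculation as a free function with assumed parameters, not the controller's API:

import CoreMedia

// While recording, the playhead is the recording's start time plus elapsed progress;
// during playback it is the playback start time plus progress.
func imageAssetProgress(isRecord: Bool,
                        progress: Float64,
                        recordingStartTime: Double,
                        recordStartPlayTime: CMTime) -> CMTime {
    let seconds = isRecord
        ? recordingStartTime + progress
        : recordStartPlayTime.seconds + progress
    return CMTime(seconds: seconds, preferredTimescale: 1000)
}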