//
// Created by 阳坤 on 2022/1/12.
//
#ifndef JNI_WEB_TOOLS_H
#define JNI_WEB_TOOLS_H

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <unistd.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/imgutils.h>
#include <libavutil/avutil.h>
#include <libavutil/fifo.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>

#define LOGE(format, ...) Log(format, ##__VA_ARGS__)
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))

#define kCustomIoBufferSize (32 * 1024)
#define kInitialPcmBufferSize (128 * 1024)
#define kDefaultFifoSize (1 * 1024 * 1024)
#define kMaxFifoSize (16 * 1024 * 1024)

#define DEFAULT_AUDIO_SAMPLE_RATE 44100
#define DEFAULT_AUDIO_CHANNELS 2
#define DEFAULT_VIDEO_FPS 25
#define DEFAULT_FORCE_PRECISION 0

static FILE *pFile = NULL;
// Print every codec compiled into this FFmpeg build (uses the pre-4.0 av_codec_next() iteration API).
static inline void print_ffmpeg_suport_codec() {
    AVCodec *codec = av_codec_next(NULL);
    char *info = (char *) malloc(40000);
    if (!info) return;
    memset(info, 0, 40000);
    while (codec != NULL) {
        if (codec->decode != NULL) {
            strcat(info, "[Decode]");
        } else {
            strcat(info, "[Encode]");
        }
        switch (codec->type) {
            case AVMEDIA_TYPE_VIDEO:
                strcat(info, "[Video]");
                break;
            case AVMEDIA_TYPE_AUDIO:
                strcat(info, "[Audio]");
                break;
            default:
                strcat(info, "[Other]");
                break;
        }
        // Append the codec name at the end of the buffer; sprintf()-ing a buffer
        // into itself, as the original code did, is undefined behaviour.
        sprintf(info + strlen(info), " %10s\n", codec->name);
        codec = codec->next;
    }
    puts(info);
    free(info);
}
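/*
 * Illustrative usage (assumes an FFmpeg 3.x-era build where av_register_all()
 * is still required before codec iteration):
 *
 *     av_register_all();
 *     print_ffmpeg_suport_codec();
 */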
static inline void split(char *str_, char delims[], long ret[], int *count) {
    int len = strlen(str_);
    char str[strlen(str_) + 1];
    for (int i = 0; i < len; i++) {
        str[i] = str_[i];
    }
    str[len] = '\0';
    char *result = NULL;
    result = strtok(str, delims);
    int index = 0;
    while (result != NULL) {
        ret[index++] = atol(result);
        result = strtok(NULL, delims);
    }
    *count = index;
}
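/*
 * Illustrative usage (hypothetical input): parse a resolution string such as
 * "1920x1080" into its numeric parts.
 *
 *     char res[] = "1920x1080";
 *     long parts[8];
 *     int n = 0;
 *     split(res, "x", parts, &n);   // parts[0] == 1920, parts[1] == 1080, n == 2
 *
 * Note that ret must be large enough for every token; no bounds check is performed.
 */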
static inline long getCurrentTimeMills() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000 + tv.tv_usec / 1000;
}
static inline void Log(const char *format, ...) {
    char szBuffer[1024] = {0};
    char szTime[1024] = {0};
    char *p = NULL;
    int prefixLength = 0;
    const char *tag = "Core";
    time_t t;          // wall-clock time in seconds
    struct tm *local;  // broken-down local time
    t = time(NULL);
    // localtime() is not thread-safe; localtime_r() is the thread-safe alternative.
    local = localtime(&t);
    strftime(szTime, 64, "%Y-%m-%d %H:%M:%S", local); // adjust the format as needed
    prefixLength = sprintf(szBuffer, "[%s][%s][DT] ", szTime, tag);
    p = szBuffer + prefixLength;
    va_list ap;
    va_start(ap, format);
    vsnprintf(p, 1024 - prefixLength, format, ap);
    va_end(ap);
    printf("%s\n", szBuffer);
}
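/*
 * Illustrative usage via the LOGE macro defined above (frameCount and start are
 * hypothetical caller-side variables); the message is printed to stdout with a
 * "[time][Core][DT]" prefix:
 *
 *     LOGE("decoded %d frames in %ld ms", frameCount, getCurrentTimeMills() - start);
 */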
static int getVideoRotate(AVDictionary *metadata);

static int
drop_or_add_frame_countV2(int cur_fps, int fix_fps, double sync_ipts, double sync_opts, int64_t next_pkt_duration,
                          AVRational st_time_base);

// Decide how many times the current frame should be written (0 = drop it, 1 = keep it,
// >1 = duplicate it) when converting from cur_fps to a fixed output rate of fix_fps.
// The logic follows the frame-rate conversion (vsync) handling in ffmpeg.c.
static int
drop_or_add_frame_countV2(int cur_fps, int fix_fps, double sync_ipts, double sync_opts, int64_t next_pkt_duration,
                          AVRational st_time_base) {
    int nb_frames;
    double delta, delta0;
    // Only the last assignment takes effect: the next packet's duration expressed
    // in units of one output frame interval (1 / fix_fps).
    double duration = 1.0 / ((cur_fps / 1.0) * (1.0 / fix_fps));
    duration = FFMIN(duration, 1.0 / ((fix_fps / 1.0) * (1.0 / fix_fps)));
    duration = lrintf(next_pkt_duration * av_q2d(st_time_base) / (1.0 / fix_fps));
    delta0 = sync_ipts - sync_opts;
    delta = delta0 + duration;
    // LOGE("sync_ipts=%f sync_opts=%f delta=%f drop_frame=%d duration=%f \n", sync_ipts, sync_opts, delta,
    //      delta < -1.1, duration);
    nb_frames = 1;
    if (delta0 < 0 && delta > 0) {
        // if (delta0 < -0.6) {
        //     LOGE("Past duration %f too large\n", -delta0);
        // } else
        //     LOGE("Clipping frame in rate conversion by %f\n", -delta0);
        sync_ipts = sync_opts;
        duration += delta0;
        delta0 = 0;
    }
    if (delta < -1.1)
        nb_frames = 0;
    else if (delta > 1.1) {
        nb_frames = lrintf(delta);
    }
    return nb_frames;
}
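/*
 * Illustrative usage (hypothetical variables): when re-timing a 30 fps stream to
 * DEFAULT_VIDEO_FPS, write the frame nb times, advancing the output clock each time.
 *
 *     int nb = drop_or_add_frame_countV2(30, DEFAULT_VIDEO_FPS, in_pts_sec, out_pts_sec,
 *                                        pkt->duration, in_stream->time_base);
 *     for (int k = 0; k < nb; k++) {
 *         // encode/write the frame once per iteration; nb == 0 drops it entirely
 *     }
 */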
// Read the "rotate" tag from stream metadata and normalize it to 0/90/180/270 degrees.
static int getVideoRotate(AVDictionary *metadata) {
    AVDictionaryEntry *tag = NULL;
    int m_Rotate = 0;
    tag = av_dict_get(metadata, "rotate", tag, m_Rotate);
    if (tag == NULL) {
        m_Rotate = 0;
    } else {
        int angle = atoi(tag->value);
        angle %= 360;
        if (angle == 90) {
            m_Rotate = 90;
        } else if (angle == 180) {
            m_Rotate = 180;
        } else if (angle == 270) {
            m_Rotate = 270;
        } else {
            m_Rotate = 0;
        }
    }
    return m_Rotate;
}
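/*
 * Illustrative usage (fmt_ctx and videoIndex are hypothetical caller-side variables):
 *
 *     int rotate = getVideoRotate(fmt_ctx->streams[videoIndex]->metadata);
 *     // rotate is 0, 90, 180 or 270 and can drive a transpose/rotation filter
 */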
// av_log_set_callback() handler that forwards FFmpeg log output to LOGE with the
// usual "[class @ ptr]" prefix.
static void ffmpegLogCallback(void *ptr, int level, const char *fmt, va_list vl) {
    static int printPrefix = 1;
    static int count = 0;
    static char prev[1024] = {0};
    char line[1024] = {0};
    static int is_atty;
    AVClass *avc = ptr ? *(AVClass **) ptr : NULL;
    if (level > AV_LOG_DEBUG) {
        return;
    }
    line[0] = 0;
    if (printPrefix && avc) {
        if (avc->parent_log_context_offset) {
            AVClass **parent = *(AVClass ***) (((uint8_t *) ptr) + avc->parent_log_context_offset);
            if (parent && *parent) {
                snprintf(line, sizeof(line), "[%s @ %p] ", (*parent)->item_name(parent), parent);
            }
        }
        snprintf(line + strlen(line), sizeof(line) - strlen(line), "[%s @ %p] ", avc->item_name(ptr), ptr);
    }
    vsnprintf(line + strlen(line), sizeof(line) - strlen(line), fmt, vl);
    line[sizeof(line) - 1] = 0; // vsnprintf already terminates; writing past strlen(line) could overflow
    LOGE("%s", line);
}
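/*
 * Illustrative usage: route FFmpeg's own logging through this callback at startup.
 *
 *     av_log_set_callback(ffmpegLogCallback);
 */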
static int GetAACSampleRateIndex(int sampling_frequency) {
    switch (sampling_frequency) {
        case 96000:
            return 0;
        case 88200:
            return 1;
        case 64000:
            return 2;
        case 48000:
            return 3;
        case 44100:
            return 4;
        case 32000:
            return 5;
        case 24000:
            return 6;
        case 22050:
            return 7;
        case 16000:
            return 8;
        case 12000:
            return 9;
        case 11025:
            return 10;
        case 8000:
            return 11;
        case 7350:
            return 12;
        default:
            return 0;
    }
}
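/*
 * The returned value is the 4-bit samplingFrequencyIndex used in AAC
 * AudioSpecificConfig and ADTS headers. Illustrative usage (codec_ctx is a
 * hypothetical encoder context):
 *
 *     int freq_idx = GetAACSampleRateIndex(codec_ctx->sample_rate); // 44100 -> 4
 */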
static void close_file() {
    if (pFile) {
        fclose(pFile);
        pFile = NULL;
    }
}
// Dump a raw frame to disk for debugging; when isRgb is set, a PPM (P6) header is
// written first so the file can be opened as an image.
static void savaFrame(uint8_t *pFrame, int width, int height, int frame, int size, int isRgb) {
    char szFilename[1024];
    // Build the output file name
    sprintf(szFilename, "/Users/devyk/Data/Project/piaoquan/PQMedia/temp/image/frame%d.yuv", frame);
    // Create or open the file (kept open across calls; see close_file())
    if (!pFile)
        pFile = fopen(szFilename, "wb");
    if (pFile == NULL) {
        return;
    }
    if (isRgb)
        // Write the PPM header
        fprintf(pFile, "P6\n%d %d\n255\n", width, height);
    // Write the raw data
    fwrite(pFrame, 1, size, pFile);
}
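/*
 * Illustrative usage (frame and idx are hypothetical caller-side variables): dump the
 * luma plane of a decoded frame, assuming frame->linesize[0] == frame->width.
 *
 *     savaFrame(frame->data[0], frame->width, frame->height, idx, frame->width * frame->height, 0);
 *     close_file();
 */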
// Create a default "filler" frame: silent stereo audio or a black YUV420P picture.
static int bf_create_default_frame(enum AVMediaType type, AVCodecContext *ctx, AVFrame **frame) {
    if (frame) {
        *frame = av_frame_alloc();
        if (!(*frame)) {
            LOGE("Could not allocate output frame\n");
            return -1;
        }
        if (type == AVMEDIA_TYPE_AUDIO) {
            (*frame)->format = AV_SAMPLE_FMT_FLTP;
            (*frame)->channel_layout = AV_CH_LAYOUT_STEREO;
            (*frame)->sample_rate = DEFAULT_AUDIO_SAMPLE_RATE;
            (*frame)->channels = av_get_channel_layout_nb_channels((*frame)->channel_layout);
            (*frame)->nb_samples = 1024;
            if ((av_frame_get_buffer((*frame), 1)) != 0) {
                LOGE("av_frame_get_buffer failed\n");
                av_frame_free(frame);
                return -1;
            }
            av_samples_set_silence((*frame)->data, 0, (*frame)->nb_samples, (*frame)->channels,
                                   (enum AVSampleFormat) (*frame)->format);
            (*frame)->pts = (*frame)->nb_samples;
            (*frame)->pkt_pts = (*frame)->nb_samples;
            (*frame)->pkt_dts = (*frame)->nb_samples;
            (*frame)->pkt_duration = (*frame)->nb_samples;
        } else if (type == AVMEDIA_TYPE_VIDEO) {
            (*frame)->format = AV_PIX_FMT_YUV420P;
            (*frame)->width = ctx->width;
            (*frame)->height = ctx->height;
            int ret = av_frame_get_buffer(*frame, 1);
            if (ret < 0) {
                LOGE("Could not allocate the video frame data\n");
                return -1;
            }
            /* Fill with a black background */
            /* Y */
            for (int y = 0; y < (*frame)->height; y++) {
                for (int x = 0; x < (*frame)->width; x++) {
                    (*frame)->data[0][y * (*frame)->linesize[0] + x] = 0;
                }
            }
            /* Cb and Cr */
            for (int y = 0; y < (*frame)->height / 2; y++) {
                for (int x = 0; x < (*frame)->width / 2; x++) {
                    (*frame)->data[1][y * (*frame)->linesize[1] + x] = 0x80;
                    (*frame)->data[2][y * (*frame)->linesize[2] + x] = 0x80;
                }
            }
        }
    }
    return 0;
}
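/*
 * Illustrative usage (enc_ctx is a hypothetical open video encoder context): create a
 * black frame to pad gaps in the timeline, then release it when done.
 *
 *     AVFrame *filler = NULL;
 *     if (bf_create_default_frame(AVMEDIA_TYPE_VIDEO, enc_ctx, &filler) == 0) {
 *         // ... set filler->pts and send it to the encoder ...
 *         av_frame_free(&filler);
 *     }
 */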
// Copy an audio AVFrame's samples into a single interleaved byte buffer allocated with
// av_mallocz(); returns the buffer size in bytes, or -1 on failure.
static int bf_frame_2_uint8_ptr(enum AVMediaType type, AVFrame *in, uint8_t **out_data, int format) {
    int sampleSize = 0;
    int audioDataSize = 0;
    int offset = 0;
    int i = 0;
    int ch = 0;
    enum AVSampleFormat sampleFormat = (enum AVSampleFormat) format;
    sampleSize = av_get_bytes_per_sample(sampleFormat);
    if (sampleSize < 0) {
        LOGE("Failed to calculate data size.");
        return -1;
    }
    audioDataSize = in->nb_samples * in->channels * sampleSize;
    int size = av_samples_get_buffer_size(NULL, av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO),
                                          in->nb_samples,
                                          sampleFormat, 0);
    *out_data = (uint8_t *) av_mallocz(audioDataSize);
    if (!(*out_data)) return -1;
    if (av_sample_fmt_is_planar(sampleFormat)) {
        // Planar layout: interleave the per-channel planes sample by sample
        for (i = 0; i < in->nb_samples; i++) {
            for (ch = 0; ch < in->channels; ch++) {
                memcpy((*out_data) + offset, in->data[ch] + sampleSize * i, sampleSize);
                offset += sampleSize;
            }
        }
    } else {
        // Packed layout: the samples are already interleaved in data[0]
        memcpy(*out_data, in->data[0], audioDataSize);
    }
    return audioDataSize;
}
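/*
 * Illustrative usage (decoded is a hypothetical FLTP audio frame from a decoder):
 * flatten it into one interleaved buffer, e.g. before feeding a FIFO or file writer.
 *
 *     uint8_t *pcm = NULL;
 *     int bytes = bf_frame_2_uint8_ptr(AVMEDIA_TYPE_AUDIO, decoded, &pcm, decoded->format);
 *     if (bytes > 0) {
 *         // ... use pcm[0..bytes) ...
 *         av_free(pcm);
 *     }
 */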
#endif //JNI_WEB_TOOLS_H