
Complete video decoding through to frame fingerprint extraction

DevYK 3 years ago
parent
commit
ccc69bf091

+ 18 - 13
CMakeLists.txt

@@ -1,13 +1,16 @@
 set(CMAKE_VERSION 3.6.2)
 cmake_minimum_required(VERSION ${CMAKE_VERSION})
 set(CMAKE_CXX_STANDARD 11)
+
 project(bytesflow-opencv-media)
 
 SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/output/bin)
 SET(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/output/lib)
 
-add_subdirectory(src)
+add_subdirectory(src/opencv)
 add_subdirectory(jni)
+add_subdirectory(src/utils)
+add_subdirectory(src/ffmpeg)
 
 
 ############################################# Add JNI support ############################################
@@ -49,8 +52,8 @@ ELSEIF (CMAKE_SYSTEM_NAME MATCHES "Darwin")
 
 
     set(FFMPEG_PREFIX /usr/local/Cellar/ffmpeg@4/4.4.1)
-    set(FFMPEG_INCLUDE_DIRS ${FFMPEG_PREFIX}/include/ffmpeg/)
-    set(FFMPEG_LIBS ${FFMPEG_PREFIX}/lib64/)
+    set(FFMPEG_INCLUDE_DIRS ${FFMPEG_PREFIX}/include/)
+    set(FFMPEG_LIBS ${FFMPEG_PREFIX}/lib/)
     MESSAGE(FFMPEG_PREFIX ${FFMPEG_PREFIX})
     MESSAGE(FFMPEG_INCLUDE_DIRS ${FFMPEG_INCLUDE_DIRS})
     MESSAGE(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES})
@@ -63,6 +66,7 @@ ENDIF (CMAKE_SYSTEM_NAME MATCHES "Linux")
 message(OPENCV_INCLUDE>${OPENCV_INCLUDE})
 message(OPENCV_LIBS>${OPENCV_LIBS})
 message(JAVA_HOME>${JAVA_HOME})
+message(FFMPEG_INCLUDE_DIRS>${FFMPEG_INCLUDE_DIRS})
 
 include_directories(${OPENCV_INCLUDE})
 include_directories(${JNI_MD_PATH})
@@ -73,9 +77,11 @@ link_directories(${FFMPEG_LIBS})
 
 add_library(piaoquan_java_opencv SHARED
         ${Project_SRC}
+        ${UTILS_SRC}
         ${JAVA_JNI_SRC})
 
 add_library(video_similarity_detection SHARED
+        ${UTILS_SRC}
         ${Project_SRC})
 
 set(FFmpeg_LIBS
@@ -158,15 +164,8 @@ if (CMAKE_BUILD_TYPE AND (CMAKE_BUILD_TYPE STREQUAL "Debug"))
     target_link_libraries(ImageBlurDetection_debug -lpthread
             ${OpenCV_LIBS}
             )
-
-    add_executable(video_similarity_detection_debug main.cpp
-            ${Project_SRC}
-            count_down_latch.h
-            ${JAVA_JNI_SRC}
-            )
-    target_link_libraries(video_similarity_detection_debug -lpthread
-            ${OpenCV_LIBS}
-            ${FFmpeg_LIBS}
+    add_executable(test_md5 md5_main.cpp
+            ${UTILS_SRC}
             )
 elseif (CMAKE_BUILD_TYPE AND (CMAKE_BUILD_TYPE STREQUAL "Release"))
     set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wall -O3")
@@ -179,7 +178,13 @@ endif ()
 target_link_libraries(piaoquan_java_opencv
         ${OpenCV_LIBS}
         )
-target_link_libraries(video_similarity_detection
+
+add_executable(video_similarity_comparison src/video_similarity_comparison.cpp
+        ${Project_SRC}
+        ${FFMPEG_SRC}
+        ${UTILS_SRC}
+        )
+target_link_libraries(video_similarity_comparison -lpthread
         ${OpenCV_LIBS}
         ${FFmpeg_LIBS}
         )

+ 1 - 1
jni/ImageBlurDetection.h

@@ -2,7 +2,7 @@
 // Created by 阳坤 on 2022/2/23.
 //
 #include "jni.h"
-#include "../opencv/image_blur_detection.h"
+#include "../src/detection/image_blur_detection.h"
 #ifndef LOPENCVSAMPLE_IMAGEBLURDETECTION_H
 #define LOPENCVSAMPLE_IMAGEBLURDETECTION_H
 

+ 2 - 2
main.cpp

@@ -1,8 +1,7 @@
 #include <iostream>
-#include "opencv/image_blur_detection.h"
+#include "src/detection/image_blur_detection.h"
 #include "count_down_latch.h"
 
-
 static void *detection_thread(void *arg) {
     const char *filename = "/Users/devyk/Downloads/IMG_3067.PNG";
 //    const char *filename = "/Users/devyk/Downloads/black.png";
@@ -24,6 +23,7 @@ static void *detection_thread(void *arg) {
 }
 
 int main() {
+
     int count = 1;
     for (int i = 0; i < 1; ++i) {
         auto *countdown = new CountDownLatch(count);

+ 17 - 0
md5_main.cpp

@@ -0,0 +1,17 @@
+//
+// Created by 阳坤 on 2022/3/1.
+//
+extern "C"{
+#include "src/utils/md5.h"
+}
+static void print_file_md5(const char *filepath) {
+    char md5_str[64] = {0};
+    int len = get_file_md5(filepath, md5_str);
+    if (len > 0)
+        printf("md5:%s len=%d\n", md5_str, len);
+}
+
+int main(){
+    print_file_md5("/Users/devyk/Data/Project/sample/github_code/OpenCVSample/README.md");
+    return 0;
+}

+ 0 - 6
src/CMakeLists.txt

@@ -1,6 +0,0 @@
-set(Project_SRC ${Project_SRC}
-        src/image_blur_detection.cpp
-        src/image_blur_detection.h
-        PARENT_SCOPE
-        )
-

+ 7 - 0
src/ffmpeg/CMakeLists.txt

@@ -0,0 +1,7 @@
+set(FFMPEG_SRC
+        ${FFMPEG_SRC}
+        src/ffmpeg/av_decode.cpp
+        src/ffmpeg/av_decode.h
+        src/ffmpeg/av_tools.h
+        PARENT_SCOPE
+        )

+ 365 - 0
src/ffmpeg/av_decode.cpp

@@ -0,0 +1,365 @@
+//
+// Created by 阳坤 on 2022/3/1.
+//
+
+#include "av_decode.h"
+
+
+static int
+openFFmpegDecoderContext(AVFormatContext *fmtCtx, enum AVMediaType type, int *streamIdx, AVCodecContext **decCtx,
+                         enum AVCodecID codecId) {
+    if (!fmtCtx)return -1;
+    int ret = -1;
+    int streamIndex = *streamIdx;
+    AVCodecContext *ctx = NULL;
+    AVCodecParameters *parameters = fmtCtx->streams[streamIndex]->codecpar;
+    AVCodec *codec = avcodec_find_decoder(codecId);
+    if (codec == NULL) {
+        if (codecId == AV_CODEC_ID_PNG)
+            LOGE("openFFmpegDecoderContext avcodec_find_decoder png not found. \n");
+        LOGE("openFFmpegDecoderContext avcodec_find_decoder=%d not found. \n", codecId);
+        return -1;
+    }
+    ctx = avcodec_alloc_context3(codec);
+
+    if (!ctx || !parameters || !codec) {
+        LOGE("openFFmpegDecoderContext init error %d ctx==null=%d  parameters==null=%d  codec==null=%d \n", codecId,
+             ctx == NULL, parameters == NULL, codec == NULL);
+        return -1;
+    }
+
+
+    if ((ret = avcodec_parameters_to_context(ctx, parameters)) != 0) {
+        Log("Failed to copy %s codec parameters to decoder context.", av_get_media_type_string(type));
+        return -1;
+    }
+
+    switch (type) {
+        case AVMEDIA_TYPE_VIDEO:
+            ctx->framerate = av_guess_frame_rate(fmtCtx, fmtCtx->streams[streamIndex],
+                                                 NULL);
+            break;
+        case AVMEDIA_TYPE_AUDIO:
+            break;
+    }
+    ctx->thread_count = 3;
+    if ((ret = avcodec_open2(ctx, codec, NULL)) != 0) {
+        char buf[512];
+        av_strerror(ret, buf, 512);
+        LOGE("Failed to open %s codec. error=%s \n", av_get_media_type_string(type), buf);
+        return -1;
+    }
+    *decCtx = ctx;
+    avcodec_flush_buffers(ctx);
+    ret = 0;
+    return ret;
+}
+
+
+static int av_decode(struct DecoderContext *pContext, AVPacket *pPacket, enum AVMediaType type) {
+    AVCodecContext *ctx = NULL;
+    switch (type) {
+        case AVMEDIA_TYPE_VIDEO:
+            ctx = pContext->videoCodecContext;
+            break;
+        case AVMEDIA_TYPE_AUDIO:
+            ctx = pContext->audioCodecContext;
+            break;
+    }
+    if (!ctx)return -1;
+    int ret = 0;
+    if (pPacket)
+        ret = avcodec_send_packet(ctx, pPacket);
+    else ret = avcodec_send_packet(ctx, NULL);
+    if (ret != 0) {
+        return -1;
+    }
+    while (1) {
+        AVFrame *out_frame = av_frame_alloc();
+        ret = avcodec_receive_frame(ctx, out_frame);
+        if (ret != 0) {
+            av_frame_free(&out_frame);
+            break;
+        }
+        if (pContext->frame_count > 0 && pContext->decode_frame_count >= pContext->frame_count) {
+            av_frame_free(&out_frame);
+            break;
+        }
+
+        pContext->decode_frame_count += 1;
+
+        int64_t pts = -1;
+        if (pPacket)
+            pts = out_frame->pts * (1000 *
+                                    (av_q2d(pContext->avformatContext->streams[(pPacket)->stream_index]->time_base)));
+        else
+            pts = out_frame->pts * (1000 *
+                                    (av_q2d(pContext->avformatContext->streams[pContext->st_index[type]]->time_base)));
+
+        out_frame->pts = pts;
+        out_frame->pkt_dts = pts;
+        switch (type) {
+            case AVMEDIA_TYPE_VIDEO:
+                pContext->video_queue->PushBack(out_frame);
+                break;
+            case AVMEDIA_TYPE_AUDIO:
+                pContext->audio_queue->PushBack(out_frame);
+                break;
+        }
+    }
+    return ret;
+}
+
+/**
+ * Initialize the decoder.
+ * @param url                 media path
+ * @param force_Iframe        only decode key frames when non-zero
+ * @param decode_frame_count  maximum number of frames to decode (<= 0 means no limit)
+ * @param disableMediaType    media type to skip (VIDEO/AUDIO)
+ * @return a DecoderContext handle on success, or a negative error code
+ */
+long initDecoder(const char *url, int force_Iframe, int decode_frame_count, DisableMediaType disableMediaType) {
+    struct DecoderContext *dctx = (struct DecoderContext *) malloc(sizeof(struct DecoderContext));
+    if (!dctx) {
+        LOGE("DecoderContext create fail.");
+        return -1;
+    }
+    memset(dctx, 0, sizeof(struct DecoderContext));
+    dctx->url = strdup(url);
+    dctx->force_Iframe = force_Iframe;
+    dctx->frame_count = decode_frame_count;
+    dctx->ffthread_count = 3;
+    dctx->disableType = disableMediaType;
+    char err_info[512] = {0};
+    int ret = avformat_open_input(&(dctx->avformatContext), dctx->url, NULL, NULL);
+
+    if (ret != 0) {
+        av_strerror(ret, err_info, 512);
+        Log("avformat_open_input failed %d %s. path=%s \n", ret, err_info, dctx->url);
+        close_decoder((long) dctx);
+        return ret;
+    }
+
+    ret = avformat_find_stream_info(dctx->avformatContext, 0);
+    if (ret < 0) {
+        av_strerror(ret, err_info, 512);
+        Log("Failed to retrieve input stream information msg=%s\n", err_info);
+        close_decoder((long) dctx);
+        return ret;
+    }
+
+    dctx->st_index[AVMEDIA_TYPE_VIDEO] =
+            av_find_best_stream(dctx->avformatContext, AVMEDIA_TYPE_VIDEO,
+                                dctx->st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
+    dctx->st_index[AVMEDIA_TYPE_AUDIO] = av_find_best_stream(dctx->avformatContext,
+                                                             AVMEDIA_TYPE_AUDIO,
+                                                             -1,
+                                                             -1,
+                                                             NULL, 0);
+    if (dctx->st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
+        AVStream *as = dctx->avformatContext->streams[dctx->st_index[AVMEDIA_TYPE_VIDEO]];
+        dctx->v_codec_id = as->codecpar->codec_id;
+        dctx->vformat = as->codecpar->format;
+        dctx->width = as->codecpar->width;
+        dctx->height = as->codecpar->height;
+        dctx->v_bit_rate = as->codecpar->bit_rate;
+        if (dctx->v_bit_rate <= 0 && dctx->avformatContext->bit_rate > 0) {
+            dctx->v_bit_rate = dctx->avformatContext->bit_rate;
+        } else if (dctx->v_bit_rate <= 0) {
+            dctx->v_bit_rate = dctx->width * dctx->height * 3;
+        }
+        dctx->fps = av_q2d(as->avg_frame_rate); // video frame rate
+        dctx->rotate = getVideoRotate(as->metadata);
+
+        LOGE("找到视频流 width=%d height=%d bit_rate=%d fps=%d rotate=%d\n", dctx->width,
+             dctx->height, dctx->v_bit_rate, dctx->fps,
+             dctx->rotate);
+
+    }
+    if (dctx->st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
+        AVStream *as = dctx->avformatContext->streams[dctx->st_index[AVMEDIA_TYPE_AUDIO]];
+        dctx->a_codec_id = as->codecpar->codec_id;
+        dctx->sampleRate = as->codecpar->sample_rate;
+        dctx->channels = as->codecpar->channels;
+        dctx->a_bit_rate = as->codecpar->bit_rate;
+        dctx->aformat = as->codecpar->format;
+        dctx->channels_layout = as->codecpar->channel_layout;
+        LOGE("找到音频流 sampleRate=%d channels=%d bit_rate=%d aformat=%d channels_layout=%d\n",
+             dctx->sampleRate, dctx->channels, dctx->a_bit_rate,
+             dctx->aformat, dctx->channels_layout);
+
+
+    }
+    dctx->totalMs = dctx->avformatContext->duration / (AV_TIME_BASE / 1000);
+    LOGE("媒体总时长 totalMs=%lld \n", dctx->totalMs);
+
+    if (dctx->avformatContext->start_time != AV_NOPTS_VALUE)
+        dctx->offset = dctx->avformatContext->start_time;
+
+
+    if (dctx->st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
+        ret = openFFmpegDecoderContext(dctx->avformatContext, AVMEDIA_TYPE_AUDIO,
+                                       &dctx->st_index[AVMEDIA_TYPE_AUDIO],
+                                       &dctx->audioCodecContext, dctx->a_codec_id);
+
+        if (ret != 0) {
+            LOGE("audio openFFmpegDecoderContext error!");
+            close_decoder((long) dctx);
+            return ret;
+        }
+
+
+    }
+
+    if (dctx->st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
+        ret = openFFmpegDecoderContext(dctx->avformatContext, AVMEDIA_TYPE_VIDEO,
+                                       &dctx->st_index[AVMEDIA_TYPE_VIDEO],
+                                       &dctx->videoCodecContext, dctx->v_codec_id);
+        if (ret != 0) {
+            LOGE("video openFFmpegDecoderContext error!");
+            close_decoder((long) dctx);
+            return ret;
+        }
+        dctx->audio_queue = new BlockQueue<AVFrame *>(dctx->fps);
+        dctx->video_queue = new BlockQueue<AVFrame *>(dctx->fps);
+    }
+
+    int64_t timestamp = 0;
+    if (dctx->offset != AV_NOPTS_VALUE)
+        timestamp += dctx->offset;
+    if (timestamp > 0) {
+        avformat_flush(dctx->avformatContext);
+        int ret = avformat_seek_file(dctx->avformatContext, -1, INT64_MIN, timestamp,
+                                     INT64_MAX,
+                                     AVSEEK_FLAG_BACKWARD);
+        if (ret >= 0) {
+            if (dctx->videoCodecContext)
+                avcodec_flush_buffers(dctx->videoCodecContext);
+            if (dctx->audioCodecContext)
+                avcodec_flush_buffers(dctx->audioCodecContext);
+        }
+    }
+
+    return ret == 0 ? (long) dctx : ret;
+}
+
+void close_decoder(long decodec_id) {
+    if (decodec_id > 0) {
+        struct DecoderContext *ctx = (struct DecoderContext *) decodec_id;
+        if (ctx->url) {
+            free((void *) ctx->url);
+            ctx->url = NULL;
+        }
+
+        if (ctx->avformatContext) {
+            avformat_network_deinit();
+            avformat_flush(ctx->avformatContext);
+            avformat_close_input(&(ctx->avformatContext));
+            avformat_free_context(ctx->avformatContext);
+            ctx->avformatContext = NULL;
+        }
+
+
+        if (ctx->audioCodecContext) {
+            avcodec_flush_buffers(ctx->audioCodecContext);
+            avcodec_close(ctx->audioCodecContext);
+            avcodec_free_context(&(ctx->audioCodecContext));
+            ctx->audioCodecContext = NULL;
+        }
+
+        if (ctx->videoCodecContext) {
+            avcodec_flush_buffers(ctx->videoCodecContext);
+            avcodec_close(ctx->videoCodecContext);
+            avcodec_free_context(&(ctx->videoCodecContext));
+            ctx->videoCodecContext = NULL;
+        }
+        if (ctx->audio_queue) {
+            AVFrame *frame = NULL;
+            while (ctx->audio_queue->Size() > 0) {
+                ctx->audio_queue->PopBack(frame);
+                if (frame)
+                    av_frame_free(&frame);
+                frame = NULL;
+            }
+            ctx->audio_queue->Close();
+        }
+
+        if (ctx->video_queue) {
+            AVFrame *frame = NULL;
+            while (ctx->video_queue->Size() > 0) {
+                ctx->video_queue->PopBack(frame);
+                if (frame)
+                    av_frame_free(&frame);
+                frame = NULL;
+            }
+            ctx->video_queue->Close();
+        }
+        free(ctx);
+        ctx = NULL;
+    }
+}
+
+int av_read_decode_frame(long decodec_id) {
+    auto *ctx = (DecoderContext *) decodec_id;
+    return pthread_create(&ctx->thread_id, 0, av_read_decode_thread, ctx);
+}
+
+void *av_read_decode_thread(void *pVoid) {
+    int ret = 0;
+    if (pVoid) {
+        struct DecoderContext *ctx = (struct DecoderContext *) pVoid;
+        if (ctx && ctx->avformatContext) {
+            while (1) {
+                if (ctx->decode_frame_count > ctx->frame_count && ctx->frame_count > 0) {
+                    // signal the consumers before stopping, mirroring the EOF path below
+                    ctx->audio_queue->PushBack(NULL);
+                    ctx->video_queue->PushBack(NULL);
+                    break;
+                }
+                AVPacket *pkg = av_packet_alloc();
+                ret = av_read_frame(ctx->avformatContext, pkg);
+                if (ret == AVERROR_EOF) {
+                    LOGE("startDecoder flush \n");
+                    av_decode(ctx, NULL, AVMEDIA_TYPE_VIDEO);
+                    av_decode(ctx, NULL, AVMEDIA_TYPE_AUDIO);
+                    av_packet_free(&pkg);
+                    ctx->audio_queue->PushBack(NULL);
+                    ctx->video_queue->PushBack(NULL);
+                    LOGE("av_read_frame eof!\n");
+                    return 0;
+                }
+
+                enum AVMediaType type = ctx->avformatContext->streams[pkg->stream_index]->codecpar->codec_type;
+                switch (type) {
+                    case AVMEDIA_TYPE_AUDIO:
+                        if (ctx->disableType == AUDIO) {
+                            av_packet_free(&pkg);
+                            continue;
+                        }
+                        break;
+                    case AVMEDIA_TYPE_VIDEO:
+                        if (ctx->disableType == VIDEO) {
+                            av_packet_free(&pkg);
+                            continue;
+                        }
+                        break;
+                    default:
+                        break;
+                }
+
+                // adjust pts/dts for the stream start offset
+                if (pkg->dts != AV_NOPTS_VALUE && ctx->avformatContext->start_time > 0)
+                    pkg->dts += av_rescale_q(-ctx->avformatContext->start_time, AV_TIME_BASE_Q,
+                                             ctx->avformatContext->streams[pkg->stream_index]->time_base);
+                if (pkg->pts != AV_NOPTS_VALUE && ctx->avformatContext->start_time > 0)
+                    pkg->pts += av_rescale_q(-ctx->avformatContext->start_time, AV_TIME_BASE_Q,
+                                             ctx->avformatContext->streams[pkg->stream_index]->time_base);
+
+                if (ctx->force_Iframe && !(pkg->flags & AV_PKT_FLAG_KEY) &&
+                    ctx->avformatContext->streams[pkg->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
+                    av_packet_free(&pkg);
+                    continue;
+                } else {
+                    ret = av_decode(ctx, pkg, type);
+                }
+                av_packet_free(&pkg);
+            }
+        }
+        return 0;
+    }
+    pthread_exit(NULL);
+    return 0;
+}

+ 98 - 0
src/ffmpeg/av_decode.h

@@ -0,0 +1,98 @@
+//
+// Created by 阳坤 on 2022/3/1.
+//
+
+#ifndef BYTESFLOW_OPENCV_MEDIA_AV_DECODE_H
+#define BYTESFLOW_OPENCV_MEDIA_AV_DECODE_H
+
+extern "C"{
+#include "av_tools.h"
+};
+#include "../utils/pq_blockingqueue.hpp"
+
+
+typedef enum DisableMediaType {
+    DEFAULT = -1,
+    VIDEO = 0,
+    AUDIO,
+}DisableMediaType;
+
+
+typedef struct DecoderContext {
+    /**
+     * demuxing (format) context
+     */
+    AVFormatContext *avformatContext;
+    /**
+     * video decoder context
+     */
+    AVCodecContext *videoCodecContext;
+    /**
+     * audio decoder context
+     */
+    AVCodecContext *audioCodecContext;
+
+    /**
+     * stream start offset
+     */
+    int64_t offset;
+    /**
+     * input media path
+     */
+    const char *url;
+    /**
+     * stream indexes
+     */
+    int st_index[AVMEDIA_TYPE_NB];
+
+    /**
+     * video parameters
+     */
+    int width, height, v_bit_rate, vformat, rotate, fps;
+    /**
+     * audio parameters
+     */
+    int sampleRate, channels, aformat, channels_layout, a_bit_rate;
+    int o_aformat;
+    /**
+     * FFmpeg decoder thread count
+     */
+    int ffthread_count;
+
+    /**
+     * codec IDs
+     */
+    enum AVCodecID v_codec_id, a_codec_id;
+
+    /**
+     * whether initialization succeeded
+     */
+    int isInitOK;
+
+    /**
+     * media type to skip while decoding
+     */
+    enum DisableMediaType disableType;
+
+    /* force key-frame-only output; how many frames have been / should be decoded */
+    int force_Iframe, decode_frame_count, frame_count;
+    long totalMs;
+
+    // decoded audio/video frame queues
+    BlockQueue<AVFrame *> *video_queue;
+    BlockQueue<AVFrame *> *audio_queue;
+
+    pthread_t thread_id;
+} DecoderContext;
+
+/* Minimal decoder implementation */
+long initDecoder(const char *url, int force_Iframe, int decode_frame_count, DisableMediaType disableMediaType);
+
+
+int av_read_decode_frame(long decodec_id);
+void *av_read_decode_thread(void *pVoid);
+
+void close_decoder(long decodec_id);
+
+#endif //BYTESFLOW_OPENCV_MEDIA_AV_DECODE_H
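
A minimal sketch (not part of this commit) of how these entry points are meant to be wired together, mirroring src/utils/video_similarity.cpp: initDecoder() opens the file, av_read_decode_frame() spawns the read/decode thread, decoded frames are drained from the blocking queue until the NULL sentinel pushed at EOF, and close_decoder() releases everything. The helper name consume_video_frames is illustrative only.

    #include "src/ffmpeg/av_decode.h"

    int consume_video_frames(const char *url) {
        // decode every frame (-1 = no limit), key frames only, audio disabled
        long id = initDecoder(url, /*force_Iframe=*/1, /*decode_frame_count=*/-1, AUDIO);
        if (id <= 0) return -1;                  // negative values are FFmpeg error codes
        auto *dctx = (DecoderContext *) id;
        av_read_decode_frame(id);                // starts av_read_decode_thread
        while (true) {
            AVFrame *frame = NULL;
            dctx->video_queue->PopFront(frame);  // blocks until a frame or the EOF sentinel
            if (!frame) break;                   // NULL is pushed when the file is exhausted
            // ... inspect frame->pts / frame->data here ...
            av_frame_free(&frame);
        }
        close_decoder(id);
        return 0;
    }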

+ 365 - 0
src/ffmpeg/av_tools.h

@@ -0,0 +1,365 @@
+//
+// Created by 阳坤 on 2022/1/12.
+//
+
+#ifndef JNI_WEB_TOOLS_H
+#define JNI_WEB_TOOLS_H
+
+#define LOGE(format, ...)  Log(format,##__VA_ARGS__);
+#define MIN(X, Y)  ((X) < (Y) ? (X) : (Y))
+#define MAX(X, Y)  ((X) > (Y) ? (X) : (Y))
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavutil/imgutils.h>
+#include <libavutil/avutil.h>
+#include <libswscale/swscale.h>
+#include <libavutil/fifo.h>
+#include <libavformat/avio.h>
+#include <libavfilter/avfilter.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+#include <libswresample/swresample.h>
+#include <libavutil/opt.h>
+#include "stdio.h"
+
+#define kCustomIoBufferSize  32 * 1024
+#define kInitialPcmBufferSize  128 * 1024
+#define kDefaultFifoSize  1 * 1024 * 1024
+#define kMaxFifoSize  16 * 1024 * 1024
+
+#define DEFAULT_AUDIO_SAMPLE_RATE 44100
+#define DEFAULT_AUDIO_CHANNELS 2
+#define DEFAULT_VIDEO_FPS 25
+#define DEFAULT_FORCE_PRECISION 0
+
+static FILE *pFile = NULL;
+
+static inline void print_ffmpeg_suport_codec() {
+    AVCodec *codec = av_codec_next(NULL);
+    char *info = (char *) malloc(40000);
+    memset(info, 0, 40000);
+    while (codec != NULL) {
+        if (codec->decode != NULL) {
+            strcat(info, "[Decode]");
+        } else {
+            strcat(info, "[Encode]");
+        }
+        switch (codec->type) {
+            case AVMEDIA_TYPE_VIDEO:
+                strcat(info, "[Video]");
+                break;
+            case AVMEDIA_TYPE_AUDIO:
+                strcat(info, "[Audeo]");
+                break;
+            default:
+                strcat(info, "[Other]");
+                break;
+        }
+        sprintf(info + strlen(info), " %10s\n", codec->name);
+        codec = codec->next;
+    }
+    puts(info);
+    free(info);
+}
+
+static inline void split(char *str_, char delims[], long ret[], int *count) {
+    int len = strlen(str_);
+    char str[strlen(str_) + 1];
+    for (int i = 0; i < len; i++) {
+        str[i] = str_[i];
+    }
+    str[len] = '\0';
+    char *result = NULL;
+    result = strtok(str, delims);
+    int index = 0;
+    while (result != NULL) {
+        ret[index++] = atol(result);
+        result = strtok(NULL, delims);
+    }
+    *count = index;
+}
+
+static inline void Log(const char *format, ...) {
+    char szBuffer[1024] = {0};
+    char szTime[1024] = {0};
+    char *p = NULL;
+    int prefixLength = 0;
+    const char *tag = "Core";
+
+    time_t t;  // seconds since the epoch
+    struct tm *local; // local time
+
+    t = time(NULL); // current time in seconds
+
+    local = localtime(&t); // convert to local time; note localtime() is not thread-safe, localtime_r() is the thread-safe variant
+
+    if (1) {
+        strftime(szTime, 64, "%Y-%m-%d %H:%M:%S", local); // adjust the format as needed
+    }
+
+    prefixLength = sprintf(szBuffer, "[%s][%s][DT] ", szTime, tag);
+    p = szBuffer + prefixLength;
+
+    if (1) {
+        va_list ap;
+        va_start(ap, format);
+        vsnprintf(p, 1024 - prefixLength, format, ap);
+        va_end(ap);
+    }
+    printf("%s\n", szBuffer);
+}
+
+static inline long getCurrentTimeMills() {
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+    return tv.tv_sec * 1000 + tv.tv_usec / 1000;
+}
+
+static int getVideoRotate(AVDictionary *metadata);
+
+static int
+drop_or_add_frame_countV2(int cur_fps, int fix_fps, double sync_ipts, double sync_opts, int64_t next_pkt_duration,
+                          AVRational st_time_base);
+
+
+static int
+drop_or_add_frame_countV2(int cur_fps, int fix_fps, double sync_ipts, double sync_opts, int64_t next_pkt_duration,
+                          AVRational st_time_base) {
+    int nb_frames;
+    double delta, delta0;
+    double duration = 1.0 / ((cur_fps / 1.0) * (1.0 / fix_fps));
+    duration = FFMIN(duration, 1.0 / ((fix_fps / 1.0) * (1.0 / fix_fps)));
+
+    duration = lrintf(next_pkt_duration * av_q2d(st_time_base) / (1.0 / fix_fps));
+    delta0 = sync_ipts -
+             sync_opts;
+    delta = delta0 + duration;
+//    LOGE("sync_ipts=%f sync_opts=%f delta=%f drop_frame=%d  duration=%f \n", sync_ipts, sync_opts, delta,
+//         delta < -1.1, duration);
+    nb_frames = 1;
+
+    // timing compensation, following the drop/duplicate frame logic in ffmpeg.c
+    if (delta0 < 0 &&
+        delta > 0
+            ) {
+//        if (delta0 < -0.6) {
+//            LOGE("Past duration %f too large\n", -delta0);
+//        } else
+//            LOGE("Clipping frame in rate conversion by %f\n", -delta0);
+        sync_ipts = sync_opts;
+        duration += delta0;
+        delta0 = 0;
+    }
+    if (delta < -1.1)
+        nb_frames = 0;
+    else if (delta > 1.1) {
+        nb_frames = lrintf(delta);
+    }
+    return nb_frames;
+}
+
+
+static int getVideoRotate(AVDictionary *metadata) {
+    AVDictionaryEntry *tag = NULL;
+    int m_Rotate = 0;
+    tag = av_dict_get(metadata, "rotate", tag, m_Rotate);
+    if (tag == NULL) {
+        m_Rotate = 0;
+    } else {
+        int angle = atoi(tag->value);
+        angle %= 360;
+        if (angle == 90) {
+            m_Rotate = 90;
+        } else if (angle == 180) {
+            m_Rotate = 180;
+        } else if (angle == 270) {
+            m_Rotate = 270;
+        } else {
+            m_Rotate = 0;
+        }
+    }
+    return m_Rotate;
+}
+
+static void ffmpegLogCallback(void *ptr, int level, const char *fmt, va_list vl) {
+    static int printPrefix = 1;
+    static int count = 0;
+    static char prev[1024] = {0};
+    char line[1024] = {0};
+    static int is_atty;
+    AVClass *avc = ptr ? *(AVClass **) ptr : NULL;
+    if (level > AV_LOG_DEBUG) {
+        return;
+    }
+
+    line[0] = 0;
+
+    if (printPrefix && avc) {
+        if (avc->parent_log_context_offset) {
+            AVClass **parent = *(AVClass ***) (((uint8_t *) ptr) + avc->parent_log_context_offset);
+            if (parent && *parent) {
+                snprintf(line, sizeof(line), "[%s @ %p] ", (*parent)->item_name(parent), parent);
+            }
+        }
+        snprintf(line + strlen(line), sizeof(line) - strlen(line), "[%s @ %p] ", avc->item_name(ptr), ptr);
+    }
+
+    vsnprintf(line + strlen(line), sizeof(line) - strlen(line), fmt, vl);
+    line[strlen(line) + 1] = 0;
+    LOGE("%s", line);
+}
+
+static int GetAACSampleRateIndex(int sampling_frequency) {
+    switch (sampling_frequency) {
+        case 96000:
+            return 0;
+        case 88200:
+            return 1;
+        case 64000:
+            return 2;
+        case 48000:
+            return 3;
+        case 44100:
+            return 4;
+        case 32000:
+            return 5;
+        case 24000:
+            return 6;
+        case 22050:
+            return 7;
+        case 16000:
+            return 8;
+        case 12000:
+            return 9;
+        case 11025:
+            return 10;
+        case 8000:
+            return 11;
+        case 7350:
+            return 12;
+        default:
+            return 0;
+    }
+}
+
+
+static void close_file() {
+    if (pFile) {
+        fclose(pFile);
+        pFile = NULL;
+    }
+}
+
+static void savaFrame(uint8_t *pFrame, int width, int height, int frame, int size, int isRgb) {
+
+    char szFilename[1024];
+    // build the output file name
+    sprintf(szFilename, "/Users/devyk/Data/Project/piaoquan/PQMedia/temp/image/frame%d.yuv", frame);
+
+    // create or open the file once
+    if (!pFile)
+        pFile = fopen(szFilename, "wb");
+
+    if (pFile == NULL) {
+        return;
+    }
+    if (isRgb)
+        // write the PPM header (255 is the max color value)
+        fprintf(pFile, "P6\n%d %d\n255\n", width, height);
+    // write the raw pixel data
+    fwrite(pFrame, 1, size, pFile);
+
+}
+
+
+// create a default frame (silent audio / black video)
+static int bf_create_default_frame(enum AVMediaType type, AVCodecContext *ctx, AVFrame **frame) {
+    if (frame) {
+        *frame = av_frame_alloc();
+        if (!(*frame)) {
+            LOGE("Could not allocate output frame\n");
+            return -1;
+        }
+        if (type == AVMEDIA_TYPE_AUDIO){
+            (*frame)->format = AV_SAMPLE_FMT_FLTP;
+            (*frame)->channel_layout = AV_CH_LAYOUT_STEREO;
+            (*frame)->sample_rate = DEFAULT_AUDIO_SAMPLE_RATE;
+            (*frame)->channels = av_get_channel_layout_nb_channels((*frame)->channel_layout);
+            (*frame)->nb_samples = 1024;
+            if ((av_frame_get_buffer((*frame), 1)) != 0) {
+                LOGE("av_frame_get_buffer fatal��\n");
+                av_frame_free(&(*frame));
+                (*frame) = NULL;
+                return -1;
+            }
+            av_samples_set_silence((*frame)->data, 0, (*frame)->nb_samples, (*frame)->channels,
+                                   (enum AVSampleFormat) (*frame)->format);
+            (*frame)->pts = (*frame)->nb_samples;
+            (*frame)->pkt_pts = (*frame)->nb_samples;
+            (*frame)->pkt_dts = (*frame)->nb_samples;
+            (*frame)->pkt_duration = (*frame)->nb_samples;
+        } else if (type == AVMEDIA_TYPE_VIDEO){
+            (*frame)->format = AV_PIX_FMT_YUV420P;
+            (*frame)->width = ctx->width;
+            (*frame)->height = ctx->height;
+            int ret = av_frame_get_buffer(*frame, 1);
+            if (ret < 0) {
+                LOGE("Could not allocate the video frame data\n");
+                return -1;
+            }
+            /* fill with a black background */
+            /* Y */
+            for (int y = 0; y < (*frame)->height; y++) {
+                for (int x = 0; x < (*frame)->width; x++) {
+                    (*frame)->data[0][y * (*frame)->linesize[0] + x] = 0;
+                }
+            }
+            /* Cb and Cr */
+            for (int y = 0; y < (*frame)->height / 2; y++) {
+                for (int x = 0; x < (*frame)->width / 2; x++) {
+                    (*frame)->data[1][y * (*frame)->linesize[1] + x] = 0x80;
+                    (*frame)->data[2][y * (*frame)->linesize[2] + x] = 0x80;
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+static int bf_frame_2_uint8_ptr(enum AVMediaType type,AVFrame *in,uint8_t**out_data,int format){
+    int sampleSize = 0;
+    int audioDataSize = 0;
+    int offset = 0;
+    int i = 0;
+    int ch = 0;
+    enum AVSampleFormat sampleFormat = (enum AVSampleFormat)format;
+    sampleSize = av_get_bytes_per_sample(sampleFormat);
+    if (sampleSize < 0) {
+        LOGE("Failed to calculate data size.");
+        return -1;
+    }
+    audioDataSize = in->nb_samples * in->channels * sampleSize;
+    int size = av_samples_get_buffer_size(NULL, av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO),
+                                          in->nb_samples,
+                                          sampleFormat, 0);
+    *out_data = (uint8_t *) av_mallocz(audioDataSize);
+    if (!(*out_data))return -1;
+
+    if (av_sample_fmt_is_planar(sampleFormat)) { // planar layout: interleave the per-channel samples
+        for (i = 0; i < in->nb_samples; i++) {
+            for (ch = 0; ch < in->channels; ch++) {
+                memcpy((*out_data) + offset, in->data[ch] + sampleSize * i, sampleSize);
+                offset += sampleSize;
+            }
+        }
+    } else {
+        memcpy(*out_data, in->data[0], audioDataSize);
+    }
+    return audioDataSize;
+}
+
+
+#endif //JNI_WEB_TOOLS_H

+ 9 - 0
src/opencv/CMakeLists.txt

@@ -0,0 +1,9 @@
+set(Project_SRC
+        ${Project_SRC}
+        src/opencv/image_blur_detection.cpp
+        src/opencv/image_blur_detection.h
+        src/opencv/image_fingerprint.cpp
+        src/opencv/image_fingerprint.h
+        PARENT_SCOPE
+        )
+

+ 0 - 0
src/image_blur_detection.cpp → src/opencv/image_blur_detection.cpp


+ 0 - 0
src/image_blur_detection.h → src/opencv/image_blur_detection.h


+ 74 - 0
src/opencv/image_fingerprint.cpp

@@ -0,0 +1,74 @@
+//
+// Created by 阳坤 on 2022/3/2.
+//
+
+
+#include "image_fingerprint.h"
+
+// Convert an AVFrame to cv::Mat
+cv::Mat avframeToCvmat(const AVFrame *frame) {
+    int width = frame->width;
+    int height = frame->height;
+    cv::Mat image(height, width, CV_8UC3);
+    int cvLinesizes[1];
+    cvLinesizes[0] = image.step1();
+    SwsContext *conversion = sws_getContext(width, height, (AVPixelFormat) frame->format, width, height,
+                                            AVPixelFormat::AV_PIX_FMT_BGR24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
+    sws_scale(conversion, frame->data, frame->linesize, 0, height, &image.data, cvLinesizes);
+    sws_freeContext(conversion);
+    return image;
+}
+
+// Convert a cv::Mat to an AVFrame
+AVFrame *cvmatToAvframe(cv::Mat *image, AVFrame *frame) {
+    int width = image->cols;
+    int height = image->rows;
+    int cvLinesizes[1];
+    cvLinesizes[0] = image->step1();
+    if (frame == NULL) {
+        frame = av_frame_alloc();
+        frame->format = AVPixelFormat::AV_PIX_FMT_YUV420P;
+        frame->width = width;
+        frame->height = height;
+        av_image_alloc(frame->data, frame->linesize, width, height, AVPixelFormat::AV_PIX_FMT_YUV420P, 1);
+    }
+    SwsContext *conversion = sws_getContext(width, height, AVPixelFormat::AV_PIX_FMT_BGR24, width, height,
+                                            (AVPixelFormat) frame->format, SWS_FAST_BILINEAR, NULL, NULL, NULL);
+    sws_scale(conversion, &image->data, cvLinesizes, 0, height, frame->data, frame->linesize);
+    sws_freeContext(conversion);
+    return frame;
+}
+
+const char *fingerprintFromFFAVFrame(AVFrame *frame, int *len) {
+    if (!frame)return NULL;
+    auto img = avframeToCvmat(frame);
+    if (img.empty()) {
+        printf("image is empty.");
+        return NULL;
+    }
+    std::string str = std::to_string(frame->pts);
+    imshow(str, img); // debug preview of the decoded frame
+    int scale_width = 8, scale_height = 8;
+    Mat gray, res;
+    // scale down to an 8x8 grayscale image
+    resize(img, res, Size(scale_width, scale_height));
+    cvtColor(res, gray, COLOR_BGR2GRAY);
+    // compute the mean gray level
+    double mn = mean(gray)[0];
+    char *buf = (char *) malloc(sizeof(char) * scale_width * scale_height);
+    // build the fingerprint: 1 where the pixel is above the mean, else 0
+    for (int i = 0; i < 8; i++) {
+        for (int j = 0; j < 8; j++) {
+            buf[i * 8 + j] = (gray.at<unsigned char>(i, j) > mn) ? 1 : 0;
+        }
+    }
+    *len = scale_width * scale_height;
+    return buf;
+}
+
+float fingerprint_compare(char *arr, char *arr2, int len) {
+    int size = len;
+    int sim_sum = 0;
+    for (int i = 0; i < size; ++i) {
+        if (arr[i] == arr2[i])
+            sim_sum++;
+    }
+    return sim_sum * 1.0 / size;
+}
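
fingerprintFromFFAVFrame() above is an average-hash: scale the frame to 8x8 grayscale, threshold each pixel against the mean, and return a 64-byte 0/1 vector; fingerprint_compare() then returns the fraction of matching positions. A small sketch (not in the diff) of comparing two decoded frames with it; the helper name frames_look_similar and the 0.9 threshold are assumptions, not something this commit defines.

    #include <cstdlib>
    #include "src/opencv/image_fingerprint.h"

    // Two frames are treated as "similar" if at least 90% of the 64 hash bits agree.
    bool frames_look_similar(AVFrame *a, AVFrame *b) {
        int len_a = 0, len_b = 0;
        const char *hash_a = fingerprintFromFFAVFrame(a, &len_a);
        const char *hash_b = fingerprintFromFFAVFrame(b, &len_b);
        bool similar = false;
        if (hash_a && hash_b && len_a == len_b)
            similar = fingerprint_compare((char *) hash_a, (char *) hash_b, len_a) > 0.9f;
        free((void *) hash_a);   // fingerprints are malloc'ed by the callee
        free((void *) hash_b);
        return similar;
    }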

+ 33 - 0
src/opencv/image_fingerprint.h

@@ -0,0 +1,33 @@
+//
+// Created by 阳坤 on 2022/3/2.
+//
+
+
+#ifndef BYTESFLOW_OPENCV_MEDIA_IMAGE_FINGERPRINT_H
+#define BYTESFLOW_OPENCV_MEDIA_IMAGE_FINGERPRINT_H
+
+
+extern "C" {
+#include "libswscale/swscale.h"
+#include <libavutil/frame.h>
+#include <libavutil/imgutils.h>
+}
+
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+
+using namespace cv;
+
+
+cv::Mat avframeToCvmat(const AVFrame * frame);
+AVFrame* cvmatToAvframe(cv::Mat* image, AVFrame * frame);
+
+
+const char *fingerprintFromFFAVFrame(AVFrame *frame, int *pInt);
+
+float fingerprint_compare(char * arr,char * arr2,int len);
+
+char *fingerprintFromImagePath(const char *path);
+
+#endif //BYTESFLOW_OPENCV_MEDIA_IMAGE_FINGERPRINT_H

+ 8 - 0
src/utils/CMakeLists.txt

@@ -0,0 +1,8 @@
+set(UTILS_SRC ${UTILS_SRC}
+        src/utils/md5.h
+        src/utils/md5.c
+        src/utils/video_similarity.cpp
+        src/utils/video_similarity.h
+        src/utils/pq_blockingqueue.hpp
+        PARENT_SCOPE
+        )

+ 200 - 0
src/utils/md5.c

@@ -0,0 +1,200 @@
+//
+// Created by 阳坤 on 2022/3/1.
+//
+
+
+#include "md5.h"
+
+unsigned char PADDING[] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+void MD5Init(MD5_CTX *context) {
+    context->count[0] = 0;
+    context->count[1] = 0;
+    context->state[0] = 0x67452301;
+    context->state[1] = 0xEFCDAB89;
+    context->state[2] = 0x98BADCFE;
+    context->state[3] = 0x10325476;
+}
+
+void MD5Update(MD5_CTX *context, unsigned char *input, unsigned int inputlen) {
+    unsigned int i = 0, index = 0, partlen = 0;
+    index = (context->count[0] >> 3) & 0x3F;
+    partlen = 64 - index;
+    context->count[0] += inputlen << 3;
+    if (context->count[0] < (inputlen << 3))
+        context->count[1]++;
+    context->count[1] += inputlen >> 29;
+
+    if (inputlen >= partlen) {
+        memcpy(&context->buffer[index], input, partlen);
+        MD5Transform(context->state, context->buffer);
+        for (i = partlen; i + 64 <= inputlen; i += 64)
+            MD5Transform(context->state, &input[i]);
+        index = 0;
+    } else {
+        i = 0;
+    }
+    memcpy(&context->buffer[index], &input[i], inputlen - i);
+}
+
+void MD5Final(MD5_CTX *context, unsigned char digest[16]) {
+    unsigned int index = 0, padlen = 0;
+    unsigned char bits[8];
+    index = (context->count[0] >> 3) & 0x3F;
+    padlen = (index < 56) ? (56 - index) : (120 - index);
+    MD5Encode(bits, context->count, 8);
+    MD5Update(context, PADDING, padlen);
+    MD5Update(context, bits, 8);
+    MD5Encode(digest, context->state, 16);
+}
+
+void MD5Encode(unsigned char *output, unsigned int *input, unsigned int len) {
+    unsigned int i = 0, j = 0;
+    while (j < len) {
+        output[j] = input[i] & 0xFF;
+        output[j + 1] = (input[i] >> 8) & 0xFF;
+        output[j + 2] = (input[i] >> 16) & 0xFF;
+        output[j + 3] = (input[i] >> 24) & 0xFF;
+        i++;
+        j += 4;
+    }
+}
+
+void MD5Decode(unsigned int *output, unsigned char *input, unsigned int len) {
+    unsigned int i = 0, j = 0;
+    while (j < len) {
+        output[i] = (input[j]) |
+                    (input[j + 1] << 8) |
+                    (input[j + 2] << 16) |
+                    (input[j + 3] << 24);
+        i++;
+        j += 4;
+    }
+}
+
+void MD5Transform(unsigned int state[4], unsigned char block[64]) {
+    unsigned int a = state[0];
+    unsigned int b = state[1];
+    unsigned int c = state[2];
+    unsigned int d = state[3];
+    unsigned int x[64];
+    MD5Decode(x, block, 64);
+    FF(a, b, c, d, x[0], 7, 0xd76aa478); /* 1 */
+    FF(d, a, b, c, x[1], 12, 0xe8c7b756); /* 2 */
+    FF(c, d, a, b, x[2], 17, 0x242070db); /* 3 */
+    FF(b, c, d, a, x[3], 22, 0xc1bdceee); /* 4 */
+    FF(a, b, c, d, x[4], 7, 0xf57c0faf); /* 5 */
+    FF(d, a, b, c, x[5], 12, 0x4787c62a); /* 6 */
+    FF(c, d, a, b, x[6], 17, 0xa8304613); /* 7 */
+    FF(b, c, d, a, x[7], 22, 0xfd469501); /* 8 */
+    FF(a, b, c, d, x[8], 7, 0x698098d8); /* 9 */
+    FF(d, a, b, c, x[9], 12, 0x8b44f7af); /* 10 */
+    FF(c, d, a, b, x[10], 17, 0xffff5bb1); /* 11 */
+    FF(b, c, d, a, x[11], 22, 0x895cd7be); /* 12 */
+    FF(a, b, c, d, x[12], 7, 0x6b901122); /* 13 */
+    FF(d, a, b, c, x[13], 12, 0xfd987193); /* 14 */
+    FF(c, d, a, b, x[14], 17, 0xa679438e); /* 15 */
+    FF(b, c, d, a, x[15], 22, 0x49b40821); /* 16 */
+
+    /* Round 2 */
+    GG(a, b, c, d, x[1], 5, 0xf61e2562); /* 17 */
+    GG(d, a, b, c, x[6], 9, 0xc040b340); /* 18 */
+    GG(c, d, a, b, x[11], 14, 0x265e5a51); /* 19 */
+    GG(b, c, d, a, x[0], 20, 0xe9b6c7aa); /* 20 */
+    GG(a, b, c, d, x[5], 5, 0xd62f105d); /* 21 */
+    GG(d, a, b, c, x[10], 9, 0x2441453); /* 22 */
+    GG(c, d, a, b, x[15], 14, 0xd8a1e681); /* 23 */
+    GG(b, c, d, a, x[4], 20, 0xe7d3fbc8); /* 24 */
+    GG(a, b, c, d, x[9], 5, 0x21e1cde6); /* 25 */
+    GG(d, a, b, c, x[14], 9, 0xc33707d6); /* 26 */
+    GG(c, d, a, b, x[3], 14, 0xf4d50d87); /* 27 */
+    GG(b, c, d, a, x[8], 20, 0x455a14ed); /* 28 */
+    GG(a, b, c, d, x[13], 5, 0xa9e3e905); /* 29 */
+    GG(d, a, b, c, x[2], 9, 0xfcefa3f8); /* 30 */
+    GG(c, d, a, b, x[7], 14, 0x676f02d9); /* 31 */
+    GG(b, c, d, a, x[12], 20, 0x8d2a4c8a); /* 32 */
+
+    /* Round 3 */
+    HH(a, b, c, d, x[5], 4, 0xfffa3942); /* 33 */
+    HH(d, a, b, c, x[8], 11, 0x8771f681); /* 34 */
+    HH(c, d, a, b, x[11], 16, 0x6d9d6122); /* 35 */
+    HH(b, c, d, a, x[14], 23, 0xfde5380c); /* 36 */
+    HH(a, b, c, d, x[1], 4, 0xa4beea44); /* 37 */
+    HH(d, a, b, c, x[4], 11, 0x4bdecfa9); /* 38 */
+    HH(c, d, a, b, x[7], 16, 0xf6bb4b60); /* 39 */
+    HH(b, c, d, a, x[10], 23, 0xbebfbc70); /* 40 */
+    HH(a, b, c, d, x[13], 4, 0x289b7ec6); /* 41 */
+    HH(d, a, b, c, x[0], 11, 0xeaa127fa); /* 42 */
+    HH(c, d, a, b, x[3], 16, 0xd4ef3085); /* 43 */
+    HH(b, c, d, a, x[6], 23, 0x4881d05); /* 44 */
+    HH(a, b, c, d, x[9], 4, 0xd9d4d039); /* 45 */
+    HH(d, a, b, c, x[12], 11, 0xe6db99e5); /* 46 */
+    HH(c, d, a, b, x[15], 16, 0x1fa27cf8); /* 47 */
+    HH(b, c, d, a, x[2], 23, 0xc4ac5665); /* 48 */
+
+    /* Round 4 */
+    II(a, b, c, d, x[0], 6, 0xf4292244); /* 49 */
+    II(d, a, b, c, x[7], 10, 0x432aff97); /* 50 */
+    II(c, d, a, b, x[14], 15, 0xab9423a7); /* 51 */
+    II(b, c, d, a, x[5], 21, 0xfc93a039); /* 52 */
+    II(a, b, c, d, x[12], 6, 0x655b59c3); /* 53 */
+    II(d, a, b, c, x[3], 10, 0x8f0ccc92); /* 54 */
+    II(c, d, a, b, x[10], 15, 0xffeff47d); /* 55 */
+    II(b, c, d, a, x[1], 21, 0x85845dd1); /* 56 */
+    II(a, b, c, d, x[8], 6, 0x6fa87e4f); /* 57 */
+    II(d, a, b, c, x[15], 10, 0xfe2ce6e0); /* 58 */
+    II(c, d, a, b, x[6], 15, 0xa3014314); /* 59 */
+    II(b, c, d, a, x[13], 21, 0x4e0811a1); /* 60 */
+    II(a, b, c, d, x[4], 6, 0xf7537e82); /* 61 */
+    II(d, a, b, c, x[11], 10, 0xbd3af235); /* 62 */
+    II(c, d, a, b, x[2], 15, 0x2ad7d2bb); /* 63 */
+    II(b, c, d, a, x[9], 21, 0xeb86d391); /* 64 */
+    state[0] += a;
+    state[1] += b;
+    state[2] += c;
+    state[3] += d;
+}
+
+int get_file_md5(const char *filename, char *dest) {
+    int i;
+    int filelen = 0;
+    int read_len;
+    char temp[8] = {0};
+    char hexbuf[128] = {0};
+    unsigned char decrypt[16] = {0};
+    unsigned char decrypt32[64] = {0};
+    MD5_CTX md5;
+    char fw_path[128];
+    int fdf;
+    fdf = open(filename, O_RDONLY);
+    if (fdf < 0) {
+        printf("%s not exist\n", filename);
+        return -1;
+    }
+    MD5Init(&md5);
+    while (1) {
+        read_len = read(fdf, hexbuf, sizeof(hexbuf));
+        if (read_len < 0) {
+            close(fdf);
+            return -1;
+        }
+        if (read_len == 0) {
+            break;
+        }
+        filelen += read_len;
+        MD5Update(&md5, (unsigned char *) hexbuf, read_len);
+    }
+
+    MD5Final(&md5, decrypt);
+    strcpy((char *) decrypt32, "");
+    for (i = 0; i < 16; i++) {
+        sprintf(temp, "%02x", decrypt[i]);
+        strcat((char *) decrypt32, temp);
+    }
+    strcpy(dest, decrypt32);
+    close(fdf);
+    return filelen;
+}

+ 97 - 0
src/utils/md5.h

@@ -0,0 +1,97 @@
+//
+// Created by 阳坤 on 2022/3/1.
+// Reference: https://cloud.tencent.com/developer/article/1806329
+//
+
+#ifndef BYTESFLOW_OPENCV_MEDIA_MD5_H
+#define BYTESFLOW_OPENCV_MEDIA_MD5_H
+
+#include <memory.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+typedef struct {
+    unsigned int count[2];
+    unsigned int state[4];
+    unsigned char buffer[64];
+} MD5_CTX;
+
+
+#define F(x, y, z) ((x & y) | (~x & z))
+#define G(x, y, z) ((x & z) | (y & ~z))
+#define H(x, y, z) (x^y^z)
+#define I(x, y, z) (y ^ (x | ~z))
+#define ROTATE_LEFT(x, n) ((x << n) | (x >> (32-n)))
+#define FF(a, b, c, d, x, s, ac) \
+          { \
+          a += F(b,c,d) + x + ac; \
+          a = ROTATE_LEFT(a,s); \
+          a += b; \
+          }
+#define GG(a, b, c, d, x, s, ac) \
+          { \
+          a += G(b,c,d) + x + ac; \
+          a = ROTATE_LEFT(a,s); \
+          a += b; \
+          }
+#define HH(a, b, c, d, x, s, ac) \
+          { \
+          a += H(b,c,d) + x + ac; \
+          a = ROTATE_LEFT(a,s); \
+          a += b; \
+          }
+#define II(a, b, c, d, x, s, ac) \
+          { \
+          a += I(b,c,d) + x + ac; \
+          a = ROTATE_LEFT(a,s); \
+          a += b; \
+          }
+
+/********************************************************
+* Name:     MD5Init()
+* Purpose:  initialize an MD5 context
+* Input:
+*   context: the MD5_CTX to initialize
+* Output:   none
+*********************************************************/
+void MD5Init(MD5_CTX *context);
+
+/*********************************************************
+* Name:     MD5Update()
+* Purpose:  feed data into an initialized MD5 context; no return value
+* Input:
+*   context:  an initialized MD5_CTX
+*   input:    the data to hash, any length
+*   inputlen: length of input in bytes
+* Output:   none
+*********************************************************/
+void MD5Update(MD5_CTX *context, unsigned char *input, unsigned int inputlen);
+/*********************************************************
+* Name:     MD5Final()
+* Purpose:  finish the computation and write out the digest; no return value
+* Input:
+*   context: an initialized MD5_CTX
+*   digest:  receives the 16-byte result
+* Output:   none
+*********************************************************/
+void MD5Final(MD5_CTX *context, unsigned char digest[16]);
+
+void MD5Transform(unsigned int state[4], unsigned char block[64]);
+
+void MD5Encode(unsigned char *output, unsigned int *input, unsigned int len);
+
+void MD5Decode(unsigned int *output, unsigned char *input, unsigned int len);
+
+/**
+ * Online check: http://www.metools.info/other/o21.html
+ * Compute the MD5 of a file as a 32-character hex string.
+ * @param filepath path of the file to hash
+ * @param dst      receives the hex digest
+ * @return the file length in bytes, or -1 on failure
+ */
+int get_file_md5(const char *filepath, char *dst);
+
+#endif //BYTESFLOW_OPENCV_MEDIA_MD5_H
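
md5_main.cpp above already exercises get_file_md5(); this is a small sketch (not in the diff) of the underlying incremental Init/Update/Final sequence on an in-memory buffer, which is the same sequence get_file_md5() runs over each chunk it reads from the file.

    #include <cstdio>
    extern "C" {
    #include "src/utils/md5.h"
    }

    int main() {
        MD5_CTX ctx;
        unsigned char digest[16];
        MD5Init(&ctx);
        MD5Update(&ctx, (unsigned char *) "hello", 5);   // may be called repeatedly for streamed data
        MD5Final(&ctx, digest);
        for (int i = 0; i < 16; ++i)
            printf("%02x", digest[i]);                   // expected: 5d41402abc4b2a76b9719d911017c592
        printf("\n");
        return 0;
    }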

+ 159 - 0
src/utils/pq_blockingqueue.hpp

@@ -0,0 +1,159 @@
+//
+// Created by devyk on 2021/10/4.
+//
+
+#ifndef BITSTREAMANALYZE_PQ_BLOCKINGQUEUE_HPP
+#define BITSTREAMANALYZE_PQ_BLOCKINGQUEUE_HPP
+#include <queue>
+#include <deque>
+#include <mutex>
+#include <condition_variable>
+
+template<typename T>
+class BlockQueue {
+public:
+    explicit BlockQueue (int MaxSize = 1000);
+    ~BlockQueue();
+
+    bool Empty();
+    bool Full();
+    int Size();
+    int Capacity();
+
+    T Front();
+    T Back();
+    void PushFront(const T &item);
+    void PushBack(const T &item);
+    bool PopFront(T &item);
+    bool PopBack(T &item);
+    void Flush();
+    void Close();
+
+private:
+    bool m_isClose;
+    int m_maxSize;
+    std::deque<T> m_deque;
+    std::mutex m_mutex;
+    std::condition_variable m_noFullCondVar;
+    std::condition_variable m_noEmptyCondVar;
+};
+
+template<typename T>
+BlockQueue<T>::BlockQueue(int MaxSize) :
+        m_isClose(false),
+        m_maxSize(MaxSize)
+{
+}
+
+template<typename T>
+BlockQueue<T>::~BlockQueue() {
+    Close();
+}
+
+template<typename T>
+void BlockQueue<T>::Close() {
+    {
+        std::unique_lock<std::mutex> locker(m_mutex);
+        m_deque.clear();
+        m_deque.shrink_to_fit();
+        m_isClose = true;
+    }
+    m_noFullCondVar.notify_all();
+    m_noEmptyCondVar.notify_all();
+}
+
+template<typename T>
+bool BlockQueue<T>::Empty() {
+    std::unique_lock<std::mutex> locker(m_mutex);
+    return m_deque.empty();
+}
+
+template<typename T>
+bool BlockQueue<T>::Full() {
+    std::unique_lock<std::mutex> locker(m_mutex);
+    return (int) m_deque.size() >= m_maxSize;
+}
+
+
+template<typename T>
+int BlockQueue<T>::Size() {
+    std::unique_lock<std::mutex> locker(m_mutex);
+    return m_deque.size();
+}
+
+template<typename T>
+int BlockQueue<T>::Capacity() {
+    std::unique_lock<std::mutex> locker(m_mutex);
+    return m_maxSize;
+}
+
+template<typename T>
+T BlockQueue<T>::Front() {
+    std::unique_lock<std::mutex> locker(m_mutex);
+    return m_deque.front();
+}
+
+template<typename T>
+T BlockQueue<T>::Back() {
+    std::unique_lock<std::mutex> locker(m_mutex);
+    return m_deque.back();
+}
+
+template<typename T>
+void BlockQueue<T>::PushFront(const T &item) {
+    std::unique_lock<std::mutex> locker(m_mutex);
+    while (m_deque.size() >= m_maxSize) {
+        m_noFullCondVar.wait(locker);
+    }
+    m_deque.emplace_front(item);
+    m_noEmptyCondVar.notify_one();
+}
+
+template<typename T>
+void BlockQueue<T>::PushBack(const T &item) {
+    std::unique_lock<std::mutex> locker(m_mutex);
+    while (m_deque.size() >= m_maxSize) {
+        m_noFullCondVar.wait(locker);
+    }
+    m_deque.emplace_back(item);
+    m_noEmptyCondVar.notify_one();
+}
+
+template<typename T>
+bool BlockQueue<T>::PopFront(T &item) {
+    std::unique_lock<std::mutex> locker(m_mutex);
+    while (m_deque.empty()) {
+        m_noEmptyCondVar.wait(locker);
+        if (m_isClose) return false;
+    }
+
+    item = m_deque.front();
+    m_deque.pop_front();
+    m_noFullCondVar.notify_one();
+    return true;
+}
+
+template<typename T>
+bool BlockQueue<T>::PopBack(T &item) {
+    std::unique_lock<std::mutex> locker(m_mutex);
+    while (m_deque.empty()) {
+        m_noEmptyCondVar.wait(locker);
+        if (m_isClose) return false;
+    }
+
+    item = m_deque.back();
+    m_deque.pop_back();
+    m_noFullCondVar.notify_one();
+    return true;
+}
+
+template<typename T>
+void BlockQueue<T>::Flush() {
+    std::unique_lock<std::mutex> locker(m_mutex);
+    m_noEmptyCondVar.notify_one();
+}
+
+#endif //BITSTREAMANALYZE_PQ_BLOCKINGQUEUE_HPP
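
A quick sketch (not part of the diff) of BlockQueue as a bounded producer/consumer channel, the same pattern av_decode.cpp uses to hand AVFrames to the similarity code. The capacity of 4 and the -1 sentinel are illustrative choices.

    #include <cstdio>
    #include <thread>
    #include "src/utils/pq_blockingqueue.hpp"

    int main() {
        BlockQueue<int> q(4);                    // PushBack blocks once 4 items are queued
        std::thread producer([&q] {
            for (int i = 0; i < 8; ++i) q.PushBack(i);
            q.PushBack(-1);                      // sentinel, like the NULL frame pushed at EOF
        });
        int v = 0;
        while (q.PopFront(v) && v != -1)
            printf("got %d\n", v);
        producer.join();
        return 0;
    }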

+ 65 - 0
src/utils/video_similarity.cpp

@@ -0,0 +1,65 @@
+//
+// Created by 阳坤 on 2022/3/1.
+//
+
+#include "video_similarity.h"
+
+
+long video_similarity_detection_init(const char *url) {
+    VideoSimilarityContext *ctx = (VideoSimilarityContext *) malloc(sizeof(VideoSimilarityContext));
+    if (!ctx)return -1;
+    memset(ctx, 0, sizeof(VideoSimilarityContext));
+    ctx->video_path = strdup(url);
+    return (long) ctx;
+}
+
+
+int video_similarity_detection_start(long id, int force_keyframe, int frame_count, DisableMediaType disableMediaType,
+                                     std::vector<VideoSimilarityModel *> &lists) {
+    if (id <= 0)return -1;
+    auto *ctx = (VideoSimilarityContext *) id;
+    long d_id = initDecoder(ctx->video_path, force_keyframe, frame_count, disableMediaType);
+    ctx->decode_obj_id = d_id;
+    if (ctx->decode_obj_id <= 0)return ctx->decode_obj_id;
+    int exit = 0, ret = 0;
+    av_read_decode_frame(ctx->decode_obj_id);
+    auto *dctx = (DecoderContext *) ctx->decode_obj_id;
+    while (!exit) {
+        AVFrame *video_frame = NULL;
+        dctx->video_queue->PopFront(video_frame);
+        if (video_frame == NULL) {
+            LOGE("read video frame end.");
+            break;
+        }
+
+        int len = 0;
+        const char *image_hash = fingerprintFromFFAVFrame(video_frame, &len);
+        auto model = new VideoSimilarityModel();
+        model->pts = video_frame->pts;
+        model->img_len = len;
+        if (len > 0) {
+            model->image_hash = static_cast<const char *>(malloc(sizeof(char) * len));
+            memcpy((void*)model->image_hash, image_hash, len);
+        }
+        model->pict_type = video_frame->pict_type;
+        lists.push_back(model);
+        if (image_hash)
+            free((void *) image_hash);
+        av_frame_free(&video_frame);
+    }
+    return 0;
+}
+
+int video_similarity_detection_close(long id) {
+    if (id <= 0)return -1;
+    auto *ctx = (VideoSimilarityContext *) id;
+
+    if (ctx->video_path) {
+        free((void *) ctx->video_path);
+        ctx->video_path = NULL;
+    }
+    close_decoder(ctx->decode_obj_id);
+    free(ctx);
+    ctx = NULL;
+    return 0;
+}

+ 55 - 0
src/utils/video_similarity.h

@@ -0,0 +1,55 @@
+//
+// Created by 阳坤 on 2022/3/1.
+//
+
+#include "../ffmpeg/av_decode.h"
+#include "../opencv/image_fingerprint.h"
+
+#ifndef BYTESFLOW_OPENCV_MEDIA_VIDEO_SIMILARITY_H
+#define BYTESFLOW_OPENCV_MEDIA_VIDEO_SIMILARITY_H
+
+/**
+ * Context for a video similarity detection session
+ */
+typedef struct VideoSimilarityContext {
+    /* video path */
+    const char *video_path;
+    /* force key-frame-only checking */
+    int force_keyframe;
+    /* number of frames to check */
+    int frame_count;
+    /* decoder handle */
+    long decode_obj_id;
+} VideoSimilarityContext;
+
+typedef struct VideoSimilarityModel {
+    /* fingerprint length */
+    int img_len;
+    /* fingerprint of the frame */
+    const char *image_hash;
+    int pict_type;
+    long pts;
+} VideoSimilarityModel;
+
+
+/**
+ * Initialize video similarity detection.
+ * @param url video path
+ * @return a context handle
+ */
+long video_similarity_detection_init(const char *url);
+
+/**
+ * Start the analysis.
+ * @param force_keyframe only decode key frames
+ * @param frame_count    number of frames to decode
+ */
+int video_similarity_detection_start(long id, int force_keyframe, int frame_count, DisableMediaType disableMediaType,
+                                     std::vector<VideoSimilarityModel *> &lists);
+
+/**
+ * Close the session and release its resources.
+ * @return 0 on success, -1 on an invalid handle
+ */
+int video_similarity_detection_close(long id);
+
+#endif //BYTESFLOW_OPENCV_MEDIA_VIDEO_SIMILARITY_H

+ 42 - 0
src/video_similarity_comparison.cpp

@@ -0,0 +1,42 @@
+//
+// Created by 阳坤 on 2022/3/1.
+//
+
+#include "utils/video_similarity.h"
+
+/**
+ * 1. Check whether the two files' MD5 digests match.
+ *  1.1 If they match:
+ *   1.1.1 Check whether the media metadata is consistent; if so, compare the fingerprints of the first N key frames.
+ *  1.2 If they differ:
+ *   1.2.1 Check whether the media metadata is consistent; if so, compare the fingerprints of the first N key frames.
+ * @param argc
+ * @param argv
+ * @return
+ */
+int main(int argc, char *argv[]) {
+    const char *filepath = "/Users/devyk/Data/Project/piaoquan/PQMedia/temp/4.mp4";
+    long id = video_similarity_detection_init(filepath);
+    std::vector<VideoSimilarityModel *> lists;
+    int ret = video_similarity_detection_start(id, 1, -1, AUDIO, lists);
+    for (int i = 0; i < lists.size(); ++i) {
+        auto model = lists[i];
+        int len = model->img_len;
+        const char *image_hash = model->image_hash;
+        for (int i = 0; i < len; ++i) {
+            printf("%d ", image_hash[i]);
+        }
+        printf("\n");
+        LOGE("frame-video I-Frame=%d pts=%lld\n", model->pict_type == AV_PICTURE_TYPE_I,
+             model->pts);
+        if (image_hash)
+            free((void *) image_hash);
+        delete model;
+    }
+    lists.clear();
+    lists.shrink_to_fit();
+
+    video_similarity_detection_close(id);
+    return 0;
+}
+
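main() above only fingerprints a single file. Below is a sketch (not part of the commit) of the decision flow its header comment describes, limited to the MD5 short-circuit and the key-frame fingerprint comparison; the metadata check is left out, the key frames of both videos are assumed to line up index-for-index, and the helper name videos_similar and the 0.85 threshold are assumptions.

    #include <cstring>
    #include <vector>
    extern "C" {
    #include "utils/md5.h"
    }
    #include "utils/video_similarity.h"

    static bool videos_similar(const char *path_a, const char *path_b, int n_frames) {
        // Step 1: byte-identical files short-circuit on their MD5.
        char md5_a[64] = {0}, md5_b[64] = {0};
        if (get_file_md5(path_a, md5_a) > 0 && get_file_md5(path_b, md5_b) > 0 &&
            strcmp(md5_a, md5_b) == 0)
            return true;

        // Step 2: compare the fingerprints of the first n_frames key frames.
        std::vector<VideoSimilarityModel *> a, b;
        long id_a = video_similarity_detection_init(path_a);
        long id_b = video_similarity_detection_init(path_b);
        video_similarity_detection_start(id_a, 1, n_frames, AUDIO, a);
        video_similarity_detection_start(id_b, 1, n_frames, AUDIO, b);

        int matched = 0, compared = 0;
        for (size_t i = 0; i < a.size() && i < b.size(); ++i) {
            if (a[i]->img_len > 0 && a[i]->img_len == b[i]->img_len) {
                if (fingerprint_compare((char *) a[i]->image_hash,
                                        (char *) b[i]->image_hash, a[i]->img_len) > 0.85f)
                    matched++;
                compared++;
            }
        }
        video_similarity_detection_close(id_a);
        video_similarity_detection_close(id_b);
        // The caller still owns the models: free each image_hash and delete them, as main() does.
        return compared > 0 && matched == compared;
    }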