// video_similarity.cpp
  1. //
  2. // Created by 阳坤 on 2022/3/1.
  3. //
  4. #include <json/reader.h>
  5. #include <json/writer.h>
  6. #include <sys/stat.h>
  7. #include <signal.h>
  8. #include <execinfo.h>
  9. #include "video_similarity.h"
  10. const int MAX_STACK_FRAMES = 128;
  11. void sig_crash(int sig) {
  12. printf("crash---->sig=%d \n", sig);
  13. FILE *fd;
  14. struct stat buf;
  15. const char *crash_path = "./video_similarity_comparison_crash.txt";
  16. stat(crash_path, &buf);
  17. if (buf.st_size > 10 * 1000 * 1000) { // 超过10兆则清空内容
  18. fd = fopen(crash_path, "w");
  19. } else {
  20. fd = fopen(crash_path, "at");
  21. }
  22. if (NULL == fd) {
  23. exit(0);
  24. }
  25. try {
  26. char szLine[512] = {0,};
  27. time_t t = time(NULL);
  28. tm *now = localtime(&t);
  29. int nLen1 = sprintf(szLine,
  30. "#########################################################\n[%04d-%02d-%02d %02d:%02d:%02d][crash signal number:%d]\n",
  31. now->tm_year + 1900,
  32. now->tm_mon + 1,
  33. now->tm_mday,
  34. now->tm_hour,
  35. now->tm_min,
  36. now->tm_sec,
  37. sig);
  38. printf("crash---->%s \n", szLine);
  39. fwrite(szLine, 1, strlen(szLine), fd);
  40. #ifdef __linux
  41. void* array[MAX_STACK_FRAMES];
  42. size_t size = 0;
  43. char** strings = NULL;
  44. size_t i, j;
  45. signal(sig, SIG_DFL);
  46. size = backtrace(array, MAX_STACK_FRAMES);
  47. strings = (char**)backtrace_symbols(array, size);
  48. //fprintf(stderr, "oncrash;\n");
  49. for (i = 0; i < size; ++i)
  50. {
  51. char szLine[512] = {0, };
  52. sprintf(szLine, "%d %s\n", i, strings[i]);
  53. fwrite(szLine, 1, strlen(szLine), fd);
  54. std::string symbol(strings[i]);
  55. size_t pos1 = symbol.find_first_of("[");
  56. size_t pos2 = symbol.find_last_of("]");
  57. std::string address = symbol.substr(pos1 + 1, pos2 - pos1 -1);
  58. char cmd[128] = {0, };
  59. sprintf(cmd, "addr2line -e /home/libpiaoquan_java_opencv.so %s", address.c_str()); // test为应用程序名称,需要改为用户自己的应用程序名
  60. FILE *fPipe = popen(cmd, "r");
  61. if(fPipe != NULL){
  62. char buff[1024];
  63. memset(buff, 0, sizeof(buff));
  64. char* ret = fgets(buff, sizeof(buff), fPipe);
  65. pclose(fPipe);
  66. fwrite(ret, 1, strlen(ret), fd);
  67. }
  68. }
  69. free(strings);
  70. #endif // __linux
  71. } catch (...) {
  72. //
  73. }
  74. signal(sig, SIG_DFL);
  75. fflush(fd);
  76. fclose(fd);
  77. fd = NULL;
  78. exit(0);
  79. }
  80. long video_similarity_detection_init(const char *url) {
  81. auto *ctx = (VideoSimilarityContext *) malloc(sizeof(VideoSimilarityContext));
  82. if (!ctx)return -1;
  83. memset(ctx, 0, sizeof(VideoSimilarityContext));
  84. ctx->video_path = strdup(url);
  85. return (long) ctx;
  86. }
  87. static const char *GetFileName(const char *ptr, int n) {
  88. int i = n; //这里i只是为循环即使终止了也未找到/而准备
  89. ptr += n; //把指针移到字符串的尾部,即'\0'处
  90. while (i-- > 0) {
  91. if ((*ptr--) == '/') //指针不断回移并判断是否为/符号
  92. {
  93. break; //从后向前遇到第一个/后退出循环
  94. }
  95. }
  96. ptr += 2;
  97. return ptr; //反回最后一个/后面的字符串即名称
  98. }
  99. int video_similarity_detection_start(long id, int force_keyframe,
  100. DisableMediaType disableMediaType,
  101. std::vector<VideoSimilarityModel *> *lists) {
  102. if (id <= 0)return -1;
  103. auto *ctx = (VideoSimilarityContext *) id;
  104. long d_id = initDecoder(ctx->video_path, force_keyframe, disableMediaType);
  105. ctx->decode_obj_id = d_id;
  106. if (ctx->decode_obj_id <= 0)return ctx->decode_obj_id;
  107. int exit = 0, ret = 0;
  108. av_read_decode_frame(ctx->decode_obj_id);
  109. auto *dctx = (DecoderContext *) ctx->decode_obj_id;
  110. dctx->vs_decodes = *lists;
  111. int index = 0;
  112. auto endTimeMs = (*lists)[index]->endTimeMs;
  113. while (!exit) {
  114. AVFrame *video_frame = NULL;
  115. dctx->video_queue->PopFront(video_frame);
  116. if (video_frame == NULL) {
  117. LOGE("read video frame end.");
  118. break;
  119. }
  120. int len = 0;
  121. if (ctx->log) fprintf(ctx->log, "fingerprintFromFFAVFrame 1\n");
  122. const char *image_hash = fingerprintFromFFAVFrame(ctx->log, video_frame, &len);
  123. // char image_hash[64] = {0};
  124. // if (1) {
  125. // for (int i = 0; i < 64; ++i) {
  126. // image_hash[i] = 1;
  127. // }
  128. // len = 64;
  129. // }
  130. if (ctx->log) fprintf(ctx->log, "fingerprintFromFFAVFrame exit \n");
  131. if (len > 64) {
  132. LOGE("size=%d \n", len);
  133. }
  134. auto *model = (ImageHashModel *) malloc(sizeof(ImageHashModel));
  135. memset(model, 0, sizeof(ImageHashModel));
  136. model->img_len = len;
  137. if (len > 0) {
  138. model->image_hash = static_cast<char *>(malloc(sizeof(char) * len+1));
  139. memset((void *) model->image_hash, '\0', sizeof(char) * len+1);
  140. string append_array;
  141. for (int i = 0; i < len; ++i) {
  142. char c = image_hash[i];
  143. if (c == 0) {
  144. append_array.append("0");
  145. } else if (c == 1) {
  146. append_array.append("1");
  147. }
  148. }
  149. append_array.append("\0");
  150. memcpy((char*)model->image_hash, append_array.c_str(), strlen(append_array.c_str()));
  151. }
  152. if (video_frame->pts >= endTimeMs && (*lists).size() > 1 && index + 1 < (*lists).size()) {
  153. index++;
  154. endTimeMs = (*lists)[index]->endTimeMs;
  155. }
  156. model->pts = video_frame->pts;
  157. // LOGE("push pts >>>>= %lld \n", model->pts);
  158. (*lists)[index]->hashs.push_back(model);
  159. if (ctx->log)fprintf(ctx->log, "image_hash \n");
  160. if (ctx->log) fprintf(ctx->log, "av_frame_free\n");
  161. av_frame_free(&video_frame);
  162. if (ctx->log) fprintf(ctx->log, "av_frame_free 2\n");
  163. }
  164. if (ctx->log) fprintf(ctx->log, "loop exit \n");
  165. return 0;
  166. }
  167. int video_similarity_detection_close(long id) {
  168. if (id <= 0)return -1;
  169. auto *ctx = (VideoSimilarityContext *) id;
  170. if (ctx->video_path) {
  171. free((void *) ctx->video_path);
  172. ctx->video_path = NULL;
  173. }
  174. if (ctx->log) {
  175. fclose(ctx->log);
  176. ctx->log = NULL;
  177. }
  178. close_decoder(ctx->decode_obj_id);
  179. free(ctx);
  180. ctx = NULL;
  181. return 0;
  182. }
/**
 * Parses a request JSON document of the form
 *   { "videoPath": "...", "clips": [ { "startTimeMs": .., "endTimeMs": .. }, ... ] }
 * into a newly allocated VideoSimilarity. On parse failure an empty (but
 * valid) object is returned. The caller owns the result (freed via
 * videoSimilarity2json() + delete in get_video_similarity_list()).
 *
 * NOTE(review): each VideoSimilarityModel is obtained with malloc+memset,
 * so its constructor never runs — yet the struct holds a std::vector
 * member (`hashs`) that is pushed to later. That is undefined behavior.
 * The matching releases use free() in videoSimilarity2json(), so switching
 * to new/delete must be done on both sides in one coordinated change.
 *
 * @param json NUL-terminated JSON text
 * @return heap-allocated VideoSimilarity (never NULL)
 */
VideoSimilarity *json2VideoSimilarity(const char *json) {
    VideoSimilarity *videoSimilarity;
    videoSimilarity = new VideoSimilarity();
    Json::Reader reader;
    Json::Value value;
    if (reader.parse(json, value)) {
        Json::Value json_obj = value;
        // videoPath is duplicated; ownership passes to the model.
        if (json_obj.isMember("videoPath"))
            videoSimilarity->videoPath = strdup(json_obj["videoPath"].asString().c_str());
        if (json_obj.isMember("clips") && json_obj["clips"].type() == Json::arrayValue) {
            for (int i = 0; i < json_obj["clips"].size(); ++i) {
                auto *videoSim = (VideoSimilarityModel *) malloc(sizeof(VideoSimilarityModel));
                memset(videoSim, 0, sizeof(VideoSimilarityModel));
                // Only the clip's time window is taken from the JSON.
                videoSim->startTimeMs = json_obj["clips"][i]["startTimeMs"].asInt();
                videoSim->endTimeMs = json_obj["clips"][i]["endTimeMs"].asInt();
                videoSimilarity->clips.push_back(videoSim);
            }
        }
    }
    return videoSimilarity;
}
/**
 * Serializes a VideoSimilarity into a JSON string and *consumes* it in the
 * process: videoPath and every ImageHashModel/VideoSimilarityModel are
 * freed, and clips is emptied via the iterator-erase loop. The input object
 * itself is NOT deleted here (the caller does that).
 *
 * @param videoSimilarity model to serialize; may be NULL
 * @return strdup'd JSON text the caller must free(), or NULL for NULL input
 */
const char *videoSimilarity2json(VideoSimilarity *videoSimilarity) {
    if (!videoSimilarity)return NULL;
    Json::Value obj;
    Json::Value array;
    Json::StreamWriterBuilder builder;
    if (videoSimilarity->videoPath) {
        obj["videoPath"] = videoSimilarity->videoPath;
        // Ownership of the strdup'd path ends here.
        free((void *) videoSimilarity->videoPath);
        videoSimilarity->videoPath = NULL;
    }
    // Consume each clip: serialize, free, erase (erase returns the next it).
    for (auto it = videoSimilarity->clips.begin();
         it != videoSimilarity->clips.end();) {
        auto item = *it;
        Json::Value v;
        Json::Value v_array;
        v["startTimeMs"] = item->startTimeMs;
        v["endTimeMs"] = item->endTimeMs;
        // Same consume-as-you-go pattern for the per-frame hashes.
        for (auto it2 = item->hashs.begin();
             it2 != item->hashs.end();) {
            ImageHashModel *item2 = *it2;
            Json::Value va;
            if (item->hashs.size() <= 1499) {
                // Debug stub left intentionally empty (see commented LOGE).
                // LOGE("size=%d %d\n", item->hashs.size(), item2->img_len);
            }
            if (item2->image_hash && item2->img_len > 0) {
                va["imageHash"] = item2->image_hash;
                printf("item2->image_hash address=%p \n",&item2->image_hash);
                free((void *) item2->image_hash);
                item2->image_hash = NULL;
            }
            va["hashSize"] = item2->img_len;
            va["pts"] = item2->pts;
            v_array.append(va);
            // NOTE(review): item2 was malloc'd although the struct type may
            // be non-trivial — free() skips any destructor; must be fixed
            // together with the allocation site.
            free(item2);
            item2 = NULL;
            it2 = item->hashs.erase(it2);
        }
        v["hashs"] = v_array;
        array.append(v);
        free(item);
        item = NULL;
        it = videoSimilarity->clips.erase(it);
    }
    obj["clips"] = array;
    const std::string json_file = Json::writeString(builder, obj);
    // Returned buffer is heap-allocated; caller owns it.
    const char *ret_json = strdup(json_file.c_str());
    // Redundant: the erase loop above already emptied clips.
    videoSimilarity->clips.clear();
    return ret_json;
}
  262. /**
  263. * 获取视频相似值列表
  264. * @param inputfile
  265. * @return
  266. */
  267. const char *get_video_similarity_list(const char *inputjson) {
  268. signal(SIGABRT, sig_crash);
  269. signal(SIGSEGV, sig_crash);
  270. if (!inputjson)return NULL;
  271. // free((void*)inputjson);
  272. printf("get_video_similarity_list=%s \n", inputjson);
  273. auto *vs = json2VideoSimilarity(inputjson);
  274. long id = video_similarity_detection_init(vs->videoPath);
  275. const char *ret_json = NULL;
  276. if (id > 0) {
  277. if (!(((VideoSimilarityContext *) id)->log)) {
  278. int n = strlen(((VideoSimilarityContext *) id)->video_path); //计算字符串的长度(包括'\0')
  279. const char *name = GetFileName(((VideoSimilarityContext *) id)->video_path, n); //把字符串及其长度传给函数
  280. printf("%s\n", name); //name就是那个名称,可以输出
  281. char args[512];
  282. sprintf(args, "/Users/devyk/Data/Project/sample/github_code/OpenCVSample/temp/%s.log", name);
  283. // sprintf(args, "/datalog/ffmpeg_opencv/%s.log", name);
  284. ((VideoSimilarityContext *) id)->log = fopen(args, "wb+");
  285. printf("args===%s \n", args);
  286. if (((VideoSimilarityContext *) id)->log)fprintf(((VideoSimilarityContext *) id)->log, inputjson);
  287. }
  288. if (((VideoSimilarityContext *) id)->log)
  289. fprintf(((VideoSimilarityContext *) id)->log, "video_similarity_detection_start \n");
  290. video_similarity_detection_start(id, 0, AUDIO,
  291. &vs->clips);
  292. if (((VideoSimilarityContext *) id)->log)
  293. fprintf(((VideoSimilarityContext *) id)->log, "video_similarity_detection_start exit \n");
  294. video_similarity_detection_close(id);
  295. ret_json = videoSimilarity2json(vs);
  296. delete vs;
  297. vs = NULL;
  298. }
  299. return ret_json;
  300. }