root 4 months ago
Parent
Commit
64461d2972

BIN
recommend-model-jni/paddle/libpaddle_inference.so


BIN
recommend-model-jni/paddle/profile.log


BIN
recommend-model-jni/src/test/resources/libpaddle_inference.so


+ 90 - 0
recommend-model-produce/src/main/python/models/dssm/infer.py

@@ -0,0 +1,90 @@
+import numpy as np
+import argparse
+import cv2
+
+from paddle.inference import Config
+from paddle.inference import create_predictor
+
+from img_preprocess import preprocess
+
+
+def init_predictor(args):
+    if args.model_dir != "":
+        config = Config(args.model_dir)
+    else:
+        config = Config(args.model_file, args.params_file)
+
+    config.enable_memory_optim()
+    if args.use_onnxruntime:
+        config.enable_onnxruntime()
+        config.enable_ort_optimization()
+        config.set_cpu_math_library_num_threads(4)
+    else:
+        # When not using ONNX Runtime, enable MKL-DNN and set the math-library
+        # thread count; it should not exceed the number of CPU cores.
+        config.set_cpu_math_library_num_threads(4)
+        config.enable_mkldnn()
+
+    predictor = create_predictor(config)
+    return predictor
+
+
+def run(predictor, img):
+    # copy img data to input tensor
+    input_names = predictor.get_input_names()
+    for i, name in enumerate(input_names):
+        input_tensor = predictor.get_input_handle(name)
+        input_tensor.reshape(img[i].shape)
+        input_tensor.copy_from_cpu(img[i])
+
+    # do the inference
+    predictor.run()
+
+    results = []
+    # fetch output data from the output tensors
+    output_names = predictor.get_output_names()
+    for i, name in enumerate(output_names):
+        output_tensor = predictor.get_output_handle(name)
+        output_data = output_tensor.copy_to_cpu()
+        results.append(output_data)
+
+    return results
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--model_file",
+        type=str,
+        default="",
+        help="Model filename, Specify this when your model is a combined model."
+    )
+    parser.add_argument(
+        "--params_file",
+        type=str,
+        default="",
+        help=
+        "Parameter filename. Specify this when your model is a combined model."
+    )
+    parser.add_argument(
+        "--model_dir",
+        type=str,
+        default="",
+        help=
+        "Model dir. If you load a non-combined model, specify the directory of the model."
+    )
+    parser.add_argument("--use_onnxruntime",
+                        type=int,
+                        default=0,
+                        help="Whether use onnxruntime.")
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    pred = init_predictor(args)
+    img = cv2.imread('./ILSVRC2012_val_00000247.jpeg')
+    img = preprocess(img)
+    #img = np.ones((1, 3, 224, 224)).astype(np.float32)
+    result = run(pred, [img])
+    print("class index: ", np.argmax(result[0][0]))

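The new infer.py appears to be adapted from Paddle's generic CPU inference demo: it builds a Config from either a combined model (--model_file plus --params_file) or a model directory (--model_dir), enables ONNX Runtime or MKL-DNN, and runs a preprocessed image through the predictor. The commented-out np.ones line shows the script can be smoke-tested without an image file. A minimal sketch of that path, assuming infer.py is importable from the working directory and the model accepts a 1x3x224x224 float32 input (the shape taken from that commented line):

import numpy as np

from infer import init_predictor, parse_args, run

if __name__ == '__main__':
    # e.g. python smoke_test.py --model_dir ./inference_model (hypothetical path)
    args = parse_args()
    pred = init_predictor(args)
    # Dummy all-ones tensor instead of a decoded image, mirroring the
    # commented-out line in infer.py.
    dummy = np.ones((1, 3, 224, 224)).astype(np.float32)
    result = run(pred, [dummy])
    print("class index: ", np.argmax(result[0][0]))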
BIN
recommend-model-produce/src/main/python/tools/__pycache__/tools.cpython-310.pyc


+ 22 - 0
recommend-model-produce/src/main/python/tools/inferv2.py

@@ -0,0 +1,22 @@
+import os
+import sys
+__dir__ = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(__dir__)
+from utils.oss_client import HangZhouOSSClient
+import utils.compress as compress
+from utils.my_hdfs_client import MyHDFSClient
+import logging
+import paddle
+
+logging.basicConfig(
+    format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+hadoop_home = "/app/env/hadoop-3.2.4"  # Hadoop installation directory
+configs = {
+    "fs.default.name": "hdfs://192.168.141.208:9000",  # HDFS NameNode address and port
+    "hadoop.job.ugi": ""  # HDFS user and password
+}
+hdfs_client = MyHDFSClient(hadoop_home, configs)
+
+

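inferv2.py ends right after constructing the HDFS client, so the intended download/upload flow is not part of this commit. Its hadoop_home and configs arguments match the constructor of Paddle's bundled HDFSClient, so MyHDFSClient is presumably a thin wrapper around it (an assumption, not confirmed by this diff). A minimal sketch of fetching an archived model with the bundled client, using a hypothetical HDFS path:

import tarfile

from paddle.distributed.fleet.utils import HDFSClient

hadoop_home = "/app/env/hadoop-3.2.4"
configs = {
    "fs.default.name": "hdfs://192.168.141.208:9000",  # HDFS NameNode address and port
    "hadoop.job.ugi": "",  # HDFS user and password
}
client = HDFSClient(hadoop_home, configs)

# Hypothetical remote path; substitute the real model location on HDFS.
remote, local = "/models/dssm/model.tar.gz", "/tmp/model.tar.gz"
if client.is_exist(remote):
    client.download(remote, local)
    with tarfile.open(local) as tf:
        tf.extractall("/tmp/model")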
BIN
recommend-model-produce/src/main/python/tools/utils/__pycache__/__init__.cpython-310.pyc


BIN
recommend-model-produce/src/main/python/tools/utils/__pycache__/compress.cpython-310.pyc