package com.baidu.paddle.inference;

/**
 * Manual smoke test for the Paddle Inference Java bindings.
 *
 * <p>Loads the {@code paddle_inference} native library, builds a {@link Config}
 * from the model/params paths given on the command line, runs one inference on
 * a zero-filled 1x3x224x224 float input, then repeats the run on a cloned
 * predictor, and finally exercises the config getters/setters. All native
 * handles created here are explicitly destroyed before exit.
 *
 * <p>Usage: {@code java ... test <model_file> <params_file>}
 */
public class test {

    static {
        // Print the lookup path and the platform-specific file name first so a
        // failed loadLibrary() is easy to diagnose from the log.
        System.out.println(System.getProperty("java.library.path"));
        System.out.println(System.mapLibraryName("paddle_inference"));
        System.loadLibrary("paddle_inference");
    }

    public static void main(String[] args) {
        // Guard against a missing-argument crash with a usage hint instead of
        // an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: test <model_file> <params_file>");
            return;
        }

        Config config = new Config();
        config.setCppModel(args[0], args[1]);
        config.enableMemoryOptim(true);
        config.enableProfile();
        config.enableMKLDNN();

        System.out.println("summary:\n" + config.summary());
        System.out.println("model dir:\n" + config.getCppModelDir());
        System.out.println("prog file:\n" + config.getProgFile());
        System.out.println("params file:\n" + config.getCppParamsFile());

        // Exercise the getters/setters; return values are intentionally unused.
        config.getCpuMathLibraryNumThreads();
        config.getFractionOfGpuMemoryForPool();
        config.switchIrDebug(false);
        System.out.println(config.summary());

        Predictor predictor = Predictor.createPaddlePredictor(config);

        // Feed a zero-filled NCHW float tensor of shape 1x3x224x224.
        String inNames = predictor.getInputNameById(0);
        Tensor inHandle = predictor.getInputHandle(inNames);
        inHandle.reshape(4, new int[]{1, 3, 224, 224});
        float[] inData = new float[1 * 3 * 224 * 224];
        inHandle.copyFromCpu(inData);

        predictor.run();

        String outNames = predictor.getOutputNameById(0);
        Tensor outHandle = predictor.getOutputHandle(outNames);
        float[] outData = new float[outHandle.getSize()];
        outHandle.copyToCpu(outData);

        predictor.tryShrinkMemory();
        predictor.clearIntermediateTensor();

        System.out.println("predictor1: " + outData[0]);
        System.out.println("predictor1: " + outData.length);

        // Repeat the run on a cloned predictor; test() manages (and destroys)
        // only the clone, so the original predictor stays valid here.
        test(predictor);

        // Release native handles owned by this method.
        outHandle.destroyNativeTensor();
        inHandle.destroyNativeTensor();
        predictor.destroyNativePredictor();

        // Exercise the individual path setters on a fresh config.
        Config newConfig = new Config();
        newConfig.setCppModelDir("/model_dir");
        newConfig.setCppProgFile("/prog_file");
        newConfig.setCppParamsFile("/param");
        System.out.println("model dir:\n" + newConfig.getCppModelDir());
        System.out.println("prog file:\n" + newConfig.getProgFile());
        System.out.println("params file:\n" + newConfig.getCppParamsFile());

        // Destroy BOTH native configs; the original code leaked newConfig.
        newConfig.destroyNativeConfig();
        config.destroyNativeConfig();
    }

    /**
     * Clones the given predictor and runs the same zero-input inference on the
     * clone, printing the first output value and the output length.
     *
     * <p>Only the clone and its tensors are destroyed here; the caller's
     * {@code predictor} is left untouched (the original code ran inference on
     * the caller's predictor and destroyed it, causing a double destroy in
     * {@link #main}).
     *
     * @param predictor a live predictor to clone; ownership is NOT transferred
     */
    private static void test(Predictor predictor) {
        Predictor predictor2 = Predictor.clonePaddlePredictor(predictor);

        String inNames = predictor2.getInputNameById(0);
        Tensor inHandle = predictor2.getInputHandle(inNames);
        inHandle.reshape(4, new int[]{1, 3, 224, 224});
        float[] inData = new float[1 * 3 * 224 * 224];
        inHandle.copyFromCpu(inData);

        predictor2.run();

        String outNames = predictor2.getOutputNameById(0);
        Tensor outHandle = predictor2.getOutputHandle(outNames);
        float[] outData = new float[outHandle.getSize()];
        outHandle.copyToCpu(outData);

        predictor2.tryShrinkMemory();
        predictor2.clearIntermediateTensor();

        System.out.println("predictor2: " + outData[0]);
        System.out.println("predictor2: " + outData.length);

        // Destroy only the clone's native resources.
        outHandle.destroyNativeTensor();
        inHandle.destroyNativeTensor();
        predictor2.destroyNativePredictor();
    }
}