
Deploying PaddleServing on a Server


1. Add a user, grant sudo privileges, and update the system

passwd root
# add a new user
adduser dapeng
sudo su
# in /etc/sudoers, add a line for your own username below the "root ALL" line
vim /etc/sudoers
su dapeng
sudo apt-get update
sudo apt-get upgrade
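
For reference, the sudoers entry mentioned above can look like this (dapeng is just the example username used throughout this guide):

# in /etc/sudoers, below the "root ALL=(ALL:ALL) ALL" line
dapeng  ALL=(ALL:ALL) ALL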

2. Install the FTP service

sudo apt-get install vsftpd
sudo vim /etc/vsftpd.conf   # set listen_ipv6=NO
sudo service vsftpd start
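
To confirm the FTP service is actually up, a quick check (assuming systemd, as on Ubuntu 18.04):

sudo systemctl status vsftpd
sudo ss -tlnp | grep ':21'   # port 21 should be listening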

3. Uninstall the preinstalled Python 3.6.9 and install Python 3.7

# uninstall python3.6
ls /usr/bin/python*
sudo apt-get remove python3.6
sudo apt-get remove --auto-remove python3.6
# install python3.7
sudo apt install software-properties-common
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt-get update
sudo apt-get install python3.7
update-alternatives --display python
sudo update-alternatives --install /usr/bin/python python /usr/bin/python2.7 2
sudo update-alternatives --install /usr/bin/python python /usr/bin/python3.7 1
python   # launch the interpreter to check the default version
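
Note that in automatic mode update-alternatives picks the entry with the higher priority number, so the commands above keep python2.7 as the default. If python3.7 should be the default instead, switch it interactively:

sudo update-alternatives --config python
python --version   # confirm the selected interpreter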

4. Install pip3

sudo apt-get remove python3-pip
sudo apt-get update
sudo apt-get -y install python3-pip
pip3 --version
# check where pip3.7 actually lives before creating the symlink
sudo ln -sf /usr/local/bin/pip3.7 /usr/local/bin/pip
# switch to the Tsinghua mirror
pip install pip -U
pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
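
A quick sanity check that pip now points at Python 3.7 and uses the configured mirror:

pip -V            # should mention python 3.7
pip config list   # should show global.index-url pointing at the Tsinghua mirror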

5. Install the NVIDIA driver

The driver file is transferred to the server with FileZilla over SFTP.

# upload the NVIDIA-Linux-x86_64-440.118.02.run driver file via SFTP first
sudo apt-get install linux-headers-$(uname -r)
sudo sh NVIDIA-Linux-x86_64-440.118.02.run   # just accept the warnings with OK
nvidia-smi
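
If nvidia-smi fails, the loaded kernel module can also be inspected directly:

cat /proc/driver/nvidia/version   # driver version as seen by the kernel
lsmod | grep nvidia               # confirm the nvidia module is loaded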

6. Install CUDA 10.2

wget https://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run
# in the installer below, remember to deselect the driver option (it was already installed in step 5)
sudo sh cuda_10.2.89_440.33.01_linux.run
# set the environment variables
vim ~/.bashrc
# add the following lines to ~/.bashrc
"""
export PATH=/usr/local/cuda-10.2/bin${PATH:+:${PATH}}
export LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
"""
source ~/.bashrc
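
To confirm the toolkit and the new environment variables are picked up:

nvcc --version           # should report release 10.2
echo $LD_LIBRARY_PATH    # should include /usr/local/cuda-10.2/lib64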

If the installation fails, remove only CUDA (do not uninstall the driver) with the following commands:

sudo apt-get --purge remove "*cublas*" "*cufft*" "*curand*" \
    "*cusolver*" "*cusparse*" "*npp*" "*nvjpeg*" "cuda*" "nsight*"
sudo apt-get autoremove

7. Install cuDNN 7.6.5

tar -xzvf cudnn-10.2-linux-x64-v7.6.5.32.tgz
sudo cp cuda/include/cudnn*.h /usr/local/cuda/include
sudo cp -P cuda/lib64/libcudnn* /usr/local/cuda/lib64
sudo chmod a+r /usr/local/cuda/include/cudnn*.h /usr/local/cuda/lib64/libcudnn*
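
The copied headers can be checked for the expected version (for cuDNN 7.x the version macros live in cudnn.h; cuDNN 8+ moved them to cudnn_version.h):

grep -A 2 '#define CUDNN_MAJOR' /usr/local/cuda/include/cudnn.h   # expect 7 / 6 / 5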

8. Install TensorRT 7.0 (check the matching CUDA and cuDNN versions beforehand)

tar xzvf TensorRT-7.0.0.11.Ubuntu-18.04.x86_64-gnu.cuda-10.2.cudnn7.6.tar.gz
vim ~/.bashrc
"""
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/dapeng/TensorRT-7.0.0.11/lib
"""
source ~/.bashrc
cd TensorRT-7.0.0.11/python
ls   # check the wheel filenames for the right Python tag
# pip here actually points to pip3.7
# run pip -V to confirm which Python version it belongs to
sudo -H pip install tensorrt-7.0.0.11-cp37-none-linux_x86_64.whl
cd ../graphsurgeon/
sudo -H pip install graphsurgeon-0.4.1-py2.py3-none-any.whl
# optional: run make in the samples directory, download the MNIST data under data/, then run the binaries in bin/
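
A minimal import check for the Python bindings installed above:

python -c "import tensorrt; print(tensorrt.__version__)"   # expect 7.0.0.11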

9. Install Anaconda

wget https://repo.anaconda.com/archive/Anaconda3-2021.05-Linux-x86_64.sh
bash Anaconda3-2021.05-Linux-x86_64.sh   # run the installer and let it initialize the shell
conda activate ppocr
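
The ppocr environment activated above has to be created first; a minimal sketch, with the environment name and Python version taken from the rest of this guide:

source ~/.bashrc                   # pick up conda after the installer finishes
conda create -n ppocr python=3.7
conda activate ppocr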

10. Install PaddleOCR

# install the matching paddle build as instructed on the official site
python -m pip install paddlepaddle-gpu -i https://mirror.baidu.com/pypi/simple
sudo apt install git
git clone https://github.com/PaddlePaddle/PaddleOCR
cd PaddleOCR

sudo apt-get install python3.7-dev
sudo apt-get install python3-setuptools
pip install -r requirements.txt
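
Paddle ships a built-in self check that is useful at this point; run it inside the ppocr environment:

python -c "import paddle; paddle.utils.run_check()"
# should end with a message that PaddlePaddle is installed successfully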

11. Install PaddleServing

# server 
pip install https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_server_gpu-0.0.0.post102-py3-none-any.whl
# client
pip install https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl
# app
pip install https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_app-0.0.0-py3-none-any.whl
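
A quick check that all three wheels landed in the same interpreter (module names assume the standard PaddleServing packages):

pip list | grep -i paddle-serving
python -c "import paddle_serving_client, paddle_serving_app"   # should import without errors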

12. Convert the PPOCR models

# convert the detection model (det)
python3 -m paddle_serving_client.convert --dirname ./inference_model/det_sast_inference_model/ \
                                         --model_filename inference.pdmodel          \
                                         --params_filename inference.pdiparams       \
                                         --serving_server ./serving_model/serving_det_sast_serving/ \
                                         --serving_client  ./serving_model/serving_det_sast_client/

# convert the recognition model (rec)
python3 -m paddle_serving_client.convert --dirname ./inference_model/rec_en_number_lite_inference_model/ \
                                         --model_filename inference.pdmodel          \
                                         --params_filename inference.pdiparams       \
                                         --serving_server ./serving_model/serving_rec_en_numberLite_serving/   \
                                         --serving_client ./serving_model/serving_rec_en_numberLite_client/
# directory layout after conversion
./serving_model
├── serving_det_sast_client
│   ├── serving_client_conf.prototxt
│   └── serving_client_conf.stream.prototxt
├── serving_det_sast_serving
│   ├── inference.pdiparams
│   ├── inference.pdmodel
│   ├── serving_server_conf.prototxt
│   └── serving_server_conf.stream.prototxt
├── serving_rec_en_numberLite_client
│   ├── serving_client_conf.prototxt
│   └── serving_client_conf.stream.prototxt
└── serving_rec_en_numberLite_serving
    ├── inference.pdiparams
    ├── inference.pdmodel
    ├── serving_server_conf.prototxt
    └── serving_server_conf.stream.prototxt

13. Start the PPOCR service

# edit ocr_reader.py before starting the service
image_shape=[3, 32, 192]
char_dict_path="./ppocr_keys_v1.txt"
cd PaddleOCR/deploy/pdserving/
python3 web_service.py &>log.txt &
# inspect processes
ps                # list processes
kill -9 <pid>     # kill a process by pid
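
Once web_service.py is running, the pipeline can be exercised with the sample client that ships in the same pdserving directory (the script name and bundled test images assume the stock PaddleOCR layout):

python3 pipeline_http_client.py   # sends the demo images to the pipeline over HTTP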

If something goes wrong, check the logs under pdserving/PipelineServingLogs to locate the problem.
