明天要分享的是服务化部署框架(MindSpore Serving)具体要实现的就是一个能够在线识图的页面。1.MindSpore Serving装置MindSpore Serving目前只能通过指定whl包装置pip install https://ms-release.obs.cn-nor...

2.MindSpore Serving应用查看相干文档https://www.mindspore.cn/serv...疾速入门的例子有点不够形象化于是去github的代码仓外面看看样例都有哪些https://gitee.com/mindspore/s...须要没有GPU, 就拿resnet50来跑跑看,cifa10的数据集也不大,cpu下训练也不麻烦。如下是相干模型的训练代码:from mindspore.train import Model
from mindvision.dataset import Cifar10
from mindvision.engine.callback import ValAccMonitor
from mindvision.classification.models.classifiers import BaseClassifier
from mindvision.classification.models.head import DenseHead
from mindvision.classification.models.neck import GlobalAvgPooling
from mindvision.classification.utils.model_urls import model_urls
from mindvision.utils.load_pretrained_model import LoadPretrainedModel
from typing import Type, Union, List, Optional
from mindvision.classification.models.blocks import ConvNormActivation
from mindspore import nn

class ResidualBlockBase(nn.Cell):
    """Basic residual block: two 3x3 convolutions plus an identity shortcut.

    Used by the shallower ResNet variants; the last convolution keeps the
    same number of kernels as the first (expansion == 1).
    """

    # Number of output kernels of the last conv equals that of the first.
    expansion: int = 1

    def __init__(self, in_channel: int, out_channel: int,
                 stride: int = 1, norm: Optional[nn.Cell] = None,
                 down_sample: Optional[nn.Cell] = None) -> None:
        super(ResidualBlockBase, self).__init__()
        if not norm:
            norm = nn.BatchNorm2d
        # Main branch: 3x3 conv (with stride) followed by a 3x3 conv with no
        # activation — the ReLU is applied after the shortcut is added.
        self.conv1 = ConvNormActivation(in_channel, out_channel,
                                        kernel_size=3, stride=stride, norm=norm)
        self.conv2 = ConvNormActivation(out_channel, out_channel,
                                        kernel_size=3, norm=norm, activation=None)
        self.relu = nn.ReLU()
        self.down_sample = down_sample

    def construct(self, x):
        """ResidualBlockBase construct."""
        identity = x  # shortcut branch
        out = self.conv1(x)   # main branch, first 3x3 conv layer
        out = self.conv2(out)  # main branch, second 3x3 conv layer
        if self.down_sample:
            # Project the shortcut when shape/stride differs from the output.
            identity = self.down_sample(x)
        out += identity  # output = main branch + shortcut
        out = self.relu(out)
        return out

class ResidualBlock(nn.Cell):
    """Bottleneck residual block: 1x1 -> 3x3 -> 1x1 convolutions plus shortcut.

    Used by ResNet-50 and deeper variants; the last convolution has four
    times as many kernels as the first (expansion == 4).
    """

    # The last conv has 4x the kernels of the first conv.
    expansion = 4

    def __init__(self, in_channel: int, out_channel: int,
                 stride: int = 1, norm: Optional[nn.Cell] = None,
                 down_sample: Optional[nn.Cell] = None) -> None:
        super(ResidualBlock, self).__init__()
        if not norm:
            norm = nn.BatchNorm2d
        # Main branch: reduce (1x1), transform (3x3, strided), expand (1x1).
        # The final conv has no activation — ReLU follows the shortcut add.
        self.conv1 = ConvNormActivation(in_channel, out_channel,
                                        kernel_size=1, norm=norm)
        self.conv2 = ConvNormActivation(out_channel, out_channel,
                                        kernel_size=3, stride=stride, norm=norm)
        self.conv3 = ConvNormActivation(out_channel, out_channel * self.expansion,
                                        kernel_size=1, norm=norm, activation=None)
        self.relu = nn.ReLU()
        self.down_sample = down_sample

    def construct(self, x):
        identity = x  # shortcut branch
        out = self.conv1(x)   # main branch, first layer: 1x1 conv
        out = self.conv2(out)  # main branch, second layer: 3x3 conv
        out = self.conv3(out)  # main branch, third layer: 1x1 conv
        if self.down_sample:
            # Project the shortcut when shape/stride differs from the output.
            identity = self.down_sample(x)
        out += identity  # output = main branch + shortcut
        out = self.relu(out)
        return out

def make_layer(last_out_channel, block: Type[Union[ResidualBlockBase, ResidualBlock]],
               channel: int, block_nums: int, stride: int = 1):
    """Build one ResNet stage by stacking ``block_nums`` residual blocks.

    Args:
        last_out_channel: number of channels coming out of the previous stage.
        block: residual block class to instantiate (basic or bottleneck).
        channel: base channel count of this stage.
        block_nums: how many blocks to stack.
        stride: stride of the first block (2 down-samples the feature map).

    Returns:
        nn.SequentialCell chaining the blocks.
    """
    # Shortcut projection is needed when the first block changes the
    # spatial size (stride != 1) or the channel count.
    down_sample = None
    if stride != 1 or last_out_channel != channel * block.expansion:
        down_sample = ConvNormActivation(last_out_channel, channel * block.expansion,
                                         kernel_size=1, stride=stride,
                                         norm=nn.BatchNorm2d, activation=None)

    # First block carries the stride and the (optional) projection shortcut.
    layers = [block(last_out_channel, channel, stride=stride,
                    down_sample=down_sample, norm=nn.BatchNorm2d)]

    # Stack the remaining identity-shortcut blocks.
    in_channel = channel * block.expansion
    for _ in range(1, block_nums):
        layers.append(block(in_channel, channel, norm=nn.BatchNorm2d))

    return nn.SequentialCell(layers)

class ResNet(nn.Cell):
    """ResNet backbone: stem conv + max-pool followed by four residual stages.

    Produces the final feature map; pooling/classification heads are attached
    separately (see ``_resnet``).
    """

    def __init__(self, block: Type[Union[ResidualBlockBase, ResidualBlock]],
                 layer_nums: List[int], norm: Optional[nn.Cell] = None) -> None:
        super(ResNet, self).__init__()
        if not norm:
            norm = nn.BatchNorm2d
        # Stem: 7x7 conv, input channels 3 (RGB image), output channels 64.
        self.conv1 = ConvNormActivation(3, 64, kernel_size=7, stride=2, norm=norm)
        # Max pooling halves the spatial size again.
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
        # Four residual stages; stages 2-4 down-sample with stride 2.
        self.layer1 = make_layer(64, block, 64, layer_nums[0])
        self.layer2 = make_layer(64 * block.expansion, block, 128, layer_nums[1], stride=2)
        self.layer3 = make_layer(128 * block.expansion, block, 256, layer_nums[2], stride=2)
        self.layer4 = make_layer(256 * block.expansion, block, 512, layer_nums[3], stride=2)

    def construct(self, x):
        x = self.conv1(x)
        x = self.max_pool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

def _resnet(arch: str, block: Type[Union[ResidualBlockBase, ResidualBlock]],
            layers: List[int], num_classes: int, pretrained: bool, input_channel: int):
    """Assemble a full ResNet classifier and optionally load pretrained weights.

    Args:
        arch: model key used to look up the pretrained-weights URL.
        block: residual block class for the backbone.
        layers: number of blocks in each of the four stages.
        num_classes: size of the classification output.
        pretrained: when True, download and load pretrained weights.
        input_channel: feature size feeding the dense head.
    """
    backbone = ResNet(block, layers)
    neck = GlobalAvgPooling()  # global average pooling layer
    head = DenseHead(input_channel=input_channel, num_classes=num_classes)  # fully-connected layer
    # Wire backbone -> neck -> head into a single classifier.
    model = BaseClassifier(backbone, neck, head)
    if pretrained:
        # Download and load the pretrained model weights.
        LoadPretrainedModel(model, model_urls[arch]).run()
    return model

def resnet50(num_classes: int = 1000, pretrained: bool = False):
    """Build a ResNet-50 model (stage depths 3-4-6-3, 2048-d features)."""
    return _resnet("resnet50", ResidualBlock, [3, 4, 6, 3],
                   num_classes, pretrained, 2048)

# Root directory of the dataset.
data_dir = "./data"

# Download, extract and load the CIFAR-10 training dataset.
dataset_train = Cifar10(path=data_dir, split='train', batch_size=6, resize=32)
ds_train = dataset_train.run()
step_size = ds_train.get_dataset_size()

# Download, extract and load the CIFAR-10 test dataset.
dataset_val = Cifar10(path=data_dir, split='test', batch_size=6, resize=32)
ds_val = dataset_val.run()

# Define the ResNet-50 network with pretrained weights.
network = resnet50(pretrained=True)

# Input size of the fully-connected head of the pretrained network.
in_channel = network.head.dense.in_channels
# Replace the head so the classifier outputs the 10 CIFAR-10 classes.
head = DenseHead(input_channel=in_channel, num_classes=10)
network.head = head

# Cosine-decay learning-rate schedule over all training steps.
num_epochs = 40
lr = nn.cosine_decay_lr(min_lr=0.00001, max_lr=0.001,
                        total_step=step_size * num_epochs,
                        step_per_epoch=step_size, decay_epoch=num_epochs)

# Optimizer and loss function.
opt = nn.Momentum(params=network.trainable_params(), learning_rate=lr, momentum=0.9)
loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

# Instantiate the model with an accuracy metric.
model = Model(network, loss, opt, metrics={"Accuracy": nn.Accuracy()})

# Train, validating on the test split after each epoch.
model.train(num_epochs, ds_train, callbacks=[ValAccMonitor(model, ds_val, num_epochs)])

感觉最终的训练准确度不够高,十几个小时白费了。训练出来的模型需要保存为 MindIR 才能用。

执行example外面的export_resnet.py,将模型进行转换。然而export_resnet.py这个脚本是间接下载曾经训练好的模型来转换的,如果要转换本人训练进去的模型记得批改下文件。执行export_resnet.py会报错,先是报短少easydict模块。执行pip install easydict, 装置报错。查问了一下,是setuptools版本过低造成。于是执行pip install -U setuptools进行降级。而后再执行pip install easydict,顺利装置胜利。


这个时候转换胜利了。生成了mindir模型。有了模型就能够运行server。

执行启动 server 后报错,提示缺少 so 文件。通过 sudo apt-get install libpython3.9 进行库安装。

再次运行server。

仍然报错,提示不支持的设备。查看代码:

config = server.ServableStartConfig(servable_directory=servable_dir, servable_name="resnet50", device_ids=0,
                                    num_parallel_workers=4)


于是查问了下api,看能够设置device_type

于是增加了 device_type="CPU"(注意要用半角引号)。

还是不行,于是看了下文档。

因为以后是CPU,因而推理后端只能是Mindspore Lite只能装置Mindspore Lite了推理/训练runtime、推理/训练jar包、以及benchmark/codegen/converter/cropper工具CPULinux-x86_64https://ms-release.obs.cn-nor...既然用Lite了,模型也要转换。通过如下命令将模型转换。

批改servable_config.py文件,把模型批改成mindspore lite模型

而后再执行

而后查看_servable_local.py在什么状况下会抛出异样。

看代码是执行了函数结尾的命令后没有获取到相干设施信息才报的错。把这个命令独自拎进去执行下看看。

间接报so找不到。先设置下LD_LIBRARY_PATH, 首先是查看下相干的so文件地位在哪。而后再进行相干设置。


最终服务起来了。如果设置了 127.0.0.1 则只能本机访问;因为使用的是虚拟机,外部访问需要把 ip 修改为 0.0.0.0,同时虚拟机的网络要设置为桥接模式才可以。

而后查看虚机ip

代码测试通过。gRPC 还有点问题,我也只用到了 RESTful,gRPC 就先不管了。这里用的模型是我本地训练出来的,看来还是不行,于是就换了样例里面的模型文件。

3.页面拜访页面拜访的话计划很多,能够是vue或是flask。不过都要写页面,html JavaScript等等。在网上搜到一个纯python的库,streamlit, 看上去很不错。Streamlit 是一个基于 Python 的 Web 应用程序框架,致力于以更高效、更灵便的形式可视化数据,并剖析后果。Streamlit是一个开源库,能够帮忙数据科学家和学者在短时间内开发机器学习 (ML) 可视化仪表板。只需几行代码,咱们就能够构建并部署弱小的数据应用程序。为什么抉择Streamlit?目前,应用程序需求量微小,开发人员须要始终开发新的库和框架,帮忙构建并部署疾速上手的仪表板。Streamlit 是一个库,可将仪表板的开发工夫从几天缩短至几小时。以下是抉择 Streamlit 的起因:1. Streamlit是一个收费的开源库。2. 和装置其余python 包一样, Streamlit的装置非常简单。3. Streamlit学起来很容易,无须要任何 Web 开发教训,只需对 Python 有根本的理解,就足以构建数据应用程序。4. Streamlit与大部分机器学习框架兼容,包含 Tensorflow 和 Pytorch、Scikit-learn 和可视化库,如 Seaborn、Altair、Plotly 等。#! -- coding=utf-8 --
import cv2
import numpy as np
import streamlit as st
import base64
import requests
import json
st.title('图片辨认演示')

# Map the CIFAR-10 English class names to their Chinese display names.
labels = {'airplane': '飞机',
          'automobile': '汽车',
          'bird': '鸟',
          'cat': '猫',
          'deer': '鹿',
          'dog': '狗',
          'frog': '青蛙',
          'horse': '马',
          'ship': '船',
          'truck': '卡车'}

uploaded_file = st.file_uploader("上传文件", type="jpg")
if uploaded_file is not None:
    # Decode the upload with OpenCV and echo it back to the page.
    file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
    opencv_image = cv2.imdecode(file_bytes, 1)
    st.image(opencv_image, channels="BGR")

    # Build the MindSpore Serving RESTful payload: a list of instances,
    # each carrying the image as base64 under the "b64" key.
    instances = []
    base64_data = base64.b64encode(file_bytes).decode()
    instances.append({"image": {"b64": base64_data}})
    instances_map = {"instances": instances}
    post_payload = json.dumps(instances_map)

    # Serving endpoint — NOTE(review): hard-coded host/port; adjust to your server.
    ip = "192.168.0.225"
    restful_port = 1500
    servable_name = "resnet50"
    method_name = "classify_top1"
    result = requests.post(
        f"http://{ip}:{restful_port}/model/{servable_name}:{method_name}",
        data=post_payload)

    # Show the predicted label translated to Chinese.
    result = json.loads(result.text)
    label = result['instances'][0]['label']
    st.text(f'图片辨认为:{labels[label]}')

# Start the app with: streamlit run D:\ai\0709\server.py


最终的成果如下:上传文件反对拖拽和抉择上传。


最终成果如下:


这个青蛙有点离谱了。。。模型的准确性有点低。