From ee409e499643b35b7c7da81f93813bbdcca7559b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=AD=99=E7=AD=96?= <1805515795@qq.com>
Date: Sun, 11 Jul 2021 00:03:15 +0000
Subject: [PATCH 1/9] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20=E8=A1=A8=E6=83=85?=
 =?UTF-8?q?=E8=AF=86=E5=88=AB-=E5=AD=99=E7=AD=96?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../.keep" | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/.keep"

diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/.keep" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/.keep"
new file mode 100644
index 0000000..e69de29
--
Gitee

From d6962fc42e10b6fd45016f928c2c731908bd9fdb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=AD=99=E7=AD=96?= <1805515795@qq.com>
Date: Sun, 11 Jul 2021 00:04:26 +0000
Subject: [PATCH 2/9] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20code?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../code/.keep" | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/.keep"

diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/.keep" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/.keep"
new file mode 100644
index 0000000..e69de29
--
Gitee

From 8b212070ad5b07dda63928d3357ca40e715dd58c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=AD=99=E7=AD=96?= <1805515795@qq.com>
Date: Sun, 11 Jul 2021 00:04:38 +0000
Subject: [PATCH 3/9] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20ckpt?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../ckpt/.keep" | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/ckpt/.keep"

diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/ckpt/.keep" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/ckpt/.keep"
new file mode 100644
index 0000000..e69de29
--
Gitee

From 9a684e8531e3e6407cb2839e682b23e807d425ec Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=AD=99=E7=AD=96?= <1805515795@qq.com>
Date: Sun, 11 Jul 2021 00:04:58 +0000
Subject: [PATCH 4/9] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20data=5Fpre=5Fprocess?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../data_pre_process/.keep" | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/.keep"

diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/.keep" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/.keep"
new file mode 100644
index 0000000..e69de29
--
Gitee

From a3cd3d6a1abf9638a86827cef633b5ba690ed298 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=AD=99=E7=AD=96?= <1805515795@qq.com>
Date: Sun, 11 Jul 2021 00:05:19 +0000
Subject: [PATCH 5/9] =?UTF-8?q?=E6=96=B0=E5=BB=BA=20predict=5Fcode?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../predict_code/.keep" | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/.keep"

diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/.keep" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/.keep"
new file mode 100644
index 0000000..e69de29
--
Gitee
From a6c2d6940331337e1d79afdb866f5578c8d7da7a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=AD=99=E7=AD=96?= <1805515795@qq.com>
Date: Sun, 11 Jul 2021 00:06:03 +0000
Subject: [PATCH 6/9] =?UTF-8?q?=E6=8E=A8=E7=90=86?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../predict_code/main.py"     | 153 +++++++++
 .../predict_code/resnet.py"   | 293 ++++++++++++++++++
 .../predict_code/usemodel.py" | 130 ++++++++
 3 files changed, 576 insertions(+)
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/main.py"
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/resnet.py"
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/usemodel.py"

diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/main.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/main.py"
new file mode 100644
index 0000000..4875d6e
--- /dev/null
+++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/main.py"
@@ -0,0 +1,153 @@
+import os
+import argparse
+import csv
+from mindspore import context, Model, load_checkpoint, load_param_into_net, Tensor
+from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
+from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
+from mindspore.communication.management import init
+from mindspore.parallel._auto_parallel_context import auto_parallel_context
+from mindspore.nn.optim.momentum import Momentum
+from mindspore.context import ParallelMode
+from resnet import resnet50
+import cv2
+from PIL import Image
+import numpy as np
+import mindspore as ms
+import moxing as mox
+import random
+
+
+# Argument parser for the options passed in at launch time
+parser = argparse.ArgumentParser(description='face expression classification')
+parser.add_argument('--run_distribute', type=bool, default=True, help='Run distribute.')
+parser.add_argument('--device_num', type=int, default=24, help='Device num.')
+parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'])
+
+args = parser.parse_args()
+
+# Configure the execution environment
+context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
+if args.device_target == "Ascend":
+    device_id = int(os.getenv('DEVICE_ID'))
+    context.set_context(device_id=device_id)
+
+dataset_path = "./expression"  # root directory of the dataset
+random.seed(1)
+
+
+def get_img(data_path):
+    # Read an image from disk, resize it to the network input size and
+    # min-max normalize the pixel values.
+    img0 = cv2.imread(data_path, 3)
+    img1 = cv2.resize(img0, (128, 128))
+    img2 = cv2.resize(img1, (224, 224))
+    img = cv2.normalize(img2, None, 0, 255, cv2.NORM_MINMAX)
+    return img
+
+if __name__ == '__main__':
+
+    data_obs_path = 'obs://sunce-demo/testdata/expression/'
+    mox.file.copy_parallel(src_url=data_obs_path, dst_url='./expression')
+    cpkt_obs_path = 'obs://sunce-demo/testdata/cpkt/'
+    mox.file.copy_parallel(src_url=cpkt_obs_path, dst_url='./')
+
+    # Data-parallel execution across devices
+    if args.run_distribute:
+        context.set_auto_parallel_context(device_num=args.device_num, parallel_mode=ParallelMode.DATA_PARALLEL)
+        auto_parallel_context().set_all_reduce_fusion_split_indices([140])
+        init()
+
+    print("begin")
+
+    net = resnet50(batch_size=32, num_classes=4)
+    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
+    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)
+
+    param_dict = load_checkpoint("train_resnet50-1_9792.ckpt")
+    load_param_into_net(net, param_dict)
+    model = Model(net, loss_fn=net_loss, optimizer=opt, metrics={'acc'})
+
+    shots = []
+    labels = []
+
+    csv_path = os.path.join(dataset_path, 'predict.csv')
+
+    print("begin predict")
+    address = "./expression"
+    count = 0
+    for root, dirs, files in os.walk(address):
+        # walk every class sub-directory
+        for file_name in dirs:
+            pathq = os.path.join(address, file_name)
+            txt_count = 0
+            for x, ys, txt_names in os.walk(os.path.join(address, file_name)):
+                for txt_name in txt_names:
+                    image_path = os.path.join(pathq, txt_name)
+                    print(count)
+                    np0_image = get_img(image_path)
+
+                    np1_image = np.array(np0_image)
+
+                    # (height, width, channel) -> (channel, height, width)
+                    np2_image = np.transpose(np1_image, (2, 0, 1))
+
+                    # add the batch dimension
+                    np_image = np.array([np2_image], dtype=np.float32)
+
+                    input_data = Tensor(np_image, ms.float32)
+                    pred = model.predict(input_data)
+                    print(pred)
+                    # empirically tuned per-class rescaling of the logits
+                    a = pred[0][0] / 5.39
+                    b = -pred[0][1] / 0.61
+                    c = -pred[0][2] / 0.75
+                    d = -pred[0][3] / 3.24
+
+                    print(a, b, c, d)
+                    label_num = [a, b, c, d]
+                    label = label_num.index(max(label_num))
+                    print("label: {}".format(label))
+
+                    labels.append(label)
+                    shots.append(file_name + '/' + txt_name)
+                    count += 1
+                    txt_count += 1
+                    if txt_count == 10:
+                        break
+
+    print("end predict")
+    with open("result.csv", mode='w', newline='') as csv_p:
+        fieldnames = ['shot', 'label']
+        writer = csv.DictWriter(csv_p, fieldnames=fieldnames)
+        writer.writeheader()
+
+        for i in zip(shots, labels):
+            writer.writerow({'shot': i[0], 'label': i[1]})
+
+    out_obs_path = 'obs://sunce-demo/testdata/out/'
+    mox.file.copy_parallel(src_url='result.csv', dst_url=out_obs_path)
\ No newline at end of file
"b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/resnet.py" @@ -0,0 +1,293 @@ +import numpy as np +import mindspore.nn as nn +from mindspore import Tensor +import mindspore.ops as ops + + +def weight_variable_0(shape): + """weight_variable_0""" + zeros = np.zeros(shape).astype(np.float32) + return Tensor(zeros) + + +def weight_variable_1(shape): + """weight_variable_1""" + ones = np.ones(shape).astype(np.float32) + return Tensor(ones) + + +def conv3x3(in_channels, out_channels, stride=1, padding=0): + """3x3 convolution """ + return nn.Conv2d(in_channels, out_channels, + kernel_size=3, stride=stride, padding=padding, weight_init='XavierUniform', + has_bias=False, pad_mode="same") + + +def conv1x1(in_channels, out_channels, stride=1, padding=0): + """1x1 convolution""" + return nn.Conv2d(in_channels, out_channels, + kernel_size=1, stride=stride, padding=padding, weight_init='XavierUniform', + has_bias=False, pad_mode="same") + + +def conv7x7(in_channels, out_channels, stride=1, padding=0): + """1x1 convolution""" + return nn.Conv2d(in_channels, out_channels, + kernel_size=7, stride=stride, padding=padding, weight_init='XavierUniform', + has_bias=False, pad_mode="same") + + +def bn_with_initialize(out_channels): + """bn_with_initialize""" + shape = (out_channels) + mean = weight_variable_0(shape) + var = weight_variable_1(shape) + beta = weight_variable_0(shape) + bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init='Uniform', + beta_init=beta, moving_mean_init=mean, moving_var_init=var) + return bn + + +def bn_with_initialize_last(out_channels): + """bn_with_initialize_last""" + shape = (out_channels) + mean = weight_variable_0(shape) + var = weight_variable_1(shape) + beta = weight_variable_0(shape) + bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init='Uniform', + beta_init=beta, moving_mean_init=mean, moving_var_init=var) + return bn + + +def fc_with_initialize(input_channels, out_channels): + """fc_with_initialize""" + return nn.Dense(input_channels, out_channels, weight_init='XavierUniform', bias_init='Uniform') + + +class ResidualBlock(nn.Cell): + """ResidualBlock""" + expansion = 4 + + def __init__(self, + in_channels, + out_channels, + stride=1): + """init block""" + super(ResidualBlock, self).__init__() + + out_chls = out_channels // self.expansion + self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0) + self.bn1 = bn_with_initialize(out_chls) + + self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0) + self.bn2 = bn_with_initialize(out_chls) + + self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0) + self.bn3 = bn_with_initialize_last(out_channels) + + self.relu = ops.ReLU() + self.add = ops.Add() + + def construct(self, x): + """construct""" + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + out = self.add(out, identity) + out = self.relu(out) + + return out + + +class ResidualBlockWithDown(nn.Cell): + """ResidualBlockWithDown""" + expansion = 4 + + def __init__(self, + in_channels, + out_channels, + stride=1, + down_sample=False): + """init block with down""" + super(ResidualBlockWithDown, self).__init__() + + out_chls = out_channels // self.expansion + self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0) + self.bn1 = bn_with_initialize(out_chls) + + self.conv2 = 
conv3x3(out_chls, out_chls, stride=1, padding=0) + self.bn2 = bn_with_initialize(out_chls) + + self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0) + self.bn3 = bn_with_initialize_last(out_channels) + + self.relu = ops.ReLU() + self.down_sample = down_sample + + self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) + self.bn_down_sample = bn_with_initialize(out_channels) + self.add = ops.Add() + + def construct(self, x): + """construct""" + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + identity = self.conv_down_sample(identity) + identity = self.bn_down_sample(identity) + + out = self.add(out, identity) + out = self.relu(out) + + return out + + +class MakeLayer0(nn.Cell): + """MakeLayer0""" + + def __init__(self, block, in_channels, out_channels, stride): + """init""" + super(MakeLayer0, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=1, down_sample=True) + self.b = block(out_channels, out_channels, stride=stride) + self.c = block(out_channels, out_channels, stride=1) + + def construct(self, x): + """construct""" + x = self.a(x) + x = self.b(x) + x = self.c(x) + + return x + + +class MakeLayer1(nn.Cell): + """MakeLayer1""" + + def __init__(self, block, in_channels, out_channels, stride): + """init""" + super(MakeLayer1, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True) + self.b = block(out_channels, out_channels, stride=1) + self.c = block(out_channels, out_channels, stride=1) + self.d = block(out_channels, out_channels, stride=1) + + def construct(self, x): + """construct""" + x = self.a(x) + x = self.b(x) + x = self.c(x) + x = self.d(x) + + return x + + +class MakeLayer2(nn.Cell): + """MakeLayer2""" + + def __init__(self, block, in_channels, out_channels, stride): + """init""" + super(MakeLayer2, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True) + self.b = block(out_channels, out_channels, stride=1) + self.c = block(out_channels, out_channels, stride=1) + self.d = block(out_channels, out_channels, stride=1) + self.e = block(out_channels, out_channels, stride=1) + self.f = block(out_channels, out_channels, stride=1) + + def construct(self, x): + """construct""" + x = self.a(x) + x = self.b(x) + x = self.c(x) + x = self.d(x) + x = self.e(x) + x = self.f(x) + + return x + + +class MakeLayer3(nn.Cell): + """MakeLayer3""" + + def __init__(self, block, in_channels, out_channels, stride): + """init""" + super(MakeLayer3, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True) + self.b = block(out_channels, out_channels, stride=1) + self.c = block(out_channels, out_channels, stride=1) + + def construct(self, x): + """construct""" + x = self.a(x) + x = self.b(x) + x = self.c(x) + + return x + + +class ResNet(nn.Cell): + """ResNet""" + + def __init__(self, block, num_classes=100, batch_size=32): + """init""" + super(ResNet, self).__init__() + self.batch_size = batch_size + self.num_classes = num_classes + + self.conv1 = conv7x7(3, 64, stride=2, padding=0) + + self.bn1 = bn_with_initialize(64) + self.relu = ops.ReLU() + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same") + + self.layer1 = MakeLayer0(block, in_channels=64, out_channels=256, stride=1) + self.layer2 = 
MakeLayer1(block, in_channels=256, out_channels=512, stride=2) + self.layer3 = MakeLayer2(block, in_channels=512, out_channels=1024, stride=2) + self.layer4 = MakeLayer3(block, in_channels=1024, out_channels=2048, stride=2) + + self.pool = ops.ReduceMean(keep_dims=True) + self.squeeze = ops.Squeeze(axis=(2, 3)) + self.fc = fc_with_initialize(512 * block.expansion, num_classes) + + def construct(self, x): + """construct""" + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.pool(x, (2, 3)) + x = self.squeeze(x) + x = self.fc(x) + return x + + +def resnet50(batch_size, num_classes): + """create resnet50""" + return ResNet(ResidualBlock, num_classes, batch_size) diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/usemodel.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/usemodel.py" new file mode 100644 index 0000000..334e16d --- /dev/null +++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/predict_code/usemodel.py" @@ -0,0 +1,130 @@ +import os +import argparse +#import random +from mindspore import context, Model, load_checkpoint, load_param_into_net, Tensor +from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor +from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits +from mindspore.nn.optim.momentum import Momentum +from resnet import resnet50 +import cv2 +from PIL import Image +import numpy as np +import mindspore as ms +import moxing as mox + +#from CreateDataset import create_dataset + + + +# 定义一个参数接收器。用于读取运行时传入的参数 +parser = argparse.ArgumentParser(description='face expression classification') +parser.add_argument('--run_distribute', type=bool, default=True, help='Run distribute.') +parser.add_argument('--device_num', type=int, default=24, help='Device num.') +parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU']) + +args = parser.parse_args() + +# 设置运行环境的参数 +context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target) +if args.device_target == "Ascend": + device_id = int(os.getenv('DEVICE_ID')) + context.set_context(device_id=device_id) + +dataset_path = "./expression" # 定义数据集所在路径 +classes = ["0","1","2","3"] + + + +def get_img(data_path): + # Getting image array from path: + img = cv2.imread(data_path) + img = cv2.resize(img, (128, 128)) + img = img.reshape(1, 128, 128, 3) + return img + +if __name__ == '__main__': + + #data_obs_path='obs://sunce-demo/testdata/expression/' + #mox.file.copy_parallel(src_url=data_obs_path, dst_url='./expression') + #cpkt_obs_path='obs://sunce-demo/testdata/cpkt/' + #mox.file.copy_parallel(src_url=cpkt_obs_path, dst_url='./') + + #mox.file.copy_parallel(src_url='./cpkt', dst_url='./') + #mox.file.copy_parallel(src_url='obs://sunce-demo/testdata/predict.csv', dst_url='./expression') + + # 自动并行运算 + if args.run_distribute: + context.set_auto_parallel_context(device_num=args.device_num, parallel_mode=ParallelMode.DATA_PARALLEL) + auto_parallel_context().set_all_reduce_fusion_split_indices([140]) + init() + + print("begin") + + net = resnet50(batch_size=32, num_classes=4) + net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean') + opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9) + #resnet = ResNet50() + + param_dict = 
load_checkpoint("train_resnet50-1_9792.ckpt") + load_param_into_net(net, param_dict) + model = Model(net, loss_fn=net_loss, optimizer=opt, metrics={'acc'}) + + images=[] + labels=[] + + csv_path=os.path.join(dataset_path,'predict.csv') + + print("begin predict") + with open(csv_path, 'r') as f: + next(f) + lines = f.readlines() + + #遍历csv文件内容 + count=0 + for line in lines: + #解析每一行csv文件内容 + cols = line.strip().split(",") # 根据逗号,拆分csv文件中一行文本的元素 + image_path = os.path.join(dataset_path,cols[0]) + print(count) + np0_image = Image.open(image_path).convert("RGB") + + np1_image = np.array(np0_image) + #np1_image = np.transpose(np0_image,(2,0,1)) + #print("image: {}".format(np1_image)) + + np2_image = np.transpose(np1_image,(2,0,1)) + #np2_image = np.array(np1_image) + #print("shape: {}".format(np2_image.shape), ", dtype: {}".format(np2_image.dtype)) + + np_image = np.array([np2_image], dtype=np.float32) + #print("shape: {}".format(np_image.shape), ", dtype: {}".format(np_image.dtype)) + + # 图像处理 + input_data = Tensor(np_image,ms.float32) + pred = model.predict(input_data) + pred = list(pred) + #print("label: {}".format( pred.argmax(axis=1) ) ) + + label=pred.index(max(pred)) + print("label: {}".format( label ) ) + + labels.append(label) + images.append(np_image) + count+=1 + #if count==10: + # break + + print("end predict") + with open("result.csv", mode='w', newline='') as csv_p: + fieldnames = ['label','shot'] + writer = csv.DictWriter(csv_p, fieldnames=fieldnames) + writer.writeheader() + + for i in zip(labellist, shotlist): + writer.writerow({'shot':i[0], 'label':i[1]}) + + out_obs_path='obs://sunce-demo/testdata/out/' + mox.file.copy_parallel(src_url='result.csv', dst_url=out_obs_path) + #mox.file.copy_parallel(src_url='./', dst_url='./out') + + \ No newline at end of file -- Gitee From 0ed8210f2a54f6a4ae588c511fe938010b77ee07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=AD=99=E7=AD=96?= <1805515795@qq.com> Date: Sun, 11 Jul 2021 00:06:29 +0000 Subject: [PATCH 7/9] =?UTF-8?q?=E8=AE=AD=E7=BB=83?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../code/CreateDataset.py" | 64 ++++ .../code/DatasetGenerator.py" | 141 +++++++++ .../code/main.py" | 99 ++++++ .../code/resnet.py" | 293 ++++++++++++++++++ .../code/test.py" | 35 +++ 5 files changed, 632 insertions(+) create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/CreateDataset.py" create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/DatasetGenerator.py" create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/main.py" create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/resnet.py" create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/test.py" diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/CreateDataset.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/CreateDataset.py" new file mode 100644 index 0000000..1517e86 --- /dev/null +++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/CreateDataset.py" @@ -0,0 +1,64 @@ +""" +文件名:CreateDataset.py +作者:孙策 +描述:用于创建训练或测试时所用的数据集 的函数 +修改人:〈修改人〉 +修改时间:YYYY-MM-DD +修改内容:〈修改内容〉 +""" +import 
From 0ed8210f2a54f6a4ae588c511fe938010b77ee07 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=AD=99=E7=AD=96?= <1805515795@qq.com>
Date: Sun, 11 Jul 2021 00:06:29 +0000
Subject: [PATCH 7/9] =?UTF-8?q?=E8=AE=AD=E7=BB=83?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../code/CreateDataset.py"    |  64 ++++
 .../code/DatasetGenerator.py" |  73 +++++
 .../code/main.py"             |  99 ++++++
 .../code/resnet.py"           | 293 ++++++++++++++++++
 .../code/test.py"             |  35 +++
 5 files changed, 564 insertions(+)
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/CreateDataset.py"
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/DatasetGenerator.py"
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/main.py"
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/resnet.py"
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/test.py"

diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/CreateDataset.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/CreateDataset.py"
new file mode 100644
index 0000000..1517e86
--- /dev/null
+++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/CreateDataset.py"
@@ -0,0 +1,64 @@
+"""
+File: CreateDataset.py
+Author: Sun Ce
+Description: function that builds the dataset used for training or evaluation
+Modified by: <modifier>
+Modified on: YYYY-MM-DD
+Changes: <description of changes>
+"""
+import mindspore.dataset as ds
+import mindspore.dataset.vision.c_transforms as CV
+import mindspore.dataset.transforms.c_transforms as CV2
+from mindspore.dataset.vision import Inter
+from mindspore import dtype as mstype
+from DatasetGenerator import DatasetGenerator
+
+def create_dataset(dataset_path, csv_path, batch_size, repeat_size, device_num, rank_id):
+    """ Build the dataset used for training or evaluation.
+    Args:
+        dataset_path: directory that holds the dataset
+        csv_path: csv file that describes the dataset and thereby selects
+            the training or the validation split
+        batch_size: batch size used during training
+        repeat_size: number of times the data is repeated
+        device_num: number of data-parallel shards
+        rank_id: shard id of the current device
+    """
+
+    # Create the dataset generator.
+    dataset_generator = DatasetGenerator(dataset_path, csv_path)
+
+    # Wrap the generator in GeneratorDataset to get a MindSpore dataset;
+    # ["image", "label"] names the image column and the label column.
+    dataset = ds.GeneratorDataset(dataset_generator, ["image", "label"], num_shards=device_num, shard_id=rank_id, shuffle=True)
+
+    # Parameters of the image transforms
+    resize_height, resize_width = 224, 224  # target image size
+    rescale = 1.0 / 255.0  # normalization factor
+    shift = 0.0  # offset
+
+    # Resize images to (224, 224)
+    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)
+    # Rescale pixel values according to rescale and shift
+    rescale_op = CV.Rescale(rescale, shift)
+    # Standardize pixels; the six values are the per-channel RGB means and stds of this dataset.
+    normalize_op = CV.Normalize((0.46, 0.46, 0.46), (0.27, 0.27, 0.27))
+    # Change layout from (height, width, channel) to (channel, height, width) as the network expects
+    changeswap_op = CV.HWC2CHW()
+    # Cast labels to int32
+    type_cast_op = CV2.TypeCast(mstype.int32)
+
+    c_trans = []
+    c_trans += [resize_op, rescale_op, normalize_op, changeswap_op]
+
+    # Apply the operations defined above to the dataset.
+    # Label type cast
+    dataset = dataset.map(operations=type_cast_op, input_columns="label")
+    # Image transforms
+    dataset = dataset.map(operations=c_trans, input_columns="image")
+
+    # Dataset pipeline settings
+    dataset = dataset.shuffle(buffer_size=10)  # shuffle buffer size
+    dataset = dataset.batch(batch_size, drop_remainder=True)  # batch size
+    dataset = dataset.repeat(repeat_size)  # repeat count
+
+    return dataset
\ No newline at end of file
diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/DatasetGenerator.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/DatasetGenerator.py"
new file mode 100644
index 0000000..282cb57
--- /dev/null
+++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/DatasetGenerator.py"
@@ -0,0 +1,73 @@
+"""
+File: DatasetGenerator.py
+Author: Sun Ce
+Description: dataset generator class
+Modified by: <modifier>
+Modified on: YYYY-MM-DD
+Changes: <description of changes>
+"""
+import numpy as np
+import os
+from PIL import Image
+from mindspore import Tensor
+from mindspore import dtype as mstype
+import mindspore as ms
+import cv2
+import pathlib
+
+class DatasetGenerator:
+    """
+    Defines a dataset generator.
+    """
+    def __init__(self, dataset_path, csv_path):
+        """
+        Initializer that runs when an instance of this class is created; it
+        reads the image files and label values of the dataset according to
+        the csv file passed in.
+        Args:
+            dataset_path: dataset directory
+            csv_path: path of the csv file describing the dataset
+        """
+        images = []  # list that collects the image data of the dataset
+        labels = []  # list that collects the label data of the dataset
+
+        # read and process every row of the csv file
+        with open(csv_path, 'r') as f:
+            next(f)
+            lines = f.readlines()
+            imageCount = 0
+            for line in lines:
+                cols = line.strip().split(",")  # split one csv row on commas
+                image_path = os.path.join(dataset_path, cols[0])  # path of one image
+                img_type = cols[0].strip().split(".")
+                image_path_judge = pathlib.Path(image_path)
+
+                # skip missing files and tif images
+                if (image_path_judge.exists() and (img_type[1] != 'tif' and img_type[1] != 'TIF')):
+                    print("Image count: {}".format(imageCount))
+
+                    np0_image = Image.open(image_path)
+                    print(np0_image)
+
+                    np1_image = np.array(np0_image, dtype=np.float32)
+                    print("Image dtype: {}".format(np1_image.dtype), ", shape: {}".format(np1_image.shape),)
+
+                    np_image = cv2.resize(np1_image, (128, 128), interpolation=cv2.INTER_LINEAR)
+
+                    images.append(np_image)  # append the current image to the images list
+                    labels.append(int(cols[1]))  # append the current label to the labels list
+                    imageCount += 1
+                    print("Image dtype: {}".format(np_image.dtype), ", shape: {}".format(np_image.shape),)
+
+                    # hard cap on the number of images loaded
+                    if imageCount == 235000:
+                        break
+
+        self.images = images  # store images as an attribute of the object
+        self.labels = labels  # store labels as an attribute of the object
+
+    def __getitem__(self, index):
+        return self.images[index], self.labels[index]
+
+    def __len__(self):
+        return len(self.labels)
\ No newline at end of file
diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/main.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/main.py"
new file mode 100644
index 0000000..e19493f
--- /dev/null
+++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/main.py"
@@ -0,0 +1,99 @@
+import os
+import argparse
+import time
+from mindspore import context, Model, load_checkpoint, load_param_into_net
+from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
+from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
+from mindspore.communication.management import init
+from mindspore.nn.optim.momentum import Momentum
+from mindspore.context import ParallelMode
+from mindspore.parallel._auto_parallel_context import auto_parallel_context
+from resnet import resnet50
+from CreateDataset import create_dataset
+
+# random seed initialization
+#random.seed(1)
+
+# Argument parser for the options passed in at launch time
+parser = argparse.ArgumentParser(description='face expression classification')
+parser.add_argument('--run_distribute', type=bool, default=False, help='Run distribute.')
+parser.add_argument('--device_num', type=int, default=24, help='Device num.')
+parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU', 'CPU'])
+parser.add_argument('--do_train', type=bool, default=True, help='Do train or not.')
+parser.add_argument('--do_eval', type=bool, default=False, help='Do eval or not.')
+parser.add_argument('--epoch_size', type=int, default=1, help='Epoch size.')
+parser.add_argument('--batch_size', type=int, default=32, help='Batch size.')
+parser.add_argument('--num_classes', type=int, default=4, help='Num classes.')
+parser.add_argument('--checkpoint_path', type=str, default='./checkpoint', help='CheckPoint file path.')
+parser.add_argument('--data_url', required=True, help='Location of data.')
+parser.add_argument('--train_url', required=True, default=None, help='Location of training outputs.')
+parser.add_argument('--mode', type=str, default="train", choices=['train', 'test'], help='train or test')
+
+args = parser.parse_args()
+
+dog_dataset_path = "./four_face"  # root directory of the dataset
+
+# Configure the execution environment
+context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
+device_id = int(os.getenv('DEVICE_ID', '0'))
+if args.device_target == "Ascend":
+    context.set_context(device_id=device_id)
+
+
+if __name__ == '__main__':
+    # record the start time
+    start_time = time.time()
+
+    # Copy the dataset from the OBS bucket into the local cache.
+    import moxing as mox
+    mox.file.copy_parallel(src_url=args.data_url, dst_url='./four_face')
+
+    # Print the device in use
+    print(f"use device is : {args.device_target}")
+
+    # Data-parallel execution across devices
+    if args.run_distribute:
+        context.set_auto_parallel_context(device_num=args.device_num, parallel_mode=ParallelMode.DATA_PARALLEL)
+        auto_parallel_context().set_all_reduce_fusion_split_indices([140])
+        init()
+
+    # Training hyper-parameters
+    epoch_size = args.epoch_size  # number of training epochs
+    net = resnet50(args.batch_size, args.num_classes)  # build the ResNet-50 network
+    net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')  # define the loss function
+    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.01, 0.9)  # define the optimizer
+
+    # Build the model; metrics={'acc'} reports accuracy during evaluation.
+    model = Model(net, loss_fn=net_loss, optimizer=opt, metrics={'acc'})
+
+    # as for train, users could use model.train
+    if args.do_train:
+        train_dataset = create_dataset(dog_dataset_path, os.path.join(dog_dataset_path, "training.csv"), batch_size=args.batch_size, repeat_size=args.batch_size, device_num=args.device_num, rank_id=device_id)
+        batch_num = train_dataset.get_dataset_size()
+        config_ck = CheckpointConfig(save_checkpoint_steps=batch_num, keep_checkpoint_max=35)
+        ckpoint_cb = ModelCheckpoint(prefix="train_resnet50", directory=args.checkpoint_path, config=config_ck)
+        loss_cb = LossMonitor()
+
+        print("begin train")
+        model.train(epoch_size, train_dataset, callbacks=[ckpoint_cb, loss_cb])
+        mox.file.copy_parallel(src_url=args.checkpoint_path, dst_url=args.train_url)
+
+    # as for evaluation, users could use model.eval
+    if args.do_eval:
+        print("Testing Model:")
+        if args.checkpoint_path:
+            # checkpoint_path must point at a .ckpt file when evaluating
+            param_dict = load_checkpoint(args.checkpoint_path)
+            load_param_into_net(net, param_dict)
+        eval_dataset = create_dataset(dog_dataset_path, os.path.join(dog_dataset_path, "validation.csv"), batch_size=args.batch_size, repeat_size=args.batch_size, device_num=args.device_num, rank_id=device_id)
+
+        print("begin eval")
+        res = model.eval(eval_dataset, dataset_sink_mode=False)  # evaluate the network
+        print("============== Test result:{} ==============".format(res))
+    print(f"Total time:{int(time.time() - start_time)}")
\ No newline at end of file
"""bn_with_initialize""" + shape = (out_channels) + mean = weight_variable_0(shape) + var = weight_variable_1(shape) + beta = weight_variable_0(shape) + bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init='Uniform', + beta_init=beta, moving_mean_init=mean, moving_var_init=var) + return bn + + +def bn_with_initialize_last(out_channels): + """bn_with_initialize_last""" + shape = (out_channels) + mean = weight_variable_0(shape) + var = weight_variable_1(shape) + beta = weight_variable_0(shape) + bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=0.00001, gamma_init='Uniform', + beta_init=beta, moving_mean_init=mean, moving_var_init=var) + return bn + + +def fc_with_initialize(input_channels, out_channels): + """fc_with_initialize""" + return nn.Dense(input_channels, out_channels, weight_init='XavierUniform', bias_init='Uniform') + + +class ResidualBlock(nn.Cell): + """ResidualBlock""" + expansion = 4 + + def __init__(self, + in_channels, + out_channels, + stride=1): + """init block""" + super(ResidualBlock, self).__init__() + + out_chls = out_channels // self.expansion + self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0) + self.bn1 = bn_with_initialize(out_chls) + + self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0) + self.bn2 = bn_with_initialize(out_chls) + + self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0) + self.bn3 = bn_with_initialize_last(out_channels) + + self.relu = ops.ReLU() + self.add = ops.Add() + + def construct(self, x): + """construct""" + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + out = self.add(out, identity) + out = self.relu(out) + + return out + + +class ResidualBlockWithDown(nn.Cell): + """ResidualBlockWithDown""" + expansion = 4 + + def __init__(self, + in_channels, + out_channels, + stride=1, + down_sample=False): + """init block with down""" + super(ResidualBlockWithDown, self).__init__() + + out_chls = out_channels // self.expansion + self.conv1 = conv1x1(in_channels, out_chls, stride=stride, padding=0) + self.bn1 = bn_with_initialize(out_chls) + + self.conv2 = conv3x3(out_chls, out_chls, stride=1, padding=0) + self.bn2 = bn_with_initialize(out_chls) + + self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0) + self.bn3 = bn_with_initialize_last(out_channels) + + self.relu = ops.ReLU() + self.down_sample = down_sample + + self.conv_down_sample = conv1x1(in_channels, out_channels, stride=stride, padding=0) + self.bn_down_sample = bn_with_initialize(out_channels) + self.add = ops.Add() + + def construct(self, x): + """construct""" + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + identity = self.conv_down_sample(identity) + identity = self.bn_down_sample(identity) + + out = self.add(out, identity) + out = self.relu(out) + + return out + + +class MakeLayer0(nn.Cell): + """MakeLayer0""" + + def __init__(self, block, in_channels, out_channels, stride): + """init""" + super(MakeLayer0, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=1, down_sample=True) + self.b = block(out_channels, out_channels, stride=stride) + self.c = block(out_channels, out_channels, stride=1) + + def construct(self, x): + """construct""" + x = self.a(x) + x = self.b(x) + x 
= self.c(x) + + return x + + +class MakeLayer1(nn.Cell): + """MakeLayer1""" + + def __init__(self, block, in_channels, out_channels, stride): + """init""" + super(MakeLayer1, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True) + self.b = block(out_channels, out_channels, stride=1) + self.c = block(out_channels, out_channels, stride=1) + self.d = block(out_channels, out_channels, stride=1) + + def construct(self, x): + """construct""" + x = self.a(x) + x = self.b(x) + x = self.c(x) + x = self.d(x) + + return x + + +class MakeLayer2(nn.Cell): + """MakeLayer2""" + + def __init__(self, block, in_channels, out_channels, stride): + """init""" + super(MakeLayer2, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True) + self.b = block(out_channels, out_channels, stride=1) + self.c = block(out_channels, out_channels, stride=1) + self.d = block(out_channels, out_channels, stride=1) + self.e = block(out_channels, out_channels, stride=1) + self.f = block(out_channels, out_channels, stride=1) + + def construct(self, x): + """construct""" + x = self.a(x) + x = self.b(x) + x = self.c(x) + x = self.d(x) + x = self.e(x) + x = self.f(x) + + return x + + +class MakeLayer3(nn.Cell): + """MakeLayer3""" + + def __init__(self, block, in_channels, out_channels, stride): + """init""" + super(MakeLayer3, self).__init__() + self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True) + self.b = block(out_channels, out_channels, stride=1) + self.c = block(out_channels, out_channels, stride=1) + + def construct(self, x): + """construct""" + x = self.a(x) + x = self.b(x) + x = self.c(x) + + return x + + +class ResNet(nn.Cell): + """ResNet""" + + def __init__(self, block, num_classes=100, batch_size=32): + """init""" + super(ResNet, self).__init__() + self.batch_size = batch_size + self.num_classes = num_classes + + self.conv1 = conv7x7(3, 64, stride=2, padding=0) + + self.bn1 = bn_with_initialize(64) + self.relu = ops.ReLU() + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same") + + self.layer1 = MakeLayer0(block, in_channels=64, out_channels=256, stride=1) + self.layer2 = MakeLayer1(block, in_channels=256, out_channels=512, stride=2) + self.layer3 = MakeLayer2(block, in_channels=512, out_channels=1024, stride=2) + self.layer4 = MakeLayer3(block, in_channels=1024, out_channels=2048, stride=2) + + self.pool = ops.ReduceMean(keep_dims=True) + self.squeeze = ops.Squeeze(axis=(2, 3)) + self.fc = fc_with_initialize(512 * block.expansion, num_classes) + + def construct(self, x): + """construct""" + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.pool(x, (2, 3)) + x = self.squeeze(x) + x = self.fc(x) + return x + + +def resnet50(batch_size, num_classes): + """create resnet50""" + return ResNet(ResidualBlock, num_classes, batch_size) diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/test.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/test.py" new file mode 100644 index 0000000..0cc3482 --- /dev/null +++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/test.py" @@ -0,0 +1,35 @@ +import os +import mindspore.dataset as ds +from DatasetGenerator import DatasetGenerator +from 
diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/test.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/test.py"
new file mode 100644
index 0000000..0cc3482
--- /dev/null
+++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/code/test.py"
@@ -0,0 +1,35 @@
+import os
+import mindspore.dataset as ds
+from DatasetGenerator import DatasetGenerator
+from CreateDataset import create_dataset
+
+if __name__ == '__main__':
+
+    dog_dataset_path = "../four_face"
+
+    """
+    print("begin")
+    # Create the dataset generator directly
+    dataset_genertor = DatasetGenerator(dog_dataset_path, os.path.join(dog_dataset_path, "validation.csv"))
+
+    # Wrap the generator in GeneratorDataset to get a MindSpore dataset;
+    # ["image", "label"] names the image column and the label column.
+    dataset = ds.GeneratorDataset(dataset_genertor, ["image", "label"], shuffle=False)
+    print("end")
+    for data in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+        print("Image shape: {}".format(data['image']), ", Label: {}".format(data['label']))
+    """
+
+    print("begin")
+    # Build both splits on a single device (device_num=1, rank_id=0)
+    # just to check that the input pipeline works end to end.
+    train_dataset = create_dataset(dog_dataset_path, os.path.join(dog_dataset_path, "training.csv"), batch_size=32, repeat_size=1, device_num=1, rank_id=0)
+    eval_dataset = create_dataset(dog_dataset_path, os.path.join(dog_dataset_path, "validation.csv"), batch_size=32, repeat_size=1, device_num=1, rank_id=0)
+    print("end")
+
+    #for data in train_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+    #    print("Image shape: {}".format(data['image'].shape), ", Label: {}".format(data['label']))
+    #for data in eval_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+    #    print("Image shape: {}".format(data['image'].shape), ", Label: {}".format(data['label']))
\ No newline at end of file
--
Gitee
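The training entry point in patch 7 copies its dataset from --data_url and writes checkpoints back to --train_url, which matches a ModelArts-style launcher. A standalone launch might look like the following (the bucket paths are placeholders, not values taken from the patches):

    python main.py \
        --data_url obs://<your-bucket>/four_face/ \
        --train_url obs://<your-bucket>/output/ \
        --device_target Ascend --epoch_size 1 --batch_size 32 --num_classes 4

Note that create_dataset is called with repeat_size=args.batch_size, so with the defaults every epoch repeats the data 32 times; that is worth double-checking before a long run.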
From 58aabb350cf377f27629c52c1eb64bcaa5279297 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=AD=99=E7=AD=96?= <1805515795@qq.com>
Date: Sun, 11 Jul 2021 00:07:37 +0000
Subject: [PATCH 8/9] =?UTF-8?q?=E9=A2=84=E5=A4=84=E7=90=86?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../data_pre_process/allfile.py"  | 36 +++++++
 .../data_pre_process/process.py"  | 98 +++++++++++++++
 .../data_pre_process/readfile.py" |  0
 .../data_pre_process/trans.py"    | 92 +++++++++++++
 4 files changed, 226 insertions(+)
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/allfile.py"
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/process.py"
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/readfile.py"
 create mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/trans.py"

diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/allfile.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/allfile.py"
new file mode 100644
index 0000000..a05444f
--- /dev/null
+++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/allfile.py"
@@ -0,0 +1,36 @@
+import os
+import csv
+
+address = "./expression"
+
+shotlist = []
+for root, dirs, files in os.walk(address):
+    for file_name in dirs:
+        for x, ys, txt_names in os.walk(os.path.join(address, file_name)):
+            for txt_name in txt_names:
+                # store the path relative to the dataset root
+                txt_path = file_name + '/' + txt_name
+                shotlist.append(txt_path)
+
+
+with open('predict.csv', mode='w', newline='') as csv_p:
+    fieldnames = ['shot', 'shot2']
+    writer = csv.DictWriter(csv_p, fieldnames=fieldnames)
+    writer.writeheader()
+    for i in shotlist:
+        print("shot: {}".format(i))
+        writer.writerow({'shot': i, 'shot2': i})

diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/process.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/process.py"
new file mode 100644
index 0000000..8430c1e
--- /dev/null
+++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/process.py"
@@ -0,0 +1,98 @@
+#encoding:utf-8
+import numpy as np
+import os
+import csv
+from PIL import Image
+
+def getemotion(emotion_data):
+    # map the fer2013 emotion ids onto the four classes used in this project
+    if emotion_data == '0':
+        return 2
+    if emotion_data == '3':
+        return 3
+    if emotion_data == '4':
+        return 1
+    else:
+        return 0
+
+# create a directory if it does not exist yet
+def createDir(dir):
+    if os.path.exists(dir) is False:
+        os.makedirs(dir)
+
+def saveImageFromFer2013():
+
+    # read the csv file
+    with open('fer2013.csv', 'r') as f:
+        next(f)
+        reader = csv.reader(f)
+        imageCount = 1
+        fileCount = 1
+
+        # walk the csv rows and save the images grouped into folders
+        trainimagelist = []
+        trainlabellist = []
+        evalimagelist = []
+        evallabellist = []
+
+        for row in reader:
+            # parse one csv row
+            emotion_data = row[0]
+            image_data = row[1]
+            usage_data = row[2]
+            # reshape the pixel string into a 48*48 image
+            data_array = list(map(float, image_data.split()))
+            data_array = np.asarray(data_array)
+            imagejpg = data_array.reshape(48, 48)
+
+            emotion_key = getemotion(emotion_data)
+
+            # folder the image is saved into
+            imagePath = str(fileCount)
+            createDir(imagePath)
+
+            # image file name; a new folder is started every ten images
+            index = imageCount % 10
+            if index == 0:
+                fileCount += 1
+            imageName = os.path.join(imagePath, '{}.jpg'.format(str(index)))
+            imageCount += 1
+
+            im = Image.fromarray(imagejpg).convert('L')
+            im.save(imageName)
+
+            if usage_data == 'Training':
+                trainimagelist.append(imageName)
+                trainlabellist.append(emotion_key)
+            else:
+                evalimagelist.append(imageName)
+                evallabellist.append(emotion_key)
+
+    with open('training.csv', mode='w', newline='') as csv_t:
+        fieldnames = ['shot', 'label']
+        writer = csv.DictWriter(csv_t, fieldnames=fieldnames)
+        writer.writeheader()
+
+        for i in zip(trainimagelist, trainlabellist):
+            writer.writerow({'shot': i[0], 'label': i[1]})
+
+    with open('validation.csv', mode='w', newline='') as csv_v:
+        fieldnames = ['shot', 'label']
+        writer = csv.DictWriter(csv_v, fieldnames=fieldnames)
+        writer.writeheader()
+
+        for i in zip(evalimagelist, evallabellist):
+            writer.writerow({'shot': i[0], 'label': i[1]})
+
+    print('Saved ' + str(imageCount) + ' images in total')
+
+
+if __name__ == '__main__':
+    saveImageFromFer2013()

diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/readfile.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/readfile.py"
new file mode 100644
index 0000000..e69de29

diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/trans.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/trans.py"
new file mode 100644
index 0000000..251295c
--- /dev/null
+++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/trans.py"
@@ -0,0 +1,92 @@
+#encoding:utf-8
+import csv
+
+def getemotion(emotion_data):
+    # remap the label ids of the source csv onto the four classes used here
+    if emotion_data == '1':
+        return 3
+    if emotion_data == '2':
+        return 1
+    if emotion_data == '4':
+        return 2
+    if emotion_data == '6':
+        return 2
+    else:
+        return 0
+
+
+def translabel():
+    imageCount = 0
+
+    # read the csv file
+    with open('training1.csv', 'r') as t:
+        next(t)
+        reader = csv.reader(t)
+
+        # walk the csv rows
+        trainimagelist = []
+        trainlabellist = []
+        for row in reader:
+            # parse one csv row
+            emotion_data = row[6]
+            image_data = row[0]
+
+            emotion_key = getemotion(emotion_data)
+
+            trainimagelist.append(image_data)
+            trainlabellist.append(emotion_key)
+
+            imageCount += 1
+
+    with open('validation1.csv', 'r') as v:
+        next(v)
+        reader = csv.reader(v)
+
+        # walk the csv rows
+        evalimagelist = []
+        evallabellist = []
+        for row in reader:
+            # parse one csv row
+            emotion_data = row[6]
+            image_data = row[0]
+
+            emotion_key = getemotion(emotion_data)
+
+            evalimagelist.append(image_data)
+            evallabellist.append(emotion_key)
+
+            imageCount += 1
+
+    with open('training.csv', mode='w', newline='') as csv_t:
+        fieldnames = ['shot', 'label']
+        writer = csv.DictWriter(csv_t, fieldnames=fieldnames)
+        writer.writeheader()
+
+        for i in zip(trainimagelist, trainlabellist):
+            writer.writerow({'shot': i[0], 'label': i[1]})
+
+    with open('validation.csv', mode='w', newline='') as csv_v:
+        fieldnames = ['shot', 'label']
+        writer = csv.DictWriter(csv_v, fieldnames=fieldnames)
+        writer.writeheader()
+
+        for i in zip(evalimagelist, evallabellist):
+            writer.writerow({'shot': i[0], 'label': i[1]})
+
+    print('Processed ' + str(imageCount) + ' rows in total')
+
+
+if __name__ == '__main__':
+    translabel()
--
Gitee
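process.py relies on the standard fer2013.csv layout: three columns (emotion, pixels, Usage), where pixels holds 48*48 = 2304 space-separated grey values. A quick check of that assumption before running the conversion (same file name the script uses):

    import csv

    with open('fer2013.csv', 'r') as f:
        reader = csv.reader(f)
        header = next(reader)  # expected: ['emotion', 'pixels', 'Usage']
        row = next(reader)
        print(header)
        print(row[0], len(row[1].split()), row[2])  # label id, 2304, usage split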
"a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/trans.py" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/trans.py" new file mode 100644 index 0000000..251295c --- /dev/null +++ "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/data_pre_process/trans.py" @@ -0,0 +1,92 @@ +#encoding:utf-8 +import numpy as np +import scipy.misc as sm +import os +import csv +from PIL import Image + +def getemotion(emotion_data): + if emotion_data=='1': + return 3 + if emotion_data=='2': + return 1 + if emotion_data=='4': + return 2 + if emotion_data=='6': + return 2 + else: + return 0 + + +def translabel(): + imageCount=0 + + #读取csv文件 + with open('training1.csv', 'r') as t: + next(t) + reader = csv.reader(t) + + #遍历csv文件内容 + trainimagelist=[] + trainlabellist=[] + for row in reader: + #解析每一行csv文件内容 + #cols = line.strip().split(",") # 根据逗号,拆分csv文件中一行文本的元素 + emotion_data = row[6] + image_data = row[0] + + emotion_key=getemotion(emotion_data) + + trainimagelist.append(image_data) + trainlabellist.append(emotion_key) + + imageCount+=1 + + t.close() + + with open('validation1.csv', 'r') as v: + next(v) + reader = csv.reader(v) + + #遍历csv文件内容 + evalimagelist=[] + evallabellist=[] + for row in reader: + #解析每一行csv文件内容 + #cols = line.strip().split(",") # 根据逗号,拆分csv文件中一行文本的元素 + emotion_data = row[6] + image_data = row[0] + + emotion_key=getemotion(emotion_data) + + evalimagelist.append(image_data) + evallabellist.append(emotion_key) + + imageCount+=1 + + v.close() + + with open('training.csv', mode='w', newline='') as csv_t: + fieldnames = ['shot', 'label'] + writer = csv.DictWriter(csv_t, fieldnames=fieldnames) + writer.writeheader() + + for i in zip(trainimagelist, trainlabellist): + writer.writerow({'shot':i[0], 'label':i[1]}) + csv_t.close() + + with open('validation.csv', mode='w', newline='') as csv_v: + fieldnames = ['shot', 'label'] + writer = csv.DictWriter(csv_v, fieldnames=fieldnames) + writer.writeheader() + + for i in zip(evalimagelist, evallabellist): + writer.writerow({'shot':i[0], 'label':i[1]}) + csv_v.close() + + print('总共有' + str(imageCount) + '张图片') + + +if __name__ == '__main__': + translabel() + -- Gitee From 8344503f252ae470d845f560154e8a72cc5afa23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=AD=99=E7=AD=96?= <1805515795@qq.com> Date: Sun, 11 Jul 2021 00:07:49 +0000 Subject: [PATCH 9/9] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20code?= =?UTF-8?q?/2021=5Fspring/=E8=A1=A8=E6=83=85=E8=AF=86=E5=88=AB-=E5=AD=99?= =?UTF-8?q?=E7=AD=96/ckpt?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ckpt/.keep" | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 "code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/ckpt/.keep" diff --git "a/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/ckpt/.keep" "b/code/2021_spring/\350\241\250\346\203\205\350\257\206\345\210\253-\345\255\231\347\255\226/ckpt/.keep" deleted file mode 100644 index e69de29..0000000 -- Gitee