diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/.keep" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/.keep"
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/Readme.md" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/Readme.md"
new file mode 100644
index 0000000000000000000000000000000000000000..f7e82fc79c716b1f784162aebb4a5a8b1c0cf814
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/Readme.md"
@@ -0,0 +1,4 @@
+本次提交的代码基于lab4 resnet部分示例代码完成,修改了部分配置以适配自行处理过的火山地形InSAR影像数据集。
+主要代码文档为resnet_volcano.ipynb,其余部分为支撑代码,可能有未用上的部分,为了能完整运行故全部上传
+代码运行环境为华为云ModelArts下,镜像为tensorflow1.15-mindspore1.3.0-cann5.0.2-euler2.8-aarch64,规格为Ascend910|CPU:24核 96GB
+为了方便提交以及减少仓库储存用量,只提供数据集样例,完整数据集可从https://seis.bristol.ac.uk/~eexna/download.html下载。
\ No newline at end of file
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/eval/background/africa_adwa_079D_07894_131313_adwa_20150127_20150328_162_134_1.png" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/eval/background/africa_adwa_079D_07894_131313_adwa_20150127_20150328_162_134_1.png"
new file mode 100644
index 0000000000000000000000000000000000000000..45dc76974ccf1f19ec02e821e2ecf1e21865618d
Binary files /dev/null and "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/eval/background/africa_adwa_079D_07894_131313_adwa_20150127_20150328_162_134_1.png" differ
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/eval/volcano/africa_ale_bagu_079D_07694_131313_ale_bagu_20170104_20170209.geo.diff_pha.tif" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/eval/volcano/africa_ale_bagu_079D_07694_131313_ale_bagu_20170104_20170209.geo.diff_pha.tif"
new file mode 100644
index 0000000000000000000000000000000000000000..939b98c56ffc1f5f06c9b5271073634b69a688ac
Binary files /dev/null and "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/eval/volcano/africa_ale_bagu_079D_07694_131313_ale_bagu_20170104_20170209.geo.diff_pha.tif" differ
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/train/background/africa_adwa_014A_07885_131313_adwa_20151013_20151106_158_82_0.png" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/train/background/africa_adwa_014A_07885_131313_adwa_20151013_20151106_158_82_0.png"
new file mode 100644
index 0000000000000000000000000000000000000000..24535efcccdd8b194dff9459ae92719111c05d41
Binary files /dev/null and "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/train/background/africa_adwa_014A_07885_131313_adwa_20151013_20151106_158_82_0.png" differ
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/train/volcano/africa_ale_bagu_079D_07694_131313_ale_bagu_20170104_20170305.geo.diff_pha.tif" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/train/volcano/africa_ale_bagu_079D_07694_131313_ale_bagu_20170104_20170305.geo.diff_pha.tif"
new file mode 100644
index 0000000000000000000000000000000000000000..aea705ea5242c441794bdfa53cc19fc9ad0dd483
Binary files /dev/null and "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/dataset_volcano/train/volcano/africa_ale_bagu_079D_07694_131313_ale_bagu_20170104_20170305.geo.diff_pha.tif" differ
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/resnet_volcano.ipynb" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/resnet_volcano.ipynb"
new file mode 100644
index 0000000000000000000000000000000000000000..f889bc009085e8e6b35456f2557df32e0d6bc945
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/resnet_volcano.ipynb"
@@ -0,0 +1,698 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "6035a7ec",
+ "metadata": {},
+ "source": [
+ "# 火山监测分类任务(ResNet50)\n",
+ "\n",
+ "\n",
+ "## 背景介绍\n",
+ "\n",
+ "本研究基于N. Anantrasirichai团队的研究工作和本课程的实验示例代码完成。随着近期卫星图像在频率、类型和可用性方面的进展,对偏远和难以到达的地区的火山(包括无地基监测系统的地区)进行常规化研究逐渐成为可能。其中,星基合成孔径雷达干涉(Interferometric Synthetic Aperture Radar, InSAR)数据可以监测地表形变,这与火山喷发有很强的统计学联系。然而,近期发射的哨兵一号(Sentinel-1)卫星产生的数据集太大,无法在全球范围内进行人工分析。N. Anantrasirichai团队系统地处理了900多座火山的>3,000张(公开部分)没有进行大气校正的非解缠短期干涉图,本研究基于其处理结果,应用深度学习算法来自动探测火山区域的地面形变。\n",
+ "\n",
+ "【环境要求】:\n",
+ "1. Python 3.7.5\n",
+ "2. Mindspore 1.1.1\n",
+ "\n",
+ "\n",
+ "## 总体设计:\n",
+ "\n",
+ "1. 环境导入\n",
+ "- 数据集导入\n",
+ "- 构建ResNet50模型\n",
+ "- 训练模型\n",
+ "- 测试网络模型\n",
+ "\n",
+ "\n",
+ "**说明:如果运行结果中出现WARNING或者UserWarning,无需担心;不会影响实验结果。**"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "0538fbcc",
+ "metadata": {},
+ "source": [
+ "## 1. 环境导入"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "072c7ac4",
+ "metadata": {},
+ "source": [
+ "### 导入相关模块\n",
+ "mindspore包主要用于本次实验卷积神经网络的构建,包括很多子模块。
"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "9494f9f7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "# 导入os库\n",
+ "import argparse\n",
+ "# 导入argarse库\n",
+ "import ast\n",
+ "# 导入ast库\n",
+ "\n",
+ "import sys\n",
+ "sys.path.append(\".\")\n",
+ "sys.path.append(\"..\")\n",
+ "\n",
+ "from mindspore import context\n",
+ "from mindspore import Tensor\n",
+ "from mindspore.nn.optim import Momentum\n",
+ "from mindspore.train.model import Model\n",
+ "from mindspore.context import ParallelMode\n",
+ "from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor\n",
+ "from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits\n",
+ "from mindspore.train.loss_scale_manager import FixedLossScaleManager\n",
+ "from mindspore.train.serialization import load_checkpoint, load_param_into_net\n",
+ "from mindspore.communication.management import init, get_rank, get_group_size\n",
+ "from mindspore.common import set_seed\n",
+ "from mindspore.parallel import set_algo_parameters\n",
+ "# 设置mindspore运行环境所需要的库\n",
+ "import mindspore.nn as nn\n",
+ "# 各类网络层都在nn里面\n",
+ "import mindspore.common.initializer as weight_init\n",
+ "# 用于初始化权重\n",
+ "import mindspore.log as logger\n",
+ "# 用于保存日志\n",
+ "\n",
+ "from resnet.src.lr_generator import get_lr, warmup_cosine_annealing_lr\n",
+ "# 学习率设置函数\n",
+ "from resnet.src.CrossEntropySmooth import CrossEntropySmooth\n",
+ "# 交叉验证\n",
+ "from resnet.src.config import cfg\n",
+ "# 参数配置\n",
+ "from resnet.src.eval_callback import EvalCallBack\n",
+ "# 回调函数\n",
+ "from resnet.src.metric import DistAccuracy, ClassifyCorrectCell\n",
+ "# 模型评估函数\n",
+ "from resnet.src.resnet import resnet50 as resnet\n",
+ "# 导入模型构造函数\n",
+ "from resnet.src.config import config_v as config\n",
+ "# 参数配置\n",
+ "from resnet.src.dataset import create_dataset2 as create_dataset\n",
+ "# 构建数据集"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "be1ea5d5",
+ "metadata": {},
+ "source": [
+ "### 初始化训练用到的一些参数"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "1c5524aa",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "parser = argparse.ArgumentParser(description='Image classification')\n",
+ "parser.add_argument('--net', type=str, default='resnet50', help='Resnet Model, resnet50')\n",
+ "parser.add_argument('--dataset', type=str, default='Volcano', help='Dataset, either cifar10')\n",
+ "parser.add_argument('--run_distribute', type=ast.literal_eval, default=False, help='Run distribute')\n",
+ "parser.add_argument('--device_num', type=int, default=0, help='Device num.')\n",
+ "\n",
+ "parser.add_argument('--dataset_path', type=str, default='/home/ma-user/work/Course_NNDL/pack/lab4/resnet/dataset_volcano/train/', help='Dataset path')\n",
+ "# 训练集地址\n",
+ "parser.add_argument('--device_target', type=str, default='Ascend', choices=(\"Ascend\", \"GPU\", \"CPU\"),\n",
+ " help=\"Device target, support Ascend, GPU and CPU.\")\n",
+ "parser.add_argument('--pre_trained', type=str, default=None, help='Pretrained checkpoint path')\n",
+ "parser.add_argument('--parameter_server', type=ast.literal_eval, default=False, help='Run parameter server train')\n",
+ "parser.add_argument(\"--filter_weight\", type=ast.literal_eval, default=False,\n",
+ " help=\"Filter head weight parameters, default is False.\")\n",
+ "parser.add_argument(\"--run_eval\", type=ast.literal_eval, default=False,\n",
+ " help=\"Run evaluation when training, default is False.\")\n",
+ "parser.add_argument('--eval_dataset_path', type=str, default=None, help='Evaluation dataset path when run_eval is True')\n",
+ "parser.add_argument(\"--save_best_ckpt\", type=ast.literal_eval, default=True,\n",
+ " help=\"Save best checkpoint when run_eval is True, default is True.\")\n",
+ "parser.add_argument(\"--eval_start_epoch\", type=int, default=40,\n",
+ " help=\"Evaluation start epoch when run_eval is True, default is 40.\")\n",
+ "parser.add_argument(\"--eval_interval\", type=int, default=1,\n",
+ " help=\"Evaluation interval when run_eval is True, default is 1.\")\n",
+ "args_opt = parser.parse_args(args=[])\n",
+ "\n",
+ "set_seed(1)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "41e52590",
+ "metadata": {},
+ "source": [
+ "### 定义用到的一些函数"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "0e959522",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def filter_checkpoint_parameter_by_list(origin_dict, param_filter):\n",
+ " \"\"\"remove useless parameters according to filter_list\"\"\"\n",
+ " for key in list(origin_dict.keys()):\n",
+ " for name in param_filter:\n",
+ " if name in key:\n",
+ " print(\"Delete parameter from checkpoint: \", key)\n",
+ " del origin_dict[key]\n",
+ " break\n",
+ "\n",
+ "def apply_eval(eval_param):\n",
+ " \"\"\"evaluate the model\"\"\"\n",
+ " eval_model = eval_param[\"model\"]\n",
+ " eval_ds = eval_param[\"dataset\"]\n",
+ " metrics_name = eval_param[\"metrics_name\"]\n",
+ " res = eval_model.eval(eval_ds)\n",
+ " return res[metrics_name]\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "93cbeaa8",
+ "metadata": {},
+ "source": [
+ "## 2. 数据集导入"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "7ea07c10",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# create dataset\n",
+ "dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=True, repeat_num=1,\n",
+ " batch_size=config.batch_size, distribute=args_opt.run_distribute)\n",
+ "step_size = dataset.get_dataset_size()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "f256c938",
+ "metadata": {},
+ "source": [
+ "### 数据可视化"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "id": "fbcb00c7",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAOMAAADjCAAAAABS5fyQAACXrklEQVR4nKyddV9VW/f2V+fugk13hyCIqGB3d3d3d3d3d3d3gVgomIQggkg37O694vnj97yA+5zjegffz3XNOcccc4w1wHXA3/zchq4uz6ncEjj+jgAuemwB+xlLHamvd7qsyHl2Omnw16ma+cA4WdbczdeHKT64utfaQ2DFAcWFiy5PXZv9ylo5/kPp6cqvt8NXGLccf7TLbE+4SBw5RIlGzY20t4ewscb067nt8RfruQPBr859yHwVDm2YAuloBqhs7l+p5xEUseMMK0BgFGRwB8qDIAiiEAQifxXxyfMW+57XRQbGr6Coztnl51q+K2u4zXVoFrMhii68uSCtx9i1fPRTbmnxjyV23v3wnx+f4uriptb/Vr56nx7hF1lWl71wXNnepvHnxsreqALvPls/8Ajt3W6rx72XV2MTC2qPnirDBljjxZQ42GSU+gjpM29dXZgaQ7ItD3LhVKAWMYmdKEJyOIuyOA9AMAQ7IBD6q4zPTqU/MbWPfR7ZuqTJzEyVbitJUhZWdajVjTlL1OILLXP6Ny3p3csc2bp88LPCpvTg/ZNdniHf5vq6NKUvfAINRHpylcKWdQ+P1eovIL9qfYP2uF8LsH0693VXY+HM4Oz1Dc+iprn/2JqQsxaMbsqLrufKmyOlWEiozmzDeAFjUtBeBCkWSqUyKSZGcYIgSUoul/9dxkERM35zczd8G92qKmnd9cqiCYEyn/1afL5xzE0sLlKfvPhj/iDdz/Lps8SfSjKOZUtDr8t/7T/y+tCrQv2Kg/viwnYEp21QGVcl/Br/fYl06W3hrp/aTrevmUp3Cstyp4x4GqLVrxbFDHcJPexY7pkkOvJK910irbBwYspNLkYQMSWGXGVKhUxKKFxFbgq5XK6QUwT5d72aEspeTHQPM52b/XFgx/EfL7Ue+X1Bz41T16OpXHmzvTsuiNSZJ6Lv/xxY/7nbsd3Bu8e1uBMmSF2b+dX91iTgVdTtMYfejwo9fm7X2v0385Y5IxbeDylqWhLa7/IQeSMwS+MHh97Dx2P3Wwe2aCjlExY2LFLWAlYfC0z4VVhEAhgmBA7OKeUJDCAYkucZAIBAUADAf5XxU/iAMY1Rx5DGUOHdPU2u06u3WyJ/uWo2HK86ceLMsaMRF95NUWiHC9q4i0oPXBsx48uSj/bMpkPg8Uftlj7t04b293w83i/28OxPTz9Oia15tkbyum5eTZfGSyOPBu20Htn7Y4n82YzEKbdnDH+Q4OMAb/2CbKgUqkXFgloSBwEJyREoJsUxnkB5jKUYHgQB3gbx0F/1as9NSfSt0DMNxmaVExofuuXeHeKB9z8uPFk+Psf0uutByU/SnTt+RXt61MJhfOvU28x8d9P7bd2C6vo3nVxxrOACNL9HdvS+a6OQEfMaR4eVXgj/3j5n6bkMzrw8sV3j1f1bPwXXAI2Pr2WRjQsaci9mqoVSla9ebimFCZSWu1G8WEQThJQQ4QKhVEHRFEEQpEwsxf6qjgr1tw7ez5tZrl1J7dH9RJ9Y247ViMnSUvsB2Hooaekcx5LtiVTMfC5H0y3c+0tqdNURz3ZZBduIpbrboeHBw1s71Cs3qjjDFcFX27L4HO9uB13KDrmXbV3XquFC2ZEJnP3g+oW7vY4iU+1PV7LZOwyltUC10Q0LNPF2p4ABQYogUYAAIZCECJTHWJ4HON7BgX/XqzV34B8egc3VbfqFl63GlV/7LZhckj0bFx7tUZnlP96IDprWql+34Ipk49p3nQc+QK9cdK/c23f68o1hvZu/q193c2bmM+WvOZ3y3zQN6+mqH/tVcvRQ7eoO5U/62K+feOmS+6fJa+sGb0OA4w86ZdAfk8WO1FI1lFlKOcQ2KSmQQDSIciDCECAOUzDA8jzPcggP8n/Vq0rDEOOsh91MgQf3jCKnPq97uBeER9/yDOArd67udnh0
x12Xph1W3xsjrAg4Ql+74Hsscm3MoaInPzs7283sOHbvhg63tlpOMqbs9arsQNulrmv73YtNJUvjEyvaFgcQM4Nb3P4GP/4V+rLO7lh6uINvTnV2AQZaaAlPCxQ8hrM8h4CUEBcSYkKAYYhYJpURGIqiGLju7yGmX7meWtYrWJDjcQl0remY2dL1zcaA3ty8hYUDs84c/rHi8MIdOZMMmtYT+6v7n888ETcFDF2XfBvwNyZ9yhs5vY+X4NTXKZU9z2aThW36N55jFyynWjwa7Jbo/qDAtKv03kes9uekW31csaNfWhUwdaq4m6zeZq3l5TBEEKRAwJA0BuMQgCIggWIQCHEAzzO8CQD+JqPp3bRPZ3t/f9pRvvlgdY8pEv5C4I9HT2X639vmzKlczr1c59LlekNsyabK+2EXon7eyYnLCD75/RhcZE54PvjxaQQU3lg6UaaNWNZfdvpajta6tnDOV2ezcRv79WqsKV1IzrQWuF7t9T2HvfADC+ArxZXfxT65rJ3VS+xSEYxSNM4KIRJEYQqCMRjEWAC1AyzLcsDfXI97cqYPCxk/tOuwmFmmccccWT1SNAE3rj/ABPwcQatT2V/n1BSt7us69+fwBBE5X3txL7l7nd+ls31bnPrqHuFx6FlRFHFnyKqcqdteZEL3u79/VH1gra3T135/Qv640tOfLI9T7gnp9Gz6OHtDBF6c7UnLgkKtRAWll6A4xthc7QRAAhiIABAMYyCMQhgIwABvZzjgb65HdMOrmLCyBTcaEiSDkpbIF12EmAkHA82W9PNHrkX0neh22+9EJ9VQqp/6c6A46JM9zGPPMmZG3clDNsW61h0HfdNf3vPl6jfRnwoPSFcQriVcrmvWL6VOHnwM2bZlP75yZmNj2aFvi1qe3CGsQGcqpENVHMHDOonT4rDhCieC8BDIAzxGwRgEIxAMIygKI6QAQ/6mV4XvoaEjhl65jWX+cc/9sTKT7TTwRP7s9NA12RTWqcvsXbcWbQpknzSVH2m/YU5iQ+9mTVvOTimEZ3Z9e3Tblr5Y9tVP59f4n19zzTppLeliGex6fPrctl0E95PevFuqllkcH9udCtZu7h09dGrQ6+56J2c0p4Xhv+sgO0KzOClkRQCFUSgOoyQCozCKIBDI8DzH8+BfZGy+Z+DxTRUoUPVwcJ5Ral7fzXeDeOr57B7D9knuGOGXd8dOT6voczNg0cZfgME9pLbw8zNx06MuLuscc1yijm4739/v10Unpx+nS0lXrh+zdHfIKJ3boR4lPpMULeIDMs6Yf2RYG/qsWb8nt9/2ACoj6I9Sbamz/aQrKaNVgJMoICJECI7COIGhEIzCMIpAPODgAOBvrsesGjBVGPlw0GaXVvLZALqyY2btjIltzj1q2PCy6du3hY0rrybOm317sHfwxd2ORb74F/TUy1woia8uT43RX37WZUa8s36fp1EBx8HLmqkmRz88tRw9cRIKRzbeGIJvDQkP8bj23LxH2OdLl6T1gnCzNwKU0TqhVS5ROCkYIgEWt6I4h0IgDEIoBIEACAEgxPEA+xcZR96Glq9c9BQ/cd63+emU7Ukr7/dMwXfItF7K+QeFp4OvLhtfaVasuus1xwpJAgL359O357V8lPDpyoeEd4OSkp/VT3KvUZp7K+KGCPfqlqbmakzGyOyQIW0OPdrgQy6uTmsmKXip+D7688EJTRXh9sAqYZnnT4lCa9ZgmEPGcSDvhDAeAREYhBAQghAUQQEOAnge/4uMDTdqVrK+cqU82k7WLpwhnDkIlB+/M2pJ2K+n48//WYLXv22nvf5i9LVuv9fgCx7fSxZuNDXY4/ZuvLH10pAA/OY1zfCupR8DoRktx4TmBBr8t2zp2g32n3u50DIucD76u+Z7QX9XOPf0saTY+51tnIW1KaFEwATyHGXmWQIBOQcO8hwMQjAEQQAEQiAIYYATBJC/x9iqoW+rmOirf4K3pvms77S+ZlFx5hevD0nJ3sJGH/q6KhAD
k7jzSXEAf5wZmlLieoP1mb8n4Ah9zq2opMXXMf6Pgtt96O6/tuWYhJ+70MDmsUN7dPR0PX+/TukyMVyrWqOeevC+y9EVP/dNrSjqvLjuJith6l3QP5AJ8NfCNKNV20keAGGQB0EA5CAMQmEMAmEABZm/uK/GW2L48LiO0dvSF3l/ib/1Z82hzty5qMwfe+euzZs8uWXB2N+LD30Lev7jDyg5O+W1vb0xbfyYbdq6r21uH+y1lO9cKxk4ZSRE2r6Ndiy+ZUAqXTTDnvyCAsoPT1l/f3gvgahJ1n4+s+aZW+x579UEW+BNa3F5kRTUippMnAPA7IiDIHCYREUYgsIIhuAwCRAgxEHQX4xzRMfH/bayL41+Sya1SwjU33fNe4q5DHEP7BOzMahTxsw52bvbDqh/4u85SrJYMq3vTfPI5n4+yzrFL63O/UIMrB/QbUCJ9OP4gi6m9sd2GKxeuCnrWccvUT9fb7k1eGu64CXY63Ln43lfG707nfMZGt76Y2W0somUWLQSqJGzGBEnC/AoSkEESeIogqI4iiEYioIQwuH434sBSjpQ7hsvfp9CLBn6OGz8tnb5ZyzN+u6r8ZlMNg3cfsv35zJw2YoDtkNbLnRfP2bz19L5m+76HYsfUr/sytGJRbetoTFnPhzzLR8fGTTRhfOQC9yy6nq5yh3Bj9NbzxroPNfpUZytwv9Az66IV6uRHZkPZBKjFbMG1p3heASVcriYJRHOxvEAAPB2hucAEIYgCEEgAv2LjPC0ddm1r8JX/USmbk4quXYvpnHY5jUS9w+t8se3zdzXOkKfh6zEaWHOfeXWvJBfebqTM4p1E7ult/q9Qrunb2VC5QiXxdcV1IEP565Z6p1NlpFhKzMUZtPzLjP6sAszZXn+cFn7fa3eTzdoml5RtP8XF0bX1Fhnpm28HadoyEk4EAR22AwWux0CQJDneZAHeBDFxMK/xih+0XB851tnhVtb4+kphxo6O5dPOra8fkeEq2eY+X70Y4f88cv1xg1+sYlHtrWJ7BA8Z7k/O7DlXe7ovCDr0MWr07e9DyypTmAmuXgoDsGiwNAgfJbqBZ9VrH9yT1WKOyIbr3Wa99IWEnExurCRaiIK3MuhXNyJ1zgwCYYBYhFE4hDHMhaH08pyAMfzIATCIAgCMEz/tfV4vXhwmeVKxcKywEe8b7RGHS+MDH5YP0Hzc9nj5+ePPxC5zV9c4bk9XL1s9dpuzQY9EZ9/H2Cfopf1rzv2rPADdd3+XbsFfOw/hT1lCvcqoyBvSM55KX2pmN/TH+wM4ftoaz377B/lVvS6zq/KW+KL2BwOIY4qjSJSj7F2wA5AWtrKMpgAFeEUjBMEQaAoguC4SET8NR2fULvPN5R+uXm+Go+xXMJbb9x8brjHnOKKu6oEP/chCTdtXw7KvZ8hE6YHzYLHTzoRvX6AeG74aO8Bz91HLvx2d4Zbwv0jo9LWzu2se6YtwCC8yap31JaY7IzrWy/6d1Bj+VxraPLln1PadeRcLUgNh1MgygFmuQbgYQBCOYGTN3E23sFajFYjywMAz7Esy3IgCAn/2vnYtcdNd51l/nzvgcsnFz9d5ub15cxEbIp57VWvddJvlvfT7rmMmnaDSWDP+rDK3xHMcvf2eWs2JzfO2r+yvWUcf2dHfndteuLvFokPnjgRG89gMC/UxFUl+QI6VYL7p96ZJo+sF+Fne7TycP8RAtCvxc+XakVGPYObLLCVhDgLQ5sAJ8rqCMwBOVEHyjKQjURQAsL+Xj6ne0eiocO5K6fmtqO2Ga/7LDEVLWwZqYsCu425deL3keKxQsr9wL4FtYO3F8jRwXTwPdTd+aXSLCUe/R4SIXxsOjeNKbZtebP3aOvxZoK3MhQCMvx7e+LXjJZnISgq81VVBQPqgjbrvdozea/3yNkxqNzhNDjkvN2GkU0Y4wB4B2QDWciOEqAVtnO8E2d5HhSKZH/Lq9l3nrhZicWGO56/BxoyBjZsRkvfAbILe1tiaLfW
LwRPmfNe8hkZqS6BKSuC9rnG3Z8oOH5uuMeyUwOv3GttzhZbRaM0r9XDW3wf2WsK4GAtVrPByTCsWRZdHdBiOBZba9y3zxQkePg0p+w1GDjlJwzzApPJTpNsnRG16XEL6HQ47U6bjXc6TRaLze7g7DwIoQACwn9NR/hEoP3EjPhr5g7r/sx/422+Ft5XfOtgTMeS58vj95/8bhmQIVge2mrDqAlj5a2PrV0fdXInZT82uOsyZ935xUfD83KuNHhu3Z5haN/KOciJMQDIAYwU4exmZ2JdpHHt2IyiKQ32DDGBsr8YBtaxjFznwBkYt9gh0GYlzGajEzaDLA9zvBNnWdCOIDzDcjxJS8V/bT0WhHr3mGruk1Qt1uf2PffNcea5fy+zcZ5u2bg8HI8a8HXnZUH3H7NqPp2f8nZk+/LC8WEdgAV9zfHJFs/LN2Pd3ue5HipbPtR862rSWgtswRlAyNgbBCYHzA+I9+YJokKVKW14JIToMps3XCY0kAYRYnIgZgyDYQfUyHCwmSOcTtgOg6AdNdEoyzGcA2cBCELQv8V4DOtsat1sQ20ChK42Xjk2p+DPTlY8sBrcfnBsQlPGmoSw4EFc9uj0zunP+ueVfhm1Pv7Eq7s7sjF1f/BxskgCjY+PHd+Upvle2HyPh4BlecggcKJWnrcgFmc9wijFEEJYhE2kmaVwA1VUB8otThBmETHkQBlEhwEOVGAEQBYAAZbnLSAAQgiKI4RAIID+llelqtf4Y2fS8av16nfFz2o/nYt3ve0/4gBFH9s2CZphmh5marWxsn9an823gcKJ0sH95xVVuT4PZLAHQT9sM1J2XXKdt2kcvSe7jL3ZCrDYLSjFa2EhCAIAxNNOwsxYRVZPVoCydsJR6aBRq1QPEhDAkVoIJlA7b3VSTjOHwRAH2UingWBZjgMcTpYDIUgg/kuMD08N9Zh+dPVsa7WgvnOwM7jDov1qEvmd18JY0efo/kndd895/fvtUvMPSe68IRVT85cFNw9797SZY3BXqN28XMfg+THRQdfe05NSjk5+URBow1iGhngdxNMcz9owJ4RiOGZD7AzA6OQGTC8hnGaM5UGYdTpkFoYBIbPVikMEy0EAAjBOEIZgGEZRXCwAIeBv5XPanDG3zF49fuvz+acZ30eJV5qCZtJ89eG7ncjuqftlHf1zRsc+uxg2a3zP+aPD1hanPvl4q6i4cl5tiwOCMeVHbrScJO5cee11O/BxUO/rNQxCwAhKwRAPQhCKSVBKKhaKaZoWMRAs5VkSBAQMZxLgIAjzMMWBIArAqJNAOdyBkChGESBNEbQAUyo93dxcXVz+ko6z35rnhlwLuHzLuftm446ODy/6f4y9mxkx5VXKmP3zMk9PeeGG5/C7JYnxLrum5YPG2oxvaxNHN2L7Hq6HBM+2qVrFD1sxYeKua19f7XxONECsSQBZIQgFQZAH7CBjBiDKCHAQR4GNKGjGBfUYAFjsLM1iHMhgDE9ADAYyiJllUQCAecoMghiLkwzA8DwA/SXG0p8ly15/F7hO/pTd3uVC+sg+le1K0mb3mlX/THGJjdj1q1flVKtL1Fk96jXnZHJV5xUm8FuV3ngei/w+yDLM+nb04i3BCf20cztWrjQ3OngYMIgAEHKCGM4zdhAGUWsD7lCZIACxkxBnc5B2O2QWI1bAAfEW2o4zPAyyjABGQMgG2UAnC8Mo5mRogiRw+i/VPPypqXYzt26WO3v3m0HDu7mkVc/4YTlFHCk+HgnK2z88szA+7PmrVWflloM952fnqR73dvgPS/1QOjc6ShL33Md3lcRmV80QifoNH7pf9S60SWKDUTuMQxxoQwW0lQAQ0AaDYBOBm1CphjBLWC2KODkGEGEWwgixLKCnMZAnOAiwmwk7C4Ewy/Gw3eFgHQwAiP4O49DZ66HyMW0r0YM2obP+Sfuh3ROv6euOde84iFUIDkePzWj/df/NAnTSbNWg4jmX2FNtgIO7SwIH58zFnnhsFAOq
vjfzl00cv/5ywPzDPgf7amWcA0RMEItxqMNOggzA4SjGsXYnzjcxdocDoTg7wDkhDcoZEUQA6VCLlYZgmLOggB0ibABtQzGbGBVhKMX+rfxq/ZukpDqwJfn8btadkevT9+aVjvhwIJbUVR5pf77juH6dyb2tPX8qi455fyuYcXcr+GhhvvJFTLuK/W2rXMz3vbsfb0/O/aa9dPTq97ehSfEcaBHaQR4AOBtrQQQQ5jBzTsaMqjHSDDtgB2PHeIbgGJCDQRDEODPkxAnETLNGnHOCMGelWI5ELajQbBDLbQr8L+no9Xlh/Kpt5VwfPKVtr2tO2909NfcOT6g9vX27OP8Jak9bNjFpYNzWH99bXZbwa+eZS0fcvHVk1JVy/8xlLbfm3s+EEvWOpOPCJT5f37fcM6k34zAhvIPDeJ5xQGYKhiCIA0AbS9GIA4QYFucAFoQcLA+AnNCK8xaYcwjsME85eJippVkTAQAghSAIhthsDpb5K4zbUifIoytSrqzNzXe8D7zQfATdBG+9NaA2svnjCx08WQixDjl2N3fvxfudIeMSQenN9d5EZ+ej+Zd9j2ZtkLysXgx1ti/73uvm2no3eu3h4UPLSQRhOdaKszyP6upFAgshY1kEgmGUgzEzbCUgGuYZJwiCkBljGBBBcBto4XkH4nTiVg5wCs0wZLM5bRYFLvSm/goj0jdi97i1EeCHZaNOrr20vsPaOPfFReuKtj+/blkt05WyZaOcIYO6X39lq1SMm3fSc5Cm9afND6uaqo/pHepBLwe19h/76sUmR+O+J6sc8/NfH+hrpGCAYzgLy5NOJ+1wwKwTw+QACCAkzFgxBgBAnGURniNB3gY6INzCQ06QY1kW4gHUAYBGiU2AwlarzWq2+jn+CmOLcZcubRk1KUva5Fo5w7tXhMw3zjcz1vrK607Vi5qurac8PPHl5PSU19cra1u7vXkWEF5fcHK6Z8HdgeUBbVJc+fiN5y4nf9QaLru1O3TybtQwZRJl4p08wnIsYKOcEGykUJxxOAUiGKVhwqYyMgjPYaAFBkywRI+iIA/ARhRgWQbnCdBEQTbYQupwSgJAIIbKrX8lziltHZIeuTa23YTkcbPCgwNN5ZeHD9xcPyi1fd9p3yZb8cWjLm1zaoKOzD7cjwyvS87uLF6+J2D2HWJW8qilf768VEekzL/74Qyi85Q5pcU918mDm3MwyNucOAuCCKqgSBLCSAwlRAQBSTiCNYtxHBNyDMDAEATxMIoTPIHjmMBBkhhpwyhKJJK5iISeHh4+Pv4h/n/njtx7xPolZ83S6AW9h263bgPx5BMf63+Kr15b0ZDy+lzt8CjlN6/mBlxfvTnlEPGmVt11/nfjXvMcWQ7EJiG7dhXxnkOXRUA10Kl3O1KjzRPYSrvDbjAZay0Wi8Wm1ej0ZovDajUZ9HqLzmzTQVYGYJwWu521Wi1Gs81uNhuMRoPBaLCbTHqj2Wo3GS0mXsgBYoHNarT8Da+uDFh9zKC2u7VdsnPItd4SF7/Zwatxu6zdjdCwNz303X26Z9xIMDz2LIOomsZRHwf0aZY3sW3gmm6rZlXVv79Rf81n+MStza9OOLi3/GnHZ83qdZwHqKF5p93psEASkOccMAryGgRACByXORDYBrIgSBo5u5mBJYwTQZ0EjfIM6NSQHInjoNNhh1mjyqwXAWa7CMH/yvl4++fWztByptfIEzua/GXWi8X04MgFR/PXeaKjGt7dl0h2Bl6JmzgR6/W56Vf3STuUM2+nH9k34c2bVjkLoUEhgytalYuSmNU9OxyNXBPPDHtTvQOZ4OFE9JCDA2BOCsEIwNo5iGUhI47YIMiFgHGnTg/zsM3GNfIoApMcg7M4y2AgDNpZIQMCPEECLIKYDCZlXVXg32D8kLY289Ssfu+7Leq0ZuueZW3Cvi3m8zcPPTMXaH/hQWNFiVB3L7fr2mlt46f2P7F8afwk7GaXD89f7yzqFZB+6lvOihGTe62PnH5Y/IH1
3HW7a4fUQpVDQzN2iw0HYdDgQBkHijoRluUYmx7FSZsV5VgTSsMWzsGyLExAAMcAvFNHOVieJ1iTE+A4CG5U4jQFOpwgr/0be86+7S89R99XTH7f8vnGYG9+Rga775eyjHPb7Nerb5nQcrrBsrr67OL4p72BktiQs3e+9jaHL59XtbZlyh3t0EQgpzh/WcWv4k6f270c+avP22P8+7UiN5mdsVqdoJCmRbREIIAhkAGcCIwKcRIUU4QIQFErCLCsHUUAmMQojGRxEhVYKFKA06SAFqnk3gIvjwB3ubdvwF/RcV7BzV8PzrpOCwmw+Vey7s0SZD1rfRsPPZh7M1k/6uUH33O1W5Zok44ETlk87PzbQfcQb8uZ9QFmqN2IDj6+P6Kmdlo1fKpLVdrBEwK2sr6ejL6Yi+ucnMXuhBiTEuZRFMIwzuowIhgjENEmDempsfJ6AY+yIACAKIrhGE6RHMMYMRi1czDDc7ABb/IGUR4AtOL6v8B4ATie0JSU/v7euJfLu8qYnqN2Jx+8MG3+lp2HLkRcxJ43Hipru+AE+dT114ZfN8rf1d48lPYibLTqaHTkjjdfpFPV5ffDszoPcTmwLqN18JrxyQOfFn3TkJjdaWFtAEbqcMJp5AEecFotsJ7grWydh7BEhZh0IABjOAACGEQ6KRDieBYBeIBjCAinGA60mPV6g9nICNi/UQ8wo9t17y+Ybk7wgqUbY4K+Z2ZfLuiA2TqMXTV45sDD3VcfTZ/s/Sajnb5n+rFdkc1lfZK91KNaz1pzMHAnPv9BXMwdOGDYjzvT5YcnrA2rKe1IeGyjwpf0MHEOq92BOBwAY4WsnB2zYQ4b5AQBtlGZ/d4e4hRqEYZzaiAEgEgnyepwgqRIBILNDAgxGE+aSZ0VdFgbPYGGv1GHnLoxP+/ZjwfffI5uHZi+M5sIvBO5c59h1a9WB/q/e+7nhYyVSn7oDW3WHtonqi+f9yhy/QXPGkG/pRcjBtrKA5a55f84pz1CCYct3P+8+3pD5dJLX0pcSZue4znWjLMwaNfRBOKw2OsRFNIbLTK91El8VTc6CDMCQyyAorUERpJOxuFwMk4eh6wgbHVaGYtbXRgpkHJ22P4XYoBWAm4qJUrbdyel3FfZ+HS0cjr4Z1ad22XlqBkv21GjX3rHTXSP+T165vW1x0LU3YpAYmL24wl1l9Y/0TdLKEsoaeVzd84SRXv8e7N94SlZaX39COFiL4PNyQOcs6E8z8xZTaCTtdZrqysanPaAOCXgMJgMWpPWqNM26q0GndFk1OoNJovVZmmsMplrTU12A+xgGNiob9JqtQ11/13HPMd7g0K+d9WqDo4RPZPPFxaPm6fFuVOSsJ1+geqTm5a/rdoqT23168iSI1Myb/rUbpO0f3nRP6fsaJf84Q/YwflzJg9KvskcyfSMHPHtm9uIMK/Ie0B5A2BjQKPT5GCwfDvk/DSqIgzRE1ZUpm/3qUJdKZU3OTHYydtpCEIRjnUS+mqcooW0gNJQVJOkQWghTKZGjQdZg6D0X6hDTtcfPL2xeFdsp9gFH9a3XWVOXzdgZga/rqRgHf7pULNHme27P9i49sTjzR41zd6sPrg5dCVVpVwOpwt1G7soGr/tnpy0sLOn6qDp6tW16dXc2W8/Qj6L9HwdYmctJgvMMq5WoKp+q8d9z3iNCr4S8qqGryknKxCOsyMAogMgmMRI3EHxPGAVM6zDyfCwCXE6rZSR0hk0BIFC1v/u1cD9016dunFxzjFqQlW5fVbG04Wh1z7vXnS/1cCW504kfPUdXDIsQ/t8/TF2/MW+UUMGnN7rGhBp+mT9XBDeZcXP9M4jXr5b3qO1ILtDQli1fbX1/cPBDmvP7MJai0GjaaoqtRscDZIqP0gk+uhksiSNiu6sR0STpUHvNOl1jQ06k1lvNBuMeoPRZNDYbRaHw2gwmS1mk97pdJhZZ31T3V+I5WaNrOy/eJrnu8MbBw45pevcOFVa6XaTKFr/HTY4thhW
ZG7e5f8+2YeIaN7tw1gluWlQ/9pv5ZL40fluX9p+vwd1dTnT4cCLqLvQ8usTmuV4vsG/iUPuzPEFBRWQ3mgxWTEfOWTtpin2co/57nB6fhPVNMtxspwFbkJ5GwpXyByU3AkQChbUo8JKVwFolztwp4M1wgRCoI0woqfM/53xT2sju+zkgNU3WgNHses6XZbHK8cZY7v7u+Lld4NexsADaoUJcaZtF2rSZuUeLFYmTbWeHwhOF/fViDJi3y571G/IuNJz/W6NZO5PKxDEP86gXjyXBv/AGdMfK20zOu2k00y3/qWmSn/CpLs5qMAHrcbs5XKuAUYZjsUsOGOladJOkJgIRhtpkrJ7IHaLQyoFbGawyh2GkLr/7FUu6dvW7n6u9Q+pG/tud521ZGHQB8nOgZ+Hjalee16lHOzWK6gP88oxiHqkeL1gJJsTSURd075vPjJgQMwJIPrB980ldnhP+uBbn8BYa+/SzKzekZXnvHLK3qcU1NR8/+bgbU1+ioYiSyryueKd1gLXF+Z/MGo0cF21Tlur0Wu1ep3earVaLGa9Xm80GE0Wh9NgcJiN2praWh3rMFtNpv8e5+R8dR5oWbc0eO9pxzeJ+6no64oPM5yb8174tywp2bhXtMd261LhoeDrQIFxEtZgg0O/tKz5ee5Hs5+evVOLj9ieL1HvGbjyxd4Xi2YeqH415epl3Vr/j8Z8M1NjYuxWa0ZdwYDPfEQW0ZxrrJKnDX0BwdUWC2mnpE120AnTRpBEaKGAIlwIkkVxp4HjjGKE5zGnzWkDCi0IUM1Bzv/uVeZ0X2JiZWNqq6MPN3Zckxe6J+5Nl90zt3eav9y+uSp2OleiWHzgjaEsI/vh8Xgd8zjd7p+Ne9XvefTt0MdLc/wjY5XXW7tse/ceWp9VSzr1sfFKGwxLG53GBtheobVCyEgH/OZF070fSlvPC13ISLjOTALmWhiEbLyWh200BAKc3UEQlMlsFBBOh0MP6QGObwLFpMVMmSQY85/vHROyhrQYTTad84gdsow7qt6YQi/6dWjkbrBKnd90dsGCHbd/yrPv92x3ssOkN6XjVI2FeeM6rN5WwIxtHbOvvv/tnNjZdyeLIm493jHxSoCfYtCSZ0ObGU//CCHMdSUGY7W6obyqifwkz3e3hREgdT7A0eazv14scDAwCPA8CwGYQEgJcJqgcLEYF9MCSkZLBQKpWC0UqGiZ0l+hVsr+M2PHPZ6qdbNPizd6RRIb1z8PXGuLtYgqc29YTv2B4FEdFnWRL+SU5jV9727qyxGqRZ96+RJmw96sViN+DXpSdNj2pJ+fy77xioIoZEyziIELB3Y9PcMzufRSia6KNzYabbaKoojOZHKllUrOUOiVctXPwF9qxBqDIZUYz9tQFSGkBRhJkiQpFkgEYkpAiMRCodRTKJHIRRI8WOImcf2vXr3f/0mX32NSrzNluoEbnRt6e//Z3RT+PHJFyha3cO+e1MshyPjSvMVt5WVVhjYXPMOe7QW/q4TNZx/9Nqplu41n73etuRT1OoP27+fxJVv8bbt7eaXbJ3feNVx131FeS1Q7wKo152eMcv7sZrPfJAKlMW+UWlRzdUgDYzVhHG+BSgiBSECRBCmgdAKdF+9kIbsds4k1HAyBHAzU0SbM+l8ZsZPJ562OoulDnZ6DvXK0h9icTQnO7JhF3pAELd39OExR1DZIP2tgeNqSuZ4d+BMJeQsqW21eNv2EKaNwdvc+K/b9eH7vz+TZ4sPb7urY49XvXiUWcH5FtZrU4iqbodqsI+0nowZMvxPwFK718fxEfRJHoZr4jrYCyk5SvNVJOAFYK3TYCQfj5BGsWoS5806OY/QSJ0UzJgQ2VYH8f45Xa0LkLgeJ3uTTc8qlYTJMm2O4S57uVbX5GNUi61x1nCSjVV6zjcaXKQ8yiobdW6h8X43luPTTxraSt5jci79wILesU1PXYy9cz2LdfMTOISHXR9woLrklJ0zZBptJa7Z5yGzfrc2flG4Y
lPi4Y/GbSSUVlKRcSdNOH1gG1cptFMPRZpJlHA4bxwEsR9eZpBaC5TVSQOejdDhsViPp+K/rUfH28LAnpUdjZklOv1yK+3TfAKdcfh+/o0NcTvv5ZWhy8MXzjV8Nx65taHXznm+PqCHuY8X0i5+d1TDXQ8j3/RJwL3nPjOnmDE+mb20sl726x957d+ZgD9RbA8t1jbWA2cI7eRvZ9vyAGLcPz5sHlODxDbCHiwlQkhIRiNtBmsDdnJQLRVGkUCwW4FJKQInFck+BXEJTvnKBWi5Re6v+o44r7WLXZl5/WszYUl9Qcb0kJGPKV+uE8pWez0+Ontfl9vpnH9P5wq7soht4s9FZkxhR+JDwhzlNc6+J0fe5vOl2cfe2B7OLxg0NLfc71fNqpy3Ko/d3pubu8T5uSjM7m2q0JiHA4zR6PrjEpVaQY//goGhPUUEdLnYqbHYAhjEnzdaQHIv8IUiJ2JsRoRAMmwAQ8Ea1csJppx06EJP/13vH29Py0wfDb6w/bXj42u0ye7JVyvuXLQZO8tp55uLTmpPjN9VPS1/b1PmZeezyIP+apMYN82qO1r66+9HgeuvPxPRjkgBy4rqCO3NmFrSdp0/6+PvcbomyrMO2lBuTudq6er3OVKHgefcqQmz4IiwKfmnl86oVoFEu8JZVkkLYQwDhDoZ1OGw4w8JWcanQm2HqRRKxg6jBQZ28CsZpCkZs7H/06irp6SRJqzGlgmnv9h4lt9VGOldcv3FjrN1/8fAHJ7N0C5dLTzSsHfxj2f2GkIxT7urt0x3U6GlB9inHiWkbU19EhnW/tabDdl+dIqyh8POyOlI21TXpuNDFq1z5q7HWTuitToAnWLIj6LwyugThGU+zGkEhJSqUiiiEEohpSoS7IxhJkJRMKKIltAgXSqQ+IqnMS6aQeFNypdg1QPEfdZw3UiK19HsWcvtIu8WpEWeKJ01dtUXgc6ruyK4qJnLTraa7Ax54rY0gWl8qn9v/EfCrs6TrtE39b0oeJYTdOGnWtFi99GHnMafw1UF94z8ufzagcEHc5vYvUlbcq/8FazQao4HiEBYzen/8GtfCbBDUqrWaCrFEVIO7ALwDM1kdVglLOCjMheUB0GkQMCzKoRasBjeiBlgnq/YH7BzThP5HHTsfq/NQHNzci91aFd8mZsH3g8+Xp196MCCOTNtOJ1zet3fzp9rwwM7Vp2K6K10fFb6YUHAzwPv7oWP1iref+/zoLg0Y1a5x+oE1idO2HlZAW/JOdbY9sI8LedRSVd3I12E6o9nKcUFikclCKrp+0Ykkeg6FSVyIRckEmIQSSMViCqJJUklTIlwqkApRqUgkkImCFGKZtxyXKQIkMqXa87/p+HjY7Xc5W6vmRZ8+sFb9Krqub0n7izFHZ4fdnfAjoOr9H/v30eayJ3fKEmI6vtt6m/x8/PfBqIfIN3jYTvP0Z/HHeg+Z2rtk36KCzhOY+sYtwx5dXnn71GE3i0tgLiLW12oahWaL0wbUytQO90akbVKopTioSuQEXRoln9y/CBCJXOVrFVEc67SAmEgq8gEbZEYEgjX8HwREKkCpg7E5OSP1H9/KUyQV0z62UyvW1j9R7HTO7tpfqph/PAfpRqisi2fH9MocRETUvsr1uvdr6wJxr4vDdKFH1zStx9ds7N55+pE904H8PooF2mdT/G9/Gbx9VttxA39UzK3t5LbCqrDZPGqadL/MMptFYARh1EVW17JBH9L0mxe4oCxitdooWomZhHqFn8BCERhGmyDkT0VgIyVzcgDtqCGtgAQ2yg0ICtrt/43xvIapu3A63RKzfsqB+VELd//o+HJ/0YADDfv4MLHiZ0lQe59OTxa8yJz/tNOEJbGeK7JerDl8mnt72qfZm0MBtTv7c6dSR8fumvpk9gvaQ77mWaR/1hX4s7E2oUxQ8bNOpHXWZNjqy6tJcN7M2Oav2uc5WZFdo+EQD0+7SWqC7Xy93FoicbdjiNCs4FhCWm7wsfBOCAIhLQg1
0nbGAKEQbfxvjEufu+TF+x14QbRNbzrqdvfM1o0fn2cURsQN6dyu0yTq1ce6ueyeoamaQ44f+KPlrNX7TeX8ztDT8akxLs7IjmMqKzzjnm53tYR/fvuk24Utc77+9AxWJ2++tKUR8TFWZVSbLUZbEcIoaofrB+XwT2M1NQ4fswDDjRUUHkOb3S3SOtqtvkaGkgFoEyiV8ZxT0iiRW2m7tQQCIQlh8GZtkLXpPzFyYRcnnffiatxuvq/O7RJPvK9ePHKH7599SSXPVyT7Z4cM+trq6/LcPcNuZW+xBUare4VtvrjzQOGJgyF1rp7jU3Ox+Hc5KRN7WLqv2djcy906cc6SaeXpJ734pmLxH8BiMVllJitrxXM4RBvpP3VH4c3Jn36TDRgtJEn5FyFVlogb4XIXq6EJK3AVhnMOKQDni5RaBciDiN3B6HCs1hPRgv8tBiik1mxKBaecE788nDe8I3cl5cuTw7ve1tySb09a03XI0iUya1/4lLnIbeT7yghDwoloQ/MvpzSv2ucMWDp544GC5A8d2n540NmtpKHtkQal2ypre3gfuQ8Y2uetopyvpHV2Y4FRxpqRP6qmCuZ78ni0Mmnn0nsl3wJRF6HabMagL5EY6UCrZI2uEp0VcbPyMC8h7FVWEESxKjtHEHKnFpIZ/1tv4GlujDnURdfWoDAEWL9tu/ZxTfxFtWBtXXxg3/tvu02PnHli7fqAi+4UM+XEyFHDis52G7vC3efS7vIOvdacymovHTF8OrApeWXyriWPZ7s39nWsG/NoSmRSl2/GIBSxmAmz2WLPd0gh4evoE/oHQtG4X8/1eweYUq/3yAMSRby52bWixitsKVJN1ElYlxoJbuZZFgTA3wG0g2V5s8WqR43yaq//lutIarftTE1coDYv2eNtqczeZlqPkvG9Lo27OCzpjMfgU3MPxyypc050L4Havew4NOJLswRZfYdR5W7bbm7OgBYtjet64dvR4efi4zJ7SqIXsxdu+NzcCsTuDNjZfUB9Vam3xWx2Sh1GKytwr2RdanZE3m/xa8ouuWaHfkGemHohcpft7bTk3bTZonvAeG2BXCD7HMnxJGohm4DfoRhFUrRJZpKaLIaa//amo6kO8/TpfbFV2onGJ93Dc+cteNLNr/eBhO/frz693rHOTLT67SadvnZUT+szNn1HsvTRyJstvE7PWJeecqYsefL9JwuGs73nf/yldDXCawofx+ffyS+au8ht8QKng9b+9LI47A12uVlSKQ381L3185LXAQsdiWUJQJk059tv15BCj6h3SW3ZJX5R7E3PQI6po341uIAMxCsIsiaSM9IUZSZMOMuDzv+Se7ydtze3+q1xRSN0rXny/lN/KsLuNStsujVkbsSjxU7k3Wj4kTgyc2HBda/y3hXyx/Qsgv6U8NX7WcyK09NPsXHVE/a/DeODEiVls8nMffbENaN91N4TfHyOZTZMtFlK7r0u/l1a+6XxV927xrWSh9uV3QPm/qhdeXoY8uD3O2mbQPvrtFLLk8a2zz811JbU6xrr7DabrdzepGts0mj/lPEAD3CmGqPJbPlP++qUoVzrzuWaCpeTkxvkzPEVbnvGRfb9c6jn8pK8AeD4Hit2Wq50fZV5rYrLhBtaludv5yK8Ag8XbH2+ZZNPzaOd7e4LNi4dKi6bKm99zOuVqM8md01R5hmwevzkWwpRl6YK6JspADJ+pZq8joimDZSeyT7p+WaTckUGNdnT2WPtyl5ZPQS93w/YWlPDXKwxUTFKvMEFAYr9zbjVodF4lVIEVecNcCzP/6faldDj3TY2S7ZPfBpZNgVq2657bmTJ/C+iKzvXWdBBOLfy+PpR3+X3FN4/V1a7T2qVNDl/W8/NtbP5+mEtht/s3+bb03MfhkKzfwa5t3k9duLHL0+fam/HLG7ROrZX6s3cMKPZndMZ8oyRuqKjI0et0NTBDbKXyJvon7uEWXKs8qm2b23H0MMTXZdFMzrB6h4qL5vNbjELmiAu
gOcRknEKGVavMGGNpMX0n86OqmWvbQ/CpA+/fds0tsPl9BG5zVuVrolYGzDg7F5nO4N4TpFlTnxfnzHmlf4p3zvjIX4Z12a26ioa4TUw9lHV6APVzcNnbpyT+zogoVq9bkztNcOHYz3f7VBMbrO0TV0dr8tBjWKrrUGi3tb5QFHT52cDqu9lDND7PtraJZj+7fkYexE2MiHHg3oS91UlCjNostQaqFEpoZAy1ApieL03SRIsy3Nmk4r4L4z9rgUtcIxp2TorABcLk3ZssQfsQ++OKQ4O6JacMvrR72/OAlXi3S9Hvu/3f/f+cS9dilF45PqDSebizZ41LUbzKzugW+3u5zdXeW4+PVjYIEwTbb62oHdepLPvvtUh0UoLZbLwWr0DkkKG9VnLFsrcToxeOE0U8iL49qDQrQ9/LGvG/JLgoYnDs39G1rvZQafN0lQvgRnWEehwMiyjoag6nCRBgP8v9eS6bqJx5wQNjx3+ES/2zz6UUTdDfTRgkv5owlmPk/AMy4ZWQ7KrVZENU7XvwrcduRD7cPUb30fmMQsymvbprlYKbA7DKGaDpH4+0jM68cNG6Y+2MS2TTt+6Fpi496X80tsAmrGYcJuqEbari6/FTQ1edSH8zvQlixdsq6SX61sOGDdxV5Bf0feo4cF25qs6ApTwAO+O2OU4a5NQlEAIcDwAQDabxWJS/wdG9+vzP3X6tXtc28xVRzoNz2oTNHb/r6QpTaVTzw6yj1yl7gQkHOyVs8s3J8QD/pT5s9a75cfaInB06IJxd4E75ybd6D/gwsyZVxqlSyZeP97pZXOrlNoWcVM8bkr5dUplfP/jnk+11W64XVdL5liS7Rlm/7iGrKEe8cjb6k1NdUVjnm+r7JL7sn355C9iVx4HftkCIBSioSapwEypnWyTxlskFJoFEAOA/6Wuo9eaPqEXCrqLBU3rZ4/pd/HO0GAl1+J7ecd5wsQxP9btBeRlXn6jhaF1ophxxmrFGwjpeWlRSDTInXvYcLrsh/rt9LyT83qc/mHinjpKCj5X73z+E0uhTJffd2X7qPSxxmK7D27W5AYRt2KGtH98vWSkEx385vjnZz77Ptx0brENdLY4FVI+U0d/qIv/Wk/qrFYN+VvcWCegqVxSIGgCAN5YR1NmDfhfcshCr6Je6/2l91Y25Zx8dbVwVp9kcb06A1odvGJMzOFe2ztN8Qv/Sd4d5Y1ezf7coSztsnb7sCkrx/kOCNr07MGJmsnQip0Hu51Dex+t2Nu2dy3Vf/nOYAa8GDupqn3jdesPh12ttzrIOkdBE/vrgbH97Etl1uSFK6oyoLNHVt64MrboatSJs+/q87WG4DwXsMTNiddGg/IfCl+zxNmkkUq1JdEiAcCDPGD9D/WrcWNz9t7qNmrGkISWuOXYxnw2qv+I68sd3TwSDo7nL0Se2HrXO/R1u97a29MHqHdkdxrN9kluFn/lwVNA33WDf7+UMulafFSB0vT8wL72XxvXEqt14xJ//Hy7avB6t32bqU6eVWoT1+RQO3U8WAGoatnybjP7v7NkHfqZ1v5bPddh+JTrcodf/+pe0aI2IVRwa9gTb+1GyBQCuZ+3r38r3wA/WaSbm59aofRS/nsd86zAtj7kz8ndsqddff8kzGf1jJvrxmtTFc7pTrUsPmz0QoMt5drNKeIh3x9EX7f9QdJnMatN+U0DI89AY/r0SfVoOZZed2dqY9nMqj70pPYrWjzo1Lvg7pz+FSKhvIIq+y37Y5XZm74BrgbGwmy4ExMwJpUeq27+6Nid7EM5puP2z+JCQH23VXW1zx8ZInkYo3YpglSkBmLIaj9BvlirAwAAsJgoi8n07xnvFrYdKLdqPzXE16iGTpnkXXZrX+HaPxfnPV3wvNXqUUU/HEUtrSOOmx8o/TSBJU+76SQvMnb63nn3qLVygk5yVjd8K3vv9I8077GvhuVcXx2rnzo6pp3kyrva9TMvm1T5ZbjOpje7Guubnjq8P5gGj0/UfBoy
p6Vb9EPn1+hFqWOCwmVIJNavueXbH++YFx2rRU4dGG/0iDKzOOrGgkCTVKr1EwiFNhCC/sPZscM3aHXNj9cJ5y+a5lnzB1hXrNh9LHK2s3GJcOvQl/di0jo/ySXxWyNe9tJ0e/f+3bQLRrbl14bcNrsCl/6RTsUb3YY/U2SVLwpPPb9rQrcpI2U+49RDa4N6PS3Q58Xv8ZblGwqchjytj95i+glJatjfL0THCK/eR77kcDWX1syJdQSk6dtcW1gj/6FjknKNEsIrSEdZ8v2tEuxnY5NGq4cgjjeIzFab/j/0sVqbrXne6Xw1vSEHsbmtitAtjpqZFW+aJW7Z7m2Mgdk7+0Pvjgt/PyjaZ2nugc3GUh8p/EalQe2P+hq6XjyUPPH1ftJr+dVNihbddPiTkCvD499oNrWf9nbO13nHQ0Yss9ZozaZGgdGca/S0OhxCKfLcbix5NdedCpv60N6zX2hR3cvkvPweCmO4Ew4vEqPCWqelhmONTeGsgGkMb9JoyxSGyggQtEOw/l8z+see+PRiV53r088ZbYJEg/ZuPTH4AHXx069jCWt0VQNW7k9rTHww5mlKV/98t6D077UZuemASxuX1Acb1B1vr7x79jOxEPgI6SrOivRXXhCfDvwAOvYxd67Le317SeyYe+VKkVNn0OslJkuhGTD7WEOU7zzSs7cFnT56YktUbei08O+hpSoLktMNdi0Q/fGMEyJCrjRca6PyVGKtVg/BMAhovYwmk8ls/vex3PyQFxJuT5hs5DTp6qo1bcSbQ34mdcxWNQ18eO7uJW0L1ycet4aw9+f47q0ZcbCodMrJLPu185fh/Zt3yh9dGhw9MLyHoPbYiU10m0W9z7VekZq5o6yly4uz7VsVHt544aD7wJyPIp3eoHOYZSaLw/FT9s3TIoydYxrivqDsRVFA27SmI2uz41R59j8eGZ0MdibfQmMY/Ukcw2GAVavThap1Wq0AMICgyWIm//XZUfwyaWO629uc+422boGfn6hyLp9p2NZtuMNQpd5v/TSZD/w1ejzqD43gTyBXArCQt7rv50bCrnD9Ue772ymDHj5fcWlu1139GtNefzgJvvJkOh9PnWhOPzt8/Ihz2LVnTrR5o1Yr0ZkMJpXF5oBRygW86rWsYuCdxA8fahf9/NA+b9dZ5xhBJ29319COsa7BKq+IJAHuDnuH+4e1jAoLiQ+W+UX5eEe6uSn9/v17R2Gr1aIi/Nvb7hMfTqh/e4fNPLdWrl+c1g5UhuVeTHB7r5uyK5TK9Mz6/P5Yp2m/R/TXUjsmHps9pdm2a6fihtwMvZB8ouLQfB+79+iL6wrXOprVwOWnGpD2hWdFb7412LJ75+gatValodic8sqNZRX6n2LqTXzpEdqwbL6l04Sxs+bfRBx5wR9prdT0Q8yX+MEw0gqwCzmz0VjlrQurdJEDAAhWCA3Ef/Aq+aTNga7hseZQ5chf79Ybq9YOl1i+7TgU2KK3/6btzxOvel/AkrWTFkLuzTYCnuMW3ht9NMnjVO0Ql70LD+//0vqO5+8ZC2XqZvsveo7Dn028jhHJVG/jwoxyUmZ//nsHdxJxLTJoDCYXk5HNdYL2Fhb84fy57yLGLDqy4P7Nc6v0hg3JXsZS+6fiAIT8olfUBpdoyrz7mkgDRpAOzGisVhmr9N4SvQuvl/1bxoPjTlWOOPdRVn0nHt47xgeYvv3OLP2TJ5EdfV7+yD18IHn8lV0S0ZrcnuugTXuTwkYlxIm+fEz0vVo1vKRf54zaV4PfUtOfug890L04aEDZ3vWjIHJRadvdE7ooj4k2ofXds1Hzd7teqDPnmFysVhiAUjD1m3UFSq/V64zTDhybEKfczqYktGjhKEe/tQd4EAv54w6ZwfctqptaGw2GGhxHTCBYYdDSlRT1r+usPo09ons4hlm3+NRb8KwU+rxho54lmPwnoTmz2g4xne59vDkzONOcIVh0e8bNeQ2tCWhHSQ9R/YPvDbOW1r8blnlmCRky
5UKvk7cuhD/97P9WE/bc7/agOT+21c+XNi/8ov/RrdFs1jTJjXZjttmd52hYUkQjE63rjjY87288FJ40WlIpqMhVtWwhEKS3Z3WOGrhKgQuYRlc8HCWNJrPRoNfrJLpwjjdQgn/LKNCtGbwYjY21j09KT+OPilcEjNwyO6G7YsV5cSvhriW0yt2Eec3avb7VrJDKKWh3p2vdMvJWzLTXA/KXAcQ4piCCp2Pnrjs6zep3aecdWQ6Fum4anhYzJ2+tNEKX72h46GkyNjZl6RUWo4XlfECBDTdm6sA5kyefOfs5iMefBDKjqmhrhcgKgq8jOihZIhtka6Bf7tqISj8v0obCYBknEVWJTBbwX8c51VWrgj16O+1XHi5t77alfvqWhREJs5mfC3fHxVlPuxavHroz+F6LMVW7WrQ453Vz42dzWvH1ent4fBpX1jDEG+1wsLjplmwNt6bz1+8Xn8NpW0dR678lDDrxubRWv8LTS1eW8EuRj9Y7dHoX8x+GTaXU/kDw63qk7Wxod0nXkFmpRWsE5wfYa70Rh59OYfBw5Zy29HjUAeoxQm80GnR+EgEt4XjAZDab/21tZ/dT0IWn6E3/MjqubT99/sUxkmv+n+/2bT09RIOQN7ecxsRHrh25e/Tl5VRF+Imj65o+vvwe/OfnxxYzbvzQZh616vczCZJtJfbPAdBY0aR2d2onC17nvZsOje/Z+sOzXbafRRXcL13tO4FB98Xm9HNgsPNzoWdqZBh4KTalH3PQ63cUIxaVxv6plJgdPSjOmtFR4KivDqQbNE1NXjYUArQaiioWS/S+IGD9t33lUYFKx5jDn6K+s8jRnmMnBRjaB19UXdsEzRzdfRquQ/p99rtwvajzrxxqHCxukA0zdK84vogoxtvdHvb60MDIOzvndN+zMQt52ifvTtXabUuv8l5L1a7+unEuZc9fXv5lLHL7oPhTb6jXakVWO1PJB5CAtoWpUHmrUzdtYqmhw4rFZtVrH4uxBQoY4GIHWBSU1lVF19e2I7A/Op2u1FXHNah5EDAYTXViy7/VcWpK6Gv9Hp8Vb95fKt5Uau4zYb72/q8+fZYI51Obe8P1bzvWLF8rC++JNo/I3jBmZUafK7tGJWZ0evPyLvs188OZFb9WVh5fflE35tWPmo69IzfvJqSJj4XX3tjn9bq67eMbqg5Dvhi56kadxux0ZSBIS1BR96ia2saVzQY3YWpn/9/qnT1gnbBAUxT+B6OMTm3z78Y2MpeKQJ0XTkHQb4nGq0xsqFDCMAybG/8l41rV+rfaLWhIlfzVg/fktdNxZ1p061N3bmHFCUXOqFtlLa2xqZhn6GZ094CC/sE5D6hTnyK9ohpccMa2XBRzOLTZ3r2Jq56KOgqfAGW9tlwOH9RzuFJj6TVz7aJZn7W6DL2NNTA1NY2EzuaoVYAECcuLvPNDMV+/pI+BEYc24y1K6lLi4w1ORU1JfKX4q4yulPAajBSW+zeVdNJqpBovCEZNphql0WyW/Nv/PWY1j55In12cJyy6mbOjX/Ycrwtjvl5Ly2wW0c3nFbxxx4kP5R8fYJmygOYXfymWBRxp7ApvZ74dO/K77w5+1Y33Q7xdNZqrd79MvLagFXeMOfWu+Njmq7tzXYdI7/iMbKyECNxGGqob6swSq4OrA2kviRb/HFH6pQ31i3KpaOuRESoRm9919vWrlVONknqXYml1pFxb7qX3o2jSrNPrdTqDoVpJYhhqtlj/ZV/ArwWLhla/DqBTLrejPys9ouIVXj4ofvEuT47pK5o6Vbcn6uSdm63mO+4slzX87MlP/n3ba2NiR8nmOUFCetZDaxwS4Fr8eN9bnxvHv1Q8+9r1hmP5knn0ac/PcVc/W70aLxxiLbn5TbWNdZ+zc2saG2ryvmZeMlysEfMF73Je/iktsXyqTnuNfM4x1mkbcypr6jXlHA/iuiqDrkyv05sgEDJCRrPJbDZbcBZF/h3j6r1eX5a4deV9Q2/4LWrea3RT9v2B
T7aUFqzo9mH2xtaPt6xODouuWIqBdzeUZiZ3+xHwLFl2eokp8lnBB3Vn1+93Loxpc2nvXWjY2BOjmZ0LXgjexd463GF0Vv6WOT8sPQ58x68WldQ1VTQJaTHBVP1qaqyt/q16GqcvfmGvN9xFG7IGfL2uAX6whtQXv+sMFca6BkNBsU5LUDiF2HV6XalObzTiOIHjjRar1fYv83Ln1sbk7Uho1z93V587069KA0LnR+xtSXRISor04Wc/7vDpiaXDk+crz15Zsv7GGE23+a6qUM3z3a+WWPBKv2YxFeGnJg8flJ9+89Li9xdbVF90z2jdNL7NnqfPDp+O2bp3f+yYtHZPzHqzVV+usdcaGVYDZQmQYFlrA0g3abtSVSbrJpnvL6oxIMOzV6oYI2pEzUAcL/bRaks7aLQWEORhvbHeZGn0tLihKMba/p2OUu2qEVunX1x8/fm550Wj+BedvOflX6nbn7Gs5dzPl7WD5hxB4pfs9Xtqa/bD50rrW2n7Ji550kf5fLnQvvz+gCqo98OE8nVvzr46AImKxgbvaNZzTJb0YS/JrtslG/MJ2zUSX6b59fVjXVVFQ13+7/r6ulpTk6mUu1u3OFcv1714+M6W44OaCqrSGEO5pFGvrdEWNGkbSRtOoAaNRvNbry9DKFJACq1WS5PVauP/lY4Te7f/FnC4rtvCa2kt1pa0vtL25Odp9YOPeQKz6YUHA/R7Hj22D3gsyPaxr55zYILFw//LXSbTJyiuR9e9YZ/KQzBmcS4mI6eN5E9ZJ0lfft5xeUHXN/MzIkTzR4VPKi4xiKiPrKG68ZuNtJsw1mTmxGY5nKflBs3kv4c1a6myYuz9ThpvUxGbVByv8qLICAzX6XW6kgCdzohAkEFvrPGx6GxWnsQI1mq1/StGe8i+riMffsjvdOZC8Djjqdm1mZXK1zZhW7Y0egi5csPoFszBNdeQ9616ejvIieufSWuPjPdo3D+suXLaPa5F7cvsweah5d3ekzvKfs3uNTnE03tzzPikF4E9YsdFny3SPJ7P634UaLRGayWDQ07IBmuBomb0a3HY3eAQuA60S6q8fqS4fwppbq9rqbQ06utjNCVikiSpP2qNttDFRdsCJ2iK1NisGherlcP+3dkRstnY5Y1xJTyLqnFU1x3JHL47aOPLq/eCu7+1tH15U5Zx3j53I3Hvwte7PUxXShsOPS2Aetacf+x3ruZcddzX/CLvANe2AXcfR6ZvjKYni2IavgvIADn0oKprmxWS++HAIeIoCxmtSjvLajgnTDh5O/xOxrpJvOpfJ9vqZCofcdDv+ub6Ij9HVQIM6q24Tqsr0esNnhiKI5jOUOdrbfC0uBMEwVmsVo30XzFm+mVmvDfFdWxoV2ZPmHzpEuwbufNNRDLRqVul51x9wZ4pC2dOWP2Wzpp2/lKHFQPblPcOaH6j/dHSEPvQV48nDX5tGbb1Vc1V89ZOay3Dmou+2guvDpkxJEB6Wfdjz7ignlNgN9TK2swaB+LkOZCxw1Z3WI4gPP6FjNUWJAGzfjQ0dKWqghuCSE1tpbfBXOVPUhSh0+jKyhONlYG0GJZYdHaHzmK18RiGsf9qz4HeBcVLilx7zBj2s1FfNvRaSVpwt+Pn10vB+9jMvrm7h+1IGYYt2L9vYZw9ofUAR33dDq/AyL5TF70t+bBozLuNuubdx/ozi0RVRccti86NCbu6qx2eWjj4c94xfMaT2FAcqmvQ1tXojVatzmY2aLVNpmxrzrf6+qjS2sKXxGN+zrs89EPl7yKz1kZSpMBg0lcbDcYqJ0lRFG22NJhtdqtQBIsozG636+z2f9XHqpxW3OKJ6rz17bq+kXmd/9je3D8WpjnpHZtFJH33tnQx6jbMc1z0+NxZcvjQrS6Cq/WdXFx0bWOZL0+RcwNTEv0Xn3RsS9vWceSR5AFjrozW76g2xL1eWZTV2afVgzsW9XyN3G60Wqxai9FkqqttsDj1VfpaA1jqcrPQ+LM6UpcuA2DK
BaLkkNFoN1lqYVIkAIQmvbHMpDc3iKViUmjV2ex2m80OUyQN2P9Vfc6nNwVnFoSrisbtjZkodmfOb8tqWPRu4vLcWV86q/wPV2qv3s6ZMbdjywi5W0Tf9CE31m8MXWgMey25lTh/Jmyf+b566NGh1wXWJ/GNXslfdc1EtkH7u1XAEUO+f3rzUkAk3mQhh9VicdrtAON0OJwgwIDF1qYwwiNvY3rxqVFnwfeSBiGO5asIKSxyWCxN5lhzfYgwRgaZLdZGq8PukIgJkRh3OBxmh80bwP6NjjeD+otfnbr9QvCl+esZh4uTpn0+HOh2usm9d0a/2W/E+THkibHmU/enpRRcLLA/96pX0bVpvU/9CtuyPDq2b33f0F1LE9dik16us9wqHVi26WPK4HMPGld3TvZNdXHfbonZQSGcw2qx2ax2o9ZmtZj0DXWl+RoZ+PxT220LDpSMQ77Fh72vscqcRqueaaDEYgkhtlrqTCaziRArFCKJzWC32/Q2BykU0qTDZnf8G8bDS19V9ZXck4aSN1e6fzr7PTW8/+KFS0WfX6zxk5KJP/eE1zSkeOvi9he/2lh179IT6POZhNyDo2+uLHidev1gBVzr17ONYdfu3PN7991u+r0uBtyqGts43ll1x/EgWPJBbrdZTQzH2B2Mw9nUoNfrdTY77p8yyxsFGqrsVKFHv481Cu57N2+JWKQ3NWjsNptBKhIhkNlgNzjsNqNcQokkNofDYXY6rQhN/avZT/dLNt0897V5n4xD4+/kp/UkF3mFtajZt7jDsbYke6SLa9j1rv4J2TunfujU65zidfatB3D05YZLUyeLv1R5vNJR92a1PQut5C5crm29d+qm88VCw8/7F4uu7zuOb1fvxk79FvNO0O5gnE6L02o21lttZk0d+9PjQil823NAg3TTj2NiwlqoP2i2WatIsVSGW60WvdPucOAShVwos9sdDqfJLiEFAhpnnIzD8W90HH1ywIvZ29Lu3zmztH/QxOSBGYO6+l/cfcTpJqdCmzbJqqvSMypHzY43S3dfKqzF1ns3zT+kS9w+o1RedXH9Sur0mRru7qWpSGJ48jXDooOqT3tle17IyRkp6cd7h3UNvS1kHSAMgwDAc0aTQW+qqdVrvWorjaNbKOHBKct7PmJt0gqfFqHeXgEak9Zis1lwoVSKu9hY1uxwsjaFUiKVOS1OxsZyHEUTEPFvasni4tsEzncuGPkeON3eNfRwfdOOm9RnTfBlxZyV8+avpJqvuPW2fnijS/H5SykHvd7HTh9aljR8dN+sqsbyIJ9nxwKCtqPbfw4fXPsO+3bvo+jydb9+Mq8OQ5+5lb7yMaYURVagOM+wLMMCHGezOs0MY2r4Wlg+Vsh0vD1Zpmm6EdizMqdRbPqjt3sopVIXp9NuMlsdWloh9pXaGI7lAMYplQqlUhHHcSzAO9F/wZj56Yds24p70/tZwwtHr9jo7/h27OG67KfuT27tPLclzXuZqtK1bccjM4NH7vk6b3TKrdzz/fwm+D4PXUGMF/fy6/Rgz9uWbUeeA/vuu/29QTqgsvjd9TVeZ/zd65lbtYHwOYMAhCEQBCHIabeYdWansbEUzB89rXPH4IRNRwfHuE0BTfHEE1/PYPSPgbGzOqlUqhK58BDMOm12T5W70kXJcgDPMywnEomFQpb9NzFAyvB7mD7Bq6j+Tc8XG0901ni+PXOr/uPm2slEiKxNXbMROTfDFrS+rfbvEF11LGa6X+OY2KEeBR3GDiv/LciYE7ijoPeYLiWDzwy4Sz4fnf6rpJo9PoSquKTbb7z7lcpMqMrnnCzL8RAIASzHWWq0jZLgHt/ebeu5NOL3tNpxCf5cZZOg18PmQgdPK1V23sjwdtYgVClkHjjBOjiWtbnK5QoVw7EAz3E8TdP//Hw8ltf3+7iXw5tnWXzmuJS9bnbAOvCx2u/jgupuPTcOr42/fZBs4fZ2eo/u+vqHVvBD5Dj1C8275JmNx/enXWsdLLwzSn2i
WZ2I0jzTFmBT91xddHRf953aB71u3dXeWDWdmeSnhhAIRlCGtVttNpBjrJDplyvcXv/TNcSF+zM1zlUe55fU+GtShUaE8byH2kfuIneYrQ4eABQqV1eVOw+yPM8ycqlMIpUiBC36FzFAw4Sagf0Kb/8S9H12qGs51/bLGdlqayd63I+i4XMGtSvvWj226v73oc4TS9ac3Xf8+uivOSM0k7M3+Bbt/La928yNNdId/fU29Gff3t57+oxsth8uPUqHF19JGvLqrgmxyBO1cRzAIzCC4AiCNtmNJrM+nOhaAZk6RN0aofCnrLWl8ZxfJXI0oTaAYhE74+QgINg/xsMNtHIOAOKcKrlcoeR4juN4nld4SsU+/9yr3ZZozpgzc7HYLx9eprW94t+3Z6BLqwmi3YNPtykaf0Ejk9x1i+x0e9PdtmmNd35Erv40Xpdmz9vbv8N6Unfzd3TxKJJ5MufV9tG9VkXveIZvvtC3MkofMvvi4medM0b6qSxVP2CIhGCeYzgUgHmUsbZORJs6ibXTohbqSF4o8Qj09mno67vz9INIpbfaVSXmLBaWtXICr1BXb1ce5FiA43mVUi6TSQCAQxD4X8ybe3DtYVoUPunji6G1kflnAy8r955334u6943C52wpU17a0WFov7770y5e48MNq9PoUVeEl0ZUuGz9kLW4+7hJ2sWmhBTZkeUdBl2oPIePGesztd7oGFpx+8/lbYMuXDyEd2+GBpEYghICBIFtvMFYrkc8fguZxJCpTy8WynRiUCT29Q0IUs+cKz9Ti0Cs01KpcFUI1GrUbLMzDOup9nRTqjiO53mOByRiCUVD/+L98YUprqN3z7z6eTPBESxb9SY3u9P5L5kpt1+pv7kM3KFupUAPDfdkxhxZ+blbR1VO9cXt03rECAMuJVrP9CWKp7S+fugY1jCtnzmqrSh60qDFPK/t3XJe3+FjD0bw99qm+xIiBnGCAEiBDoaTmUNMbNHiCzNSVd7tzxBxTJBHRHCQFFHUh0bEif1Uao9omijoTgkRRB3t5+UNt4CdAAuCrkoXuYLneB4AARD/52fHnEHtvS9V7qMv/VoW96r3BGFqpOX2+GEZB+Gidfi01CJZ4IP4c/3e+8+/oe+1Q1T87mXbQVhXOCVrTXtl5OcXWyWTFr/rbYoZ/G3sCtmeKaeM/mvHvF83W3Lw8po0SelWgbwkyUYjKI2iKEww5gq6d7Pm3gXNY7x5Ju1ygiGExygPWubqFQBUVqAUy8JAgFqRurUZD1hYgxV291R6uCg5hgd5gJdLZWKJEOL4f96LdMXZZsfCA3HozuLZsRtenmq2pYc7f2Ybl5mMy46P6T5wv1tZlz3rB/zM7FG2Yc/znl3HXJu9I8Tgr5qn6H7kjFFBe9CQa8nTsK/w8rrJb88XN2tXd1k9nNS/NIEjO35pdz9EH2dnMEDs5OyIzKzxvaFmHYsqL4V6kPqNvjQQmaCWe0K0QO4X5O3rpvJyQbU8XeNoGiR2l3kjPMjxHAeoXZUqlRLkAYZxWFW08J/PKHF/vsy+c16rvC5ntYv7ftlQNWl3t0uu2f1jk8qU0SOW8r3uvx2ed7ud+u60nb97xX8f36iUCQKztrZckOeybu7N2wNsg6tGttuttHZ/F/14aJup8gJNIRZy/1dAz6wB76Lqe4vdhEIpQWCIECMAbHTMRvW9ZS4bJtlqGUBukgOWRgfj9PRTEKQLxUISwgMU4ooVb8Um0oGAXr4qTw8vCIUhEASUcrlMKuJ5zmiq/cc6NkiaVdmNdD+fa7XVLDqg8XmKan5kz9pH0qjksJnPTr/PHus/LD5iUq/W1f20zwo2fShatvjl5tvLxFPLOjxKuJm64HHf/ouPFUXcWXd453m3um53Ot0rHsQ13jvYn5KadgdoB0A8iNqMACt1Cuxoj+iEm/b3+XBkuchP7s2ECwQgyAsIpVu0d5jaHRaUunZhr6hlKjHnyrG8EwIhTB2ndnUFYAgCeR6UeogJ/J/3
lMGdpLZdlamqUb1w8H1Y4QFp7KWXnffVvK/d/2VB1pArnUJGn137PEAUmYn9WnTWOsd7eWxE2UZMda+NYOSYOEFt9d7IN9XVt08qb0a4RQecV+PC6T9cx79vj0v8pg9P07qEa4x6BoCbQIeAg6vj8m8VtWv+OVGrlqNS2kPh6+3loXKjhSEigcDbyydWnhOs7OfeHvByjQ8PdPH2QBEQASEEcFO6yOUygGd4jrQF/VPGybLb0+KbaWJiyz6n30LnzDj8qRn5bYxHZfziBc84yUev5lWti/bNBV+SGmDF7tQOU4L+6G/yt9I8uqSlD3j18dU2dLqwh3zBbYs4/sVNz+Bnglnn6tY9XsG2yBnkboEC1DVmJy43QpCSg6zgkrirsbvGc6i/TSmWS6TuYm8PghKoXV09vcNCcZky1rX3akE4Y5d4oxxM8AIGc3fz8PLzQnEQgiAQkIhFEtpGaP/petScH1vXOnKWQq2LWThlHN4sih/Xb9HDxfvuq6ZNTelR+DPqdUl07HOZreF+xHVBSG/yzNATyS3VXZJ7cV+eyc+/BV+d2z0HHPLocdKqyhUXbA8+SXsZBM2PT+vk1WOh7xuMs7rTECRESUzmyUfc7jj259Sw9j4tQ+QKN/8QlZ+Pd7Snt0iopiiFMjg0OC5JRMkBcxAuFPp7uIZ7kDYM4CkYVind1SoEAkAIAh00/49rkCbn28lHihG9d+y1BbmnnvT4s3fe9J5n/fGMUYvA8NISJPDisIBTNzU9M6patfxdlDY97eqlrp60ueX7KHLieY/1hnbHlrPNsmzmwJYC8/O2g6J/+53Ns3ct/uh3ED4VQ7LptLvRaSPs9TZQYVq9ac6B4e3LVQJCIRJ7y93dKYnU20vuExISLJGEioVmdxcJ7h4RAxEqwqGWUYFRrkEBMQgMARAISqVSodCdwVD4H/d3vDcLo8YPWpLRvvY9/3p1i0ho6reeyouSd1ntVEu6DwDGfZt0zNnu2ST+yplFKeDWyYs7pQlc/3imup0fdVN6q/h1liN8akwo/wZ9fsmdexbpN8/uNtHUclert29CFx0QMapci5iwazEGwkweMeXdqw93UwCEDFKrZSI3dw93D5FE4q0UiuU+/n5tAtzNoMCFRoS+LkExYQqcpkkCBnFvD7cAPwAEeAAEeQGCiv9pnDP3imTo7WnOi/2P1c7fJ44Im338LXKjzX33kNuH7iwPuPTNbcDgXuasxjHUrs0L1o+8Vru3z+OHpctHtBh4M7p6eKzzFx40J+wg/iSr9mGXb0Nu1I91OfJmdvQ369TCg+9COvmV1k9JqcZhvMmKKNVMPenpzRlIX7GLxdOddkPlMpnMR+AT6K2WikUSxMG7err6JUlRT4aTyDyCWopkPr4QAoIgBBIeSoVEJOZBHrCS/7QOucPGDj3rZ/He1hnE+gnUqYyHjtQwYavskNF3P8sD46SfP0lvzroZdnzFjeIYkZJDIvaVZzDbkJF0Ug5/6fW1pHHDLtQospqxTuzLqafSLA9vP/hLs37mJTHxqeNvLDkWEvTLN1SPOI1kJgtg8jJvxuMHbRaQLmKJWu3uSolkEh+5VBYY6hYcluhQ2WkBjEW1UIWNR2NlFqSVu1TmH+XuqlIBAAiCEMDRIpzSef3T946qQTkBloxXJ7nnpa0XPmw2xz0s8ePisnsnmz+9y8QEzO/3elNtwfkFAaN3AR66mvodUenbz62Qr6uXyn3eny9lwkauajEpfNoWpWuXqYGX8Te9jfzKzIz1qxbER9/1CvhGN8VjcvkflpGAsVoHpOsAfikkoFDUw1Mi5SQSqbcn7enjJxOLha7yWDIqyEPQybfRrg9HdTF0eGRzN2+WIwUUgUMqhUwiBgEAYFiGsrj/Q8ahWKKvbM3DJ5VdDo6yXw6OOLAehUYOri6pnnEtoujLsVGZ0c0uefqJE+h1Vy8secyOKx5VPM3z+Jo3lk0vxz5LYKu+Xhq/ccmIVq+PlBryFhO1lrdJDmlm
kuHryKQevpUC14JaV6MnrUFr1FZ5UaintG+ixxqRDJGqle5KhJbLveQKuX+QOCQ6JkDggGJUHUu3ODs26+FFBfu6y23CCN9oXw81jGM4joJisdhVSAMAU/gPGdlD63o/o1t1jxmtdV7+8ooLupDVRP4Soj9K/QK9f1hs4/ecavOy0DbnQPLcceNHft/mHOGklw9zfTZj8ZSNZe93lk4zT3q1ZWI/x2qT99Cg2vRDR+DnV+nrj3fGhZCDmrernTIUhoQsxPiWw0Azb0ARW/+ma4A8QIxKJAoP3tfd20sgFoqCcZncG+vYNrRtyVCrN0WKPTClsaMEwRERgaCY3D1EJQERCOR5ABIK5P/w3uGRpZq/IujUno+T597ZzQucpfsSJpdRK+x39KlT3PsE3Tg1ZK7nwrKPwQNOZEPP97w72tAmsvXrDZtjXFe9dFmbIpl+DCcHNL9w4KVv+7jC16I7/FQWVfpdrmh+unpRs0vGp+3nQJiHOy0RSKN8/V1kQbaKAMf4ajmMBLf1U0oj/KUScZhvkG9YMgdIh4fW0PUdPCx0iBNhpJhIBsUFhfj5kyRBQRAESkUikVDEc4wd+Ic55Gt/Wt2oHJaPTSgctJpznYccmN9ld8Oq9lfedv/ZSTq7tde0+oY9Y94hyrBezhFfS+JfsLbmx0/uW/AjMW01M/HmlaI2de3Z1b9+bz13XjNCaSqsuTx1a9WflMrJfmv3vwpacIJaovKgxJ7+riJBiABWKtr6IpsaO+S6+bkqwoICvP0jFSo6IlhqRRJ7eYt9vfTSEN8Z7fx94+fIoqQWmQBFacTDVaZwc8UQAAQBEKAECMka/plXHd1V+xLe1o0ZMKzq08jwJYfnNjXFVVd5TDSfbrWnjf/9DYVHjqX7iq+9W95qbQ/0zY09MRvTj6Ulagfuu3W6zc81u2pcQqLvYVMr74/TNgzt0CNvWdaf++SJYR2X7zkTKfXRCDTjulAORgRJWDMrIqpUjHj0gWE7vcfElcoJkUwh9vbyIn2ihCqBBxoLR3eqbx9q9Tc19NR8C3FCHgrXoHCQhBAQhSGJWCoW/d9sVh79h306Y/JDte/cU0fNjBS2runxli3rM+yUUDTmy/BZ7KrifsznPo4K2fsRU7nFsLLnYS+PaafmTRnVclFP67lU9wOJAgQalb16q0fKpVEDdiWi2ml70qmRuZlTQ/6krlL/CXL0lW7aWchTWCPuBFV6inN1sP7I4c6kd6KVdUOaWQNrRAqRp5QO87V7jidnpkhmASmTE5dvCkv1iuBP5MklpNjNzV3t5gJBEAiCgNCVwhGEg//ZTITTquMRNzol3ctJLbhfaK0/IT2W6wChuohnlrzOP65tTWn58suU8m9/FLkdNmgPHlqZK/Q7OHlhlsvGUT4X06YmDlDfgDXHT1ceGNZq8KUza+W39J7o7KFis2e7+2eNW3L9fqryrWoTBcuwRg51dzIwLraaxK7uWQLX2Dgop7mhN+/hFeOJaroFh+BBjxSrn+w45ntxpUfqpPxWmMER7kKSchJDUT+1h1wiAkGA50EQQP7h3PluA7/aDvUIbv+8/GGLtv1Cmn09dXCj/PK5+6JzdK5X08yfS6ceGWFea6nvVZdkjvh17mL6qtiZ6Qv39JwN+pys9x/4oCj19N7E1ItDDnzx6k3vu9W83Zb4yOjpPmcOh/3RXqZzn6f42OE62IGq6lAcJU1eoENeF2oLoh8vkmf25Sk+0oskFQkqtay1yje+csat5o99e9/MO3VFkGMRPadcot08vD0hGAMBEIFFApomCYDn/uF/rdcNDz+y9D7YtKth//DCZfcCwwMdk84fGt13ease3P7jzi9hH71H1H8J0f+pKJIPCs8qn6KZ0nD9ro/27BqT4/mYNQ8ER/YMRFoe/RrFDvcFhAskhse3Rh1Bx15YWrPgimXIWELqamOlXAPCSywARnqbeZkXGJfSfObgL2Y3yNffWx4u8HSPlsjcSitlad01D8rv9E0X
HDm65/eqbxFTIRFKIAThppQrxSIAAAGeYxwEDP/DWd4vW7at3dPm8EMgMmFPi/7Wrz09G16ufvBlddfPTzUpvwya9Kp1fkP9l2ebi/WXA8QrsuPetxw3oommFwFu7d1fFCkW6l8WSjuNqDvPXbpCT2FSbnWa/OCnM/JF/vzn5Org+zlzlEA1AAJwPcrTOglOYTwtKBkpKiI8xEMbrQGEqHlAEB/ZIryoPTptRhdD7Mxpi5lc7/LSky2m2yrGBHh4uWMoBOEECQoFlFDJ8zzvQMB/No91GrLgjsfmF6rkVNk4cXvvP510cGHvUOJx9JW9xpKxI9aN1/kMn02+TYr1/75SMKm4+7ZuLzKbj/OoOvIgog24wnl57639p28VAdCxkRfHXpy3yqDnRgrvjZ+wRWd5NG6Zz8Cqy7ANrOMhmNXBCMnSuBRRa0I5sTGu0cu70+s+CjDANVBBJJs2Fs5CjL4fr5LW823GZd0TcPpFtpUjXUKCQj3dvAP9A4RqtUqlclXIpEJCRMn/iY593xFHOs+ZSPnftj73Og9fuhe1i0/s1tB+4uj2YNp2yYYw1fqtodyG7SrF25dPLmdF3zSdG+3mp8m8MNfTUPrr0peG2Q3nOj87PjK2b3W4s9dW2HZl6+DJ7w5Fy+UPdsx4Bb01xpSKARnQADNKE4qLRHoQ4aM4d6G3shAM+h4fZYgZ+8obCc5/2OfrlYXS+ku9glJn7n13Cnny4F27oe0iPF0RlEJQBAZFIpqmeKfTYYNxlvhHbzqJHbx3zH9RXNLddZYofDb/yHvqnUaHu2/AQZFhxYHwUV7w3m69Dr/9A+5KvZEOL167mo+a4dFzBpr0NX1mqDtNPqvaN+xtWdkE85ZdT5YPDz/wXTknYuuDMzH9Q87sOXJBSkxVKp1OlgPlWgBQYxYp5iY2Cb1YN04a3hYiZX5oUHB6K83gRqj8vcu4EHfTwg/bH/SYcvJ34qQ7UxKfATQuULuo3L1EQgoAAB6g5BgGo3bqn81F6vqM6Wh5A98OgUc+97YIE3d4eH1b/Qu+O/n35hdFq040T080rct8E7yyMd1DtLd06cER8vDf7s5DzYbQLdt//BzSfY508MCLKb3DR7dL9r/Rp2Jb9nohua1PqH3tOqRnaRRwdbGa1nI2DpRrQIjwYWixWGxzNDcENdoRCcZGCv35bsY4NTjHQNb2O7QqXxl5MPJe/oxZx9aRo7MvxHkRqBBGMUwmFsjFQpDneY7hWAIl/9FM9pxT0bdDauyKZxWhwEnevUdt1Sf3F2+GHsy+90jff+fyYabmRMUDueDmklbNz2JNjd2Dco9nJE3uEX901qhveRbwwNftSftLXr/LZMO+RmUtuxz5NtajwtN3u07d5UlIkzp+qfE153RW8wyA1opRWmiD3AU2LshdRBFBzbKwUYYQ1aMuMLLe68csXNZ3+xfZwumbT8vSC+dFrb3gbBflF+gV5KeQePu7u6uUCrlcKhXhAhmllP2j9egZ6eXfO1M+4aQ7Im85jC+s/Zg0u2TcRWXB8Q5P94CnUjp/Gr1u4trFYSMDGgacCDcW/Fg3+0K3Sc2KpnoMzbNe6NXUcVbbg6valPTgU4c4ojYvD7g12T65ZT9blcB338PTHb0ibgfpDYSaaXTAHpBGKKZwGgpiGX8kRuFaNQkD/N1aNTVIOobqu9ddT3dun66JuDglnAllig76vb3iR6K4FEWl7r4qF5oGAIDnWQ5DQRJD/8lbQBtEN65uGjO79SCjeX44UvdzwADC323mzqb5ZwZ0mbjx+6NdigGCY6RBdis9OvjX7arm+IbbFwVv10eP9+05YMZGzZvxLwzqPtgezaiJmXx2t2Ox8QpSihUYO11K23ikzYoLx2S0nKjkHAqyHBUGl8sFap73cYoDo60BqMDqK3ALglo7CVWYt0zUPMy1y+qTHxqG1CfurzuZxUGfWik93JVq1BMEIBBFBBRBiHmW43ing6H+wb2j57Ixn0dS264fon7F
rqhgOWb87GQXVf70hF4v5KOz3V5aBpqXnScTzkd/h4YqPiRiv9teWNUyc9110ay+ZSdGXQobnjrnT9piN68dW76PDFrQLbMD8eLS8pQ3ZeG+nVN3CgJeTA3z8FVGeri7u7b2ksm6eLi7RsWZlCol5uLq0q2LQt5SEl7jE9QjDPlO9vX3oubu+PjHaL1/+NSZ1i4SV0QmoXDCRSwR0xQIQSDIMhDEYTDG/aM9J/gbc+5Sva5ziyN2omnflLEVnR7vTPx0e1r51g/Z9Td8jgcJoxsXpkA3Od/6kSNaRWatPhQUOaigdE6tX6kLfVQ2onVS4EJB7Z7TU6ZUolf7ZjWZP+aPfPn5dB9/aLEgedrUyh5PIZ0bi8BlAhCqomUSu9nbQwoojZic8BC4+3m4+VrC2z7JZUfM6G763G9Q/u6s9Uu1AQuDprRNnuXr7eKiRmAPisRxTEhTJM7zPM85OBbl/8lbef2aD3udxJKh1wen3up9cqP7zIgM28hy79Tnv4vn9Rgp/rkt3Zrze9S0AlKkG33xudxzEdypdhE6LTsuS7mCLQJqOkz++mv8waNb9lcOiM96a5OmPEEueQaFnaBlp4cP907P6eTO+VkbYDRChbLJigBeQTQJxbivH1LvwbZxx6TSaso9uz8c4LTeaofRv3VXjPeidj+80oszvg3ywDEEUUkVchUIgQgEAgAhRDCcZ50M8w8Yj876UnoEsJ57ZO/ln125/WOHqFUTtvZ2ybSdStu5XXtdgSw7fHdN7term9hRXlUzjvTdkj1o+qCLq1X6pBdj5w84aJtuVZ5dAZ+/3MrUPBP3MQAnImNPLZxjrs/v229A5iqr36yJ6RhhcWtkEcaCg1I5ACkDSIXGh3ALLwwPpto0BZco8KgYGA2oTmgeBZl2XZ5//v2+r20/eQvULgpvDxIjPREYkUhomiJAnuN4jsNRlAX+QT35594r/V6UDFEE+Q95r7tmsZ5rY93Z6vEz9bMjd1czLcqDziR/9sVmXWuzfdzdaUeGj4+6sF0VsnCaZs2KjTPvLHBZ7Huzz/agAUxcTuvU3IBPM06OMp1ZGlE8Z8ZVdidwmXfIKpTlzViTEPKsd1IMZhPRgJfVqhAGmNzC/rQNYuoNI6DmkiyvVn88zetGG3F3x1tP8y23MnkmLYRwCEEQF5lUrEIRGAYBAMQxAscBgGOdrOUf9JTtWZjCp77rs/7uvHbpoT6m2T69r4afRVftGzx/7NRvN7J2dF1qUoxIDO5G5/zZ2qos+cLAnklY4ZrTr0L+3Jn08Yk48FrnPK/WltqwhCd4zqeRPzs17wdt3jDJq3JqJ9GciY2zKmQHMbQaQlBZEwJiuJNW4BDlFAjixY2iFosOZC/HhlRBHSr22xURdKNnYr04sCpi9GPSiQV4eHi4wzACYRAMSQQ0JaAAAAABAMQwEIQA5h+cHTVrOgyLKZzdakOr5HvJRQM7jn46Iva8ZufpV6GHWs7i9wl8+lt3g2N/HHqRkzzpgjajxxuu1UXLj65Pgdxhk/KUtmftu0hX+DpfXb7QY8q65NZPgrqKf/fMvbbcf2ir2zU9xheUd1tlw0VEPQy5NQkogEaMahlEE3ygXdkS+DJ0SpzTLkts8YqKac2QlJdMGkuGj7/ThhQLYRxDYE8XF4VSDYH/lwIgCZwQggAPcJwdQ/6BV+dmpVwe/6RKI67/+KQgoO3n+ePSPY62rf3V++An6bd2n5UbJnWe+HtH8O3+kypvZwd/GxoweG7jx+9fH0YvSt51MO2bmH7sDhDtP/qen9O894yDov7pv6FNW4Rp/Yf5MRG8xCbc6MlbCKd3rYPy4K00IbPKCZrGeCjUVdcGViw2untai6SR/harH+Lma8O0QW7F3ja5wkfm6wJBEARDECQXi2kBCAAAAIIghiIgDHJ25H+f5d13sO0HAK89V7+o+6vdJ32Pfd3z2LVqy71pr1dIfj6G23bnVuT3Wp1cyOuubli/vDRL4mI1T0Rjlu50
LpE3/4qiF2dEf93b48aJDgMsj25XrQSWXv6x8Wtact/TVy/q00x97LhhzhvWo0HEy/FaVGgVOYAguxcGBFAmX86/SRR08+BDR/vD/cMpJerPyKW6JCpZ6EKTBE4oacLVXS6TKQQQCIEgSJEETmAAwAM8x3AQyP7vuY7AWbEx1+a/P7fJa+SRxAlr3QY+fBNUO0hhXlTrkL73GXS11aPQDgfMP17ujp3NDt+VEAU6/7zZ1WzWxEqdbGrEhRZCe/vEVfky65bzk45ntDjStS6ZmICNPbbjxpsRhjpv3anyoFItjakgLQKqQJ2HQNCoZNzkBRKXgEan6wCbZEeq3zjZ7Es/Ooq8pEIXm8gslwWSaldXldyDQFGExlBYJhIK/r+KACREEAjheZbl2P89Z9Wy9Eg/kykjaZqb77Gk57lT50vzFVVrrp4/dfTRws/7Kou7T1+RHfPd++eHVz+E2YW+JeryVQ2zvIoyL1PrX3lntRP+OTPnzTxGubUd9Wh5UxfPvhsUwOmN896nlAnEe9q33Xx23R4DYQNgTo8RIGrz4lqH2gVEP14KJRpqqTfLv6MrLVgLpxh5ut3NyiZC0Z4SswolcbEUVXgrXaQ4AoEQCAI0RRIEDoIAAPAsBoM8D/zvcU7/k0tmtxp8YpYv02NXtydNGzOejHlTOeyOn/uM1sGdhHlYZEDreU+QDqsyrl+N3Tzgc13f95enfg8kz4hHTF+AFewPvf8772WzxY4Z3zqUDTePS/7Vau+ynqahLT8t3a2eWBFu6dhklGIueoR0RY2QWAkLa0W0VcQGOCFEGmgLiysTwpinIkBatcdLDUgYV7GIdlG6ypVeKE2TIgSRSlwFIiEIAAAAgCCOogjKAzzPc074f+8rP91g7Peh22e37ejVXudv6YCO4ieX2j5yqVNbp/ec+f3x6DvOZi6fjg5U3pg2egNQWDIsZq9n2q1Z/jfHAz2vvZv5Z/zJ5iNevOu5aAg2uMVQNVBWvPPA/p/D94c/orwMXTJTvrbci8pp1t3qQBCPBpkQEQFCuZgmHdJsuUjlbuvl0dqmCMaEZGtXWCHyCwtSSz0JASnBCXdPhUpBYQgEAQAgoCmSAHieBwCew2EIBADW8T/vOTmvHzAzYC0Uq3jbI/qVGeoItTqUMrbv96jgrLEu4gpjsXNCs/KxmvNPtr2K6nj9s+HsjfnRwzrOyvr9ffhn4YCXrieD8nbd3GFvMbG3oyWust99MvDAvQNHSodsvmksL2ts+mWN0+JmRqQk9QDgR5sEYJgOpBQo6ttchfiw8+55e0Eyp0oGeVtDJRQud6e8XNQCNe1KkYQIRaRykVAgAv7PojiGwTDIcQzLMhACAcz/eu+YBE7Y8XEqZvyz9YtOJTiSrBgUenJ4alrXbi0dfWjvW73Xu5Hdp9a9ZCti+RPDRQdM3ZdFPL2rPxi3O2rV2O+tBwVhs4Ifhaw/mT/V4vG6bPiq+cwzh0w4bsO9Pyd//l59Q9K5nwvtG+mtEki9JAqFKizYUxIZH+GuxP/Adr8O6a9UoeUCowo1eXMt3CNaeHuI3WiKUtIqtY+7ykWAIjACQaBQQFEkD/Acz3NOiAc4kGOdPPc/n49bTOvix7coepMd5491LrVgAxwTJ8/K/0g8jtPHnpPQM4X9nuYXnjHtvPz8YUBtL2GPZW/K4zR+d11TpOYJ5cUfh56pUvJNdnJD5YdJmauO3iXSPrTiH5Z3H9t8/ozeC8XkMxc5gkm8TGYJBJvlJCgzA3KBUSigvXMHyguaubSUAiQKcTECNw+BUBKAeypdaRXtQpKkGMfkKolQRIH/JxeB4SgMMbANYVkHBYLc//4/qx9te1f6ejtWB+guPtoRXfppTdrLLmfcF4wGXz9rt7QsMPZiD+/X4vm/VmrmzxzyNPLtxL1EmjorZd/0bu/hwWTl9eRXkZrlF72SjlZ3c1abe9D7To1nv6QW7L4TllNY0iH/WxDNKmAUCNLW0GIKcZJidxNDIL6W
FsKk6ACyuvLjKoGAi5c1ega6e+G+pIokSSWtVCqlSrkARxEMgoQiWkBRwP9tNAwMgg7MAaIQizDw/+jVGpfUT1crqurcio2Jq36H7V04im8K+nDA7+OuO77twvrr1dZ3DXOSpwR0p1p3mzBuicXQxqVdUKr2WeP0sJWap1lzbzwzfP5lGp7WusT8jYaFxO/M6oGL9PAGvW2aIBrs4fQQCl2ktMTq0sJbLnCLxTFVS4/YQKStXQqRhtL2G0coxbGxXtLkWLVKLqZVLh4qBSklCVJGEDKVXChAIQgEAZ4ncBSHOZ5zsiznBEGe5QEQ+B8ZxYfpvEVf60aefBkSGr0dH7haeCyiA+51Pmd93ghdWvd5xy/u2zS8o7dmQcPxeLu28EhfnzHlplvG9JHHF7c9tM3cNXjVpJKN4tp7ia2svdZuABN86qK+t2l7RxMefA5voPJiKJmcdo+KUSEidaC/k24WJJR6wzGerr3sHjapyt+b4MXCKC9XF1c/V0qtpGlaTirlnu4quQhDEQSBhSKBQEADPMfzPAfzIMM5Oc7JMiz3v3t1YnkzPuA4s79sUhniNiPRcnDe5it5K8JfBR2sKS8jel6hth5bRA/btLZHqUj7dcODwBzw2eC+lMcz/HNDyJIWibPjtyziuq+zfNwvSauQfMvALTkJJXs34K3DP1k2Hk20hDUAXKMb4ozUEBCmrlFi8gbci+B9oNrO3JBqOe8VIxV6ioRihKJltEKuEMkJuQtJkmJMplCIhBgEwQgEgiSJ4wjHcwwDcRzPMjbciTNW9H9klNh/p170GLHVc1r0xoARY5xdDjQ1PDTXenmdC2u8arXeqr5amrPt9Nc5/V6rRYp2pXfy+nSPk8yXjGm8EnJgYOEBV0fA5t5ngj+ezW7pOHmrb0xj6/fSrFlmur9IzT2NYHxwZz2g4EGJnIO1UlGUyiqVVHsAEr45blBCLu4OVEB7qoRiqZgiBSqCoqSEUilXiAhciCIwAgskUoEQgUCA41gWBBEIBjmWwVhYp3DIHf/r/3OerrD6F9yPzXgck1u2G/0V8Utyxi93M7eho6Dhvs/XosTdjmW3/I7l2iwL95ZHVJ88uIB4cWrF0O6nW+psykMCj4ZUP8v8rY8PrUdf3N9put/SXxL2Ngu6UM9wUoipEQEkHlpHcqhcgoh8jRiG4CRF0LBQK1Pkmc0eTm9ayahoJUbRckqpUJJKUu5CEoREKJUKxSIchmEMgkmaIBGUZ1nGATEAS0FOHnLgpIH6H/eclGrPOf2DEtTA8od5bfbVfWmd8GT5x9OZR5vfPOz15sqxuJnuz3aZz4w+0mxhVenJoy0utAAOeCUVFRQ1DNySe0lUejdM9c7v+Jc9ywvXNQKD5DErT/z8cG9MLFLt74tZAkl3PxSH1A6YcCFwIerqQwe7Qf6h/kFIqL+caMPiUkIg9FerFN5yipRTJCUnFUq5h4zARRhGoYhQIhOJcBRBYBDgeRiGWQbkeLOjpizPofntKvwf6wFs+IfjKw57bJuspn01LVf38T+pXJwfsPe6T3KX7x9kp0/hl/Z2sOd/v14zKWYB66jwmBXyxC/K3V0+a9fJlH2u19rwD0yduxeY30y2b83yvxVTcuOLIend2T9uQZrGMVy8hVVBJtzLapfQNMvLgHBP0R+ZLvryuAanfFNqHKEkaZnQBaMFQkgmU9IEKSEJgpAIpRKhUETgCIIgiFAgoiga4p0gy7GwSWSjBRhOeYafGULX/m9vAbEKQ5chWtHX3MdHbk59/SJcc/yifcydgE+fAs43bYpxvQduWf1SgjzxmbiE3t/8s/s+5rtwc0t+/Aqr/LB7j5oukxdXp9/D315Gtj58Mv3VxtMTIg85asmoyHPl9sR8nMAaKQw1wTghsqEQCzso1OFmEUVo1V3yOB9xI6yQywgXmZwghAJcrlTQMqVCpKTlCoFMRkslUqlYQcokMolMKBKICKUIJkiOhu0q3NDrUynS3PY/rsc9p9Pw
P0svk/n2ytyF3xfdtLy8f/H1pofjKzYmrj0lsfW1bu23aEPc3la22B8JUzqdsY9b1ffT592/vM7dEvZ9PX5oHjcXsYuFYYkdLL35Pz3okne8aOAC07Uvs+x+pJ7FXcwI6mYAOF4kB7FC35qgBkW9SnE78RBQElQrchfSYhFNUXJEKJRRJCEmFYREIpBJZRRG0giKELBIJBFKKRLEICfCSXBMrwkwQ4EeTikXUEeV/k+MT/bjvsY7o5BVGzYk+l+dkBKeG7j9xLHBLWKLd84OWt4gOYZ5Na1m1ro8Krk90jwyKf2sQXlr74MxwqW3gtscHxFX3jIz4NncwIzHZWnO6Q9yy7YM7TUw917z4y775QY952JBKRVmhKQWToCBNBwLaJRS0ANhXk491slROoSILZYqJWonTYtgipTJSbmbQkQQUoLABbRYKBSJSAKCIRoRMDQqUKIcajMpDZQvzUD3G6xHj6EP/ievPvPtnPUzRwCeUB9523dSh42e9oUFWdDUvuYuzV+dAYlz7QZMU6Te8qW0HaKbMn/7L/nl2tAK6/2j74OlnZYpBTvCtBt7tqzqeUsd+1J1/UNBn+PTd1e64A1XMv98FrImGCdMKAwZMMgM4B5GJW6z+9c74hnH7058w+uX9ja/f1GEyF2KSERCISuX0VKFUilUKmQUKZYSYqGMlrvQQqUS9jQoFZ6Az9bWPubEd0nEz3Et9ggTMlqEjP+fdExuvJ4/L+3TpDpkbNa1n+fozuVrNwnV+3Jqnzlzjcshj6bmMRvqx9ovFBQpu0yr1kV4OjI29v1/jZ3nWxNb14dnMjPJTCY9oYfewYKACEoTpUoTBUXEAkpRQRGVo54jIiioWBAbKNgQpFlAEeyV3lRAmopIlRZqevJ+eL8/+ifc17r3b+1r73XtTbd/6xTlELfy1f6SEn+3Ktffv6t8Hk1sT5qyUHhiyRWJ12tIcWXKlOI0CrHnIJA1gyhMi3GIguIEXH10VF/4mQZ0ep50GJdoEckkOpWK4DiZyGKrsAgoysRQlI4ycAbCJJIRPoVE1l6frKg3LqaqdpOkw9XsE5R7GPhEz/BLTdLlv2LUmblK96jy1jrIiXbZq38K9vn9/C21py6WbfalYsUaLINXqd80Y1+3Q//r6K3K4srdFeQN2+fd0rEMLFfba/i6bol7GvHDcuE6T/Z3985/y7IZ/KLkNYOljba40RSFpDoDwKw5CFGfIKoI2BLWBKRFHjRvnUM39R8d8W1zhoZVSApcEKczaEIKjYWiGJvKZnJwEolKJFM4HAlOZvOneM2FCiTBmBb5k/Z7FcBgMXE50+je1NHuBdUzFn/F2JvWTvxX/ma6LSknjrp+u/iEKiJvGXtiV7PyxclQs6CU8uRsbd6S9hDljORa7TsRjVzOyi1nGkMd/dTmSsghUz7hzEdFaJvK6aHZ2Z02B49B9/jE418bUocBBotHgpTGISJ3mkrVH5EzyVPTmugIpAhu7LOctoI6N7pi2nzlGYhCl9EoFA5MZ1EBEspASUQazqAzUBhEMPU+MnNyeh6oHzFiRGPqKcqEEqagLeKBXyJW+dpj24q/mrW+HO1Rnxp9h5ZSGzgStObph9Nb64rzx/uqkr7RrVNsYgfLWw74Lx01XARY97RET4cnnHUyKh9s3HBaKpwMHjT8Z//CPfLP/BrbMycPLHDpiHtptrLs/I4AWJeO8ahSJWAOYqMzdCrOwwjDFBLWpTI2T/hlrc7Ym5Sq2Q6JstHQshkdOp0N8jEMh2lknIWyORwyijJQFGHBCiwKbNADGiM8dZ0BrthRJV5bQVyudCt8qP1YByvqxMfHnKi/yZzO+gN5l96jdtRAbtgd05Okbxcyzq60lt7xx/RW+zA+pwdQHwnEeere7G81NWvHv5WYvtqmYdxSk4TbFCQZi7w+NcWsWBLcYW5/w4rx7i2DXjl/o6HZoJtMDiD4FAoSZ0g4Q0zE8HFENi1gyCDhuJZiDcO44dzWqLSUc20+
K7cVm2nTMaaEgTLJQiqLRVFgs3EWgwWRSbgaCpsMz3BwLT5VjHt9ffj7+n82bS9+euaL177sLX66ySq/nfM3jNMTzNUHHq5u/2/9SFXkYs9p9YSdDtdi6iQ40DwcumNFP/reJjdOqRLQ4NrvG+5eUz7NNm3g5MouxQacKkwdBDW7Piub6yi3Vr8+g0sQuz1Ju9BBtP3krIqMQiYIARI8S6cx+DhJPi6UA1JQTuKTQaF+rVOLqhZTg7fe8eVkbqwCQqASYDKHSAM4LAWQxWGR6UxMcQbV05uU6vE15pbN9Hj0xq7+KM3cuKaz8Le2y6YrzreDTnFho+Duv2E8zJvdWB24LDfKKfzh+p8KDss24QeopRnpczqW8YxzHhfuglExXufUgquTgwaf7pvarObZYrc3UFLxfdEJHFDran2MjR/JxDnuvu5rKYeb/vE4u6P7hqvjGxyVUhASIiBSxEQqKgWl/EkIA4EJDGYw2g0myJpT89smHGG3n1ilt7WQQKBgGAazEApLgUVlstgSKYmEks3nDE8do/WzFtgY1O17YWh7vfft+vqOI0MNr1SMl7kYWcUpnoz+mz15Ruobp+ZHYB4uHjU9OrotffyGD3l9Ct/W2vHM4nJ9MWDUU9a4Z7/F4kjrio2x5duvrv/dPpdKcbtg8oxaJNA4Rd5509Yu6l3s5NCzwbXvLdwbdsxce3oPItOZ6lwag6HLZmnrMKh0mEDV1CIx2YYUXZlAQcmEMS2EMC0bvzoNPEjQS+DozqO6wSwiUxHHKEQqCZxFQEyVKBfORLCHBZb7P2Ffr0hP0zILT07q2d9S7mL2u7Jvxrg/Uqq49zeZ45UgimqA3oofp9NSN1T73fPd46JNxHq7F4VAIW9IysiRgyfvSmutBR3/Jn7xmnLT6TIbzCy5zqwwx6KWq7xSOFhkXBtgQNkmfbnK39Xtzq7Xz45xrY3zlkvIGFVVhFLIkxhCnBEzZHyEpjUppesN6wAUoeEwlwcEVn1QuFpPVqT6kQdhwIy4d9EJCkdNRmNIJvsZiJKEoAmbWBImrJ5Le54cn2y/PTIUz1ekjsuG3wzuV0x4Xbg16j27fuFfMPpls0Pn3Rgyd8mJMf551stw3SxrjF47XFyuvEXv0JY3JSP3Nl3Qkx4M696qdjlR+aJWjuklFe8zlgUb4hwPFtsHvHq6MnhY15nRa3iZOvI80C/zo0pivvm7eFOUT6ZScDEZVRDCBHQGhEhzVDqXTyKr/kZgukiZJjH/sQ9RKNzVrTLAlcFMOuB+iKimrkhXQH5J5QyKkooSXy4m78fjBS7DXUbjquwIBYI87cjKcbNVE/zj/OLz11+MAFoq4X/havdq+81ljxvXReW6fHN13fm6WN1A9zJirnR+brd6ohHxUcnnnJYBn1ibaq8LJ04LQu2f2AyoaAU2THncCilfMMc3WWLPQwfVgbY0tf1dygGavxK4TiMs5k6ErKXJZrF16TSaJkOBu0BRVXspl6NkqqqjaUkzQGW6+kTcaOF62j5psLbvGMqluBWTeYi0c4zxskygRNZUoovIxgvIlLgYilkkuq486x47Y942wKgTHyiZatIjc3f3v6AJ1Mx1tv5F5sz/x39zIJFgcNJs5wLYam3NWvsHHyveQe6ejnO3gf6y1OZQYciChoGhY/cLE9vGF69MNGtT26LUOmZYKEy308zPtRkWNDln17u5HJ3OiCR9JVan90NjtGIYwik4QKfilDkKZYSI4cNUlDZM0eC+hgABTP9NgfQ/KWR3JreXSerWfDUj9n4o530Y/SGfT5pSp+jgTCWuEHIUk5IfB3vcr2wP3dnk79zLP7ehJrdizUV099QPnLK/Y6jZbOkd6z+7eiBBTFjFe539xHTVxW/PMtafv/u6wj//mv3svw6+EeJnFvoL+CPGB+RUX+xIxcnlBzUkuTer/rF8botv8HNRpnMVK2U072J6Ucx7W4+AK8fszav2jWk+8zLz4+ISCkaVUGhUbRFd/zeN
YUb+SZ8vF+BOs12qyNQiAommVv5z88YqMTnuRGFn6qcV5+aYfMxmispmQxCDjTO0Gt9v2D2zrkma5U1+QsjoFs9VZVhH3v1c+T6q3rj3IDFD+fE+PE9R6c919LFYfSiKkr8x7OZadtuT4me0x8YtZkvCWVmK4NbrWjuVXAIvsFViBelDYU0nmK0wCNk+4BvqC8+2mwJCbZuDKZmMzhgNkFPcWvnybOLQqcvZPHmKivnRu2pKOIBRGQyYTSXKyVQ6hoqxAYqUOC3D+iUMKa/eBWMreWwSTuZYP7/iV2v2zfBTZ/IsmYOgNtoATRPS1NMUJp7873jQ/pvDXxtWocmmXNj/9HjDj/PXbrpUGcbbvHuur1SsF2P/F/cdAoWqZznmPzztCn7avDu0w8J2Hhcq7x0woicHD/YNvSuxOMp6InnjsbEytebX0640YX/BRL888/TgsLlD++Ga2YUaKfsL9Uwn9v63UaV2UrgpzL/jmYHo7Sl7JRZDUQEloYo0pqo2m8ViYxSKua4Wd5GawmIbVePxhbP2cLa/17mCq2/0fwe/7OzbqsgU5oE6Ky2Zoxjl10YteWHT9sfZP0plTDVZhREzbMOr4sBl/PdOX316ljCOvBk6eitE72663/KTf3ZVyEKsHUmaD5Z4USVuJ2/1NImaP0W4H12w+4FJErf3tfnpO9k6WxR7nT+o/AYXn2jZkdPeujgvqami/9yOjwNzdaKeeFft0sT/VFl4/kCtZfZq3pdVEdsk8LyxOQpKRkl0JpNOIyEARoLkZCbEI8vo4ww6nhb5M/318kDKg/6aiuhnRksrjvIlBIRW3Y4f30tT7uLJstR9zjsLPqdZanK8a+rd3r7/YnmsN77AMDJnW6H4l86prpdQMl/ks2zcq/XPru6tuxERsDjzWJWQ0MAJCj6+rt8eO92fXO+pTFXDqz+t2nku/dW7KuXKXT61U0/b7txp9lnYdnFrf27FoGps1oYa6S7phRWNDxfmeharPdx3PMyKzr1RYTyg6OEK4xAVwjC2Kh0jwjCCwiSAQSBKCAh1CiOO60nHG66efuzeeKVK1thVfy/hewRnd532NxOJzAisMW2i3eXJH6b7Pt2pHAdnHjZb1tNe6gsPTDh/OFbwhbp69iezcd79gzfalzhM+/6R8a56tnBNiBOjVfdIor5wL+Ber4B8JLJT+nYc1gzNKj/bVHF7+dU7Wo16MkuVUpPd5QWYrPq2o7Fyy5HBRy8+7zkz57muX0+dgVWgC59nWey3MT0wobraooM3dkWVBBBgBKazGSyMgJFBCGfCNIw4BHCQMRCbZU6C6C2PMfVcnFnFTRIU2qiclptbQ4zRo8ojx8cqbbqd8x5uU8yf1aJSDm768UGETcZmJm6vNDQ/pEIVq3r8G7ORrbGJVH9VZ98fGT/GYmdbRxseWrvkMOtN7/wX6nOotbYof0kyJcr34/wzTr12iJFnCKT1oti156ymzpE4VfY/da+kpC32SRpy7xSLo3kOYk2tp5q6zVfG7Sdbpk2JcekHz0xQlOztGSQRABJxFotMxDEyDuAKChAOEaYwEWmMAPJJUr0e04+b7CXtPoo1D87/WJFDmoY8R9SS4+od8IFFjPuq1d9K1jjE8hnFbrsM2BbktXgEMfBUv8f0iEdvqeOqL7Bb2u3pMw5/ficwc8YsHv1C7T6cPCaJOHBu6dMYr9P51fbIJfwxfJzeGkYc9FiKKTx79s3LMmhVBe++/ayumlr03o43bV3XNW2Pr3Nq6yBvZh+vEjfs0PgvkVM7uf+5pzPTmR003xilc9gITBCJxTABJGLKFDpFiUJBVVAyoMxiKyKS76ysIIHCOfbPLptQjaeh/Q8/3oiMrkMpyekzrm9Y8kO3RxIaro28tAyon9s33kL5oCjeO3j8kGY23udUcslHx3A92dEwCzH7Y+bs0Z+3xD/lmEvWwiQQbT5ZLApK1WOppG2aWm0ac+RpltM+97CzqFvIrb6mBdnaPtOG67/n
8eqyP6y/Z9riOGO1z7dvVGCY5ZqbFWX6uouaFD2z1OB60K4NjaMDOMlcSS5Rm8VQiRxEyBCKK+M4CophMToHSQhyohSZloeVLbiy6YHzJr5DwVrNx+t9pr9ZeFTRDkP7NrYQT+pcs+b8ILS5VFxIHL9xXtQDMr8u1YPCR944FnwfOmr+6XmaFa4jPqD9xzpm2BkxOYxICu/ZyTDba8mpqlpB5xXr7JxaJL8myDvrM0vMq63vhyhrvw9blbLav7jcmEePv52WbyMI3eWTans/Yqq8+5pvc/770p7n/EW2h/0W8S6SircumGecr0nW0OVwNFQQMkORrqCgRMExAgCAMliOAgQZUT47h8+UkI+rivbYbCwXX4u8FWbuh7jQTngqA/tbkOczBdjJClnA4baFtUPFV4RGBiq4x642ID3AaE7WtVQx35mbZvgp1yD2+h/POrL6AmmUrYlTpjVBJWLL+zEC5cgkwz36KxNC31YGYZ9fFu543Xd2MxI07BRuHdvikB3aOh4Wa1NT4V6cWpTyvK2peKo9Yd07NVuNKbuWZLzDqoCJNQs3bGbrW3nRMBRijErkDDFGZpMZHCIGQTIIBAEYBEUioRwmghIDxjYE3hsazdkzlpMQK39Sm73dYAJD7psvN4mcK4S3mHfa79zmEXzPbu8q73br0q5XDWXrS1cEh+FzXoyAJEHXD+etY7v+xKgT+IsTPpBWpm1i033i142hLfLylSMrrnZ605VOZIatoEca+0YYRgp1Ny3yFO5/5Pbd9vVno7w3PQJuUqHMoLO+gVsdoWpdrctTrfpxKerD6fC1wl2wDUKZn6Y7u4xHAuQa34UISmWiDAxGpAQpEQDkMACJIaZwgiZGhBLoGe2a19IrjWfKgFJBmYqf5QXOVU27PoZXwcgu4WZu0Bko8/1c0kbR/IZ9fcTomijt/erS1OJTLU2mue/Uep23Do8VNf+JccQqyjFjGlv8MsBimVPP+NfltUTNguP7NhMIXf9cCDhbRY4a1T7lb/TO6eVOUszmJdrGKZXncnhOseCjVL43Idwsa9I9ZXsY5eLgfkFLjaIBH2oV8hImj1Xt7vihxuLLpVKTYSlEQ2gYBkoAOfr/XxoKCVIZxgdkYzKmim3SWVGf9gXgwb7lT8502awKOBEgVm+d2hx0wVX190T8q6qNqLxmc1ZCxdkbMyqU3w3K6FAexzaEMVfDXuk7xv5ByHn/p94x61Od7+C/L2hrU/2/3h+efL6gHPkrfb1vrFbcijyP7xX0whNPEqwqwNFdjNUZpXf8HngFlWa98MtJ0XvioXO9Vg6rntvMvPegPzCZMvvC9IbSCuX0EEZKwNEi7o+W6WlUMimRyEcAIolBRggwghExmVxOAEAZKJVKBIQZhE4l4mPgDGs4RCO8RCH7ns9rfsW92LDWBQfXYJ4Hmr6/Pf90rc12fQvHzr6F8TYSlyLevJ+yhuy7i7+5KfkGW63d1DH7LuMPjFGRvlyF1IGfjcox2lyWob7x1bMXyROHa0seYP9oDmvm3mh3fZebmtcGPHhEMX1yreD90GPmZBDdnLKwOPiBhfvA0kc/EPkqqKLoJz/0v5FNZxS4q6rtPQLEw987+uQCQCKalMhFPAKCkyG5DEVRGJXJQAiQS0FIJBVL+Uo03pJaVaN2c1Z1gNaFHU6TjddcOtQOLdNJf7QOqb3sRWz+mm7jUzZUkB8dd0jDPLeUH9bhu/mLqnH+oiN2iR4Fs2eiEur/4OqiK8se2roPlyZ67zifIZ4Xv+7Jm3memYm6WgPt5R5LdBXCc92kGq5b1BM414/wfrVrDEqOzD7es+7x1uDnGTS36767VKaLRt+R9MqeDGYcvm1obb2H86s/X13UR2ROiSVyKYkBTELQLEEmRKQASJSCfAiQSxBAROKLICI4IxskDDEnv7Qw663UehRl41cvX92Bhh+NnimoKLlVseR92yG3kJeLvYdNv7SLeCs+FiapzWYJSx3sPjevelPRy3x+
vV91XeAfescDlu75+f+elfxet3ZxHKhTfrYksnNt2SVXjyA717B6wvGLuY9jFyUwjvx6mUN7cI3/+/PRoI+ZO++WFXLJGOeAvZbVSvUfxVsUseqFr9L6U26P3ZoSyl5Fk+auy5YqY6iMAIEQmSBDSQTJnEQiEclkUrFEIhWIYAlEJhAIirA65Tt5gLZH1KPR3K746elDXlWwBeii8yjE+mac9NGbCL4fif3fS+rN5ofvmwKXPyw8r/7dOOJL9IKTN/Uqy0meGQVvwiV/cDXuW+UHbGllrR2lWf76umeTxP6ncOGd1A27VffMN5ns2LT7TMyWb1uLsivnBfzifb68tFtTrfaGHP6xpMjsQ0fqVDeUoJJX+0PJwyeKWCf5nFupdkO0IN4m/GaOqvJQdx0fJJBAIX9OTgAlMjlMoRBQAgTI/3/cBJKLSKQxkrp0ts+Qu19K8AAIRypfbSb5Zwbtg66EmMSbllC1Awe39d0eci9yaH78icQqm/d29Z1nJo/gPaL3yQlzz6WUr97LmFf/wMhPPHfk6M3t3sUGiFdvrqB6567OUcfCLwcUbCwyVuu3Plr+sKMWAFjaezVec5blbL99cHu4mqMzw6jTaUeD1WwGYWTZUsGr2n+j7rdK3xXlTobYhQZzLudDQv7T6TlHOzGEAKhIICYBgFwmhagYisAgAQQIMhQAQSksA3mkfiPWT8W5b8SPWdfUga9q7kDH7Idr65ovZq1ZI3g5dPCt5njfM52xNUbHpr0aM/tkYoetzt5s/28+MO1pz+3RoJcJX67+7/U4jdhb1BNR2y3FtVURGtMHGvZuuWES1e50aezLHM/qi4Vlzfb6xgFL83XFXfuVkgMEYfXeezPdjzhsE9R1n1nV4bBmfJ594UjkvPxAJT3u6Dex+cx3nySTkX2PIrhlFNNRRA4hM7AElIKgXA7IJVKZHIDlACRDQEAGE+WwjC1eJhxHJijuR5wUXh2KyD5ZqrkjgS2x1lW0WuL3c/jEvdfB382BvUWW54rs56TZCY7TXTsXxbzo235z7e+giYvGzulBfqT/zRg2szWDpCA91pGnq251aGvKBFIIzto/7DpgvDJvUvpoiawAWTi/I5IM3u5iO7J+mH/4lKatEe5unvX4nikRLPQ/Sk261ipBlRfOEm+Z3kEMWy7tig4Gzc/f7G51IcxAIhkslYEogQBJAQCECXKAQJCBgBSUywkACJB5IGt4cKFIFrJ7LveyvVGJgVpBFB8YDD1d2H3Iu9FwlU1idvXFj98bv4R7fHv28YP6xy3VMUz7mHIw6PdIOOHyc9ucEePfJNX/7WpkxLXIV1D8ZeWRr+4R08Nck5/R24crooo3LCp96/zxpoVpO9ncJPyfRyLyIWGAU7O7xWbSHeGU4Y0P58YcmveKvQ4e/YSvXR/SG3ygJHneuzLz9JuioaWbWzrKjslQQCIRyQFYTkBJRLlULgNhDCHCREAOUKQQCEkgFJYTEP0ip66cyzN5YN/G7mNxxXEn9nilDtAip13zGuZm1hmT+LxQm4M2YPQp3Uuev8xcD/sXGBfXqpaZNOz04V/uee7YW9qk/r8ZnwcI5q4VdsMj0QyF0dk11Low0346+F2PM1CKhlruXXAzGPE/unx35qax+F+raRQ7m/ZXZ8OocRYbS1iRQOPYlublD9Kci2u8E3RNvU/Lfp/rldhuH/oZk7ZRiySFZTMiAJTDBDKMyCUyAoJDJCIBAgAABgACgCIQPo0iALChQKpeqetXme1Q2LpVezxIVvswwv31lHGyIyifms79dIX84dMNh063ctPVlU1FZjXb4zWz7WI06V+/X/mYUnsg3m3w/wDNJhO2GmPnmQAAAABJRU5ErkJggg==\n",
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "from PIL import Image\n",
+ "\n",
+ "filepath = '/home/ma-user/work/Course_NNDL/pack/lab4/resnet/dataset_volcano/train/volcano/' \n",
+ "# 火山地形InSAR监测图\n",
+ "\n",
+ "pictures=os.listdir(filepath)\n",
+ "numofimg = 778 \n",
+ "# 图片序号(修改可查看其他图片)\n",
+ "im = Image.open(filepath+pictures[numofimg])\n",
+ "im.show()\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4028c740",
+ "metadata": {},
+ "source": [
+ "## 3. 构建ResNet50模型"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "47783eba",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "1e923575",
+ "metadata": {},
+ "source": [
+ "基于残差网络,设计出了resnet50,resnet101等神经网络,这里仅对resnet50进行介绍,其网络结构如下:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "131cc34a",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "c2bf0323",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# define net\n",
+ "net = resnet(class_num=config.class_num)\n",
+ "# 详细搭建过程可查看src/resnet.py\n",
+ "if args_opt.parameter_server:\n",
+ " net.set_param_ps()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "71c77af9",
+ "metadata": {},
+ "source": [
+ "## 4. 训练模型"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9aa11884",
+ "metadata": {},
+ "source": [
+ "### 定义损失函数与优化器\n",
+ "\n",
+ "这一部分主要包括初始化环境、定义优化器、定义回调函数、初始化学习率4个模块"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "e8bcd2df",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "target = args_opt.device_target\n",
+ "if target == \"CPU\":\n",
+ " args_opt.run_distribute = False\n",
+ "\n",
+ "ckpt_save_dir = config.save_checkpoint_path\n",
+ "\n",
+ "# init context\n",
+ "context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)\n",
+ "if args_opt.parameter_server:\n",
+ " context.set_ps_context(enable_ps=True)\n",
+ "\n",
+ "# init lr\n",
+ "if args_opt.net in (\"resnet18\", \"resnet50\", \"se-resnet50\"):\n",
+ " lr = get_lr(lr_init=config.lr_init, lr_end=config.lr_end, lr_max=config.lr_max,\n",
+ " warmup_epochs=config.warmup_epochs, total_epochs=config.epoch_size, steps_per_epoch=step_size,\n",
+ " lr_decay_mode=config.lr_decay_mode)\n",
+ "else:\n",
+ " lr = warmup_cosine_annealing_lr(config.lr, step_size, config.warmup_epochs, config.epoch_size,\n",
+ " config.pretrain_epoch_size * step_size)\n",
+ "lr = Tensor(lr)\n",
+ "\n",
+ "# define opt\n",
+ "decayed_params = []\n",
+ "no_decayed_params = []\n",
+ "for param in net.trainable_params():\n",
+ " if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name:\n",
+ " decayed_params.append(param)\n",
+ " else:\n",
+ " no_decayed_params.append(param)\n",
+ "\n",
+ "group_params = [{'params': decayed_params, 'weight_decay': config.weight_decay},\n",
+ " {'params': no_decayed_params},\n",
+ " {'order_params': net.trainable_params()}]\n",
+ "opt = Momentum(group_params, lr, config.momentum, loss_scale=config.loss_scale)\n",
+ "\n",
+ "loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')\n",
+ "loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)\n",
+ "dist_eval_network = ClassifyCorrectCell(net) if args_opt.run_distribute else None\n",
+ "\n",
+ "\n",
+ "# define callbacks\n",
+ "time_cb = TimeMonitor(data_size=step_size)\n",
+ "loss_cb = LossMonitor()\n",
+ "cb = [time_cb, loss_cb]\n",
+ "if config.save_checkpoint:\n",
+ " config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * step_size,\n",
+ " keep_checkpoint_max=config.keep_checkpoint_max)\n",
+ " ckpt_cb = ModelCheckpoint(prefix=\"resnet\", directory=ckpt_save_dir, config=config_ck)\n",
+ " cb += [ckpt_cb]\n",
+ "if args_opt.run_eval:\n",
+ " if args_opt.eval_dataset_path is None or (not os.path.isdir(args_opt.eval_dataset_path)):\n",
+ " raise ValueError(\"{} is not a existing path.\".format(args_opt.eval_dataset_path))\n",
+ " eval_dataset = create_dataset(dataset_path=args_opt.eval_dataset_path, do_train=False,\n",
+ " batch_size=config.batch_size, target=target)\n",
+ " eval_param_dict = {\"model\": model, \"dataset\": eval_dataset, \"metrics_name\": \"acc\"}\n",
+ " eval_cb = EvalCallBack(apply_eval, eval_param_dict, interval=args_opt.eval_interval,\n",
+ " eval_start_epoch=args_opt.eval_start_epoch, save_best_ckpt=True,\n",
+ " ckpt_directory=ckpt_save_dir, besk_ckpt_name=\"best_acc.ckpt\",\n",
+ " metrics_name=\"acc\")\n",
+ " cb += [eval_cb]\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "02b27e3f",
+ "metadata": {},
+ "source": [
+ "### 定义保存路径与训练\n",
+ "这一部分主要定义了checkpoint的保存、是否使用已有模型和生成训练模型"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "fc73817c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ckpt_save_dir = config.save_checkpoint_path\n",
+ "\n",
+ "# init weight\n",
+ "if args_opt.pre_trained:\n",
+ " param_dict = load_checkpoint(args_opt.pre_trained)\n",
+ " if args_opt.filter_weight:\n",
+ " filter_list = [x.name for x in net.end_point.get_parameters()]\n",
+ " filter_checkpoint_parameter_by_list(param_dict, filter_list)\n",
+ " load_param_into_net(net, param_dict)\n",
+ "else:\n",
+ " for _, cell in net.cells_and_names():\n",
+ " if isinstance(cell, nn.Conv2d):\n",
+ " cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),\n",
+ " cell.weight.shape,\n",
+ " cell.weight.dtype))\n",
+ " if isinstance(cell, nn.Dense):\n",
+ " cell.weight.set_data(weight_init.initializer(weight_init.TruncatedNormal(),\n",
+ " cell.weight.shape,\n",
+ " cell.weight.dtype))\n",
+ "\n",
+ "# define model\n",
+ "metrics = {\"acc\"}\n",
+ "if args_opt.run_distribute:\n",
+ " metrics = {'acc': DistAccuracy(batch_size=config.batch_size, device_num=args_opt.device_num)}\n",
+ "model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics=metrics,\n",
+ " amp_level=\"O2\", keep_batchnorm_fp32=False, eval_network=dist_eval_network)\n",
+ "if (args_opt.net != \"resnet101\" and args_opt.net != \"resnet50\") or \\\n",
+ " args_opt.parameter_server or target == \"CPU\":\n",
+ " ## fp32 training\n",
+ " model = Model(net, loss_fn=loss, optimizer=opt, metrics=metrics, eval_network=dist_eval_network)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e7a0dea1",
+ "metadata": {},
+ "source": [
+ "### 开始训练"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "id": "f7295540",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "epoch: 1 step: 170, loss is 0.92662865\n",
+ "epoch time: 146278.105 ms, per step time: 860.459 ms\n",
+ "epoch: 2 step: 170, loss is 0.4696266\n",
+ "epoch time: 2888.519 ms, per step time: 16.991 ms\n",
+ "epoch: 3 step: 170, loss is 0.5504666\n",
+ "epoch time: 2888.620 ms, per step time: 16.992 ms\n",
+ "epoch: 4 step: 170, loss is 0.55341554\n",
+ "epoch time: 2887.737 ms, per step time: 16.987 ms\n",
+ "epoch: 5 step: 170, loss is 0.57959074\n",
+ "epoch time: 4202.674 ms, per step time: 24.722 ms\n",
+ "epoch: 6 step: 170, loss is 0.5106913\n",
+ "epoch time: 2897.639 ms, per step time: 17.045 ms\n",
+ "epoch: 7 step: 170, loss is 0.42642742\n",
+ "epoch time: 2893.739 ms, per step time: 17.022 ms\n",
+ "epoch: 8 step: 170, loss is 0.4075601\n",
+ "epoch time: 2888.433 ms, per step time: 16.991 ms\n",
+ "epoch: 9 step: 170, loss is 0.35345113\n",
+ "epoch time: 2901.495 ms, per step time: 17.068 ms\n",
+ "epoch: 10 step: 170, loss is 0.34613144\n",
+ "epoch time: 4622.647 ms, per step time: 27.192 ms\n",
+ "epoch: 11 step: 170, loss is 0.40896726\n",
+ "epoch time: 2887.511 ms, per step time: 16.985 ms\n",
+ "epoch: 12 step: 170, loss is 0.21887568\n",
+ "epoch time: 2959.818 ms, per step time: 17.411 ms\n",
+ "epoch: 13 step: 170, loss is 0.4700133\n",
+ "epoch time: 2900.080 ms, per step time: 17.059 ms\n",
+ "epoch: 14 step: 170, loss is 0.14496548\n",
+ "epoch time: 2890.494 ms, per step time: 17.003 ms\n",
+ "epoch: 15 step: 170, loss is 0.21069637\n",
+ "epoch time: 4350.275 ms, per step time: 25.590 ms\n",
+ "epoch: 16 step: 170, loss is 0.1744703\n",
+ "epoch time: 2892.088 ms, per step time: 17.012 ms\n",
+ "epoch: 17 step: 170, loss is 0.0926687\n",
+ "epoch time: 2889.344 ms, per step time: 16.996 ms\n",
+ "epoch: 18 step: 170, loss is 0.24947652\n",
+ "epoch time: 2890.448 ms, per step time: 17.003 ms\n",
+ "epoch: 19 step: 170, loss is 0.3401838\n",
+ "epoch time: 2888.420 ms, per step time: 16.991 ms\n",
+ "epoch: 20 step: 170, loss is 0.2485364\n",
+ "epoch time: 4708.752 ms, per step time: 27.699 ms\n",
+ "epoch: 21 step: 170, loss is 0.17957874\n",
+ "epoch time: 2895.103 ms, per step time: 17.030 ms\n",
+ "epoch: 22 step: 170, loss is 0.339902\n",
+ "epoch time: 2899.719 ms, per step time: 17.057 ms\n",
+ "epoch: 23 step: 170, loss is 0.31368703\n",
+ "epoch time: 2964.401 ms, per step time: 17.438 ms\n",
+ "epoch: 24 step: 170, loss is 0.19620807\n",
+ "epoch time: 2888.450 ms, per step time: 16.991 ms\n",
+ "epoch: 25 step: 170, loss is 0.1614417\n",
+ "epoch time: 4613.073 ms, per step time: 27.136 ms\n",
+ "epoch: 26 step: 170, loss is 0.08407582\n",
+ "epoch time: 2887.851 ms, per step time: 16.987 ms\n",
+ "epoch: 27 step: 170, loss is 0.13144577\n",
+ "epoch time: 2890.520 ms, per step time: 17.003 ms\n",
+ "epoch: 28 step: 170, loss is 0.12666999\n",
+ "epoch time: 2890.524 ms, per step time: 17.003 ms\n",
+ "epoch: 29 step: 170, loss is 0.23412418\n",
+ "epoch time: 2885.949 ms, per step time: 16.976 ms\n",
+ "epoch: 30 step: 170, loss is 0.22468564\n",
+ "epoch time: 4406.060 ms, per step time: 25.918 ms\n",
+ "epoch: 31 step: 170, loss is 0.24919192\n",
+ "epoch time: 2885.955 ms, per step time: 16.976 ms\n",
+ "epoch: 32 step: 170, loss is 0.1554795\n",
+ "epoch time: 2885.684 ms, per step time: 16.975 ms\n",
+ "epoch: 33 step: 170, loss is 0.2973314\n",
+ "epoch time: 2885.653 ms, per step time: 16.974 ms\n",
+ "epoch: 34 step: 170, loss is 0.11782022\n",
+ "epoch time: 2885.917 ms, per step time: 16.976 ms\n",
+ "epoch: 35 step: 170, loss is 0.21221499\n",
+ "epoch time: 4621.872 ms, per step time: 27.187 ms\n",
+ "epoch: 36 step: 170, loss is 0.15174696\n",
+ "epoch time: 2887.326 ms, per step time: 16.984 ms\n",
+ "epoch: 37 step: 170, loss is 0.20184667\n",
+ "epoch time: 2931.716 ms, per step time: 17.245 ms\n",
+ "epoch: 38 step: 170, loss is 0.27958253\n",
+ "epoch time: 2903.559 ms, per step time: 17.080 ms\n",
+ "epoch: 39 step: 170, loss is 0.4011573\n",
+ "epoch time: 2887.033 ms, per step time: 16.983 ms\n",
+ "epoch: 40 step: 170, loss is 0.27518708\n",
+ "epoch time: 4623.882 ms, per step time: 27.199 ms\n",
+ "epoch: 41 step: 170, loss is 0.0341114\n",
+ "epoch time: 2901.196 ms, per step time: 17.066 ms\n",
+ "epoch: 42 step: 170, loss is 0.2519011\n",
+ "epoch time: 2886.812 ms, per step time: 16.981 ms\n",
+ "epoch: 43 step: 170, loss is 0.14044799\n",
+ "epoch time: 2886.515 ms, per step time: 16.979 ms\n",
+ "epoch: 44 step: 170, loss is 0.32893103\n",
+ "epoch time: 2888.534 ms, per step time: 16.991 ms\n",
+ "epoch: 45 step: 170, loss is 0.1999063\n",
+ "epoch time: 4676.197 ms, per step time: 27.507 ms\n",
+ "epoch: 46 step: 170, loss is 0.20151037\n",
+ "epoch time: 2887.730 ms, per step time: 16.987 ms\n",
+ "epoch: 47 step: 170, loss is 0.23471475\n",
+ "epoch time: 2886.835 ms, per step time: 16.981 ms\n",
+ "epoch: 48 step: 170, loss is 0.15508586\n",
+ "epoch time: 2887.246 ms, per step time: 16.984 ms\n",
+ "epoch: 49 step: 170, loss is 0.16285518\n",
+ "epoch time: 2897.016 ms, per step time: 17.041 ms\n",
+ "epoch: 50 step: 170, loss is 0.09520013\n",
+ "epoch time: 4648.258 ms, per step time: 27.343 ms\n",
+ "epoch: 51 step: 170, loss is 0.32149917\n",
+ "epoch time: 2891.785 ms, per step time: 17.010 ms\n",
+ "epoch: 52 step: 170, loss is 0.11255784\n",
+ "epoch time: 2892.017 ms, per step time: 17.012 ms\n",
+ "epoch: 53 step: 170, loss is 0.14723083\n",
+ "epoch time: 2891.747 ms, per step time: 17.010 ms\n",
+ "epoch: 54 step: 170, loss is 0.102373555\n",
+ "epoch time: 2888.816 ms, per step time: 16.993 ms\n",
+ "epoch: 55 step: 170, loss is 0.09419954\n",
+ "epoch time: 4396.924 ms, per step time: 25.864 ms\n",
+ "epoch: 56 step: 170, loss is 0.1116611\n",
+ "epoch time: 2888.037 ms, per step time: 16.988 ms\n",
+ "epoch: 57 step: 170, loss is 0.103048995\n",
+ "epoch time: 2889.398 ms, per step time: 16.996 ms\n",
+ "epoch: 58 step: 170, loss is 0.074179225\n",
+ "epoch time: 2888.134 ms, per step time: 16.989 ms\n",
+ "epoch: 59 step: 170, loss is 0.19016893\n",
+ "epoch time: 2890.880 ms, per step time: 17.005 ms\n",
+ "epoch: 60 step: 170, loss is 0.39744663\n",
+ "epoch time: 4673.986 ms, per step time: 27.494 ms\n",
+ "epoch: 61 step: 170, loss is 0.17744151\n",
+ "epoch time: 2891.951 ms, per step time: 17.011 ms\n",
+ "epoch: 62 step: 170, loss is 0.05842795\n",
+ "epoch time: 2902.380 ms, per step time: 17.073 ms\n",
+ "epoch: 63 step: 170, loss is 0.10902926\n",
+ "epoch time: 2947.777 ms, per step time: 17.340 ms\n",
+ "epoch: 64 step: 170, loss is 0.09190318\n",
+ "epoch time: 2891.484 ms, per step time: 17.009 ms\n",
+ "epoch: 65 step: 170, loss is 0.110902146\n",
+ "epoch time: 4661.847 ms, per step time: 27.423 ms\n",
+ "epoch: 66 step: 170, loss is 0.04955488\n",
+ "epoch time: 2891.100 ms, per step time: 17.006 ms\n",
+ "epoch: 67 step: 170, loss is 0.19169034\n",
+ "epoch time: 2890.701 ms, per step time: 17.004 ms\n",
+ "epoch: 68 step: 170, loss is 0.034920547\n",
+ "epoch time: 2891.719 ms, per step time: 17.010 ms\n",
+ "epoch: 69 step: 170, loss is 0.0778444\n",
+ "epoch time: 2897.604 ms, per step time: 17.045 ms\n",
+ "epoch: 70 step: 170, loss is 0.12459681\n",
+ "epoch time: 4642.207 ms, per step time: 27.307 ms\n",
+ "epoch: 71 step: 170, loss is 0.3035383\n",
+ "epoch time: 2890.911 ms, per step time: 17.005 ms\n",
+ "epoch: 72 step: 170, loss is 0.2915015\n",
+ "epoch time: 2890.630 ms, per step time: 17.004 ms\n",
+ "epoch: 73 step: 170, loss is 0.071956985\n",
+ "epoch time: 2895.871 ms, per step time: 17.035 ms\n",
+ "epoch: 74 step: 170, loss is 0.052905276\n",
+ "epoch time: 2895.540 ms, per step time: 17.033 ms\n",
+ "epoch: 75 step: 170, loss is 0.03699213\n",
+ "epoch time: 4640.730 ms, per step time: 27.298 ms\n",
+ "epoch: 76 step: 170, loss is 0.24090245\n",
+ "epoch time: 2892.159 ms, per step time: 17.013 ms\n",
+ "epoch: 77 step: 170, loss is 0.22964533\n",
+ "epoch time: 2892.466 ms, per step time: 17.015 ms\n",
+ "epoch: 78 step: 170, loss is 0.09723104\n",
+ "epoch time: 2892.390 ms, per step time: 17.014 ms\n",
+ "epoch: 79 step: 170, loss is 0.06380415\n",
+ "epoch time: 2892.618 ms, per step time: 17.015 ms\n",
+ "epoch: 80 step: 170, loss is 0.06995268\n",
+ "epoch time: 4665.654 ms, per step time: 27.445 ms\n",
+ "epoch: 81 step: 170, loss is 0.09262623\n",
+ "epoch time: 2893.791 ms, per step time: 17.022 ms\n",
+ "epoch: 82 step: 170, loss is 0.15163112\n",
+ "epoch time: 2899.280 ms, per step time: 17.055 ms\n",
+ "epoch: 83 step: 170, loss is 0.062050276\n",
+ "epoch time: 2892.264 ms, per step time: 17.013 ms\n",
+ "epoch: 84 step: 170, loss is 0.12270993\n",
+ "epoch time: 2890.442 ms, per step time: 17.003 ms\n",
+ "epoch: 85 step: 170, loss is 0.17460671\n",
+ "epoch time: 4885.412 ms, per step time: 28.738 ms\n",
+ "epoch: 86 step: 170, loss is 0.06853009\n",
+ "epoch time: 2893.799 ms, per step time: 17.022 ms\n",
+ "epoch: 87 step: 170, loss is 0.13602191\n",
+ "epoch time: 2889.697 ms, per step time: 16.998 ms\n",
+ "epoch: 88 step: 170, loss is 0.08091751\n",
+ "epoch time: 2890.254 ms, per step time: 17.001 ms\n",
+ "epoch: 89 step: 170, loss is 0.012043503\n",
+ "epoch time: 2890.285 ms, per step time: 17.002 ms\n",
+ "epoch: 90 step: 170, loss is 0.06919499\n",
+ "epoch time: 4105.783 ms, per step time: 24.152 ms\n"
+ ]
+ }
+ ],
+ "source": [
+ "# train model\n",
+ "if args_opt.net == \"se-resnet50\":\n",
+ " config.epoch_size = config.train_epoch_size\n",
+ "dataset_sink_mode = (not args_opt.parameter_server) and target != \"CPU\"\n",
+ "print(\"============== Starting Training ==============\")\n",
+ "model.train(config.epoch_size - config.pretrain_epoch_size, dataset, callbacks=cb,\n",
+ " sink_size=dataset.get_dataset_size(), dataset_sink_mode=dataset_sink_mode)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "6d85cc58",
+ "metadata": {},
+ "source": [
+ "## 5. 测试网络模型"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "id": "fb3cfe8e",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "============== Starting Testing ==============\n",
+ "============== {'acc': 0.9821428571428571} ==============\n"
+ ]
+ }
+ ],
+ "source": [
+ "test_data_path='/home/ma-user/work/Course_NNDL/pack/lab4/resnet/dataset_volcano/eval/'\n",
+ "# 测试集地址\n",
+ "\n",
+ "print(\"============== Starting Testing ==============\")\n",
+ "ds_eval = create_dataset(test_data_path, target='Ascend',do_train=False)\n",
+ "if ds_eval.get_dataset_size() == 0:\n",
+ " raise ValueError(\"Please check dataset size > 0 and batch_size <= dataset size\")\n",
+ "\n",
+ "acc = model.eval(ds_eval)\n",
+ "print(\"============== {} ==============\".format(acc))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "709304f2",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "MindSpore-python3.7-aarch64",
+ "language": "python",
+ "name": "mindspore-python3.7-aarch64"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/.keep" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/.keep"
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/CrossEntropySmooth.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/CrossEntropySmooth.py"
new file mode 100644
index 0000000000000000000000000000000000000000..55d5d8b808217da1219f4a79acfbc6c2903deaa4
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/CrossEntropySmooth.py"
@@ -0,0 +1,38 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""define loss function for network"""
+import mindspore.nn as nn
+from mindspore import Tensor
+from mindspore.common import dtype as mstype
+from mindspore.nn.loss.loss import _Loss
+from mindspore.ops import functional as F
+from mindspore.ops import operations as P
+
+
+class CrossEntropySmooth(_Loss):
+ """CrossEntropy"""
+ def __init__(self, sparse=True, reduction='mean', smooth_factor=0., num_classes=1000):
+ super(CrossEntropySmooth, self).__init__()
+ self.onehot = P.OneHot()
+ self.sparse = sparse
+ self.on_value = Tensor(1.0 - smooth_factor, mstype.float32)
+ self.off_value = Tensor(1.0 * smooth_factor / (num_classes - 1), mstype.float32)
+ self.ce = nn.SoftmaxCrossEntropyWithLogits(reduction=reduction)
+
+ def construct(self, logit, label):
+ if self.sparse:
+ label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value)
+ loss = self.ce(logit, label)
+ return loss
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/config.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/config.py"
new file mode 100644
index 0000000000000000000000000000000000000000..8aa42ee023afeacead630de964ff3c61da8f9cea
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/config.py"
@@ -0,0 +1,194 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+network config setting, will be used in train.py and eval.py
+"""
+from easydict import EasyDict as ed
+# config optimizer for resnet50, imagenet2012. Momentum is default, Thor is optional.
+cfg = ed({
+ 'optimizer': 'Momentum',
+ })
+
+# config for resent50, cifar10
+config1 = ed({
+ "class_num": 10,
+ "batch_size": 32,
+ "loss_scale": 1024,
+ "momentum": 0.9,
+ "weight_decay": 1e-4,
+ "epoch_size": 90,
+ "pretrain_epoch_size": 0,
+ "save_checkpoint": True,
+ "save_checkpoint_epochs": 5,
+ "keep_checkpoint_max": 10,
+ "save_checkpoint_path": "./",
+ "warmup_epochs": 5,
+ "lr_decay_mode": "poly",
+ "lr_init": 0.01,
+ "lr_end": 0.00001,
+ "lr_max": 0.1
+})
+
+# config for resent50, volcano
+config_v = ed({
+ "class_num": 2,
+ "batch_size": 128,
+ "loss_scale": 1024,
+ "momentum": 0.9,
+ "weight_decay": 1e-4,
+ "epoch_size": 100,
+ "pretrain_epoch_size": 0,
+ "save_checkpoint": True,
+ "save_checkpoint_epochs": 5,
+ "keep_checkpoint_max": 10,
+ "save_checkpoint_path": "./",
+ "warmup_epochs": 5,
+ "lr_decay_mode": "poly",
+ "lr_init": 0.01,
+ "lr_end": 0.00001,
+ "lr_max": 0.1
+})
+
+# config for resnet50, imagenet2012
+config2 = ed({
+ "class_num": 1001,
+ "batch_size": 256,
+ "loss_scale": 1024,
+ "momentum": 0.9,
+ "weight_decay": 1e-4,
+ "epoch_size": 90,
+ "pretrain_epoch_size": 0,
+ "save_checkpoint": True,
+ "save_checkpoint_epochs": 5,
+ "keep_checkpoint_max": 10,
+ "save_checkpoint_path": "./",
+ "warmup_epochs": 0,
+ "lr_decay_mode": "linear",
+ "use_label_smooth": True,
+ "label_smooth_factor": 0.1,
+ "lr_init": 0,
+ "lr_max": 0.8,
+ "lr_end": 0.0
+})
+
+# config for resent101, imagenet2012
+config3 = ed({
+ "class_num": 1001,
+ "batch_size": 32,
+ "loss_scale": 1024,
+ "momentum": 0.9,
+ "weight_decay": 1e-4,
+ "epoch_size": 120,
+ "pretrain_epoch_size": 0,
+ "save_checkpoint": True,
+ "save_checkpoint_epochs": 5,
+ "keep_checkpoint_max": 10,
+ "save_checkpoint_path": "./",
+ "warmup_epochs": 0,
+ "lr_decay_mode": "cosine",
+ "use_label_smooth": True,
+ "label_smooth_factor": 0.1,
+ "lr": 0.1
+})
+
+# config for se-resnet50, imagenet2012
+config4 = ed({
+ "class_num": 1001,
+ "batch_size": 32,
+ "loss_scale": 1024,
+ "momentum": 0.9,
+ "weight_decay": 1e-4,
+ "epoch_size": 28,
+ "train_epoch_size": 24,
+ "pretrain_epoch_size": 0,
+ "save_checkpoint": True,
+ "save_checkpoint_epochs": 4,
+ "keep_checkpoint_max": 10,
+ "save_checkpoint_path": "./",
+ "warmup_epochs": 3,
+ "lr_decay_mode": "cosine",
+ "use_label_smooth": True,
+ "label_smooth_factor": 0.1,
+ "lr_init": 0.0,
+ "lr_max": 0.3,
+ "lr_end": 0.0001
+})
+
+# config for resnet50, imagenet2012, Ascend 910
+config_thor_Ascend = ed({
+ "class_num": 1001,
+ "batch_size": 32,
+ "loss_scale": 128,
+ "momentum": 0.9,
+ "weight_decay": 5e-4,
+ "epoch_size": 45,
+ "pretrain_epoch_size": 0,
+ "save_checkpoint": True,
+ "save_checkpoint_epochs": 2,
+ "keep_checkpoint_max": 15,
+ "save_checkpoint_path": "./",
+ "use_label_smooth": True,
+ "label_smooth_factor": 0.1,
+ "lr_init": 0.05803,
+ "lr_decay": 4.04839,
+ "lr_end_epoch": 53,
+ "damping_init": 0.02714,
+ "damping_decay": 0.50036,
+ "frequency": 834,
+})
+
+# config for resnet50, imagenet2012, GPU
+config_thor_gpu = ed({
+ "class_num": 1001,
+ "batch_size": 32,
+ "loss_scale": 128,
+ "momentum": 0.9,
+ "weight_decay": 5e-4,
+ "epoch_size": 40,
+ "pretrain_epoch_size": 0,
+ "save_checkpoint": True,
+ "save_checkpoint_epochs": 1,
+ "keep_checkpoint_max": 15,
+ "save_checkpoint_path": "./",
+ "use_label_smooth": True,
+ "label_smooth_factor": 0.1,
+ "lr_init": 0.05672,
+ "lr_decay": 4.9687,
+ "lr_end_epoch": 50,
+ "damping_init": 0.02345,
+ "damping_decay": 0.5467,
+ "frequency": 834,
+})
+
+
+# config for resnet50, Volcano
+config_volcano = ed({
+ "class_num": 2,
+ "batch_size": 32,
+ "loss_scale": 1024,
+ "momentum": 0.9,
+ "weight_decay": 1e-4,
+ "epoch_size": 60,
+ "pretrain_epoch_size": 0,
+ "save_checkpoint": True,
+ "save_checkpoint_epochs": 5,
+ "keep_checkpoint_max": 10,
+ "save_checkpoint_path": "./",
+ "warmup_epochs": 5,
+ "lr_decay_mode": "poly",
+ "lr_init": 0.001,
+ "lr_end": 0.00001,
+ "lr_max": 0.08
+})
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/dataset.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/dataset.py"
new file mode 100644
index 0000000000000000000000000000000000000000..0c2d116a1b80da8447a537856cc72550b326ca51
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/dataset.py"
@@ -0,0 +1,288 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+create train or eval dataset.
+"""
+import os
+import mindspore.common.dtype as mstype
+import mindspore.dataset as ds
+import mindspore.dataset.vision.c_transforms as C
+import mindspore.dataset.transforms.c_transforms as C2
+from mindspore.communication.management import init, get_rank, get_group_size
+
+
def create_dataset1(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
    """
    Build the CIFAR-10 pipeline for ResNet-50 training or evaluation.

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): True adds random crop/flip augmentation.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend
        distribute(bool): data for distribute or not. Default: False

    Returns:
        dataset
    """
    # Resolve shard count / shard index for the current device.
    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    elif distribute:
        init()
        rank_id = get_rank()
        device_num = get_group_size()
    else:
        device_num = 1

    shard_kwargs = {} if device_num == 1 else {"num_shards": device_num, "shard_id": rank_id}
    data_set = ds.Cifar10Dataset(dataset_path, num_parallel_workers=8, shuffle=True, **shard_kwargs)

    # Augmentation (train only) followed by the common preprocessing chain.
    ops = []
    if do_train:
        ops.append(C.RandomCrop((32, 32), (4, 4, 4, 4)))
        ops.append(C.RandomHorizontalFlip(prob=0.5))
    ops.extend([
        C.Resize((224, 224)),
        C.Rescale(1.0 / 255.0, 0.0),
        C.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
        C.HWC2CHW()
    ])

    data_set = data_set.map(operations=C2.TypeCast(mstype.int32), input_columns="label", num_parallel_workers=8)
    data_set = data_set.map(operations=ops, input_columns="image", num_parallel_workers=8)

    # Batch, then repeat, matching the rest of this module.
    data_set = data_set.batch(batch_size, drop_remainder=True)
    return data_set.repeat(repeat_num)
+
+
def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
    """
    Build the ImageNet2012 pipeline for ResNet-50 training or evaluation.

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): True uses random-crop/flip, False uses center-crop.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend
        distribute(bool): data for distribute or not. Default: False

    Returns:
        dataset
    """
    # Resolve shard count / shard index for the current device.
    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    elif distribute:
        init()
        rank_id = get_rank()
        device_num = get_group_size()
    else:
        device_num = 1

    shard_kwargs = {} if device_num == 1 else {"num_shards": device_num, "shard_id": rank_id}
    data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True, **shard_kwargs)

    image_size = 224
    # Channel statistics expressed in the 0-255 pixel range.
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    # Decode + geometric ops differ between train and eval; the tail
    # (normalize + layout change) is shared.
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
        ]
    trans += [C.Normalize(mean=mean, std=std), C.HWC2CHW()]

    data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
    data_set = data_set.map(operations=C2.TypeCast(mstype.int32), input_columns="label", num_parallel_workers=8)

    # Batch, then repeat.
    data_set = data_set.batch(batch_size, drop_remainder=True)
    return data_set.repeat(repeat_num)
+
+
def create_dataset3(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
    """
    create a train or eval imagenet2012 dataset for resnet101
    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): whether dataset is used for train or eval.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend
        distribute(bool): data for distribute or not. Default: False

    Returns:
        dataset
    """
    # Resolve shard count / shard index for the current device.
    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    else:
        if distribute:
            init()
            rank_id = get_rank()
            device_num = get_group_size()
        else:
            device_num = 1
            # Unlike the other factories in this file, rank_id is set to 1
            # here; it feeds the flip probability below.
            rank_id = 1
    if device_num == 1:
        data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True)
    else:
        data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=8, shuffle=True,
                                         num_shards=device_num, shard_id=rank_id)
    image_size = 224
    # Channel means/stds in the 0-255 range; the resnet101 recipe uses
    # different statistics than create_dataset2.
    mean = [0.475 * 255, 0.451 * 255, 0.392 * 255]
    std = [0.275 * 255, 0.267 * 255, 0.278 * 255]

    # define map operations
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            # NOTE(review): flip probability is rank_id / (rank_id + 1), i.e.
            # 0 on Ascend rank 0 (no flipping there) but 0.5 in the
            # single-device non-Ascend path where rank_id == 1 -- confirm
            # this rank-dependent augmentation is intended.
            C.RandomHorizontalFlip(rank_id / (rank_id + 1)),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
            C.Normalize(mean=mean, std=std),
            C.HWC2CHW()
        ]

    type_cast_op = C2.TypeCast(mstype.int32)

    data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=8)
    data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=8)

    # apply batch operations
    data_set = data_set.batch(batch_size, drop_remainder=True)
    # apply dataset repeat operation
    data_set = data_set.repeat(repeat_num)

    return data_set
+
+
def create_dataset4(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend", distribute=False):
    """
    Build the ImageNet2012 pipeline for SE-ResNet-50 training or evaluation.

    Args:
        dataset_path(string): the path of dataset.
        do_train(bool): True uses random-crop/flip, False uses center-crop.
        repeat_num(int): the repeat times of dataset. Default: 1
        batch_size(int): the batch size of dataset. Default: 32
        target(str): the device target. Default: Ascend
        distribute(bool): data for distribute or not. Default: False

    Returns:
        dataset
    """
    # Resolve shard count / shard index for the current device.
    if target == "Ascend":
        device_num, rank_id = _get_rank_info()
    elif distribute:
        init()
        rank_id = get_rank()
        device_num = get_group_size()
    else:
        device_num = 1

    shard_kwargs = {} if device_num == 1 else {"num_shards": device_num, "shard_id": rank_id}
    data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True, **shard_kwargs)

    image_size = 224
    # SE-ResNet recipe: mean subtraction only, no scaling (std of 1.0).
    mean = [123.68, 116.78, 103.94]
    std = [1.0, 1.0, 1.0]

    # Train: random crop to 224; eval: resize 292 then center-crop 256.
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(292),
            C.CenterCrop(256),
        ]
    trans += [C.Normalize(mean=mean, std=std), C.HWC2CHW()]

    data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=12)
    data_set = data_set.map(operations=C2.TypeCast(mstype.int32), input_columns="label", num_parallel_workers=12)

    # Batch, then repeat.
    data_set = data_set.batch(batch_size, drop_remainder=True)
    return data_set.repeat(repeat_num)
+
+
def _get_rank_info():
    """
    Query distributed-run rank information.

    Returns:
        tuple(int, int): (rank_size, rank_id). When the RANK_SIZE environment
        variable is absent or not greater than 1, falls back to (1, 0)
        without touching the communication layer.
    """
    if int(os.environ.get("RANK_SIZE", 1)) > 1:
        return get_group_size(), get_rank()
    return 1, 0
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/eval.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/eval.py"
new file mode 100644
index 0000000000000000000000000000000000000000..a041ba9778f6500404ce17ee92aabca8f6c0319f
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/eval.py"
@@ -0,0 +1,95 @@
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""evaluate resnet."""
+import os
+import argparse
+from mindspore import context
+from mindspore.common import set_seed
+from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
+from mindspore.train.model import Model
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from src.CrossEntropySmooth import CrossEntropySmooth
+
# Command-line interface; parsed at module import time so the conditional
# imports below can select the matching network/config/dataset factory.
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--net', type=str, default=None, help='Resnet Model, either resnet18, '
                                                          'resnet50 or resnet101')
parser.add_argument('--dataset', type=str, default=None, help='Dataset, either cifar10 or imagenet2012')

parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--device_target', type=str, default='Ascend', choices=("Ascend", "GPU", "CPU"),
                    help="Device target, support Ascend, GPU and CPU.")
args_opt = parser.parse_args()

# Fix random seeds for reproducible evaluation.
set_seed(1)

# Select network constructor, config and dataset factory from the CLI flags.
if args_opt.net in ("resnet18", "resnet50"):
    if args_opt.net == "resnet18":
        from src.resnet import resnet18 as resnet
    if args_opt.net == "resnet50":
        from src.resnet import resnet50 as resnet
    # Any --dataset other than cifar10 falls through to the imagenet2012
    # config/pipeline here.
    if args_opt.dataset == "cifar10":
        from src.config import config1 as config
        from src.dataset import create_dataset1 as create_dataset
    else:
        from src.config import config2 as config
        from src.dataset import create_dataset2 as create_dataset

elif args_opt.net == "resnet101":
    from src.resnet import resnet101 as resnet
    from src.config import config3 as config
    from src.dataset import create_dataset3 as create_dataset
else:
    # Any other --net value (including the None default) selects se-resnet50.
    from src.resnet import se_resnet50 as resnet
    from src.config import config4 as config
    from src.dataset import create_dataset4 as create_dataset
+
if __name__ == '__main__':
    target = args_opt.device_target

    # init context
    context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
    if target == "Ascend":
        # Default to device 0 when DEVICE_ID is not exported:
        # int(os.getenv('DEVICE_ID')) would raise TypeError on None.
        device_id = int(os.getenv('DEVICE_ID', '0'))
        context.set_context(device_id=device_id)

    # create dataset (evaluation pipeline: do_train=False)
    dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, batch_size=config.batch_size,
                             target=target)

    # define net
    net = resnet(class_num=config.class_num)

    # load checkpoint into the network and freeze it for inference
    param_dict = load_checkpoint(args_opt.checkpoint_path)
    load_param_into_net(net, param_dict)
    net.set_train(False)

    # define loss: label smoothing only applies to the imagenet2012 configs
    if args_opt.dataset == "imagenet2012":
        if not config.use_label_smooth:
            config.label_smooth_factor = 0.0
        loss = CrossEntropySmooth(sparse=True, reduction='mean',
                                  smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
    else:
        loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

    # define model with top-1 / top-5 accuracy metrics
    model = Model(net, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})

    # eval model
    res = model.eval(dataset)
    print("result:", res, "ckpt=", args_opt.checkpoint_path)
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/eval_callback.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/eval_callback.py"
new file mode 100644
index 0000000000000000000000000000000000000000..3bf73d9b6f6d19dc27ce0fcc690db0bd5fe81235
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/eval_callback.py"
@@ -0,0 +1,90 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Evaluation callback when training"""
+
+import os
+import stat
+from mindspore import save_checkpoint
+from mindspore import log as logger
+from mindspore.train.callback import Callback
+
class EvalCallBack(Callback):
    """
    Evaluation callback used during training.

    Runs `eval_function` at the end of qualifying epochs and optionally keeps
    the single best checkpoint on disk (older best is deleted first).

    Args:
        eval_function (function): evaluation function; called with
            `eval_param_dict` and expected to return a scalar metric.
        eval_param_dict (dict): argument object forwarded to `eval_function`.
        interval (int): run evaluation every `interval` epochs. Default: 1.
        eval_start_epoch (int): first epoch at which evaluation may run. Default: 1.
        save_best_ckpt (bool): whether to save the best checkpoint. Default: True.
        besk_ckpt_name (str): file name of the best checkpoint (parameter name
            kept as-is, typo and all, for backward compatibility). Default: `best.ckpt`.
        metrics_name (str): metric name used in log messages. Default: `acc`.

    Returns:
        None

    Examples:
        >>> EvalCallBack(eval_function, eval_param_dict)
    """

    def __init__(self, eval_function, eval_param_dict, interval=1, eval_start_epoch=1, save_best_ckpt=True,
                 ckpt_directory="./", besk_ckpt_name="best.ckpt", metrics_name="acc"):
        super(EvalCallBack, self).__init__()
        self.eval_param_dict = eval_param_dict
        self.eval_function = eval_function
        self.eval_start_epoch = eval_start_epoch
        if interval < 1:
            raise ValueError("interval should >= 1.")
        self.interval = interval
        self.save_best_ckpt = save_best_ckpt
        # Best metric seen so far; 0 means any non-negative result updates it.
        self.best_res = 0
        self.best_epoch = 0
        if not os.path.isdir(ckpt_directory):
            os.makedirs(ckpt_directory)
        # "bast" is a historical typo kept for compatibility with callers.
        self.bast_ckpt_path = os.path.join(ckpt_directory, besk_ckpt_name)
        self.metrics_name = metrics_name

    def remove_ckpoint_file(self, file_name):
        """Remove the specified checkpoint file from this checkpoint manager and also from the directory."""
        try:
            # Clear a possible read-only flag before deleting.
            os.chmod(file_name, stat.S_IWRITE)
            os.remove(file_name)
        except OSError:
            logger.warning("OSError, failed to remove the older ckpt file %s.", file_name)
        except ValueError:
            logger.warning("ValueError, failed to remove the older ckpt file %s.", file_name)

    def epoch_end(self, run_context):
        """Callback when epoch end: evaluate and, if improved, refresh the best checkpoint."""
        cb_params = run_context.original_args()
        cur_epoch = cb_params.cur_epoch_num
        # Evaluate only from eval_start_epoch onward, every `interval` epochs.
        if cur_epoch >= self.eval_start_epoch and (cur_epoch - self.eval_start_epoch) % self.interval == 0:
            res = self.eval_function(self.eval_param_dict)
            print("epoch: {}, {}: {}".format(cur_epoch, self.metrics_name, res), flush=True)
            # >= means a tie also refreshes the stored checkpoint.
            if res >= self.best_res:
                self.best_res = res
                self.best_epoch = cur_epoch
                print("update best result: {}".format(res), flush=True)
                if self.save_best_ckpt:
                    # Delete the previous best before writing the new one.
                    if os.path.exists(self.bast_ckpt_path):
                        self.remove_ckpoint_file(self.bast_ckpt_path)
                    save_checkpoint(cb_params.train_network, self.bast_ckpt_path)
                    print("update best checkpoint at: {}".format(self.bast_ckpt_path), flush=True)

    def end(self, run_context):
        """Callback at the end of training: report the best metric and epoch."""
        print("End training, the best {0} is: {1}, the best {0} epoch is {2}".format(self.metrics_name,
                                                                                     self.best_res,
                                                                                     self.best_epoch), flush=True)
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/export.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/export.py"
new file mode 100644
index 0000000000000000000000000000000000000000..6ab49f40cbd9db442693e34112e54225ef75eb17
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/export.py"
@@ -0,0 +1,78 @@
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+#########export checkpoint file into air, onnx and mindir models##############
+python export.py
+"""
+import argparse
+import numpy as np
+
+from mindspore import Tensor, load_checkpoint, load_param_into_net, export, context
+
# Command-line interface for checkpoint export.
parser = argparse.ArgumentParser(description='resnet export')
parser.add_argument('--network_dataset', type=str, default='resnet50_cifar10', choices=['resnet18_cifar10',
                                                                                        'resnet18_imagenet2012',
                                                                                        'resnet50_cifar10',
                                                                                        'resnet50_imagenet2012',
                                                                                        'resnet101_imagenet2012',
                                                                                        "se-resnet50_imagenet2012"],
                    help='network and dataset name.')
parser.add_argument("--device_id", type=int, default=0, help="Device id")
parser.add_argument("--batch_size", type=int, default=1, help="batch size")
parser.add_argument("--ckpt_file", type=str, required=True, help="Checkpoint file path.")
parser.add_argument("--file_name", type=str, default="resnet", help="output file name.")
parser.add_argument('--width', type=int, default=224, help='input width')
parser.add_argument('--height', type=int, default=224, help='input height')
parser.add_argument("--file_format", type=str, choices=["AIR", "ONNX", "MINDIR"], default="AIR", help="file format")
parser.add_argument("--device_target", type=str, default="Ascend",
                    choices=["Ascend", "GPU", "CPU"], help="device target(default: Ascend)")
args = parser.parse_args()

# NOTE: the execution context is configured at import time, before the
# __main__ guard below runs.
context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
if args.device_target == "Ascend":
    context.set_context(device_id=args.device_id)
+
if __name__ == '__main__':

    # Select the (config, network constructor) pair matching the requested
    # network/dataset combination.
    if args.network_dataset == 'resnet18_cifar10':
        from src.config import config1 as config
        from src.resnet import resnet18 as resnet
    elif args.network_dataset == 'resnet18_imagenet2012':
        from src.config import config2 as config
        from src.resnet import resnet18 as resnet
    elif args.network_dataset == 'resnet50_cifar10':
        from src.config import config1 as config
        from src.resnet import resnet50 as resnet
    elif args.network_dataset == 'resnet50_imagenet2012':
        from src.config import config2 as config
        from src.resnet import resnet50 as resnet
    elif args.network_dataset == 'resnet101_imagenet2012':
        from src.config import config3 as config
        from src.resnet import resnet101 as resnet
    elif args.network_dataset == 'se-resnet50_imagenet2012':
        from src.config import config4 as config
        from src.resnet import se_resnet50 as resnet
    else:
        raise ValueError("network and dataset is not support.")

    net = resnet(config.class_num)

    # `assert` is stripped under `python -O`; validate explicitly instead
    # (argparse already enforces required=True, this is a defensive check).
    if args.ckpt_file is None:
        raise ValueError("checkpoint_path is None.")

    param_dict = load_checkpoint(args.ckpt_file)
    load_param_into_net(net, param_dict)

    # Export with a zero-filled NCHW dummy input of the requested size.
    input_arr = Tensor(np.zeros([args.batch_size, 3, args.height, args.width], np.float32))
    export(net, input_arr, file_name=args.file_name, file_format=args.file_format)
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/gpu_resnet_benchmark.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/gpu_resnet_benchmark.py"
new file mode 100644
index 0000000000000000000000000000000000000000..10ef312ca00aae66dc044221022fa748a09aae9b
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/gpu_resnet_benchmark.py"
@@ -0,0 +1,290 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train resnet."""
+import argparse
+import ast
+import time
+import numpy as np
+from mindspore import context
+from mindspore import Tensor
+from mindspore.nn.optim.momentum import Momentum
+from mindspore.train.model import Model
+from mindspore.context import ParallelMode
+from mindspore.train.callback import Callback, ModelCheckpoint, CheckpointConfig
+from mindspore.train.loss_scale_manager import FixedLossScaleManager
+from mindspore.communication.management import init, get_rank, get_group_size
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from mindspore.common import set_seed
+import mindspore.nn as nn
+import mindspore.common.initializer as weight_init
+import mindspore.dataset as ds
+import mindspore.dataset.vision.c_transforms as C
+from src.resnet_gpu_benchmark import resnet50 as resnet
+from src.CrossEntropySmooth import CrossEntropySmooth
+from src.momentum import Momentum as MomentumWeightDecay
+
# Command-line interface for the GPU benchmark. Note that the numeric
# options are declared as strings and converted with int() in train()/eval_().
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--batch_size', type=str, default="256", help='Batch_size: default 256.')
parser.add_argument('--epoch_size', type=str, default="2", help='Epoch_size: default 2')
parser.add_argument('--print_per_steps', type=str, default="20", help='Print loss and time per steps: default 20')
parser.add_argument('--run_distribute', type=ast.literal_eval, default=False, help='Run distribute')
parser.add_argument('--save_ckpt', type=ast.literal_eval, default=False, help='Save ckpt or not: default False')
parser.add_argument('--eval', type=ast.literal_eval, default=False, help='Eval ckpt : default False')
parser.add_argument('--dataset_path', type=str, default=None, help='Imagenet dataset path')
parser.add_argument('--ckpt_path', type=str, default="./", help='The path to save ckpt if save_ckpt is True;\
                    Or the ckpt model file when eval is True')
parser.add_argument('--mode', type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"], help='Execute mode')
parser.add_argument('--dtype', type=str, choices=["fp32", "fp16", "FP16", "FP32"], default="fp16", \
                    help='Compute data type fp32 or fp16: default fp16')
args_opt = parser.parse_args()

# Fix random seeds for reproducible benchmarking.
set_seed(1)
+
+
class MyTimeMonitor(Callback):
    """Per-step loss and throughput logger for the benchmark.

    In GRAPH mode, train() sinks `sink_size` dataset steps per framework
    "epoch", so the real epoch/step indices are reconstructed from that
    arithmetic; PYNATIVE mode uses the callback params directly.
    """
    def __init__(self, batch_size, sink_size, dataset_size, mode):
        super(MyTimeMonitor, self).__init__()
        self.batch_size = batch_size   # samples per dataset step
        self.size = sink_size          # dataset steps executed per sink call
        self.data_size = dataset_size  # dataset steps in one full epoch
        self.mode = mode               # context.GRAPH_MODE or context.PYNATIVE_MODE

    def step_begin(self, run_context):
        """Record wall-clock start of the (possibly sunk) step."""
        self.step_time = time.time()

    def step_end(self, run_context):
        """Extract the loss, rebuild epoch/step counters, print throughput."""
        cb_params = run_context.original_args()
        loss = cb_params.net_outputs

        # NOTE(review): net_outputs can be a tuple -- presumably
        # (loss, overflow, scale) when loss scaling is enabled; confirm.
        if isinstance(loss, (tuple, list)):
            if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):
                loss = loss[0]

        if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):
            loss = np.mean(loss.asnumpy())

        # Map sink-mode counters back to true dataset epochs/steps.
        cur_epoch_num = int(cb_params.cur_epoch_num / (self.data_size / self.size) +1)
        cur_step_in_epoch = int(self.size * (cb_params.cur_epoch_num % (self.data_size / self.size)))
        total_epochs = int((cb_params.epoch_num - 1) / (self.data_size / self.size) + 1)
        if self.mode == context.PYNATIVE_MODE:
            cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1
            cur_epoch_num = cb_params.cur_epoch_num
            total_epochs = cb_params.epoch_num

        # Abort immediately on divergence (NaN/Inf loss).
        if isinstance(loss, float) and (np.isnan(loss) or np.isinf(loss)):
            raise ValueError("epoch: {} step: {}. Invalid loss, terminating training.".format(
                cur_epoch_num, cur_step_in_epoch))
        step_mseconds = (time.time() - self.step_time) * 1000
        # Throughput: samples per second across the whole sink call.
        fps = self.batch_size / step_mseconds * 1000 * self.size
        print("epoch: [%s/%s] step: [%s/%s], loss is %s" % (cur_epoch_num, total_epochs,\
            cur_step_in_epoch, self.data_size, loss),\
            "Epoch time: {:5.3f} ms, fps: {:d} img/sec.".format(step_mseconds, int(fps)), flush=True)
+
+
def create_dataset(dataset_path, do_train, repeat_num=1, batch_size=32, target="GPU", dtype="fp16",
                   device_num=1):
    """Build the ImageNet pipeline used by the GPU benchmark.

    GRAPH mode uses larger worker pools; PYNATIVE mode keeps them small and
    enables NUMA binding. For fp16, NormalizePad is used instead of
    Normalize (output dtype float16 during training, float32 during eval).
    """
    graph_mode = args_opt.mode == "GRAPH"
    ds_workers = 4 if graph_mode else 2
    map_workers = 8 if graph_mode else 3
    batch_workers = None if graph_mode else 2
    if not graph_mode:
        ds.config.set_numa_enable(True)

    if device_num == 1:
        data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=ds_workers, shuffle=True)
    else:
        data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=ds_workers, shuffle=True,
                                         num_shards=device_num, shard_id=get_rank())

    image_size = 224
    # Channel statistics in the 0-255 pixel range.
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]

    if dtype == "fp16":
        out_dtype = "float32" if args_opt.eval else "float16"
        normalize_op = C.NormalizePad(mean=mean, std=std, dtype=out_dtype)
    else:
        normalize_op = C.Normalize(mean=mean, std=std)

    # Decode + geometric ops differ between train and eval.
    if do_train:
        trans = [
            C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            C.RandomHorizontalFlip(prob=0.5),
            normalize_op,
        ]
    else:
        trans = [
            C.Decode(),
            C.Resize(256),
            C.CenterCrop(image_size),
            normalize_op,
        ]
    # Only the fp32 path converts to CHW here.
    if dtype == "fp32":
        trans.append(C.HWC2CHW())

    data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=map_workers)
    data_set = data_set.batch(batch_size, drop_remainder=True, num_parallel_workers=batch_workers)
    if repeat_num > 1:
        data_set = data_set.repeat(repeat_num)

    return data_set
+
+
def get_liner_lr(lr_init, lr_end, lr_max, warmup_epochs, total_epochs, steps_per_epoch):
    """Generate a per-step linear-warmup / linear-decay schedule.

    Warms up from lr_init to lr_max over `warmup_epochs`, then decays
    linearly from lr_max down to lr_end over the remaining steps.

    Returns:
        numpy.ndarray of float32, length total_epochs * steps_per_epoch.
    """
    total_steps = steps_per_epoch * total_epochs
    warmup_steps = steps_per_epoch * warmup_epochs

    schedule = []
    for step in range(total_steps):
        if step < warmup_steps:
            value = lr_init + (lr_max - lr_init) * step / warmup_steps
        else:
            value = lr_max - (lr_max - lr_end) * (step - warmup_steps) / (total_steps - warmup_steps)
        schedule.append(value)
    return np.array(schedule).astype(np.float32)
+
+
def train():
    """Run the ResNet-50 GPU training benchmark configured by the CLI args."""
    # set args (numeric CLI options arrive as strings)
    dev = "GPU"
    epoch_size = int(args_opt.epoch_size)
    total_batch = int(args_opt.batch_size)
    print_per_steps = int(args_opt.print_per_steps)
    compute_type = str(args_opt.dtype).lower()
    ckpt_save_dir = str(args_opt.ckpt_path)
    save_ckpt = bool(args_opt.save_ckpt)
    device_num = 1
    # init context; the all-reduce fusion split points differ per execute mode
    if args_opt.mode == "GRAPH":
        mode = context.GRAPH_MODE
        all_reduce_fusion_config = [85, 160]
    else:
        mode = context.PYNATIVE_MODE
        all_reduce_fusion_config = [30, 90, 160]
    context.set_context(mode=mode, device_target=dev, save_graphs=False)
    if args_opt.run_distribute:
        init()
        device_num = get_group_size()
        context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
                                          gradients_mean=True, all_reduce_fusion_config=all_reduce_fusion_config)
        # one checkpoint sub-directory per rank
        ckpt_save_dir = ckpt_save_dir + "ckpt_" + str(get_rank()) + "/"

    # create dataset
    dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=True, repeat_num=1,
                             batch_size=total_batch, target=dev, dtype=compute_type, device_num=device_num)
    step_size = dataset.get_dataset_size()
    # clamp an out-of-range print interval back to the default
    if (print_per_steps > step_size or print_per_steps < 1):
        print("Arg: print_per_steps should lessequal to dataset_size ", step_size)
        print("Change to default: 20")
        print_per_steps = 20
    # define net
    net = resnet(class_num=1001, dtype=compute_type)

    # init weight: Xavier for convolutions, truncated normal for dense layers
    for _, cell in net.cells_and_names():
        if isinstance(cell, nn.Conv2d):
            cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
                                                         cell.weight.shape,
                                                         cell.weight.dtype))
        if isinstance(cell, nn.Dense):
            cell.weight.set_data(weight_init.initializer(weight_init.TruncatedNormal(),
                                                         cell.weight.shape,
                                                         cell.weight.dtype))

    # init lr: pure linear decay from 0.8 to 0, no warmup
    lr = get_liner_lr(lr_init=0, lr_end=0, lr_max=0.8, warmup_epochs=0, total_epochs=epoch_size,
                      steps_per_epoch=step_size)
    lr = Tensor(lr)

    # define opt
    # NOTE(review): decayed_params / no_decayed_params are built but never
    # passed to any optimizer below -- this split appears vestigial.
    decayed_params = []
    no_decayed_params = []
    for param in net.trainable_params():
        if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name:
            decayed_params.append(param)
        else:
            no_decayed_params.append(param)

    # define loss, model (fp32 path; replaced below for fp16)
    loss = CrossEntropySmooth(sparse=True, reduction='mean', smooth_factor=0.1, num_classes=1001)
    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, 0.9, 1e-4)
    loss_scale = FixedLossScaleManager(1024, drop_overflow_update=False)
    model = Model(net, loss_fn=loss, optimizer=opt, metrics={'acc'})
    # Mixed precision: for fp16 the optimizer and Model above are rebuilt
    # with loss scaling and O2 amp (batchnorm kept fp16 as well).
    if compute_type == "fp16":
        if mode == context.PYNATIVE_MODE:
            opt = MomentumWeightDecay(filter(lambda x: x.requires_grad, net.get_parameters()), lr, 0.9, 1e-4, 1024)
        else:
            opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, 0.9, 1e-4, 1024)
        model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics={'acc'},
                      amp_level="O2", keep_batchnorm_fp32=False)
    # define callbacks
    if mode == context.PYNATIVE_MODE:
        print_per_steps = 1
    time_cb = MyTimeMonitor(total_batch, print_per_steps, step_size, mode)
    cb = [time_cb]
    if save_ckpt:
        config_ck = CheckpointConfig(save_checkpoint_steps=5 * step_size, keep_checkpoint_max=5)
        ckpt_cb = ModelCheckpoint(prefix="resnet_benchmark", directory=ckpt_save_dir, config=config_ck)
        cb += [ckpt_cb]
    # train model
    print("========START RESNET50 GPU BENCHMARK========")
    if mode == context.GRAPH_MODE:
        # Sink print_per_steps dataset steps per framework epoch; the epoch
        # count is scaled up accordingly (MyTimeMonitor undoes this mapping).
        model.train(int(epoch_size * step_size / print_per_steps), dataset, callbacks=cb, sink_size=print_per_steps)
    else:
        model.train(epoch_size, dataset, callbacks=cb)
+
+
def eval_():
    """Evaluate a trained checkpoint on the benchmark eval pipeline (GPU)."""
    # Pull and convert CLI settings.
    device = "GPU"
    compute_type = str(args_opt.dtype).lower()
    checkpoint_path = str(args_opt.ckpt_path)
    batch = int(args_opt.batch_size)

    # Select the execution mode and configure the context.
    exec_mode = context.GRAPH_MODE if args_opt.mode == "GRAPH" else context.PYNATIVE_MODE
    context.set_context(mode=exec_mode, device_target=device, save_graphs=False)

    # Evaluation pipeline: no augmentation.
    eval_ds = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, repeat_num=1,
                             batch_size=batch, target=device, dtype=compute_type)

    # Network with weights restored from the checkpoint, frozen for inference.
    net = resnet(class_num=1001, dtype=compute_type)
    load_param_into_net(net, load_checkpoint(checkpoint_path))
    net.set_train(False)

    # Loss and model with top-1/top-5 accuracy metrics.
    loss = CrossEntropySmooth(sparse=True, reduction='mean', smooth_factor=0.1, num_classes=1001)
    model = Model(net, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})

    print("========START EVAL RESNET50 ON GPU ========")
    res = model.eval(eval_ds)
    print("result:", res, "ckpt=", checkpoint_path)
+
+
if __name__ == '__main__':
    # Dispatch: --eval True runs evaluation, otherwise training.
    entry = eval_ if args_opt.eval else train
    entry()
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/lr_generator.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/lr_generator.py"
new file mode 100644
index 0000000000000000000000000000000000000000..7768b10fc2fa59a5a48a873de9c341ed9b386ed8
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/lr_generator.py"
@@ -0,0 +1,240 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""learning rate generator"""
+import math
+import numpy as np
+
+
+def _generate_steps_lr(lr_init, lr_max, total_steps, warmup_steps):
+ """
+ Applies three steps decay to generate learning rate array.
+
+ Args:
+ lr_init(float): init learning rate.
+ lr_max(float): max learning rate.
+ total_steps(int): all steps in training.
+ warmup_steps(int): all steps in warmup epochs.
+
+ Returns:
+ np.array, learning rate array.
+ """
+ decay_epoch_index = [0.3 * total_steps, 0.6 * total_steps, 0.8 * total_steps]
+ lr_each_step = []
+ for i in range(total_steps):
+ if i < warmup_steps:
+ lr = lr_init + (lr_max - lr_init) * i / warmup_steps
+ else:
+ if i < decay_epoch_index[0]:
+ lr = lr_max
+ elif i < decay_epoch_index[1]:
+ lr = lr_max * 0.1
+ elif i < decay_epoch_index[2]:
+ lr = lr_max * 0.01
+ else:
+ lr = lr_max * 0.001
+ lr_each_step.append(lr)
+ return lr_each_step
+
+
+def _generate_poly_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps):
+ """
+ Applies polynomial decay to generate learning rate array.
+
+ Args:
+ lr_init(float): init learning rate.
+ lr_end(float): end learning rate
+ lr_max(float): max learning rate.
+ total_steps(int): all steps in training.
+ warmup_steps(int): all steps in warmup epochs.
+
+ Returns:
+ np.array, learning rate array.
+ """
+ lr_each_step = []
+ if warmup_steps != 0:
+ inc_each_step = (float(lr_max) - float(lr_init)) / float(warmup_steps)
+ else:
+ inc_each_step = 0
+ for i in range(total_steps):
+ if i < warmup_steps:
+ lr = float(lr_init) + inc_each_step * float(i)
+ else:
+ base = (1.0 - (float(i) - float(warmup_steps)) / (float(total_steps) - float(warmup_steps)))
+ lr = float(lr_max) * base * base
+ if lr < 0.0:
+ lr = 0.0
+ lr_each_step.append(lr)
+ return lr_each_step
+
+
def _generate_cosine_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps):
    """
    Build a linearly-damped cosine decay schedule with linear warmup.

    Args:
        lr_init(float): learning rate at the start of warmup.
        lr_end(float): end learning rate (kept for interface parity; not used
            by this decay mode).
        lr_max(float): peak learning rate reached after warmup.
        total_steps(int): total number of training steps.
        warmup_steps(int): number of warmup steps.

    Returns:
        list, per-step learning rates of length ``total_steps``.
    """
    decay_steps = total_steps - warmup_steps
    # Warmup slope; guarded so warmup_steps == 0 does not divide by zero.
    ramp = (float(lr_max) - float(lr_init)) / float(warmup_steps) if warmup_steps else 0.0
    schedule = []
    for step in range(total_steps):
        if step < warmup_steps:
            rate = float(lr_init) + ramp * (step + 1)
        else:
            linear_part = (total_steps - step) / decay_steps
            cosine_part = 0.5 * (1 + math.cos(math.pi * 2 * 0.47 * step / decay_steps))
            # Small additive floor keeps the final rate slightly above zero.
            rate = lr_max * (linear_part * cosine_part + 0.00001)
        schedule.append(rate)
    return schedule
+
+
+def _generate_liner_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps):
+ """
+ Applies liner decay to generate learning rate array.
+
+ Args:
+ lr_init(float): init learning rate.
+ lr_end(float): end learning rate
+ lr_max(float): max learning rate.
+ total_steps(int): all steps in training.
+ warmup_steps(int): all steps in warmup epochs.
+
+ Returns:
+ np.array, learning rate array.
+ """
+ lr_each_step = []
+ for i in range(total_steps):
+ if i < warmup_steps:
+ lr = lr_init + (lr_max - lr_init) * i / warmup_steps
+ else:
+ lr = lr_max - (lr_max - lr_end) * (i - warmup_steps) / (total_steps - warmup_steps)
+ lr_each_step.append(lr)
+ return lr_each_step
+
+
+
def get_lr(lr_init, lr_end, lr_max, warmup_epochs, total_epochs, steps_per_epoch, lr_decay_mode):
    """
    Generate the full per-step learning rate array for training.

    Args:
        lr_init(float): learning rate at the start of warmup.
        lr_end(float): end learning rate (used by 'liner' mode).
        lr_max(float): peak learning rate.
        warmup_epochs(int): number of warmup epochs.
        total_epochs(int): total epochs of training.
        steps_per_epoch(int): steps in one epoch.
        lr_decay_mode(string): one of 'steps', 'poly', 'cosine'; any other
            value falls back to linear decay.

    Returns:
        np.array, float32 learning rate array of length
        ``steps_per_epoch * total_epochs``.
    """
    total_steps = steps_per_epoch * total_epochs
    warmup_steps = steps_per_epoch * warmup_epochs

    if lr_decay_mode == 'steps':
        schedule = _generate_steps_lr(lr_init, lr_max, total_steps, warmup_steps)
    elif lr_decay_mode == 'poly':
        schedule = _generate_poly_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps)
    elif lr_decay_mode == 'cosine':
        schedule = _generate_cosine_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps)
    else:
        schedule = _generate_liner_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps)

    return np.array(schedule).astype(np.float32)
+
+
def linear_warmup_lr(current_step, warmup_steps, base_lr, init_lr):
    """Linearly interpolate from ``init_lr`` toward ``base_lr`` over ``warmup_steps``."""
    slope = (float(base_lr) - float(init_lr)) / float(warmup_steps)
    return float(init_lr) + slope * current_step
+
+
+def warmup_cosine_annealing_lr(lr, steps_per_epoch, warmup_epochs, max_epoch=120, global_step=0):
+ """
+ generate learning rate array with cosine
+
+ Args:
+ lr(float): base learning rate
+ steps_per_epoch(int): steps size of one epoch
+ warmup_epochs(int): number of warmup epochs
+ max_epoch(int): total epochs of training
+ global_step(int): the current start index of lr array
+ Returns:
+ np.array, learning rate array
+ """
+ base_lr = lr
+ warmup_init_lr = 0
+ total_steps = int(max_epoch * steps_per_epoch)
+ warmup_steps = int(warmup_epochs * steps_per_epoch)
+ decay_steps = total_steps - warmup_steps
+
+ lr_each_step = []
+ for i in range(total_steps):
+ if i < warmup_steps:
+ lr = linear_warmup_lr(i + 1, warmup_steps, base_lr, warmup_init_lr)
+ else:
+ linear_decay = (total_steps - i) / decay_steps
+ cosine_decay = 0.5 * (1 + math.cos(math.pi * 2 * 0.47 * i / decay_steps))
+ decayed = linear_decay * cosine_decay + 0.00001
+ lr = base_lr * decayed
+ lr_each_step.append(lr)
+
+ lr_each_step = np.array(lr_each_step).astype(np.float32)
+ learning_rate = lr_each_step[global_step:]
+ return learning_rate
+
+
+def get_thor_lr(global_step, lr_init, decay, total_epochs, steps_per_epoch, decay_epochs=100):
+ """get_model_lr"""
+ lr_each_step = []
+ total_steps = steps_per_epoch * total_epochs
+ for i in range(total_steps):
+ epoch = (i + 1) / steps_per_epoch
+ base = (1.0 - float(epoch) / total_epochs) ** decay
+ lr_local = lr_init * base
+ if epoch >= decay_epochs:
+ lr_local = lr_local * 0.5
+ if epoch >= decay_epochs + 1:
+ lr_local = lr_local * 0.5
+ lr_each_step.append(lr_local)
+ current_step = global_step
+ lr_each_step = np.array(lr_each_step).astype(np.float32)
+ learning_rate = lr_each_step[current_step:]
+ return learning_rate
+
+
+def get_thor_damping(global_step, damping_init, decay_rate, total_epochs, steps_per_epoch):
+ """get_model_damping"""
+ damping_each_step = []
+ total_steps = steps_per_epoch * total_epochs
+ for step in range(total_steps):
+ epoch = (step + 1) / steps_per_epoch
+ damping_here = damping_init * (decay_rate ** (epoch / 10))
+ damping_each_step.append(damping_here)
+ current_step = global_step
+ damping_each_step = np.array(damping_each_step).astype(np.float32)
+ damping_now = damping_each_step[current_step:]
+ return damping_now
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/metric.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/metric.py"
new file mode 100644
index 0000000000000000000000000000000000000000..d9d574f3aaf9533bb28445844e739f2a935356bb
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/metric.py"
@@ -0,0 +1,132 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""evaluation metric."""
+
+from mindspore.communication.management import GlobalComm
+from mindspore.ops import operations as P
+import mindspore.nn as nn
+import mindspore.common.dtype as mstype
+
+
class ClassifyCorrectCell(nn.Cell):
    r"""
    Cell that returns the correct count of the predictions in a classification network.
    This Cell accepts a network as arguments.
    It returns the correct count of the predictions to calculate the metrics.

    Args:
        network (Cell): The network Cell.

    Inputs:
        - **data** (Tensor) - Tensor of shape :math:`(N, \ldots)`.
        - **label** (Tensor) - Tensor of shape :math:`(N, \ldots)`.

    Outputs:
        Tuple, containing a scalar correct count of the prediction

    Examples:
        >>> # For a defined network Net without loss function
        >>> net = Net()
        >>> eval_net = nn.ClassifyCorrectCell(net)
    """

    def __init__(self, network):
        super(ClassifyCorrectCell, self).__init__(auto_prefix=False)
        self._network = network
        self.argmax = P.Argmax()
        self.equal = P.Equal()
        self.cast = P.Cast()
        self.reduce_sum = P.ReduceSum()
        # Sums the per-device correct counts over the whole communication group.
        self.allreduce = P.AllReduce(P.ReduceOp.SUM, GlobalComm.WORLD_COMM_GROUP)

    def construct(self, data, label):
        """Count predictions matching ``label``, summed across all devices."""
        outputs = self._network(data)
        # Predicted class id = argmax over the logits axis.
        y_pred = self.argmax(outputs)
        y_pred = self.cast(y_pred, mstype.int32)
        y_correct = self.equal(y_pred, label)
        # bool -> float so the matches can be summed and allreduced.
        y_correct = self.cast(y_correct, mstype.float32)
        y_correct = self.reduce_sum(y_correct)
        total_correct = self.allreduce(y_correct)
        return (total_correct,)
+
+
class DistAccuracy(nn.Metric):
    r"""
    Computes classification accuracy in distributed mode.

    Keeps a running count of correct predictions and of processed samples;
    ``eval`` returns their ratio.

    .. math::

        \text{accuracy} =\frac{\text{true_positive} + \text{true_negative}}

        {\text{true_positive} + \text{true_negative} + \text{false_positive} + \text{false_negative}}

    Args:
        batch_size (int): per-device batch size.
        device_num (int): number of devices contributing to each update.

    Examples:
        >>> y_correct = Tensor(np.array([20]))
        >>> metric = nn.DistAccuracy(batch_size=3, device_num=8)
        >>> metric.clear()
        >>> metric.update(y_correct)
        >>> accuracy = metric.eval()
    """

    def __init__(self, batch_size, device_num):
        super(DistAccuracy, self).__init__()
        self.batch_size = batch_size
        self.device_num = device_num
        self.clear()

    def clear(self):
        """Reset the running correct/total counters."""
        self._correct_num = 0
        self._total_num = 0

    def update(self, *inputs):
        """
        Accumulate one evaluation step.

        Args:
            inputs: a single scalar Tensor ``y_correct`` — the number of
                correct predictions, already summed over all devices.

        Raises:
            ValueError: if the number of inputs is not 1.
        """
        if len(inputs) != 1:
            raise ValueError('Distribute accuracy needs 1 input (y_correct), but got {}'.format(len(inputs)))
        self._correct_num += self._convert_data(inputs[0])
        # Every update covers one batch on each participating device.
        self._total_num += self.batch_size * self.device_num

    def eval(self):
        """
        Return the accuracy accumulated so far.

        Returns:
            Float, the computed result.

        Raises:
            RuntimeError: if no samples have been seen yet.
        """
        if not self._total_num:
            raise RuntimeError('Accuracy can not be calculated, because the number of samples is 0.')
        return self._correct_num / self._total_num
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/mindspore_hub_conf.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/mindspore_hub_conf.py"
new file mode 100644
index 0000000000000000000000000000000000000000..3db760e0415b33ef768328f89c1e8e258e18fd71
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/mindspore_hub_conf.py"
@@ -0,0 +1,25 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""hub config."""
+from src.resnet import resnet50, resnet101, se_resnet50
+
def create_network(name, *args, **kwargs):
    """Build a network for MindSpore Hub by name; raises NotImplementedError for unknown names."""
    if name == 'resnet50':
        builder = resnet50
    elif name == 'resnet101':
        builder = resnet101
    elif name == 'se_resnet50':
        builder = se_resnet50
    else:
        raise NotImplementedError(f"{name} is not implemented in the repo")
    return builder(*args, **kwargs)
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/momentum.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/momentum.py"
new file mode 100644
index 0000000000000000000000000000000000000000..5ba140a7f95e4f9cbfc00b92e986ff6040ac3e29
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/momentum.py"
@@ -0,0 +1,152 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""momentum"""
+from mindspore.ops import functional as F, composite as C, operations as P
+from mindspore.common.parameter import Parameter
+from mindspore.common.tensor import Tensor
+import mindspore.common.dtype as mstype
+from mindspore._checkparam import Validator
+from mindspore.nn.optim.optimizer import Optimizer
+
# Dispatch graph: implementations are registered per input-type signature.
_momentum_opt = C.MultitypeFuncGraph("momentum_opt")


@_momentum_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt_ext(opt, weight_decay, scale, momentum, learning_rate, gradient, weight, moment):
    """Apply momentum optimizer to the weight parameter using Tensor."""
    # F.depend makes the in-place optimizer update an explicit dependency of the
    # returned value so graph compilation cannot prune it away.
    success = F.depend(True, opt(weight_decay, scale, weight, moment, learning_rate, gradient, momentum))
    return success
+
+
class Momentum(Optimizer):
    r"""
    Implements the Momentum algorithm.

    Refer to the paper on the importance of initialization and momentum in deep learning for more details.

    .. math::
        v_{t} = v_{t-1} \ast u + gradients

    If use_nesterov is True:

    .. math::
        p_{t} = p_{t-1} - (grad \ast lr + v_{t} \ast u \ast lr)

    If use_nesterov is False:

    .. math::
        p_{t} = p_{t-1} - lr \ast v_{t}

    Here: where grad, lr, p, v and u denote the gradients, learning_rate, params, moments, and momentum respectively.

    Note:
        When separating parameter groups, the weight decay in each group will be applied on the parameters if the
        weight decay is positive. When not separating parameter groups, the `weight_decay` in the API will be applied
        on the parameters without 'beta' or 'gamma' in their names if `weight_decay` is positive.

        To improve parameter groups performance, the customized order of parameters can be supported.

    Args:
        params (Union[list[Parameter], list[dict]]): When the `params` is a list of `Parameter` which will be updated,
            the element in `params` must be class `Parameter`. When the `params` is a list of `dict`, the "params",
            "lr", "weight_decay" and "order_params" are the keys can be parsed.

            - params: Required. The value must be a list of `Parameter`.

            - lr: Optional. If "lr" in the keys, the value of corresponding learning rate will be used.
              If not, the `learning_rate` in the API will be used.

            - weight_decay: Optional. If "weight_decay" in the keys, the value of corresponding weight decay
              will be used. If not, the `weight_decay` in the API will be used.

            - order_params: Optional. If "order_params" in the keys, the value must be the order of parameters and
              the order will be followed in optimizer. There are no other keys in the `dict` and the parameters which
              in the value of 'order_params' must be in one of group parameters.

        learning_rate (Union[float, Tensor, Iterable, LearningRateSchedule]): A value or a graph for the learning rate.
            When the learning_rate is an Iterable or a Tensor in a 1D dimension, use dynamic learning rate, then
            the i-th step will take the i-th value as the learning rate. When the learning_rate is LearningRateSchedule,
            use dynamic learning rate, the i-th learning rate will be calculated during the process of training
            according to the formula of LearningRateSchedule. When the learning_rate is a float or a Tensor in a zero
            dimension, use fixed learning rate. Other cases are not supported. The float learning rate must be
            equal to or greater than 0. If the type of `learning_rate` is int, it will be converted to float.
        momentum (float): Hyperparameter of type float, means momentum for the moving average.
            It must be at least 0.0.
        weight_decay (int, float): Weight decay (L2 penalty). It must be equal to or greater than 0.0. Default: 0.0.
        loss_scale (int, float): A floating point value for the loss scale. It must be greater than 0.0. Default: 1.0.
        use_nesterov (bool): Enable Nesterov momentum. Default: False.

    Inputs:
        - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.

    Outputs:
        tuple[bool], all elements are True.

    Raises:
        ValueError: If the momentum is less than 0.0.
        TypeError: If the momentum is not a float or use_nesterov is not a bool.

    Supported Platforms:
        ``GPU``

    Examples:
        >>> net = Net()
        >>> #1) All parameters use the same learning rate and weight decay
        >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
        >>>
        >>> #2) Use parameter groups and set different values
        >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
        >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
        >>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
        ...                 {'params': no_conv_params, 'lr': 0.01},
        ...                 {'order_params': net.trainable_params()}]
        >>> optim = Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)
        >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
        >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
        >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
        >>>
        >>> loss = nn.SoftmaxCrossEntropyWithLogits()
        >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
    """
    def __init__(self, params, learning_rate, momentum, weight_decay=0.0, loss_scale=1.0, use_nesterov=False):
        super(Momentum, self).__init__(learning_rate, params, weight_decay, loss_scale)
        Validator.check_value_type("momentum", momentum, [float], self.cls_name)
        if isinstance(momentum, float) and momentum < 0.0:
            raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))
        self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")
        self.params = self.parameters
        self.use_nesterov = Validator.check_bool(use_nesterov)
        # One moment buffer per parameter, zero-initialized.
        self.moments = self.params.clone(prefix="moments", init='zeros')
        self.hyper_map = C.HyperMap()
        # Use FusedWeightScaleApplyMomentum to avoid extra kernel launch.
        self.opt = P.FusedWeightScaleApplyMomentum()

    def construct(self, gradients):
        """Apply one fused momentum update to every parameter; returns tuple of True."""
        params = self.params
        moments = self.moments
        # Neutral defaults; overridden below only when the optimizer was
        # configured with weight decay / loss scaling.
        weight_decay = Tensor(0.0, mstype.float32)
        scale = Tensor(1.0, mstype.float32)
        if self.exec_weight_decay:
            weight_decay = self.weight_decay_tensor
        if self.need_scale:
            scale = self.reciprocal_scale
        lr = self.get_lr()
        if self.is_group_lr:
            # Per-group lr: lr is a tuple mapped alongside each parameter.
            success = self.hyper_map(F.partial(_momentum_opt, self.opt, weight_decay, scale, self.momentum),
                                     lr, gradients, params, moments)
        else:
            # Single lr: bind it into the partial so only tensors are mapped.
            success = self.hyper_map(F.partial(_momentum_opt, self.opt, weight_decay, scale, self.momentum, lr),
                                     gradients, params, moments)
        return success
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/postprocess.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/postprocess.py"
new file mode 100644
index 0000000000000000000000000000000000000000..a2e14686398f14930a5e278a97598c272ab8afc9
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/postprocess.py"
@@ -0,0 +1,51 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""post process for 310 inference"""
+import os
+import json
+import argparse
+import numpy as np
+from src.config import config2 as config
+
# One image per exported result file (310 inference exports batch size 1).
batch_size = 1
parser = argparse.ArgumentParser(description="resnet inference")
parser.add_argument("--result_path", type=str, required=True, help="result files path.")
parser.add_argument("--label_path", type=str, required=True, help="image file path.")
# Parsed at import time; both paths are mandatory on the command line.
args = parser.parse_args()
+
+
def get_result(result_path, label_path):
    """
    Compute top-1 / top-5 accuracy of exported 310-inference results.

    Each result file ``<image_id>_0.bin`` holds the raw float32 logits for one
    image; ``label_path`` is a JSON map from ``<image_id>.JPEG`` to the
    ground-truth class index.

    Args:
        result_path (str): directory containing the ``*_0.bin`` logit files.
        label_path (str): path to the JSON label file.
    """
    files = os.listdir(result_path)
    with open(label_path, "r") as label:
        labels = json.load(label)

    top1 = 0
    top5 = 0
    total_data = len(files)
    if total_data == 0:
        # Guard: an empty result directory would otherwise raise
        # ZeroDivisionError in the accuracy computation below.
        print(f"No result files found in {result_path}.")
        return
    for file in files:
        img_ids_name = file.split('_0.')[0]
        data_path = os.path.join(result_path, img_ids_name + "_0.bin")
        result = np.fromfile(data_path, dtype=np.float32).reshape(batch_size, config.class_num)
        for batch in range(batch_size):
            # Class indices ordered by descending logit value.
            predict = np.argsort(-result[batch], axis=-1)
            if labels[img_ids_name+".JPEG"] == predict[0]:
                top1 += 1
            if labels[img_ids_name+".JPEG"] in predict[:5]:
                top5 += 1
    print(f"Total data: {total_data}, top1 accuracy: {top1/total_data}, top5 accuracy: {top5/total_data}.")
+
+
if __name__ == '__main__':
    # Entry point: score exported inference results against the label file.
    get_result(args.result_path, args.label_path)
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/resnet.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/resnet.py"
new file mode 100644
index 0000000000000000000000000000000000000000..08bc5dba1fb3ba28253c574eb5b03f9dc8cc7f59
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/resnet.py"
@@ -0,0 +1,573 @@
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""ResNet."""
+import math
+import numpy as np
+import mindspore.nn as nn
+import mindspore.common.dtype as mstype
+from mindspore.ops import operations as P
+from mindspore.ops import functional as F
+from mindspore.common.tensor import Tensor
+from scipy.stats import truncnorm
+
+
def _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):
    """Truncated-normal conv weight init with fan-in variance scaling."""
    fan_in = in_channel * kernel_size * kernel_size
    scale = 1.0 / max(1., fan_in)
    # Divide by the std of a [-2, 2] truncated normal so samples keep the target variance.
    stddev = (scale ** 0.5) / .87962566103423978
    count = out_channel * in_channel * kernel_size * kernel_size
    samples = truncnorm(-2, 2, loc=0, scale=stddev).rvs(count)
    samples = np.reshape(samples, (out_channel, in_channel, kernel_size, kernel_size))
    return Tensor(samples, dtype=mstype.float32)
+
+
def _weight_variable(shape, factor=0.01):
    """Small random-normal float32 weight tensor scaled by ``factor``."""
    values = factor * np.random.randn(*shape).astype(np.float32)
    return Tensor(values)
+
+
def calculate_gain(nonlinearity, param=None):
    """
    Return the recommended init gain for the given nonlinearity.

    Args:
        nonlinearity (str): 'linear', one of the conv aliases, 'sigmoid',
            'tanh', 'relu' or 'leaky_relu'.
        param (int, float, optional): negative slope for 'leaky_relu'
            (defaults to 0.01; booleans are rejected).

    Returns:
        float, the gain value.

    Raises:
        ValueError: for an unknown nonlinearity or an invalid slope.
    """
    identity_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
    if nonlinearity in identity_fns or nonlinearity == 'sigmoid':
        return 1
    if nonlinearity == 'tanh':
        return 5.0 / 3
    if nonlinearity == 'relu':
        return math.sqrt(2.0)
    if nonlinearity == 'leaky_relu':
        if param is None:
            negative_slope = 0.01
        elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float):
            # True/False are instances of int, hence the bool check above
            negative_slope = param
        else:
            raise ValueError("negative_slope {} not a valid number".format(param))
        return math.sqrt(2.0 / (1 + negative_slope ** 2))
    raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
+
+
+def _calculate_fan_in_and_fan_out(tensor):
+ """_calculate_fan_in_and_fan_out"""
+ dimensions = len(tensor)
+ if dimensions < 2:
+ raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
+ if dimensions == 2: # Linear
+ fan_in = tensor[1]
+ fan_out = tensor[0]
+ else:
+ num_input_fmaps = tensor[1]
+ num_output_fmaps = tensor[0]
+ receptive_field_size = 1
+ if dimensions > 2:
+ receptive_field_size = tensor[2] * tensor[3]
+ fan_in = num_input_fmaps * receptive_field_size
+ fan_out = num_output_fmaps * receptive_field_size
+ return fan_in, fan_out
+
+
def _calculate_correct_fan(tensor, mode):
    """Select fan_in or fan_out from ``tensor``'s shape according to ``mode``."""
    mode = mode.lower()
    valid_modes = ['fan_in', 'fan_out']
    if mode not in valid_modes:
        raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes))
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        return fan_in
    return fan_out
+
+
def kaiming_normal(inputs_shape, a=0, mode='fan_in', nonlinearity='leaky_relu'):
    """He-normal init: N(0, gain / sqrt(fan)) float32 samples of ``inputs_shape``."""
    std = calculate_gain(nonlinearity, a) / math.sqrt(_calculate_correct_fan(inputs_shape, mode))
    return np.random.normal(0, std, size=inputs_shape).astype(np.float32)
+
+
def kaiming_uniform(inputs_shape, a=0., mode='fan_in', nonlinearity='leaky_relu'):
    """He-uniform init: U(-b, b) with b = sqrt(3) * gain / sqrt(fan), float32."""
    std = calculate_gain(nonlinearity, a) / math.sqrt(_calculate_correct_fan(inputs_shape, mode))
    bound = math.sqrt(3.0) * std  # uniform bound with matching variance
    return np.random.uniform(-bound, bound, size=inputs_shape).astype(np.float32)
+
+
def _conv3x3(in_channel, out_channel, stride=1, use_se=False, res_base=False):
    """3x3 convolution with project weight init.

    ``res_base`` selects explicit padding=1 ('pad' mode); otherwise 'same'.
    """
    if use_se:
        weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=3)
    else:
        weight = Tensor(kaiming_normal((out_channel, in_channel, 3, 3), mode="fan_out", nonlinearity='relu'))
    pad_mode, padding = ('pad', 1) if res_base else ('same', 0)
    return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride,
                     padding=padding, pad_mode=pad_mode, weight_init=weight)
+
+
def _conv1x1(in_channel, out_channel, stride=1, use_se=False, res_base=False):
    """1x1 convolution with project weight init; padding is 0 in both modes."""
    if use_se:
        weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=1)
    else:
        weight = Tensor(kaiming_normal((out_channel, in_channel, 1, 1), mode="fan_out", nonlinearity='relu'))
    pad_mode = 'pad' if res_base else 'same'
    return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride,
                     padding=0, pad_mode=pad_mode, weight_init=weight)
+
+
def _conv7x7(in_channel, out_channel, stride=1, use_se=False, res_base=False):
    """Build the 7x7 stem Conv2d with the network's initializer scheme.

    ``res_base`` selects explicit padding=3 (ResNet18-style) instead of
    'same' padding.
    """
    if use_se:
        weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=7)
    else:
        weight = Tensor(kaiming_normal((out_channel, in_channel, 7, 7),
                                       mode="fan_out", nonlinearity='relu'))
    pad_mode, padding = ('pad', 3) if res_base else ('same', 0)
    return nn.Conv2d(in_channel, out_channel, kernel_size=7, stride=stride,
                     padding=padding, pad_mode=pad_mode, weight_init=weight)
+
+
def _bn(channel, res_base=False):
    """BatchNorm2d with identity init.

    ResNet18 (``res_base``) uses the common eps=1e-5 / momentum=0.1 setting;
    the ResNet50/101 configuration uses eps=1e-4 / momentum=0.9.
    """
    eps, momentum = (1e-5, 0.1) if res_base else (1e-4, 0.9)
    return nn.BatchNorm2d(channel, eps=eps, momentum=momentum,
                          gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)
+
+
def _bn_last(channel):
    """BatchNorm2d for the last BN of a residual branch.

    gamma is initialized to 0 so each residual block initially behaves like
    an identity mapping, which helps early training.
    """
    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,
                          gamma_init=0, beta_init=0, moving_mean_init=0, moving_var_init=1)
+
+
def _fc(in_channel, out_channel, use_se=False):
    """Fully-connected head with the network's initializer scheme.

    SE variants use a small normal(0, 0.01) init; everything else uses the
    Kaiming-uniform init with a=sqrt(5) (the PyTorch Linear default).
    """
    shape = (out_channel, in_channel)
    if use_se:
        values = np.random.normal(loc=0, scale=0.01, size=out_channel * in_channel)
        weight = Tensor(np.reshape(values, shape), dtype=mstype.float32)
    else:
        weight = Tensor(kaiming_uniform(shape, a=math.sqrt(5)))
    return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)
+
+
class ResidualBlock(nn.Cell):
    """
    ResNet V1 bottleneck residual block (1x1 reduce -> 3x3 -> 1x1 expand).

    Args:
        in_channel (int): Input channel.
        out_channel (int): Output channel.
        stride (int): Stride size for the first convolutional layer. Default: 1.
        use_se (bool): Enable SE-ResNet50 net. Default: False.
        se_block(bool): Use se block in SE-ResNet50 net. Default: False.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ResidualBlock(3, 256, stride=2)
    """
    # Bottleneck expansion factor: internal channel = out_channel // 4.
    expansion = 4

    def __init__(self,
                 in_channel,
                 out_channel,
                 stride=1,
                 use_se=False, se_block=False):
        super(ResidualBlock, self).__init__()
        self.stride = stride
        self.use_se = use_se
        self.se_block = se_block
        channel = out_channel // self.expansion
        self.conv1 = _conv1x1(in_channel, channel, stride=1, use_se=self.use_se)
        self.bn1 = _bn(channel)
        if self.use_se and self.stride != 1:
            # SE variant: downsample with a stride-1 conv followed by max-pool
            # instead of a strided 3x3 convolution.
            self.e2 = nn.SequentialCell([_conv3x3(channel, channel, stride=1, use_se=True), _bn(channel),
                                         nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')])
        else:
            self.conv2 = _conv3x3(channel, channel, stride=stride, use_se=self.use_se)
            self.bn2 = _bn(channel)

        self.conv3 = _conv1x1(channel, out_channel, stride=1, use_se=self.use_se)
        # gamma=0 init so the block starts as (near) identity.
        self.bn3 = _bn_last(out_channel)
        if self.se_block:
            # Squeeze-and-excitation: global pool -> bottleneck FC pair -> sigmoid gate.
            self.se_global_pool = P.ReduceMean(keep_dims=False)
            self.se_dense_0 = _fc(out_channel, int(out_channel / 4), use_se=self.use_se)
            self.se_dense_1 = _fc(int(out_channel / 4), out_channel, use_se=self.use_se)
            self.se_sigmoid = nn.Sigmoid()
            self.se_mul = P.Mul()
        self.relu = nn.ReLU()

        self.down_sample = False

        # A projection shortcut is needed when spatial size or channel count changes.
        if stride != 1 or in_channel != out_channel:
            self.down_sample = True
        self.down_sample_layer = None

        if self.down_sample:
            if self.use_se:
                if stride == 1:
                    self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel,
                                                                         stride, use_se=self.use_se), _bn(out_channel)])
                else:
                    # SE variant: pool first, then 1x1-project, to avoid a strided 1x1 conv.
                    self.down_sample_layer = nn.SequentialCell([nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same'),
                                                                _conv1x1(in_channel, out_channel, 1,
                                                                         use_se=self.use_se), _bn(out_channel)])
            else:
                self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,
                                                                     use_se=self.use_se), _bn(out_channel)])

    def construct(self, x):
        """Forward pass: bottleneck branch (+ optional SE gating) plus shortcut."""
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        if self.use_se and self.stride != 1:
            out = self.e2(out)
        else:
            out = self.conv2(out)
            out = self.bn2(out)
            out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.se_block:
            out_se = out
            # Squeeze over the spatial axes (2, 3), excite, then rescale channels.
            out = self.se_global_pool(out, (2, 3))
            out = self.se_dense_0(out)
            out = self.relu(out)
            out = self.se_dense_1(out)
            out = self.se_sigmoid(out)
            out = F.reshape(out, F.shape(out) + (1, 1))
            out = self.se_mul(out, out_se)

        if self.down_sample:
            identity = self.down_sample_layer(identity)

        out = out + identity
        out = self.relu(out)

        return out
+
+
class ResidualBlockBase(nn.Cell):
    """
    ResNet V1 basic residual block (two 3x3 convolutions), used by ResNet18.

    Args:
        in_channel (int): Input channel.
        out_channel (int): Output channel.
        stride (int): Stride size for the first convolutional layer. Default: 1.
        use_se (bool): Enable SE-ResNet50 net. Default: False.
        se_block(bool): Use se block in SE-ResNet50 net. Default: False.
        res_base (bool): Enable parameter setting of resnet18. Default: True.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ResidualBlockBase(3, 256, stride=2)
    """

    def __init__(self,
                 in_channel,
                 out_channel,
                 stride=1,
                 use_se=False,
                 se_block=False,
                 res_base=True):
        super(ResidualBlockBase, self).__init__()
        self.res_base = res_base
        self.conv1 = _conv3x3(in_channel, out_channel, stride=stride, res_base=self.res_base)
        # NOTE(review): bn1d/bn2d do not forward res_base, so they use the
        # eps=1e-4/momentum=0.9 setting while the downsample BN below gets the
        # res_base setting — looks inconsistent, confirm against upstream.
        self.bn1d = _bn(out_channel)
        self.conv2 = _conv3x3(out_channel, out_channel, stride=1, res_base=self.res_base)
        self.bn2d = _bn(out_channel)
        self.relu = nn.ReLU()

        # A projection shortcut is needed when spatial size or channel count changes.
        self.down_sample = False
        if stride != 1 or in_channel != out_channel:
            self.down_sample = True

        self.down_sample_layer = None
        if self.down_sample:
            self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,
                                                                 use_se=use_se, res_base=self.res_base),
                                                        _bn(out_channel, res_base)])

    def construct(self, x):
        """Forward pass: conv-bn-relu, conv-bn, add shortcut, relu."""
        identity = x

        out = self.conv1(x)
        out = self.bn1d(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2d(out)

        if self.down_sample:
            identity = self.down_sample_layer(identity)

        out = out + identity
        out = self.relu(out)

        return out
+
+
class ResNet(nn.Cell):
    """
    ResNet architecture (stem + 4 stages + global-average-pool + FC head).

    Args:
        block (Cell): Block for network.
        layer_nums (list): Numbers of block in different layers.
        in_channels (list): Input channel in each layer.
        out_channels (list): Output channel in each layer.
        strides (list): Stride size in each layer.
        num_classes (int): The number of classes that the training images are belonging to.
        use_se (bool): Enable SE-ResNet50 net. Default: False.
        se_block(bool): Use se block in SE-ResNet50 net in layer 3 and layer 4. Default: False.
        res_base (bool): Enable parameter setting of resnet18. Default: False.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ResNet(ResidualBlock,
        >>>        [3, 4, 6, 3],
        >>>        [64, 256, 512, 1024],
        >>>        [256, 512, 1024, 2048],
        >>>        [1, 2, 2, 2],
        >>>        10)
    """

    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 num_classes,
                 use_se=False,
                 res_base=False):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
        self.use_se = use_se
        self.res_base = res_base
        # SE blocks are only inserted in the last block of layer3/layer4.
        self.se_block = False
        if self.use_se:
            self.se_block = True

        if self.use_se:
            # SE variant replaces the 7x7 stem with three stacked 3x3 convs.
            self.conv1_0 = _conv3x3(3, 32, stride=2, use_se=self.use_se)
            self.bn1_0 = _bn(32)
            self.conv1_1 = _conv3x3(32, 32, stride=1, use_se=self.use_se)
            self.bn1_1 = _bn(32)
            self.conv1_2 = _conv3x3(32, 64, stride=1, use_se=self.use_se)
        else:
            self.conv1 = _conv7x7(3, 64, stride=2, res_base=self.res_base)
        self.bn1 = _bn(64, self.res_base)
        self.relu = P.ReLU()

        if self.res_base:
            # ResNet18 parameterization: explicit pad then a 'valid' max-pool.
            self.pad = nn.Pad(paddings=((0, 0), (0, 0), (1, 1), (1, 1)))
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid")
        else:
            self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0],
                                       use_se=self.use_se)
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1],
                                       use_se=self.use_se)
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2],
                                       use_se=self.use_se,
                                       se_block=self.se_block)
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3],
                                       use_se=self.use_se,
                                       se_block=self.se_block)

        # Global average pool over spatial dims, then the classification head.
        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = _fc(out_channels[3], num_classes, use_se=self.use_se)

    def _make_layer(self, block, layer_num, in_channel, out_channel, stride, use_se=False, se_block=False):
        """
        Make stage network of ResNet.

        Args:
            block (Cell): Resnet block.
            layer_num (int): Layer number.
            in_channel (int): Input channel.
            out_channel (int): Output channel.
            stride (int): Stride size for the first convolutional layer.
            se_block(bool): Use se block in SE-ResNet50 net. Default: False.
        Returns:
            SequentialCell, the output layer.

        Examples:
            >>> _make_layer(ResidualBlock, 3, 128, 256, 2)
        """
        layers = []

        # First block of a stage carries the stride / channel change.
        resnet_block = block(in_channel, out_channel, stride=stride, use_se=use_se)
        layers.append(resnet_block)
        if se_block:
            # With SE enabled, only the LAST block of the stage gets the SE module.
            for _ in range(1, layer_num - 1):
                resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)
                layers.append(resnet_block)
            resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se, se_block=se_block)
            layers.append(resnet_block)
        else:
            for _ in range(1, layer_num):
                resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se)
                layers.append(resnet_block)
        return nn.SequentialCell(layers)

    def construct(self, x):
        """Forward pass: stem -> maxpool -> 4 stages -> GAP -> FC logits."""
        if self.use_se:
            x = self.conv1_0(x)
            x = self.bn1_0(x)
            x = self.relu(x)
            x = self.conv1_1(x)
            x = self.bn1_1(x)
            x = self.relu(x)
            x = self.conv1_2(x)
        else:
            x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        if self.res_base:
            x = self.pad(x)
        c1 = self.maxpool(x)

        c2 = self.layer1(c1)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)

        # Reduce over spatial axes (2, 3) == global average pooling.
        out = self.mean(c5, (2, 3))
        out = self.flatten(out)
        out = self.end_point(out)

        return out
+
+
def resnet18(class_num=10):
    """
    Get ResNet18 neural network.

    Args:
        class_num (int): Class number. Default: 10.

    Returns:
        Cell, cell instance of ResNet18 neural network.

    Examples:
        >>> net = resnet18(10)
    """
    return ResNet(block=ResidualBlockBase,
                  layer_nums=[2, 2, 2, 2],
                  in_channels=[64, 64, 128, 256],
                  out_channels=[64, 128, 256, 512],
                  strides=[1, 2, 2, 2],
                  num_classes=class_num,
                  res_base=True)
+
+
def resnet50(class_num=10):
    """
    Get ResNet50 neural network.

    Args:
        class_num (int): Class number. Default: 10.

    Returns:
        Cell, cell instance of ResNet50 neural network.

    Examples:
        >>> net = resnet50(10)
    """
    return ResNet(block=ResidualBlock,
                  layer_nums=[3, 4, 6, 3],
                  in_channels=[64, 256, 512, 1024],
                  out_channels=[256, 512, 1024, 2048],
                  strides=[1, 2, 2, 2],
                  num_classes=class_num)
+
+
def se_resnet50(class_num=1001):
    """
    Get SE-ResNet50 neural network.

    Args:
        class_num (int): Class number. Default: 1001.

    Returns:
        Cell, cell instance of SE-ResNet50 neural network.

    Examples:
        >>> net = se_resnet50(1001)
    """
    # use_se=True switches on the 3x3-conv stem and SE gating in layer3/layer4.
    return ResNet(ResidualBlock,
                  [3, 4, 6, 3],
                  [64, 256, 512, 1024],
                  [256, 512, 1024, 2048],
                  [1, 2, 2, 2],
                  class_num,
                  use_se=True)
+
+
def resnet101(class_num=1001):
    """
    Get ResNet101 neural network.

    Args:
        class_num (int): Class number. Default: 1001.

    Returns:
        Cell, cell instance of ResNet101 neural network.

    Examples:
        >>> net = resnet101(1001)
    """
    return ResNet(block=ResidualBlock,
                  layer_nums=[3, 4, 23, 3],
                  in_channels=[64, 256, 512, 1024],
                  out_channels=[256, 512, 1024, 2048],
                  strides=[1, 2, 2, 2],
                  num_classes=class_num)
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/resnet_gpu_benchmark.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/resnet_gpu_benchmark.py"
new file mode 100644
index 0000000000000000000000000000000000000000..65e65bfeed79fd16ddcc016e6a56c09fa23ef8f2
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/resnet_gpu_benchmark.py"
@@ -0,0 +1,274 @@
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""ResNet."""
+import numpy as np
+import mindspore.nn as nn
+import mindspore.common.dtype as mstype
+from mindspore.ops import operations as P
+from mindspore.common.tensor import Tensor
+from scipy.stats import truncnorm
+
+format_ = "NHWC"
+# tranpose shape to NCHW, default init is NHWC.
+def _trans_shape(shape, shape_format):
+ if shape_format == "NCHW":
+ return (shape[0], shape[3], shape[1], shape[2])
+ return shape
+
def _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size):
    """Truncated-normal variance-scaling init for a conv weight.

    stddev = sqrt(1 / fan_in) rescaled by the truncated-normal correction
    constant; samples are drawn on [-2, 2] std and reshaped to the NHWC-style
    weight layout (out, k, k, in). Returns a float32 Tensor.
    """
    fan_in = in_channel * kernel_size * kernel_size
    stddev = (1.0 / max(1., fan_in)) ** 0.5 / .87962566103423978
    count = out_channel * in_channel * kernel_size * kernel_size
    samples = truncnorm(-2, 2, loc=0, scale=stddev).rvs(count)
    samples = np.reshape(samples, (out_channel, kernel_size, kernel_size, in_channel))
    return Tensor(samples, dtype=mstype.float32)
+
def _weight_variable(shape, factor=0.01):
    """Standard-normal float32 weights scaled down by ``factor``."""
    values = np.random.randn(*shape).astype(np.float32) * factor
    return Tensor(values)
+
+
def _conv3x3(in_channel, out_channel, stride=1):
    """3x3 Conv2d with padding 1, honoring the module-level data format."""
    shape = _trans_shape((out_channel, 3, 3, in_channel), format_)
    weight = _weight_variable(shape)
    return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride,
                     padding=1, pad_mode='pad', weight_init=weight, data_format=format_)
+
def _conv1x1(in_channel, out_channel, stride=1):
    """1x1 Conv2d (projection), honoring the module-level data format."""
    shape = _trans_shape((out_channel, 1, 1, in_channel), format_)
    weight = _weight_variable(shape)
    return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride,
                     padding=0, pad_mode='pad', weight_init=weight, data_format=format_)
+
def _conv7x7(in_channel, out_channel, stride=1):
    """7x7 stem Conv2d with padding 3, honoring the module-level data format."""
    shape = _trans_shape((out_channel, 7, 7, in_channel), format_)
    weight = _weight_variable(shape)
    return nn.Conv2d(in_channel, out_channel, kernel_size=7, stride=stride,
                     padding=3, pad_mode='pad', weight_init=weight, data_format=format_)
+
+
def _bn(channel):
    """BatchNorm2d with identity init, honoring the module-level data format."""
    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9, gamma_init=1,
                          beta_init=0, moving_mean_init=0, moving_var_init=1,
                          data_format=format_)
+
def _bn_last(channel):
    """BatchNorm2d with gamma=0 for the last BN of a residual branch,
    so every block starts as (near) identity."""
    return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9, gamma_init=0,
                          beta_init=0, moving_mean_init=0, moving_var_init=1,
                          data_format=format_)
+
def _fc(in_channel, out_channel):
    """Fully-connected head with small random-normal weights and zero bias."""
    weight = _weight_variable((out_channel, in_channel))
    return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0)
+
+
class ResidualBlock(nn.Cell):
    """
    ResNet V1 bottleneck residual block (1x1 reduce -> 3x3 -> 1x1 expand),
    GPU-benchmark variant with a configurable data format.

    Args:
        in_channel (int): Input channel.
        out_channel (int): Output channel.
        stride (int): Stride size for the first convolutional layer. Default: 1.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ResidualBlock(3, 256, stride=2)
    """
    # Bottleneck expansion factor: internal channel = out_channel // 4.
    expansion = 4

    def __init__(self,
                 in_channel,
                 out_channel,
                 stride=1):
        super(ResidualBlock, self).__init__()
        self.stride = stride
        channel = out_channel // self.expansion
        self.conv1 = _conv1x1(in_channel, channel, stride=1)
        self.bn1 = _bn(channel)
        self.conv2 = _conv3x3(channel, channel, stride=stride)
        self.bn2 = _bn(channel)

        self.conv3 = _conv1x1(channel, out_channel, stride=1)
        # gamma=0 init so the block starts as (near) identity.
        self.bn3 = _bn_last(out_channel)
        self.relu = nn.ReLU()

        self.down_sample = False

        # A projection shortcut is needed when spatial size or channel count changes.
        if stride != 1 or in_channel != out_channel:
            self.down_sample = True
        self.down_sample_layer = None

        if self.down_sample:
            self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride), _bn(out_channel)])
        self.add = P.Add()

    def construct(self, x):
        """Forward pass: (project shortcut first), bottleneck branch, add, relu."""
        identity = x
        if self.down_sample:
            identity = self.down_sample_layer(identity)

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)

        out = self.add(identity, out)
        out = self.relu(out)

        return out
+
+
class ResNet(nn.Cell):
    """
    ResNet architecture, GPU-benchmark variant with a configurable data format.

    Args:
        block (Cell): Block for network.
        layer_nums (list): Numbers of block in different layers.
        in_channels (list): Input channel in each layer.
        out_channels (list): Output channel in each layer.
        strides (list): Stride size in each layer.
        num_classes (int): The number of classes that the training images are belonging to.
    Returns:
        Tensor, output tensor.

    Examples:
        >>> ResNet(ResidualBlock,
        >>>        [3, 4, 6, 3],
        >>>        [64, 256, 512, 1024],
        >>>        [256, 512, 1024, 2048],
        >>>        [1, 2, 2, 2],
        >>>        10)
    """

    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 num_classes):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
        # NOTE(review): 4 input channels in NHWC mode — presumably the data
        # pipeline pads RGB to 4 channels for alignment; confirm against the
        # dataset code.
        input_data_channel = 4
        if format_ == "NCHW":
            input_data_channel = 3
        self.conv1 = _conv7x7(input_data_channel, 64, stride=2)
        self.bn1 = _bn(64)
        self.relu = P.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same", data_format=format_)
        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0])
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1])
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2])
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3])

        # Fixed 7x7 average pool — assumes 224x224 inputs (7x7 feature map
        # at stage 4); TODO confirm input size for other datasets.
        self.avg_pool = P.AvgPool(7, 1, data_format=format_)
        self.flatten = nn.Flatten()
        self.end_point = _fc(out_channels[3], num_classes)

    def _make_layer(self, block, layer_num, in_channel, out_channel, stride):
        """
        Make stage network of ResNet.

        Args:
            block (Cell): Resnet block.
            layer_num (int): Layer number.
            in_channel (int): Input channel.
            out_channel (int): Output channel.
            stride (int): Stride size for the first convolutional layer.
        Returns:
            SequentialCell, the output layer.

        Examples:
            >>> _make_layer(ResidualBlock, 3, 128, 256, 2)
        """
        layers = []

        # First block of a stage carries the stride / channel change.
        resnet_block = block(in_channel, out_channel, stride=stride)
        layers.append(resnet_block)
        for _ in range(1, layer_num):
            resnet_block = block(out_channel, out_channel, stride=1)
            layers.append(resnet_block)
        return nn.SequentialCell(layers)

    def construct(self, x):
        """Forward pass: stem -> maxpool -> 4 stages -> avg-pool -> FC logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        c1 = self.maxpool(x)

        c2 = self.layer1(c1)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)

        out = self.avg_pool(c5)
        out = self.flatten(out)
        out = self.end_point(out)

        return out
+
+
def resnet50(class_num=1001, dtype="fp16"):
    """
    Get ResNet50 neural network (GPU benchmark variant).

    Args:
        class_num (int): Class number. Default: 1001.
        dtype (str): "fp16" keeps the NHWC layout; "fp32" switches the
            module-level ``format_`` to "NCHW".

    Returns:
        Cell, cell instance of ResNet50 neural network.

    Note:
        Mutates the module-level global ``format_`` as a side effect, which
        all layer factories in this module read at construction time.

    Examples:
        >>> net = resnet50(1001)
    """
    global format_
    if dtype == "fp32":
        format_ = "NCHW"
    return ResNet(ResidualBlock,
                  [3, 4, 6, 3],
                  [64, 256, 512, 1024],
                  [256, 512, 1024, 2048],
                  [1, 2, 2, 2],
                  class_num)
diff --git "a/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/train.py" "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/train.py"
new file mode 100644
index 0000000000000000000000000000000000000000..370d9f1636cb6071288638a8d1c53a21a2f65ad1
--- /dev/null
+++ "b/code/2021_autumn/\345\224\220\351\237\254-\345\237\272\344\272\216InSAR\345\275\261\345\203\217\345\222\214Resnet-50\347\232\204\347\201\253\345\261\261\345\234\260\350\241\250\345\275\242\345\217\230\347\233\221\346\265\213/src/train.py"
@@ -0,0 +1,254 @@
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train resnet."""
+import os
+import argparse
+import ast
+from mindspore import context
+from mindspore import Tensor
+from mindspore.nn.optim import Momentum, THOR
+from mindspore.train.model import Model
+from mindspore.context import ParallelMode
+from mindspore.train.train_thor import ConvertModelUtils
+from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
+from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
+from mindspore.train.loss_scale_manager import FixedLossScaleManager
+from mindspore.train.serialization import load_checkpoint, load_param_into_net
+from mindspore.communication.management import init, get_rank, get_group_size
+from mindspore.common import set_seed
+from mindspore.parallel import set_algo_parameters
+import mindspore.nn as nn
+import mindspore.common.initializer as weight_init
+import mindspore.log as logger
+from src.lr_generator import get_lr, warmup_cosine_annealing_lr
+from src.CrossEntropySmooth import CrossEntropySmooth
+from src.config import cfg
+from src.eval_callback import EvalCallBack
+from src.metric import DistAccuracy, ClassifyCorrectCell
+
# Command-line interface: model/dataset selection, distribution, checkpoint
# loading, and in-training evaluation switches.
parser = argparse.ArgumentParser(description='Image classification')
# Network / data selection.
parser.add_argument('--net', type=str, default=None, help='Resnet Model, resnet18, resnet50 or resnet101')
parser.add_argument('--dataset', type=str, default=None, help='Dataset, either cifar10 or imagenet2012')
# Distributed-training controls (booleans parsed via ast.literal_eval so
# "True"/"False" strings work on the command line).
parser.add_argument('--run_distribute', type=ast.literal_eval, default=False, help='Run distribute')
parser.add_argument('--device_num', type=int, default=1, help='Device num.')

parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--device_target', type=str, default='Ascend', choices=("Ascend", "GPU", "CPU"),
                    help="Device target, support Ascend, GPU and CPU.")
# Fine-tuning: optionally load a checkpoint and drop the classifier head.
parser.add_argument('--pre_trained', type=str, default=None, help='Pretrained checkpoint path')
parser.add_argument('--parameter_server', type=ast.literal_eval, default=False, help='Run parameter server train')
parser.add_argument("--filter_weight", type=ast.literal_eval, default=False,
                    help="Filter head weight parameters, default is False.")
# Evaluation-while-training controls.
parser.add_argument("--run_eval", type=ast.literal_eval, default=False,
                    help="Run evaluation when training, default is False.")
parser.add_argument('--eval_dataset_path', type=str, default=None, help='Evaluation dataset path when run_eval is True')
parser.add_argument("--save_best_ckpt", type=ast.literal_eval, default=True,
                    help="Save best checkpoint when run_eval is True, default is True.")
parser.add_argument("--eval_start_epoch", type=int, default=40,
                    help="Evaluation start epoch when run_eval is True, default is 40.")
parser.add_argument("--eval_interval", type=int, default=1,
                    help="Evaluation interval when run_eval is True, default is 1.")
args_opt = parser.parse_args()
+
# Fix all RNG seeds for reproducible runs.
set_seed(1)

# Select the network constructor, config object, and dataset factory based on
# the CLI arguments. Anything that is not resnet18/50/101 falls through to the
# SE-ResNet50 branch.
if args_opt.net in ("resnet18", "resnet50"):
    if args_opt.net == "resnet18":
        from src.resnet import resnet18 as resnet
    if args_opt.net == "resnet50":
        from src.resnet import resnet50 as resnet
    if args_opt.dataset == "cifar10":
        from src.config import config1 as config
        from src.dataset import create_dataset1 as create_dataset
    else:
        from src.config import config2 as config
        from src.dataset import create_dataset2 as create_dataset

elif args_opt.net == "resnet101":
    from src.resnet import resnet101 as resnet
    from src.config import config3 as config
    from src.dataset import create_dataset3 as create_dataset
else:
    from src.resnet import se_resnet50 as resnet
    from src.config import config4 as config
    from src.dataset import create_dataset4 as create_dataset

# The THOR optimizer has its own hyperparameter config, which overrides the
# selection above.
if cfg.optimizer == "Thor":
    if args_opt.device_target == "Ascend":
        from src.config import config_thor_Ascend as config
    else:
        from src.config import config_thor_gpu as config
+
+
def filter_checkpoint_parameter_by_list(origin_dict, param_filter):
    """Delete (in place) every checkpoint entry whose key contains any of the
    substrings in ``param_filter``; used to drop the classifier head when
    fine-tuning on a new class count. Prints each deleted key.
    """
    doomed = [key for key in origin_dict
              if any(name in key for name in param_filter)]
    for key in doomed:
        print("Delete parameter from checkpoint: ", key)
        del origin_dict[key]
+
def apply_eval(eval_param):
    """Evaluate the model on the given dataset and return the named metric.

    ``eval_param`` is a dict with keys "model" (a Model exposing .eval),
    "dataset", and "metrics_name" (the key to pull out of the metrics dict).
    """
    model = eval_param["model"]
    metric_key = eval_param["metrics_name"]
    metrics = model.eval(eval_param["dataset"])
    return metrics[metric_key]
+
if __name__ == '__main__':
    target = args_opt.device_target
    # Distributed training is not supported on CPU.
    if target == "CPU":
        args_opt.run_distribute = False

    ckpt_save_dir = config.save_checkpoint_path

    # init context
    context.set_context(mode=context.GRAPH_MODE, device_target=target, save_graphs=False)
    if args_opt.parameter_server:
        context.set_ps_context(enable_ps=True)
    if args_opt.run_distribute:
        if target == "Ascend":
            # NOTE(review): int(os.getenv(...)) raises TypeError if DEVICE_ID
            # is unset — assumes the launcher always exports it.
            device_id = int(os.getenv('DEVICE_ID'))
            context.set_context(device_id=device_id, enable_auto_mixed_precision=True)
            context.set_auto_parallel_context(device_num=args_opt.device_num, parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True)
            set_algo_parameters(elementwise_op_strategy_follow=True)
            # Per-network all-reduce fusion split points (tuning for comm overlap).
            if args_opt.net == "resnet50" or args_opt.net == "se-resnet50":
                context.set_auto_parallel_context(all_reduce_fusion_config=[85, 160])
            elif args_opt.net == "resnet101":
                context.set_auto_parallel_context(all_reduce_fusion_config=[80, 210, 313])
            init()
        # GPU target
        else:
            init()
            context.set_auto_parallel_context(device_num=get_group_size(), parallel_mode=ParallelMode.DATA_PARALLEL,
                                              gradients_mean=True)
            if args_opt.net == "resnet50":
                context.set_auto_parallel_context(all_reduce_fusion_config=[85, 160])
        # Each rank writes checkpoints to its own subdirectory.
        ckpt_save_dir = config.save_checkpoint_path + "ckpt_" + str(get_rank()) + "/"

    # create dataset
    dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=True, repeat_num=1,
                             batch_size=config.batch_size, target=target, distribute=args_opt.run_distribute)
    step_size = dataset.get_dataset_size()

    # define net
    net = resnet(class_num=config.class_num)
    if args_opt.parameter_server:
        net.set_param_ps()

    # init weight: load a pretrained checkpoint if given (optionally dropping
    # the classifier head), otherwise re-initialize conv/dense weights.
    if args_opt.pre_trained:
        param_dict = load_checkpoint(args_opt.pre_trained)
        if args_opt.filter_weight:
            filter_list = [x.name for x in net.end_point.get_parameters()]
            filter_checkpoint_parameter_by_list(param_dict, filter_list)
        load_param_into_net(net, param_dict)
    else:
        for _, cell in net.cells_and_names():
            if isinstance(cell, nn.Conv2d):
                cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))
            if isinstance(cell, nn.Dense):
                cell.weight.set_data(weight_init.initializer(weight_init.TruncatedNormal(),
                                                             cell.weight.shape,
                                                             cell.weight.dtype))

    # init lr: THOR uses its own schedule; otherwise pick the per-net schedule.
    if cfg.optimizer == "Thor":
        from src.lr_generator import get_thor_lr
        lr = get_thor_lr(0, config.lr_init, config.lr_decay, config.lr_end_epoch, step_size, decay_epochs=39)
    else:
        if args_opt.net in ("resnet18", "resnet50", "se-resnet50"):
            lr = get_lr(lr_init=config.lr_init, lr_end=config.lr_end, lr_max=config.lr_max,
                        warmup_epochs=config.warmup_epochs, total_epochs=config.epoch_size, steps_per_epoch=step_size,
                        lr_decay_mode=config.lr_decay_mode)
        else:
            lr = warmup_cosine_annealing_lr(config.lr, step_size, config.warmup_epochs, config.epoch_size,
                                            config.pretrain_epoch_size * step_size)
    lr = Tensor(lr)

    # define opt: weight decay only on conv/dense weights, not on BN
    # parameters (beta/gamma) or biases.
    decayed_params = []
    no_decayed_params = []
    for param in net.trainable_params():
        if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name:
            decayed_params.append(param)
        else:
            no_decayed_params.append(param)

    group_params = [{'params': decayed_params, 'weight_decay': config.weight_decay},
                    {'params': no_decayed_params},
                    {'order_params': net.trainable_params()}]
    opt = Momentum(group_params, lr, config.momentum, loss_scale=config.loss_scale)
    # Loss: label smoothing for imagenet-style configs, plain CE otherwise.
    if args_opt.dataset == "imagenet2012":
        if not config.use_label_smooth:
            config.label_smooth_factor = 0.0
        loss = CrossEntropySmooth(sparse=True, reduction="mean",
                                  smooth_factor=config.label_smooth_factor, num_classes=config.class_num)
    else:
        loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
    dist_eval_network = ClassifyCorrectCell(net) if args_opt.run_distribute else None
    metrics = {"acc"}
    if args_opt.run_distribute:
        metrics = {'acc': DistAccuracy(batch_size=config.batch_size, device_num=args_opt.device_num)}
    # Default: O2 mixed-precision model with a fixed loss scale.
    model = Model(net, loss_fn=loss, optimizer=opt, loss_scale_manager=loss_scale, metrics=metrics,
                  amp_level="O2", keep_batchnorm_fp32=False, eval_network=dist_eval_network)
    if (args_opt.net != "resnet101" and args_opt.net != "resnet50") or \
            args_opt.parameter_server or target == "CPU":
        ## fp32 training
        model = Model(net, loss_fn=loss, optimizer=opt, metrics=metrics, eval_network=dist_eval_network)
    # THOR requires rebuilding the optimizer and converting the model.
    if cfg.optimizer == "Thor" and args_opt.dataset == "imagenet2012":
        from src.lr_generator import get_thor_damping
        damping = get_thor_damping(0, config.damping_init, config.damping_decay, 70, step_size)
        split_indices = [26, 53]
        opt = THOR(net, lr, Tensor(damping), config.momentum, config.weight_decay, config.loss_scale,
                   config.batch_size, split_indices=split_indices)
        model = ConvertModelUtils().convert_to_thor_model(model=model, network=net, loss_fn=loss, optimizer=opt,
                                                          loss_scale_manager=loss_scale, metrics={'acc'},
                                                          amp_level="O2", keep_batchnorm_fp32=False,
                                                          frequency=config.frequency)
        args_opt.run_eval = False
        logger.warning("Thor optimizer not support evaluation while training.")

    # define callbacks: timing, loss logging, periodic checkpoints.
    time_cb = TimeMonitor(data_size=step_size)
    loss_cb = LossMonitor()
    cb = [time_cb, loss_cb]
    if config.save_checkpoint:
        config_ck = CheckpointConfig(save_checkpoint_steps=config.save_checkpoint_epochs * step_size,
                                     keep_checkpoint_max=config.keep_checkpoint_max)
        ckpt_cb = ModelCheckpoint(prefix="resnet", directory=ckpt_save_dir, config=config_ck)
        cb += [ckpt_cb]
    if args_opt.run_eval:
        if args_opt.eval_dataset_path is None or (not os.path.isdir(args_opt.eval_dataset_path)):
            raise ValueError("{} is not a existing path.".format(args_opt.eval_dataset_path))
        eval_dataset = create_dataset(dataset_path=args_opt.eval_dataset_path, do_train=False,
                                      batch_size=config.batch_size, target=target)
        eval_param_dict = {"model": model, "dataset": eval_dataset, "metrics_name": "acc"}
        # NOTE(review): "besk_ckpt_name" is EvalCallBack's actual (misspelled)
        # keyword in the upstream model zoo — do not "fix" it here without
        # changing src/eval_callback.py too.
        eval_cb = EvalCallBack(apply_eval, eval_param_dict, interval=args_opt.eval_interval,
                               eval_start_epoch=args_opt.eval_start_epoch, save_best_ckpt=True,
                               ckpt_directory=ckpt_save_dir, besk_ckpt_name="best_acc.ckpt",
                               metrics_name="acc")
        cb += [eval_cb]
    # train model
    if args_opt.net == "se-resnet50":
        config.epoch_size = config.train_epoch_size
    # Dataset sinking is unavailable with parameter-server mode and on CPU.
    dataset_sink_mode = (not args_opt.parameter_server) and target != "CPU"
    model.train(config.epoch_size - config.pretrain_epoch_size, dataset, callbacks=cb,
                sink_size=dataset.get_dataset_size(), dataset_sink_mode=dataset_sink_mode)