From 26e32fe217e7cea5ea47116b9aa829cc0110969d Mon Sep 17 00:00:00 2001
From: cui-gaoleng <562344211@qq.com>
Date: Wed, 17 Dec 2025 14:24:38 +0800
Subject: [PATCH] mcp_center
---
mcp_center/LICENSE | 194 ++++++
mcp_center/README.en.md | 507 ++++++++++++++
mcp_center/README.md | 507 ++++++++++++++
mcp_center/mcp_config/change.py | 67 ++
.../mcp_config/mcp_server_mcp/config.json | 9 +
mcp_center/mcp_config/mcp_to_app_config.toml | 11 +
mcp_center/mcp_config/rag_mcp/config.json | 9 +
mcp_center/requiremenets.txt | 5 +
mcp_center/run.sh | 34 +
.../servers/oe_cli_mcp_server/.gitignore | 17 +
.../servers/oe_cli_mcp_server/README.en.md | 36 +
.../servers/oe_cli_mcp_server/README.md | 39 ++
.../oe_cli_mcp_server/client/client.py | 384 +++++++++++
.../oe_cli_mcp_server/client/client_1.py | 65 ++
.../client/mult_client_test.py | 79 +++
.../oe_cli_mcp_server/mcp-server.service | 29 +
.../mcp_server/MCP_TOOLS.json | 0
.../oe_cli_mcp_server/mcp_server/cli.py | 50 ++
.../mcp_server/cli/__init__.py | 0
.../mcp_server/cli/handle.py | 203 ++++++
.../mcp_server/cli/parse_args.py | 25 +
.../mcp_server/dependency.py | 300 +++++++++
.../mcp_server/manager/manager.py | 322 +++++++++
.../mcp_server/manager/package_loader.py | 340 ++++++++++
.../mcp_server/manager/package_unloader.py | 140 ++++
.../mcp_server/manager/tool_repository.py | 452 +++++++++++++
.../mcp_server/mcp_manager.py | 380 +++++++++++
.../mcp_tools/AI_tools/nvidia_tool/base.py | 159 +++++
.../AI_tools/nvidia_tool/config.json | 8 +
.../mcp_tools/AI_tools/nvidia_tool/deps.toml | 23 +
.../mcp_tools/AI_tools/nvidia_tool/tool.py | 81 +++
.../base_tools/cmd_executor_tool/base.py | 3 +
.../base_tools/cmd_executor_tool/config.json | 8 +
.../base_tools/cmd_executor_tool/deps.toml.py | 0
.../base_tools/cmd_executor_tool/tool.py | 129 ++++
.../mcp_tools/base_tools/file_tool/base.py | 253 +++++++
.../base_tools/file_tool/config.json | 8 +
.../mcp_tools/base_tools/file_tool/deps.toml | 0
.../mcp_tools/base_tools/file_tool/tool.py | 107 +++
.../base_tools/network_tools/base.py | 224 +++++++
.../base_tools/network_tools/config.json | 10 +
.../base_tools/network_tools/deps.toml | 11 +
.../base_tools/network_tools/tool.py | 24 +
.../mcp_tools/base_tools/pkg_tool/base.py | 273 ++++++++
.../mcp_tools/base_tools/pkg_tool/config.json | 8 +
.../mcp_tools/base_tools/pkg_tool/deps.toml | 0
.../mcp_tools/base_tools/pkg_tool/tool.py | 115 ++++
.../mcp_tools/base_tools/proc_tool/base.py | 240 +++++++
.../base_tools/proc_tool/config.json | 8 +
.../mcp_tools/base_tools/proc_tool/deps.toml | 0
.../mcp_tools/base_tools/proc_tool/tool.py | 56 ++
.../mcp_tools/base_tools/ssh_fix_tool/base.py | 385 +++++++++++
.../base_tools/ssh_fix_tool/config.json | 10 +
.../base_tools/ssh_fix_tool/deps.toml | 11 +
.../mcp_tools/base_tools/ssh_fix_tool/tool.py | 28 +
.../base_tools/sys_info_tool/base.py | 419 ++++++++++++
.../base_tools/sys_info_tool/config.json | 8 +
.../base_tools/sys_info_tool/deps.toml | 0
.../base_tools/sys_info_tool/tool.py | 50 ++
.../oe_cli_mcp_server/mcp_tools/tool_type.py | 10 +
mcp_center/servers/oe_cli_mcp_server/run.sh | 41 ++
.../servers/oe_cli_mcp_server/server.py | 15 +
mcp_center/servers/oe_cli_mcp_server/setup.py | 14 +
.../util/get_project_root.py | 20 +
.../util/get_tool_state_path.py | 12 +
.../oe_cli_mcp_server/util/get_type.py | 14 +
.../oe_cli_mcp_server/util/test_llm_valid.py | 35 +
.../util/tool_package_file_check.py | 17 +
.../oe_cli_mcp_server/util/venv_util.py | 66 ++
.../oe_cli_mcp_server/util/zip_tool_util.py | 118 ++++
mcp_center/servers/rag/README.en.md | 136 ++++
mcp_center/servers/rag/README.md | 136 ++++
mcp_center/servers/rag/run.sh | 49 ++
mcp_center/servers/rag/src/base/config.py | 174 +++++
mcp_center/servers/rag/src/base/embedding.py | 145 ++++
.../rag/src/base/manager/database_manager.py | 257 +++++++
.../rag/src/base/manager/document_manager.py | 394 +++++++++++
mcp_center/servers/rag/src/base/models.py | 79 +++
mcp_center/servers/rag/src/base/parser/doc.py | 61 ++
.../servers/rag/src/base/parser/parser.py | 59 ++
mcp_center/servers/rag/src/base/parser/pdf.py | 80 +++
mcp_center/servers/rag/src/base/parser/txt.py | 30 +
mcp_center/servers/rag/src/base/rerank.py | 64 ++
.../servers/rag/src/base/search/keyword.py | 92 +++
.../servers/rag/src/base/search/vector.py | 67 ++
.../weighted_keyword_and_vector_search.py | 122 ++++
mcp_center/servers/rag/src/base/token_tool.py | 157 +++++
mcp_center/servers/rag/src/cli.py | 82 +++
mcp_center/servers/rag/src/cli/__init__.py | 0
mcp_center/servers/rag/src/cli/handle.py | 168 +++++
mcp_center/servers/rag/src/cli/parse_args.py | 58 ++
mcp_center/servers/rag/src/config.json | 48 ++
mcp_center/servers/rag/src/rag_config.json | 20 +
mcp_center/servers/rag/src/requirements.txt | 6 +
mcp_center/servers/rag/src/server.py | 176 +++++
mcp_center/servers/rag/src/tool.py | 632 ++++++++++++++++++
mcp_center/service/rag.service | 20 +
97 files changed, 10537 insertions(+)
create mode 100644 mcp_center/LICENSE
create mode 100644 mcp_center/README.en.md
create mode 100644 mcp_center/README.md
create mode 100644 mcp_center/mcp_config/change.py
create mode 100644 mcp_center/mcp_config/mcp_server_mcp/config.json
create mode 100644 mcp_center/mcp_config/mcp_to_app_config.toml
create mode 100644 mcp_center/mcp_config/rag_mcp/config.json
create mode 100644 mcp_center/requiremenets.txt
create mode 100755 mcp_center/run.sh
create mode 100644 mcp_center/servers/oe_cli_mcp_server/.gitignore
create mode 100644 mcp_center/servers/oe_cli_mcp_server/README.en.md
create mode 100644 mcp_center/servers/oe_cli_mcp_server/README.md
create mode 100644 mcp_center/servers/oe_cli_mcp_server/client/client.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/client/client_1.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/client/mult_client_test.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp-server.service
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_server/MCP_TOOLS.json
create mode 100755 mcp_center/servers/oe_cli_mcp_server/mcp_server/cli.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_server/cli/__init__.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_server/cli/handle.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_server/cli/parse_args.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_server/dependency.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/manager.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/package_loader.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/package_unloader.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/tool_repository.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_server/mcp_manager.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/base.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/config.json
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/deps.toml
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/tool.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/base.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/config.json
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/deps.toml.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/tool.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/base.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/config.json
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/deps.toml
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/tool.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/base.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/config.json
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/deps.toml
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/tool.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/base.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/config.json
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/deps.toml
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/tool.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/base.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/config.json
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/deps.toml
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/tool.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/base.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/config.json
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/deps.toml
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/tool.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/base.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/config.json
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/deps.toml
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/tool.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/mcp_tools/tool_type.py
create mode 100755 mcp_center/servers/oe_cli_mcp_server/run.sh
create mode 100644 mcp_center/servers/oe_cli_mcp_server/server.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/setup.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/util/get_project_root.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/util/get_tool_state_path.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/util/get_type.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/util/test_llm_valid.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/util/tool_package_file_check.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/util/venv_util.py
create mode 100644 mcp_center/servers/oe_cli_mcp_server/util/zip_tool_util.py
create mode 100644 mcp_center/servers/rag/README.en.md
create mode 100644 mcp_center/servers/rag/README.md
create mode 100644 mcp_center/servers/rag/run.sh
create mode 100644 mcp_center/servers/rag/src/base/config.py
create mode 100644 mcp_center/servers/rag/src/base/embedding.py
create mode 100644 mcp_center/servers/rag/src/base/manager/database_manager.py
create mode 100644 mcp_center/servers/rag/src/base/manager/document_manager.py
create mode 100644 mcp_center/servers/rag/src/base/models.py
create mode 100644 mcp_center/servers/rag/src/base/parser/doc.py
create mode 100644 mcp_center/servers/rag/src/base/parser/parser.py
create mode 100644 mcp_center/servers/rag/src/base/parser/pdf.py
create mode 100644 mcp_center/servers/rag/src/base/parser/txt.py
create mode 100644 mcp_center/servers/rag/src/base/rerank.py
create mode 100644 mcp_center/servers/rag/src/base/search/keyword.py
create mode 100644 mcp_center/servers/rag/src/base/search/vector.py
create mode 100644 mcp_center/servers/rag/src/base/search/weighted_keyword_and_vector_search.py
create mode 100644 mcp_center/servers/rag/src/base/token_tool.py
create mode 100644 mcp_center/servers/rag/src/cli.py
create mode 100644 mcp_center/servers/rag/src/cli/__init__.py
create mode 100644 mcp_center/servers/rag/src/cli/handle.py
create mode 100644 mcp_center/servers/rag/src/cli/parse_args.py
create mode 100644 mcp_center/servers/rag/src/config.json
create mode 100644 mcp_center/servers/rag/src/rag_config.json
create mode 100644 mcp_center/servers/rag/src/requirements.txt
create mode 100644 mcp_center/servers/rag/src/server.py
create mode 100644 mcp_center/servers/rag/src/tool.py
create mode 100644 mcp_center/service/rag.service
diff --git a/mcp_center/LICENSE b/mcp_center/LICENSE
new file mode 100644
index 00000000..f6c26977
--- /dev/null
+++ b/mcp_center/LICENSE
@@ -0,0 +1,194 @@
+木兰宽松许可证,第2版
+
+木兰宽松许可证,第2版
+
+2020年1月 http://license.coscl.org.cn/MulanPSL2
+
+您对“软件”的复制、使用、修改及分发受木兰宽松许可证,第2版(“本许可证”)的如下条款的约束:
+
+0. 定义
+
+“软件” 是指由“贡献”构成的许可在“本许可证”下的程序和相关文档的集合。
+
+“贡献” 是指由任一“贡献者”许可在“本许可证”下的受版权法保护的作品。
+
+“贡献者” 是指将受版权法保护的作品许可在“本许可证”下的自然人或“法人实体”。
+
+“法人实体” 是指提交贡献的机构及其“关联实体”。
+
+“关联实体” 是指,对“本许可证”下的行为方而言,控制、受控制或与其共同受控制的机构,此处的控制是
+指有受控方或共同受控方至少50%直接或间接的投票权、资金或其他有价证券。
+
+1. 授予版权许可
+
+每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的版权许可,您可
+以复制、使用、修改、分发其“贡献”,不论修改与否。
+
+2. 授予专利许可
+
+每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的(根据本条规定
+撤销除外)专利许可,供您制造、委托制造、使用、许诺销售、销售、进口其“贡献”或以其他方式转移其“贡
+献”。前述专利许可仅限于“贡献者”现在或将来拥有或控制的其“贡献”本身或其“贡献”与许可“贡献”时的“软
+件”结合而将必然会侵犯的专利权利要求,不包括对“贡献”的修改或包含“贡献”的其他结合。如果您或您的“
+关联实体”直接或间接地,就“软件”或其中的“贡献”对任何人发起专利侵权诉讼(包括反诉或交叉诉讼)或
+其他专利维权行动,指控其侵犯专利权,则“本许可证”授予您对“软件”的专利许可自您提起诉讼或发起维权
+行动之日终止。
+
+3. 无商标许可
+
+“本许可证”不提供对“贡献者”的商品名称、商标、服务标志或产品名称的商标许可,但您为满足第4条规定
+的声明义务而必须使用除外。
+
+4. 分发限制
+
+您可以在任何媒介中将“软件”以源程序形式或可执行形式重新分发,不论修改与否,但您必须向接收者提供“
+本许可证”的副本,并保留“软件”中的版权、商标、专利及免责声明。
+
+5. 免责声明与责任限制
+
+“软件”及其中的“贡献”在提供时不带任何明示或默示的担保。在任何情况下,“贡献者”或版权所有者不对
+任何人因使用“软件”或其中的“贡献”而引发的任何直接或间接损失承担责任,不论因何种原因导致或者基于
+何种法律理论,即使其曾被建议有此种损失的可能性。
+
+6. 语言
+
+“本许可证”以中英文双语表述,中英文版本具有同等法律效力。如果中英文版本存在任何冲突不一致,以中文
+版为准。
+
+条款结束
+
+如何将木兰宽松许可证,第2版,应用到您的软件
+
+如果您希望将木兰宽松许可证,第2版,应用到您的新软件,为了方便接收者查阅,建议您完成如下三步:
+
+1, 请您补充如下声明中的空白,包括软件名、软件的首次发表年份以及您作为版权人的名字;
+
+2, 请您在软件包的一级目录下创建以“LICENSE”为名的文件,将整个许可证文本放入该文件中;
+
+3, 请将如下声明文本放入每个源文件的头部注释中。
+
+Copyright (c) [Year] [name of copyright holder]
+[Software Name] is licensed under Mulan PSL v2.
+You can use this software according to the terms and conditions of the Mulan
+PSL v2.
+You may obtain a copy of Mulan PSL v2 at:
+ http://license.coscl.org.cn/MulanPSL2
+THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY
+KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+See the Mulan PSL v2 for more details.
+
+Mulan Permissive Software License,Version 2
+
+Mulan Permissive Software License,Version 2 (Mulan PSL v2)
+
+January 2020 http://license.coscl.org.cn/MulanPSL2
+
+Your reproduction, use, modification and distribution of the Software shall
+be subject to Mulan PSL v2 (this License) with the following terms and
+conditions:
+
+0. Definition
+
+Software means the program and related documents which are licensed under
+this License and comprise all Contribution(s).
+
+Contribution means the copyrightable work licensed by a particular
+Contributor under this License.
+
+Contributor means the Individual or Legal Entity who licenses its
+copyrightable work under this License.
+
+Legal Entity means the entity making a Contribution and all its
+Affiliates.
+
+Affiliates means entities that control, are controlled by, or are under
+common control with the acting entity under this License, ‘control’ means
+direct or indirect ownership of at least fifty percent (50%) of the voting
+power, capital or other securities of controlled or commonly controlled
+entity.
+
+1. Grant of Copyright License
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to you a perpetual, worldwide, royalty-free, non-exclusive,
+irrevocable copyright license to reproduce, use, modify, or distribute its
+Contribution, with modification or not.
+
+2. Grant of Patent License
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to you a perpetual, worldwide, royalty-free, non-exclusive,
+irrevocable (except for revocation under this Section) patent license to
+make, have made, use, offer for sale, sell, import or otherwise transfer its
+Contribution, where such patent license is only limited to the patent claims
+owned or controlled by such Contributor now or in future which will be
+necessarily infringed by its Contribution alone, or by combination of the
+Contribution with the Software to which the Contribution was contributed.
+The patent license shall not apply to any modification of the Contribution,
+and any other combination which includes the Contribution. If you or your
+Affiliates directly or indirectly institute patent litigation (including a
+cross claim or counterclaim in a litigation) or other patent enforcement
+activities against any individual or entity by alleging that the Software or
+any Contribution in it infringes patents, then any patent license granted to
+you under this License for the Software shall terminate as of the date such
+litigation or activity is filed or taken.
+
+3. No Trademark License
+
+No trademark license is granted to use the trade names, trademarks, service
+marks, or product names of Contributor, except as required to fulfill notice
+requirements in section 4.
+
+4. Distribution Restriction
+
+You may distribute the Software in any medium with or without modification,
+whether in source or executable forms, provided that you provide recipients
+with a copy of this License and retain copyright, patent, trademark and
+disclaimer statements in the Software.
+
+5. Disclaimer of Warranty and Limitation of Liability
+
+THE SOFTWARE AND CONTRIBUTION IN IT ARE PROVIDED WITHOUT WARRANTIES OF ANY
+KIND, EITHER EXPRESS OR IMPLIED. IN NO EVENT SHALL ANY CONTRIBUTOR OR
+COPYRIGHT HOLDER BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT
+LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING
+FROM YOUR USE OR INABILITY TO USE THE SOFTWARE OR THE CONTRIBUTION IN IT, NO
+MATTER HOW IT’S CAUSED OR BASED ON WHICH LEGAL THEORY, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGES.
+
+6. Language
+
+THIS LICENSE IS WRITTEN IN BOTH CHINESE AND ENGLISH, AND THE CHINESE VERSION
+AND ENGLISH VERSION SHALL HAVE THE SAME LEGAL EFFECT. IN THE CASE OF
+DIVERGENCE BETWEEN THE CHINESE AND ENGLISH VERSIONS, THE CHINESE VERSION
+SHALL PREVAIL.
+
+END OF THE TERMS AND CONDITIONS
+
+How to Apply the Mulan Permissive Software License,Version 2
+(Mulan PSL v2) to Your Software
+
+To apply the Mulan PSL v2 to your work, for easy identification by
+recipients, you are suggested to complete following three steps:
+
+i. Fill in the blanks in following statement, including insert your software
+name, the year of the first publication of your software, and your name
+identified as the copyright owner;
+
+ii. Create a file named "LICENSE" which contains the whole context of this
+License in the first directory of your software package;
+
+iii. Attach the statement to the appropriate annotated syntax at the
+beginning of each source file.
+
+Copyright (c) [Year] [name of copyright holder]
+[Software Name] is licensed under Mulan PSL v2.
+You can use this software according to the terms and conditions of the Mulan
+PSL v2.
+You may obtain a copy of Mulan PSL v2 at:
+ http://license.coscl.org.cn/MulanPSL2
+THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY
+KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
+NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+See the Mulan PSL v2 for more details.
diff --git a/mcp_center/README.en.md b/mcp_center/README.en.md
new file mode 100644
index 00000000..d7eac625
--- /dev/null
+++ b/mcp_center/README.en.md
@@ -0,0 +1,507 @@
+# mcp_center
+
+## 1. Introduction
+mcp_center is used to build the oe intelligent assistant, and its directory structure is as follows:
+```
+├── client - Test client
+├── config - Public and private configuration files
+├── mcp_config - Configuration files for mcp registration to the framework
+├── README.en.md - English version description
+├── README.md - Chinese version description
+├── requiremenets.txt - Overall dependencies
+├── run.sh - Script to start the mcp service
+├── servers - Directory containing mcp server source code
+└── service - Directory containing .service files for mcp
+```
+
+### Running Instructions
+1. Before running the mcp server, execute the following command in the mcp_center directory:
+ ```
+ export PYTHONPATH=$(pwd)
+ ```
+2. Start the mcp server through Python for testing
+3. You can test each mcp tool through client.py in the client directory. The specific URL, tool name, and input parameters can be adjusted as needed.
+
+
+## 2. Rules for Adding New mcp
+1. **Create Service Source Code Directory**
+ Create a new folder under the `mcp_center/servers` directory. Example (taking top mcp as an example):
+ ```
+ servers/top/
+ ├── README.en.md English version of mcp service details
+ ├── README.md Chinese version of mcp service details
+ ├── requirements.txt Contains only private installation dependencies (to avoid conflicts with public dependencies)
+ └── src Source code directory (including server main entry)
+ └── server.py
+ ```
+
+2. **Configuration File Settings**
+ Create a new configuration file under the `mcp_center/config/private` directory. Example (taking top mcp as an example):
+ ```
+ config/private/top
+ ├── config_loader.py Configuration loader (including public configuration and private custom configuration)
+ └── config.toml Private custom configuration
+ ```
+
+3. **Document Updates**
+ For each new mcp added, you need to synchronously add the basic information of the mcp to the existing mcp section in the main directory's README (ensure that ports do not conflict, starting from 12100).
+ For each new mcp added, you need to add a .service file in the service directory of the main directory to make the mcp a service.
+ For each new mcp added, you need to create a corresponding directory in mcp_config of the main directory and create a config.json under it (for registering the mcp to the framework).
+ For each new mcp added, you need to add a command in run.sh of the main directory to start the mcp service.
+
+4. **General Parameter Requirements**
+ Each mcp tool requires a host as an input parameter for communication with the remote server.
+
+5. **Remote Command Execution**
+ Remote command execution can be implemented through `paramiko`.
+
+
+## 3. Existing MCP Services
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/remote_info |
+| Directory| mcp_center/servers/remote_info |
+| Port Used| 12100 |
+| Introduction | Obtain endpoint information |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/shell_generator |
+| Directory| mcp_center/servers/shell_generator |
+| Port Used| 12101 |
+| Introduction | Generate & execute shell commands |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/top |
+| Directory| mcp_center/servers/top |
+| Port Used| 12110 |
+| Introduction | Get system load info |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/kill |
+| Directory| mcp_center/servers/kill |
+| Port Used| 12111 |
+| Introduction | Process control & signal meanings |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/nohup |
+| Directory| mcp_center/servers/nohup |
+| Port Used| 12112 |
+| Introduction | Background process execution |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/strace |
+| Directory| mcp_center/servers/strace |
+| Port Used| 12113 |
+| Introduction | Process tracing for anomaly analysis |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/nvidia |
+| Directory| mcp_center/servers/nvidia |
+| Port Used| 12114 |
+| Introduction | Query and control of NVIDIA GPU |
+
+| Category | Details |
+|------------|------------------------------|
+| Name | servers/npu |
+| Directory | mcp_center/servers/npu |
+| Port Used | 12115 |
+| Description| Query and control of NPU |
+
+| Category | Details |
+|------------|--------------------------|
+| Name | servers/iftop |
+| Directory | mcp_center/servers/iftop |
+| Port Used | 12116 |
+| Description| Network traffic monitoring |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/nload |
+| Directory| mcp_center/servers/nload |
+| Port Used| 12117 |
+| Introduction | Nload Bandwidth Monitoring |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/netstat |
+| Directory| mcp_center/servers/netstat |
+| Port Used| 12118 |
+| Introduction | netstat Network Connection Monitoring |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/lsof |
+| Directory| mcp_center/servers/lsof |
+| Port Used| 12119 |
+| Introduction | Quickly troubleshoot file occupation conflicts, abnormal network connections and process resource occupation issues |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/ifconfig |
+| Directory| mcp_center/servers/ifconfig |
+| Port Used| 12120 |
+| Introduction | ifconfig Network Interface Information Monitoring |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/ethtool |
+| Directory| mcp_center/servers/ethtool |
+| Port Used| 12121 |
+| Introduction | ethtool Network Card Information Query, Feature Status and Network Card Settings |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/tshark |
+| Directory| mcp_center/servers/tshark |
+| Port Used| 12122 |
+| Introduction | Capture, Display and Analyze Network Traffic |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/file_content_tool |
+| Directory| mcp_center/servers/file_content_tool |
+| Port Used| 12125 |
+| Introduction | File Content Creation, Deletion, Modification and Query |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/firewalld |
+| Directory| mcp_center/servers/firewalld |
+| Port Used| 12130 |
+| Introduction | Firewalld Network Firewall Management Tool |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/iptable |
+| Directory| mcp_center/servers/iptable |
+| Port Used| 12131 |
+| Introduction | iptables Firewall Management Tool |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/docker |
+| Directory| mcp_center/servers/docker |
+| Port Used| 12133 |
+| Introduction | docker Tool |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/qemu |
+| Directory| mcp_center/servers/qemu |
+| Port Used| 12134 |
+| Introduction | Qemu Virtual Machine Management Tool |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/nmap |
+| Directory| mcp_center/servers/nmap |
+| Port Used| 12135 |
+| Introduction | Nmap IP Scanning Tool |
+
+| Category | Details |
+|----------|-------------------------------|
+| Name | servers/file_transfer |
+| Directory| mcp_center/servers/file_transfer |
+| Port Used| 12136 |
+| Introduction | File Transfer/Download |
+
+| Category | Details |
+|----------------|--------------------------------------------------|
+| Name | servers/systrace-mcpserver |
+| Directory | mcp_center/servers/systrace/systrace_mcp |
+| Port Occupied | 12145 |
+| Description | Start MCP Server Service |
+
+| Category | Details |
+|----------------|--------------------------------------------------|
+| Name | servers/systrace-openapi |
+| Directory | mcp_center/servers/systrace/systrace_mcp |
+| Port Occupied | 12146 |
+| Description | Start OpenAPI Server Service |
+
+| Category | Details |
+|----------------|--------------------------------------------------|
+| Name | servers/systrace-mcpserver |
+| Directory | mcp_center/servers/euler-copilot-tune |
+| Port Occupied | 12147 |
+| Description | Tuning MCP Service |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/lscpu |
+| Directory | mcp_center/servers/lscpu |
+| Port Occupied | 12202 |
+| Description | Collects static information such as CPU architecture |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/numa_topo |
+| Directory | mcp_center/servers/numa_topo |
+| Port Occupied | 12203 |
+| Description | Queries NUMA hardware topology and system configuration |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/numa_bind_proc |
+| Directory | mcp_center/servers/numa_bind_proc |
+| Port Occupied | 12204 |
+| Description | Binds processes to specified NUMA nodes at startup |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/numa_rebind_proc |
+| Directory | mcp_center/servers/numa_rebind_proc |
+| Port Occupied | 12205 |
+| Description | Modifies NUMA bindings of already started processes |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/numa_bind_docker |
+| Directory | mcp_center/servers/numa_bind_docker |
+| Port Occupied | 12206 |
+| Description | Configure NUMA binding for Docker containers |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/numa_perf_compare |
+| Directory | mcp_center/servers/numa_perf_compare |
+| Port Occupied | 12208 |
+| Description | Control test variables with NUMA binding |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/numa_diagnose |
+| Directory | mcp_center/servers/numa_diagnose |
+| Port Occupied | 12209 |
+| Description | Locate hardware issues with NUMA binding |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/numastat |
+| Directory | mcp_center/servers/numastat |
+| Port Occupied | 12210 |
+| Description | View the overall NUMA memory access status of the system |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/numa_cross_node |
+| Directory | mcp_center/servers/numa_cross_node |
+| Port Occupied | 12211 |
+| Description | Identify processes with excessive cross-node memory access |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/numa_container |
+| Directory | mcp_center/servers/numa_container |
+| Port Occupied | 12214 |
+| Description | Monitor NUMA memory access in Docker containers |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/hotspot_trace |
+| Directory | mcp_center/servers/hotspot_trace |
+| Port Occupied | 12216 |
+| Description | Quickly locate CPU performance bottlenecks in systems/processes |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/cache_miss_audit |
+| Directory | mcp_center/servers/cache_miss_audit |
+| Port Occupied | 12217 |
+| Description | Identify performance losses due to CPU cache misses |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/func_timing_trace |
+| Directory | mcp_center/servers/func_timing_trace |
+| Port Occupied | 12218 |
+| Description | Accurately measure function execution time (including call stack) |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/strace_syscall |
+| Directory | mcp_center/servers/strace_syscall |
+| Port Occupied | 12219 |
+| Description | Investigate unreasonable system calls (high frequency / time-consuming) |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/perf_interrupt |
+| Directory | mcp_center/servers/perf_interrupt |
+| Port Occupied | 12220 |
+| Description | Locate CPU usage caused by high-frequency interrupts |
+
+| Category | Details |
+|----------|--------------------------|
+| Name | servers/flame_graph |
+| Directory | mcp_center/servers/flame_graph |
+| Port Occupied | 12222 |
+| Description | Flame graph generation: Visualize performance bottlenecks |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/free |
+| Directory| mcp_center/servers/free |
+| Port Used| 13100 |
+| Introduction | Obtain the overall status of system memory |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/vmstat |
+| Directory| mcp_center/servers/vmstat |
+| Port Used| 13101 |
+| Introduction | Collect information on system resource interaction bottlenecks |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/sar |
+| Directory| mcp_center/servers/sar |
+| Port Used| 13102 |
+| Introduction | System resource monitoring and fault diagnosis |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/sync |
+| Directory| mcp_center/servers/sync |
+| Port Used| 13103 |
+| Introduction | Write memory buffer data to disk |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/swapon |
+| Directory| mcp_center/servers/swapon |
+| Port Used| 13104 |
+| Introduction | Check the status of swap devices |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/swapoff |
+| Directory| mcp_center/servers/swapoff |
+| Port Used| 13105 |
+| Introduction | Disable swap devices |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/fallocate |
+| Directory| mcp_center/servers/fallocate|
+| Port Used| 13106 |
+| Introduction | Temporarily create and enable swap files |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/find |
+| Directory| mcp_center/servers/find |
+| Port Used| 13107 |
+| Introduction | File Search |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/touch |
+| Directory| mcp_center/servers/touch |
+| Port Used| 13108 |
+| Introduction | File Creation and Time Calibration |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/mkdir |
+| Directory| mcp_center/servers/mkdir |
+| Port Used| 13109 |
+| Introduction | Directory Creation |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/rm |
+| Directory| mcp_center/servers/rm |
+| Port Used| 13110 |
+| Introduction | File Deletion |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/mv |
+| Directory| mcp_center/servers/mv |
+| Port Used| 13111 |
+| Introduction | File move or rename |
+
+| Category | Details |
+|----------|-----------------------------|
+| Name | servers/ls |
+| Directory| mcp_center/servers/ls |
+| Port Used| 13112 |
+| Introduction | View directory contents |
+
+| Category | Details |
+|----------|--------------------------------------|
+| Name | head |
+| Directory| mcp_center/servers/head |
+| Port Used | 13113 |
+| Introduction | File beginning content viewing tool |
+
+| Category | Details |
+|----------|--------------------------------------|
+| Name | tail |
+| Directory| mcp_center/servers/tail |
+| Port Used | 13114 |
+| Introduction | File ending content viewing tool |
+
+| Category | Details |
+|----------|--------------------------------------|
+| Name | cat |
+| Directory| mcp_center/servers/cat |
+| Port Used | 13115 |
+| Introduction | File content viewing tool |
+
+| Category | Details |
+|----------|--------------------------------------|
+| Name | chown |
+| Directory| mcp_center/servers/chown |
+| Port Used | 13116 |
+| Introduction | File owner modification tool |
+
+| Category | Details |
+|----------|--------------------------------------|
+| Name | chmod |
+| Directory| mcp_center/servers/chmod |
+| Port Used | 13117 |
+| Introduction | File permission modification tool |
+
+| Category | Details |
+|----------|--------------------------------------|
+| Name | tar |
+| Directory| mcp_center/servers/tar |
+| Port Used | 13118 |
+| Introduction | File compression and decompression tool |
+
+| Category | Details |
+|----------|--------------------------------------|
+| Name | zip |
+| Directory| mcp_center/servers/zip |
+| Port Used | 13119 |
+| Introduction | File compression and decompression tool |
+
+| Category | Details |
+|----------|--------------------------------------|
+| Name | grep |
+| Directory| mcp_center/servers/grep |
+| Port Used | 13120 |
+| Introduction | File content search tool |
+
+| Category | Details |
+|----------|--------------------------------------|
+| Name | sed |
+| Directory| mcp_center/servers/sed |
+| Port Used | 13121 |
+| Introduction | Text processing tool |
+
+| Category | Details |
+|----------|--------------------------------------|
+| Name | echo |
+| Directory| mcp_center/servers/echo |
+| Port Used | 13125 |
+| Introduction | Text writing tool |
diff --git a/mcp_center/README.md b/mcp_center/README.md
new file mode 100644
index 00000000..09767284
--- /dev/null
+++ b/mcp_center/README.md
@@ -0,0 +1,507 @@
+# mcp_center
+
+## 一、介绍
+mcp_center 用于构建 oe 智能助手,其目录结构如下:
+```
+├── client 测试用客户端
+├── config 公共和私有配置文件
+├── mcp_config mcp注册到框架的配置文件
+├── README.en.md 英文版本说明
+├── README.md 中文版本说明
+├── requiremenets.txt 整体的依赖
+├── run.sh 唤起mcp服务的脚本
+├── servers mcp server源码所在目录
+└── service mcp的.service文件所在目录
+```
+
+### 运行说明
+1. 运行 mcp server 前,需在 mcp_center 目录下执行:
+ ```
+ export PYTHONPATH=$(pwd)
+ ```
+2. 通过 Python 唤起 mcp server 进行测试
+3. 可通过 client 目录下的 client.py 对每个 mcp 工具进行测试,具体的 URL、工具名称和入参可自行调整
+
+
+## 二、新增 mcp 规则
+1. **创建服务源码目录**
+ 在 `mcp_center/servers` 目录下新建文件夹,示例(以 top mcp 为例):
+ ```
+ servers/top/
+ ├── README.en.md 英文版本的 mcp 服务详情描述
+ ├── README.md 中文版本的 mcp 服务详情描述
+ ├── requirements.txt 仅包含私有安装依赖(避免与公共依赖冲突)
+ └── src 源码目录(含 server 主入口)
+ └── server.py
+ ```
+
+2. **配置文件设置**
+ 在 `mcp_center/config/private` 目录下新建配置文件,示例(以 top mcp 为例):
+ ```
+ config/private/top
+ ├── config_loader.py 配置加载器(含公共配置和私有自定义配置)
+ └── config.toml 私有自定义配置
+ ```
+
+3. **文档更新**
+ 每新增一个 mcp,需在主目录的 README 中现有 mcp 板块同步新增该 mcp 的基本信息(确保端口不冲突,端口从 12100 开始)
+ 每新增一个 mcp,需要在主目录中的 service 中增加.service文件用于将mcp制作成服务
+ 每新增一个 mcp,需要在主目录中的 mcp_config 中新建对应名称的目录并在下面创建一个config.json(用于将mcp注册到框架)
+ 每新增一个 mcp,需要在主目录中的 run.sh 中增加一条命令用于唤起mcp服务
+4. **通用参数要求**
+ 每个 mcp 的工具都需要一个 host 作为入参,用于与远端服务器通信。
+
+5. **远程命令执行**
+ 可通过 `paramiko` 实现远程命令执行。
+
+
+## 三、现有的 MCP 服务
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/remote_info |
+| 目录 | mcp_center/servers/remote_info |
+| 占用端口 | 12100 |
+| 简介 | 获取端点信息 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/shell_generator |
+| 目录 | mcp_center/servers/shell_generator |
+| 占用端口 | 12101 |
+| 简介 | 生成&执行shell命令 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/top |
+| 目录 | mcp_center/servers/top |
+| 占用端口 | 12110 |
+| 简介 | 获取系统负载信息 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/kill |
+| 目录 | mcp_center/servers/kill |
+| 占用端口 | 12111 |
+| 简介 | 控制进程&查看进程信号量含义 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/nohup |
+| 目录 | mcp_center/servers/nohup |
+| 占用端口 | 12112 |
+| 简介 | 后台执行进程 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/strace |
+| 目录 | mcp_center/servers/strace |
+| 占用端口 | 12113 |
+| 简介 | 跟踪进程信息,可以用于异常情况分析 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/nvidia |
+| 目录 | mcp_center/servers/nvidia |
+| 占用端口 | 12114 |
+| 简介 | GPU负载信息查询 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/npu |
+| 目录 | mcp_center/servers/npu |
+| 占用端口 | 12115 |
+| 简介 | npu的查询和控制 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/iftop |
+| 目录 | mcp_center/servers/iftop |
+| 占用端口 | 12116 |
+| 简介 | 网络流量监控 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/nload |
+| 目录 | mcp_center/servers/nload |
+| 占用端口 | 12117 |
+| 简介 | Nload带宽监控 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/netstat |
+| 目录 | mcp_center/servers/netstat |
+| 占用端口 | 12118 |
+| 简介 | netstat网络连接监控 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/lsof |
+| 目录 | mcp_center/servers/lsof |
+| 占用端口 | 12119 |
+| 简介 | 快速排查文件占用冲突、网络连接异常及进程资源占用问题 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/ifconfig |
+| 目录 | mcp_center/servers/ifconfig |
+| 占用端口 | 12120 |
+| 简介 | ifconfig 网络接口信息监控 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/ethtool |
+| 目录 | mcp_center/servers/ethtool |
+| 占用端口 | 12121 |
+| 简介 | ethtool网卡信息查询,特性情况,网卡设置 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/tshark |
+| 目录 | mcp_center/servers/tshark |
+| 占用端口 | 12122 |
+| 简介 | 捕获、显示和分析网络流量 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/file_content_tool |
+| 目录 | mcp_center/servers/file_content_tool |
+| 占用端口 | 12125 |
+| 简介 | 文件内容增删改查 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/firewalld |
+| 目录 | mcp_center/servers/firewalld |
+| 占用端口 | 12130 |
+| 简介 | Firewalld网络防火墙管理工具 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/iptable |
+| 目录 | mcp_center/servers/iptable |
+| 占用端口 | 12131 |
+| 简介 | iptables防火墙管理工具 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/docker |
+| 目录 | mcp_center/servers/docker |
+| 占用端口 | 12133 |
+| 简介 | docker工具 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/qemu |
+| 目录 | mcp_center/servers/qemu |
+| 占用端口 | 12134 |
+| 简介 | Qemu虚拟机管理工具 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/nmap |
+| 目录 | mcp_center/servers/nmap |
+| 占用端口 | 12135 |
+| 简介 | Nmap扫描IP |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/file_transfer |
+| 目录 | mcp_center/servers/file_transfer |
+| 占用端口 | 12136 |
+| 简介 | 文件传输/下载 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/systrace-mcpserver |
+| 目录 | mcp_center/servers/systrace/systrace_mcp |
+| 占用端口 | 12145 |
+| 简介 | 开启MCP Server服务 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/systrace-openapi |
+| 目录 | mcp_center/servers/systrace/systrace_mcp |
+| 占用端口 | 12146 |
+| 简介 | 开启OpenAPI Server服务 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/systrace-mcpserver |
+| 目录 | mcp_center/servers/euler-copilot-tune |
+| 占用端口 | 12147 |
+| 简介 | 调优MCP服务 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/lscpu |
+| 目录 | mcp_center/servers/lscpu |
+| 占用端口 | 12202 |
+| 简介 | cpu架构等静态信息收集 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/numa_topo |
+| 目录 | mcp_center/servers/numa_topo |
+| 占用端口 | 12203 |
+| 简介 | 查询 NUMA 硬件拓扑与系统配置 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/numa_bind_proc |
+| 目录 | mcp_center/servers/numa_bind_proc |
+| 占用端口 | 12204 |
+| 简介 | 启动时绑定进程到指定 NUMA 节点 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/numa_rebind_proc |
+| 目录 | mcp_center/servers/numa_rebind_proc |
+| 占用端口 | 12205 |
+| 简介 | 修改已启动进程的 NUMA 绑定 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/numa_bind_docker |
+| 目录 | mcp_center/servers/numa_bind_docker |
+| 占用端口 | 12206 |
+| 简介 | 为 Docker 容器配置 NUMA 绑定 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/numa_perf_compare |
+| 目录 | mcp_center/servers/numa_perf_compare |
+| 占用端口 | 12208 |
+| 简介 | 用 NUMA 绑定控制测试变量 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/numa_diagnose |
+| 目录 | mcp_center/servers/numa_diagnose |
+| 占用端口 | 12209 |
+| 简介 | 用 NUMA 绑定定位硬件问题 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/numastat |
+| 目录 | mcp_center/servers/numastat |
+| 占用端口 | 12210 |
+| 简介 | 查看系统整体 NUMA 内存访问状态 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/numa_cross_node |
+| 目录 | mcp_center/servers/numa_cross_node |
+| 占用端口 | 12211 |
+| 简介 | 定位跨节点内存访问过高的进程 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/numa_container |
+| 目录 | mcp_center/servers/numa_container |
+| 占用端口 | 12214 |
+| 简介 | 监控 Docker 容器的 NUMA 内存访问 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/hotspot_trace |
+| 目录 | mcp_center/servers/hotspot_trace |
+| 占用端口 | 12216 |
+| 简介 | 快速定位系统 / 进程的 CPU 性能瓶颈 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/cache_miss_audit |
+| 目录 | mcp_center/servers/cache_miss_audit |
+| 占用端口 | 12217 |
+| 简介 | 定位 CPU 缓存失效导致的性能损耗 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/func_timing_trace |
+| 目录 | mcp_center/servers/func_timing_trace |
+| 占用端口 | 12218 |
+| 简介 | 精准测量函数执行时间(含调用栈) |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/strace_syscall |
+| 目录 | mcp_center/servers/strace_syscall |
+| 占用端口 | 12219 |
+| 简介 | 排查不合理的系统调用(高频 / 耗时) |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/perf_interrupt |
+| 目录 | mcp_center/servers/perf_interrupt |
+| 占用端口 | 12220 |
+| 简介 | 定位高频中断导致的 CPU 占用 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/flame_graph |
+| 目录 | mcp_center/servers/flame_graph |
+| 占用端口 | 12222 |
+| 简介 | 火焰图生成:可视化展示性能瓶颈 |
+
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/free |
+| 目录 | mcp_center/servers/free |
+| 占用端口 | 13100 |
+| 简介 | 获取系统内存整体状态 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/vmstat |
+| 目录 | mcp_center/servers/vmstat |
+| 占用端口 | 13101 |
+| 简介 | 系统资源交互瓶颈信息采集 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/sar |
+| 目录 | mcp_center/servers/sar |
+| 占用端口 | 13102 |
+| 简介 | 系统资源监控与故障诊断 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/sync |
+| 目录 | mcp_center/servers/sync |
+| 占用端口 | 13103 |
+| 简介 | 内存缓冲区数据写入磁盘 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/swapon |
+| 目录 | mcp_center/servers/swapon |
+| 占用端口 | 13104 |
+| 简介 | 查看swap设备状态 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/swapoff |
+| 目录 | mcp_center/servers/swapoff |
+| 占用端口 | 13105 |
+| 简介 | swap设备停用 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/fallocate |
+| 目录 | mcp_center/servers/fallocate |
+| 占用端口 | 13106 |
+| 简介 | 临时创建并启用swap文件 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/find |
+| 目录 | mcp_center/servers/find |
+| 占用端口 | 13107 |
+| 简介 | 文件查找 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/touch |
+| 目录 | mcp_center/servers/touch |
+| 占用端口 | 13108 |
+| 简介 | 文件创建与时间校准 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/mkdir |
+| 目录 | mcp_center/servers/mkdir |
+| 占用端口 | 13109 |
+| 简介 | 文件夹创建 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/rm |
+| 目录 | mcp_center/servers/rm |
+| 占用端口 | 13110 |
+| 简介 | 文件删除 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/mv |
+| 目录 | mcp_center/servers/mv |
+| 占用端口 | 13111 |
+| 简介 | 文件移动或重命名 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | servers/ls |
+| 目录 | mcp_center/servers/ls |
+| 占用端口 | 13112 |
+| 简介 | 查看目录内容 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | head |
+| 目录 | mcp_center/servers/head |
+| 占用端口 | 13113 |
+| 简介 | 文件开头内容查看工具 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | tail |
+| 目录 | mcp_center/servers/tail |
+| 占用端口 | 13114 |
+| 简介 | 文件末尾内容查看工具 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | cat |
+| 目录 | mcp_center/servers/cat |
+| 占用端口 | 13115 |
+| 简介 | 文件内容查看工具 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | chown |
+| 目录 | mcp_center/servers/chown |
+| 占用端口 | 13116 |
+| 简介 | 文件所有者修改工具 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | chmod |
+| 目录 | mcp_center/servers/chmod |
+| 占用端口 | 13117 |
+| 简介 | 文件权限修改工具 |
+
+| 类别 | 详情 |
+|--------|------------------------|
+| 名称 | tar |
+| 目录 | mcp_center/servers/tar |
+| 占用端口 | 13118 |
+| 简介 | 文件压缩解压工具 |
+
+| 类别 | 详情 |
+|--------|------------------------|
+| 名称 | zip |
+| 目录 | mcp_center/servers/zip |
+| 占用端口 | 13119 |
+| 简介 | 文件压缩解压工具 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | grep |
+| 目录 | mcp_center/servers/grep |
+| 占用端口 | 13120 |
+| 简介 | 文件内容搜索工具 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | sed |
+| 目录 | mcp_center/servers/sed |
+| 占用端口 | 13121 |
+| 简介 | 文本处理工具 |
+
+| 类别 | 详情 |
+|--------|--------------------------|
+| 名称 | echo |
+| 目录 | mcp_center/servers/echo |
+| 占用端口 | 13125 |
+| 简介 | 文本写入工具 |
\ No newline at end of file
diff --git a/mcp_center/mcp_config/change.py b/mcp_center/mcp_config/change.py
new file mode 100644
index 00000000..cec514e9
--- /dev/null
+++ b/mcp_center/mcp_config/change.py
@@ -0,0 +1,67 @@
+import json
+import toml
+
+def json_to_toml(json_data, toml_file_path: str, top_level_key: str = "data") -> bool:
+    """
+    Convert JSON data to TOML format and write it to a file.
+
+    Args:
+        json_data: JSON data — a dict, a list, or a JSON string.
+        toml_file_path: Path of the TOML file to write.
+        top_level_key: Key used to wrap the data when the input is a list
+            (TOML does not allow a top-level array).
+
+    Returns:
+        True on success; False if parsing or writing failed.
+    """
+    try:
+        # If the input is a JSON string, parse it into a Python object first
+        if isinstance(json_data, str):
+            data = json.loads(json_data)
+        else:
+            data = json_data
+
+        # TOML does not support a top-level list, so wrap it in a dict
+        if isinstance(data, list):
+            data = {top_level_key: data}
+
+        # Serialize the data as TOML and write it to the target file
+        with open(toml_file_path, 'w', encoding='utf-8') as f:
+            toml.dump(data, f)
+
+        print(f"成功将JSON数据转换为TOML并写入文件: {toml_file_path}")
+        return True
+
+    except json.JSONDecodeError as e:
+        print(f"JSON解析错误: {e}")
+    except Exception as e:
+        print(f"转换过程中发生错误: {e}")
+    # Reached only when one of the handlers above fired
+    return False
+
+if __name__ == "__main__":
+    # Sample JSON data (list form)
+    sample_json = [
+        {
+            "appType":"agent",
+            "name":"hce运维助手",
+            "description":"hce运维助手,用于诊断hce环境和执行shell命令",
+            "mcpPath":[
+                "remote_info_mcp",
+                "shell_generator_mcp"
+            ],
+            "published":True
+        }
+    ]
+
+    # Convert and write the TOML file.
+    # For list data, supply a top-level key (e.g. "applications") so the
+    # output satisfies TOML's no-top-level-array requirement.
+    json_to_toml(sample_json, "mcp_to_app_config.toml", "applications")
+
+    # Test with dict-typed JSON data
+    dict_json = {
+        "name": "测试",
+        "version": "1.0.0",
+        "features": ["简单", "易用"]
+    }
+    json_to_toml(dict_json, "from_dict.toml")
+
+    # Test with a JSON string
+    json_str = '{"name": "字符串测试", "version": "2.0.0"}'
+    json_to_toml(json_str, "from_string.toml")
+
+
\ No newline at end of file
diff --git a/mcp_center/mcp_config/mcp_server_mcp/config.json b/mcp_center/mcp_config/mcp_server_mcp/config.json
new file mode 100644
index 00000000..0e884824
--- /dev/null
+++ b/mcp_center/mcp_config/mcp_server_mcp/config.json
@@ -0,0 +1,9 @@
+{
+ "name": "oe-智能运维工具",
+ "overview": "文件管理,文件操作,软件包管理,系统信息查询,进程管理,网络修复,ssh修复,cmd执行",
+ "description": "文件管理,文件操作,软件包管理,系统信息查询,进程管理,网络修复,ssh修复,cmd执行",
+ "mcpType": "sse",
+ "config": {
+ "url": "http://127.0.0.1:12555/sse"
+ }
+}
\ No newline at end of file
diff --git a/mcp_center/mcp_config/mcp_to_app_config.toml b/mcp_center/mcp_config/mcp_to_app_config.toml
new file mode 100644
index 00000000..ad0d5df7
--- /dev/null
+++ b/mcp_center/mcp_config/mcp_to_app_config.toml
@@ -0,0 +1,11 @@
+[[applications]]
+appType = "agent"
+name = "OE-智能运维助手"
+description = "提供通用文件管理,软件包管理,系统信息查询,进程管理,知识库系统,运维场景修复,cmd执行"
+mcpPath = [
+ "mcp_server_mcp",
+ "rag_mcp"
+]
+published = true
+
+
diff --git a/mcp_center/mcp_config/rag_mcp/config.json b/mcp_center/mcp_config/rag_mcp/config.json
new file mode 100644
index 00000000..48941de5
--- /dev/null
+++ b/mcp_center/mcp_config/rag_mcp/config.json
@@ -0,0 +1,9 @@
+{
+ "name": "轻量化知识库",
+ "overview": "轻量化知识库",
+ "description": "基于 SQLite 的检索增强生成(RAG)知识库,提供知识库全生命周期管理。支持 TXT、DOCX、DOC、PDF 格式,采用 FTS5 全文检索与 sqlite-vec 向量检索的混合搜索策略,结合关键词与语义检索,提升检索准确性。支持异步批量向量化处理、多知识库管理、文档导入导出,并提供命令行工具与 MCP 服务接口,适配中英文环境,适用于轻量级知识库构建与智能检索场景。",
+ "mcpType": "sse",
+ "config": {
+ "url": "http://127.0.0.1:12311/sse"
+ }
+}
\ No newline at end of file
diff --git a/mcp_center/requiremenets.txt b/mcp_center/requiremenets.txt
new file mode 100644
index 00000000..f7e4ad52
--- /dev/null
+++ b/mcp_center/requiremenets.txt
@@ -0,0 +1,5 @@
+paramiko==4.0.0
+psutil==7.0.0
+toml==0.10.2
+mcp==1.9.4
+scp==0.15.0
\ No newline at end of file
diff --git a/mcp_center/run.sh b/mcp_center/run.sh
new file mode 100755
index 00000000..53f9def0
--- /dev/null
+++ b/mcp_center/run.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+SERVICE_DIR="/usr/lib/euler-copilot-framework/mcp_center/service"
+SYSTEMD_TARGET_DIR="/etc/systemd/system"
+
+# 添加可执行权限并运行 oe_cli_mcp_server 脚本
+chmod +x /usr/lib/euler-copilot-framework/mcp_center/servers/oe_cli_mcp_server/run.sh
+if ! /usr/lib/euler-copilot-framework/mcp_center/servers/oe_cli_mcp_server/run.sh; then
+ echo "错误: oe_cli_mcp_server/run.sh 执行失败,退出码: $?" >&2
+fi
+
+# 添加可执行权限并运行 rag 脚本
+chmod +x /usr/lib/euler-copilot-framework/mcp_center/servers/rag/run.sh
+if ! /usr/lib/euler-copilot-framework/mcp_center/servers/rag/run.sh; then
+ echo "错误: rag/run.sh 执行失败,退出码: $?" >&2
+fi
+
+systemctl daemon-reload
+
+for service_file in "$SERVICE_DIR"/*.service; do
+ # 只保留「文件存在」的核心判断,其他验证全删
+ if [ -f "$service_file" ]; then
+ service_name=$(basename "$service_file" .service)
+ dest_service="$SYSTEMD_TARGET_DIR/$service_name.service"
+ echo "正在载入service: $dest_service"
+ # 直接复制(-f 强制覆盖已存在的文件,-a 保留权限)
+ cp -af "$service_file" "$dest_service"
+
+ # 原有的启用和启动命令
+ systemctl enable "$service_name"
+ systemctl start "$service_name"
+ fi
+done
+
diff --git a/mcp_center/servers/oe_cli_mcp_server/.gitignore b/mcp_center/servers/oe_cli_mcp_server/.gitignore
new file mode 100644
index 00000000..e2b10898
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/.gitignore
@@ -0,0 +1,17 @@
+# 虚拟环境(本地开发环境,无需提交)
+venv/
+env/
+ENV/
+.venv/
+virtualenv/
+# 匹配自定义虚拟环境名称(如 target_venv)
+*_venv/
+
+# 编译产物
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+
+test_tool
+data/tool_state.json
diff --git a/mcp_center/servers/oe_cli_mcp_server/README.en.md b/mcp_center/servers/oe_cli_mcp_server/README.en.md
new file mode 100644
index 00000000..46d9216f
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/README.en.md
@@ -0,0 +1,36 @@
+# oe-cli-mcp-server
+
+#### Description
+{**When you're done, you can delete the content in this README and update the file with details for others getting started with your repository**}
+
+#### Software Architecture
+Software architecture description
+
+#### Installation
+
+1. xxxx
+2. xxxx
+3. xxxx
+
+#### Instructions
+
+1. xxxx
+2. xxxx
+3. xxxx
+
+#### Contribution
+
+1. Fork the repository
+2. Create Feat_xxx branch
+3. Commit your code
+4. Create Pull Request
+
+
+#### Gitee Feature
+
+1. You can use Readme\_XXX.md to support different languages, such as Readme\_en.md, Readme\_zh.md
+2. Gitee blog [blog.gitee.com](https://blog.gitee.com)
+3. Explore open source project [https://gitee.com/explore](https://gitee.com/explore)
+4. The most valuable open source project [GVP](https://gitee.com/gvp)
+5. The manual of Gitee [https://gitee.com/help](https://gitee.com/help)
+6. The most popular members [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)
diff --git a/mcp_center/servers/oe_cli_mcp_server/README.md b/mcp_center/servers/oe_cli_mcp_server/README.md
new file mode 100644
index 00000000..4dc845de
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/README.md
@@ -0,0 +1,39 @@
+# oe-cli-mcp-server
+
+#### 介绍
+{**以下是 Gitee 平台说明,您可以替换此简介**
+Gitee 是 OSCHINA 推出的基于 Git 的代码托管平台(同时支持 SVN)。专为开发者提供稳定、高效、安全的云端软件开发协作平台
+无论是个人、团队、或是企业,都能够用 Gitee 实现代码托管、项目管理、协作开发。企业项目请看 [https://gitee.com/enterprises](https://gitee.com/enterprises)}
+
+#### 软件架构
+软件架构说明
+
+
+#### 安装教程
+
+1. xxxx
+2. xxxx
+3. xxxx
+
+#### 使用说明
+
+1. export PYTHONPATH=/usr/lib/euler-copilot-framework/mcp_center/servers/oe_cli_mcp_server/:$PYTHONPATH
+2. xxxx
+3. xxxx
+
+#### 参与贡献
+
+1. Fork 本仓库
+2. 新建 Feat_xxx 分支
+3. 提交代码
+4. 新建 Pull Request
+
+
+#### 特技
+
+1. 使用 Readme\_XXX.md 来支持不同的语言,例如 Readme\_en.md, Readme\_zh.md
+2. Gitee 官方博客 [blog.gitee.com](https://blog.gitee.com)
+3. 你可以 [https://gitee.com/explore](https://gitee.com/explore) 这个地址来了解 Gitee 上的优秀开源项目
+4. [GVP](https://gitee.com/gvp) 全称是 Gitee 最有价值开源项目,是综合评定出的优秀开源项目
+5. Gitee 官方提供的使用手册 [https://gitee.com/help](https://gitee.com/help)
+6. Gitee 封面人物是一档用来展示 Gitee 会员风采的栏目 [https://gitee.com/gitee-stars/](https://gitee.com/gitee-stars/)
diff --git a/mcp_center/servers/oe_cli_mcp_server/client/client.py b/mcp_center/servers/oe_cli_mcp_server/client/client.py
new file mode 100644
index 00000000..6f6c563e
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/client/client.py
@@ -0,0 +1,384 @@
+# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
+"""MCP Client"""
+
+import asyncio
+import logging
+import time
+from contextlib import AsyncExitStack
+from typing import TYPE_CHECKING, Union
+from pydantic import BaseModel, Field
+from enum import Enum
+from mcp import ClientSession
+from mcp.client.sse import sse_client
+from mcp.client.stdio import stdio_client
+
+
+logger = logging.getLogger(__name__)
+
+
+class MCPStatus(str, Enum):
+    """Lifecycle states of an MCP client."""
+    UNINITIALIZED = "UNINITIALIZED"  # set in __init__, before init() runs
+    RUNNING = "RUNNING"  # session established and ready for calls
+    STOPPED = "STOPPED"  # shut down, or session setup aborted
+    ERROR = "ERROR"  # init() observed the error signal
+
+
+class MCPClient:
+ """MCP客户端基类"""
+
+ def __init__(self, url: str, headers: dict[str, str]) -> None:
+ """初始化MCP Client"""
+ self.url = url
+ self.headers = headers
+ self.client: Union[ClientSession, None] = None
+ self.status = MCPStatus.UNINITIALIZED
+
+ async def _main_loop(
+ self
+ ) -> None:
+ """
+ 创建MCP Client
+
+ 抽象函数;作用为在初始化的时候使用MCP SDK创建Client
+ 由于目前MCP的实现中Client和Session是1:1的关系,所以直接创建了 :class:`~mcp.ClientSession`
+ """
+ # 创建Client
+ try:
+ client = sse_client(
+ url=self.url,
+ headers=self.headers
+ )
+ except Exception as e:
+ self.error_sign.set()
+ err = f"创建Client失败,错误信息:{e}"
+ print(err)
+ raise Exception(err)
+ # 创建Client、Session
+ try:
+ exit_stack = AsyncExitStack()
+ read, write = await exit_stack.enter_async_context(client)
+ self.client = ClientSession(read, write)
+ session = await exit_stack.enter_async_context(self.client)
+ # 初始化Client
+ await session.initialize()
+ except Exception:
+ self.error_sign.set()
+ self.status = MCPStatus.STOPPED
+ err = f"初始化Client失败,错误信息:{e}"
+ print(err)
+ raise
+
+ self.ready_sign.set()
+ self.status = MCPStatus.RUNNING
+ # 等待关闭信号
+ await self.stop_sign.wait()
+
+ # 关闭Client
+ try:
+ await exit_stack.aclose() # type: ignore[attr-defined]
+ self.status = MCPStatus.STOPPED
+ except Exception:
+ print(f"关闭Client失败,错误信息:{e}")
+
+ async def init(self) -> None:
+ """
+ 初始化 MCP Client类
+ :return: None
+ """
+ # 初始化变量
+ self.ready_sign = asyncio.Event()
+ self.error_sign = asyncio.Event()
+ self.stop_sign = asyncio.Event()
+
+ # 创建协程
+ self.task = asyncio.create_task(self._main_loop())
+
+ # 等待初始化完成
+ done, pending = await asyncio.wait(
+ [asyncio.create_task(self.ready_sign.wait()),
+ asyncio.create_task(self.error_sign.wait())],
+ return_when=asyncio.FIRST_COMPLETED
+ )
+ if self.error_sign.is_set():
+ self.status = MCPStatus.ERROR
+ print("MCP Client 初始化失败")
+ raise Exception("MCP Client 初始化失败")
+
+ async def call_tool(self, tool_name: str, params: dict) -> "CallToolResult":
+ """调用MCP Server的工具"""
+ return await self.client.call_tool(tool_name, params)
+
+ async def stop(self) -> None:
+ """停止MCP Client"""
+ self.stop_sign.set()
+ try:
+ await self.task
+ except Exception as e:
+ err = f"关闭MCP Client失败,错误信息:{e}"
+ print(err)
+
+
+async def main() -> None:
+ """测试MCP Client"""
+ url = "http://0.0.0.0:12555/sse"
+ headers = {}
+ client = MCPClient(url, headers)
+ await client.init()
+
+ # # 初始化时多余的调用移除,保留下方有序测试用例
+ # # ==================================
+ # # 1. sys_info_tool 测试用例(3个,修复无效枚举值)
+ # # ==================================
+ # print("\n" + "="*60)
+ # print("1. sys_info_tool - 采集CPU+内存+磁盘+系统信息")
+ # print("="*60)
+ # result = await client.call_tool("sys_info_tool", {"info_types": ["cpu", "mem", "disk", "os"]})
+ # print(result)
+ #
+ # print("\n" + "="*60)
+ # print("2. sys_info_tool - 单独采集网络信息(IP/网卡)")
+ # print("="*60)
+ # result = await client.call_tool("sys_info_tool", {"info_types": ["net"]})
+ # print(result)
+ #
+ # print("\n" + "="*60)
+ # print("3. sys_info_tool - 采集安全信息(SELinux+防火墙)")
+ # print("="*60)
+ # result = await client.call_tool("sys_info_tool", {"info_types": ["selinux", "firewall"]})
+ # print(result)
+ #
+ # # 移除无效的 "kernel" 和 "all" 类型测试(工具不支持)
+ #
+ # # ==================================
+ # # 2. file_tool 测试用例(4个,修复枚举值、参数名)
+ # # ==================================
+ # print("\n" + "="*60)
+ # print("4. file_tool - 列出 /etc 目录下的 .conf 配置文件(过滤关键词)")
+ # print("="*60)
+ # # 用 ls + 后续过滤实现(工具无find枚举,参数名改为file_path)
+ # result = await client.call_tool("file_tool", {
+ # "action": "ls",
+ # "file_path": "/etc",
+ # "detail": False,
+ # "encoding": "utf-8"
+ # })
+ # print(result)
+ #
+ # print("\n" + "="*60)
+ # print("5. file_tool - 读取 /etc/os-release 文件内容(系统版本)")
+ # print("="*60)
+ # # action改为cat,参数名改为file_path
+ # result = await client.call_tool("file_tool", {
+ # "action": "cat",
+ # "file_path": "/etc/os-release",
+ # "encoding": "utf-8"
+ # })
+ # print(result)
+ #
+ # print("\n" + "="*60)
+ # print("6. file_tool - 新建临时文件并写入内容")
+ # print("="*60)
+ # # 工具无find/mtime枚举,替换为add+edit实用场景
+ # result = await client.call_tool("file_tool", {
+ # "action": "add",
+ # "file_path": "/tmp/file_tool_test.txt",
+ # "overwrite": True
+ # })
+ # print("新建文件结果:", result)
+ # result = await client.call_tool("file_tool", {
+ # "action": "edit",
+ # "file_path": "/tmp/file_tool_test.txt",
+ # "content": "file_tool测试内容\n系统版本:Ubuntu 22.04",
+ # "encoding": "utf-8"
+ # })
+ # print("写入内容结果:", result)
+ #
+ # print("\n" + "="*60)
+ # print("7. file_tool - 修改 /tmp/file_tool_test.txt 权限为755")
+ # print("="*60)
+ # # action改为chmod,参数名改为file_path
+ # result = await client.call_tool("file_tool", {
+ # "action": "chmod",
+ # "file_path": "/tmp/file_tool_test.txt",
+ # "mode": "755"
+ # })
+ # print(result)
+ #
+ # # ==================================
+ # # 3. pkg_tool 测试用例(4个,修复无效枚举、参数)
+ # # ==================================
+ # print("\n" + "="*60)
+ # print("8. pkg_tool - 列出已安装的所有 nginx 相关包")
+ # print("="*60)
+ # result = await client.call_tool("pkg_tool", {
+ # "action": "list",
+ # "filter_key": "nginx"
+ # })
+ # print(result)
+ #
+ # print("\n" + "="*60)
+ # print("9. pkg_tool - 查询 openssh-server 包详情(版本/依赖)")
+ # print("="*60)
+ # result = await client.call_tool("pkg_tool", {
+ # "action": "info",
+ # "pkg_name": "openssh-server"
+ # })
+ # print(result)
+ #
+ # print("\n" + "="*60)
+ # print("10. pkg_tool - 安装 nginx 包 + 验证安装结果")
+ # print("="*60)
+ #
+ # # 步骤1:安装 nginx 包(双系统兼容,自动适配 apt/dnf)
+ # print("正在安装 nginx 包...")
+ # install_result = await client.call_tool("pkg_tool", {
+ # "action": "install", # 安装动作(双系统兼容)
+ # "pkg_name": "nginx", # 要安装的包名
+ # "yes": True # 自动确认安装(避免交互)
+ # })
+ # print("安装执行结果:")
+ # print(install_result)
+ #
+ # # 步骤2:验证安装结果(用 list 方法过滤 nginx 相关包)
+ # print("\n" + "-"*40)
+ # print("验证:查询已安装的 nginx 相关包")
+ # print("-"*40)
+ # verify_result = await client.call_tool("pkg_tool", {
+ # "action": "list", # 列出已安装包
+ # "filter_key": "nginx" # 过滤关键词(只显示 nginx 相关)
+ # })
+ # print("验证结果:")
+ # print(verify_result)
+ #
+ # print("\n" + "="*60)
+ # print("11. pkg_tool - 清理 yum/dnf 包缓存(all类型)")
+ # print("="*60)
+ # result = await client.call_tool("pkg_tool", {
+ # "action": "clean",
+ # "cache_type": "all",
+ # "yes": True
+ # })
+ # print(result)
+ #
+ # # ==================================
+ # # 4. proc_tool 测试用例(4个,修复无效枚举、参数)
+ # # ==================================
+ # print("\n" + "="*60)
+ # print("12. proc_tool - 查找所有 systemd 相关进程")
+ # print("="*60)
+ # result = await client.call_tool("proc_tool", {
+ # "proc_actions": ["find"],
+ # "proc_name": "systemd"
+ # })
+ # print(result)
+ #
+ # print("\n" + "="*60)
+ # print("13. proc_tool - 查询 PID=1 进程(systemd)资源占用")
+ # print("="*60)
+ # result = await client.call_tool("proc_tool", {
+ # "proc_actions": ["stat"],
+ # "pid": 1
+ # })
+ # print(result)
+ #
+ # print("\n" + "="*60)
+ # print("14. proc_tool - 列出所有进程(后续可筛选CPU占用前5)")
+ # print("="*60)
+ # # 工具无top枚举,用list获取所有进程(业务层可筛选)
+ # result = await client.call_tool("proc_tool", {
+ # "proc_actions": ["list"]
+ # })
+ # print(result)
+ #
+ # print("\n" + "="*60)
+ # print("15. proc_tool - 重启 sshd 服务(systemd服务)")
+ # print("="*60)
+ # # 工具无tree枚举,替换为restart实用场景
+ # result = await client.call_tool("proc_tool", {
+ # "proc_actions": ["restart"],
+ # "service_name": "sshd" # openEuler中sshd服务名为ssh
+ # })
+ # print(result)
+ #
+ # # 清理临时文件
+ # print("\n" + "="*60)
+ # print("16. file_tool - 删除临时测试文件")
+ # print("="*60)
+ # result = await client.call_tool("file_tool", {
+ # "action": "delete",
+ # "file_path": "/tmp/file_tool_test.txt"
+ # })
+ # print(result)
+ # ==================================
+ # 5. cmd_executor_tool 测试用例(4个,修复无效枚举、参数)
+ # ==================================
+ # 场景1:执行普通ls命令(基础功能验证)
+ print("\n" + "="*60)
+ print("场景1:cmd_executor_tool - 执行本地ls命令(查看/tmp目录)")
+ print("="*60)
+ result = await client.call_tool("cmd_executor_tool", {
+ "command": "ls /tmp"
+ })
+ print(f"执行结果:{result}")
+
+ # 场景2:重点验证超时终止能力(sleep 10秒,设置超时5秒)
+ print("\n" + "="*60)
+ print("场景2:cmd_executor_tool - 验证超时终止能力(sleep 10秒,超时5秒)")
+ print("="*60)
+ start_time = time.time() # 记录命令开始执行时间
+ print(f"命令开始执行时间戳:{start_time:.2f}(当前时间:{time.ctime(start_time)})")
+ # 执行sleep 10,超时设置为5秒
+ result = await client.call_tool("cmd_executor_tool", {
+ "command": "sleep 10", # 命令需要执行10秒
+ "timeout": 5 # 超时时间仅5秒,会触发超时终止
+ })
+ end_time = time.time() # 记录命令执行结束时间
+ print(f"命令执行结束时间戳:{end_time:.2f}(当前时间:{time.ctime(end_time)})")
+ print(f"实际执行时长:{end_time - start_time:.2f}秒(预期超时时间:5秒)")
+ print(f"超时终止结果:{result}")
+
+ # 场景3:验证Shell脚本的超时终止(脚本内sleep 8秒,设置超时4秒)
+ print("\n" + "="*60)
+ print("场景3:cmd_executor_tool - 验证Shell脚本的超时终止(脚本内sleep 8秒,超时4秒)")
+ print("="*60)
+ # 第一步:创建一个包含sleep的测试脚本
+ create_script_result = await client.call_tool("cmd_executor_tool", {
+ "command": "echo 'echo \"脚本开始执行,将sleep 8秒...\"; sleep 8; echo \"脚本执行完成\"' > /tmp/timeout_test.sh && chmod +x /tmp/timeout_test.sh",
+ "timeout": 10
+ })
+ print(f"创建超时测试脚本结果:{create_script_result}")
+ # 第二步:执行脚本,设置超时4秒(远小于脚本内的8秒)
+ start_time = time.time()
+ print(f"脚本开始执行时间戳:{start_time:.2f}(当前时间:{time.ctime(start_time)})")
+ result = await client.call_tool("cmd_executor_tool", {
+ "command": "/tmp/timeout_test.sh",
+ "timeout": 4 # 超时4秒,触发脚本执行超时终止
+ })
+ end_time = time.time()
+ print(f"脚本执行结束时间戳:{end_time:.2f}(当前时间:{time.ctime(end_time)})")
+ print(f"实际执行时长:{end_time - start_time:.2f}秒(预期超时时间:4秒)")
+ print(f"脚本超时终止结果:{result}")
+
+ # 场景4:空命令测试(参数校验验证)
+ print("\n" + "="*60)
+ print("场景4:cmd_executor_tool - 空命令测试(验证参数校验)")
+ print("="*60)
+ result = await client.call_tool("cmd_executor_tool", {
+ "command": ""
+ })
+ print(f"执行结果:{result}")
+
+ # 场景5:清理测试文件
+ print("\n" + "="*60)
+ print("场景5:cmd_executor_tool - 清理测试脚本(/tmp/timeout_test.sh)")
+ print("="*60)
+ result = await client.call_tool("cmd_executor_tool", {
+ "command": "rm -f /tmp/timeout_test.sh",
+ "timeout": 5
+ })
+ print(f"清理结果:{result}")
+ await client.stop()
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/mcp_center/servers/oe_cli_mcp_server/client/client_1.py b/mcp_center/servers/oe_cli_mcp_server/client/client_1.py
new file mode 100644
index 00000000..6a89aa08
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/client/client_1.py
@@ -0,0 +1,65 @@
+# Copyright (c) Huawei Technologies Co., Ltd. 2023-2025. All rights reserved.
+"""MCP Client"""
+
+import asyncio
+import logging
+from contextlib import AsyncExitStack
+from typing import TYPE_CHECKING, Union
+from pydantic import BaseModel, Field
+from enum import Enum
+from mcp import ClientSession
+from mcp.client.sse import sse_client
+from mcp.client.stdio import stdio_client
+
+from mcp_center.servers.oe_cli_mcp_server.client.client import MCPClient
+
+logger = logging.getLogger(__name__)
+
+
+async def main() -> None:
+    """Smoke-test the MCP client: issue `target_count` sys_info_tool calls within `total_time` seconds."""
+    url = "http://0.0.0.0:12555/sse"
+    headers = {}
+    client = MCPClient(url, headers)
+    await client.init()
+    # ===================== core configuration =====================
+    target_count = 100 # total number of calls to issue (any N)
+    total_time = 1.0 # length of the launch window, in seconds
+    # ====================================================
+
+    print(f"\n开始在 {total_time} 秒内执行 {target_count} 次 sys_info_tool 调用...")
+    print("="*60)
+
+    # One tool call (reuses the client's normal call path).
+    async def tool_call_task():
+        return await client.call_tool(
+            "sys_info_tool",
+            {"info_types": ["cpu", "mem", "disk", "os"]}
+        )
+
+    # 1. Launch the tasks at an even interval so all start within the window.
+    tasks = []
+    interval = total_time / target_count # start-to-start gap between tasks
+    for _ in range(target_count):
+        # Start the task (tasks run concurrently) ...
+        task = asyncio.create_task(tool_call_task())
+        tasks.append(task)
+        # ... then pause so the last task starts right at the end of the window.
+        await asyncio.sleep(interval)
+
+    # 2. Wait for every task to finish and collect the results.
+    results = await asyncio.gather(*tasks)
+
+    # Report completion; only a sample is printed to avoid flooding stdout.
+    print(f"\n{target_count} 次调用全部完成!")
+    print("="*60)
+    # Show at most the first three results.
+    for i in range(min(3, target_count)):
+        print(f"第 {i+1} 次结果:{results[i]}")
+    if target_count > 3:
+        print(f"... 省略剩余 {target_count - 3} 次结果 ...")
+
+    await client.stop()
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/mcp_center/servers/oe_cli_mcp_server/client/mult_client_test.py b/mcp_center/servers/oe_cli_mcp_server/client/mult_client_test.py
new file mode 100644
index 00000000..dcf874f9
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/client/mult_client_test.py
@@ -0,0 +1,79 @@
+import asyncio
+import time
+
+from mcp_center.servers.oe_cli_mcp_server.client.client import MCPClient
+
+# 替换为你的 MCPClient 真实导入路径
+
+
+# 压测基础配置
+CLIENT_NUM = 10 # 10个客户端
+PRESSURE_TIME = 600 # 压测10分钟(600秒)
+MCP_URL = "http://0.0.0.0:12555/sse"
+MCP_HEADERS = {}
+TOOL_PARAMS = {"info_types": ["cpu", "mem", "disk", "os"]}
+
+# Initialize one pressure-test client
+async def init_single_client(client_id):
+    """Create and init one MCPClient; return the instance, or None on failure."""
+    try:
+        client = MCPClient(MCP_URL, MCP_HEADERS)
+        await client.init()
+        print(f"客户端{client_id} 初始化成功")
+        return client
+    except Exception as e:
+        print(f"客户端{client_id} 初始化失败:{e}")
+        return None
+
+# Pressure task for a single client (sends requests back-to-back)
+async def single_client_pressure(client_id, client):
+    """Drive one client with continuous requests until PRESSURE_TIME elapses."""
+    if not client:
+        return
+    start_time = time.time()
+    req_count = 0  # number of requests this client has issued
+    print(f"客户端{client_id} 开始压测")
+
+    # Keep issuing requests until the configured duration has passed.
+    while time.time() - start_time < PRESSURE_TIME:
+        try:
+            # Serial tool calls — matches the client's single-connection design.
+            await client.call_tool("sys_info_tool", TOOL_PARAMS)
+            req_count += 1
+        except Exception as e:
+            print(f"客户端{client_id} 请求失败:{e}")
+        # Optional: add a tiny gap (e.g. 0.001s) if the MCP server can't keep up.
+        # await asyncio.sleep(0.001)
+
+    # This client's run is over; report its throughput.
+    print(f"客户端{client_id} 压测结束,总请求数:{req_count}")
+
+# Main flow: initialize CLIENT_NUM clients, then pressure-test them concurrently
+async def main():
+    print(f"开始初始化 {CLIENT_NUM} 个客户端...")
+
+    # 1. Initialize the clients one by one, keeping only the successes.
+    clients = []
+    for i in range(1, CLIENT_NUM + 1):
+        client = await init_single_client(i)
+        if client:
+            clients.append((i, client))
+
+    # Report how many clients are actually available before starting.
+    if len(clients) < CLIENT_NUM:
+        print(f"仅 {len(clients)} 个客户端初始化成功,开始压测...")
+    else:
+        print(f"{CLIENT_NUM} 个客户端全部初始化成功,开始压测(持续{int(PRESSURE_TIME/60)}分钟)...")
+
+    # 2. Launch every client's pressure task concurrently.
+    pressure_tasks = []
+    for client_id, client in clients:
+        task = asyncio.create_task(single_client_pressure(client_id, client))
+        pressure_tasks.append(task)
+
+    # 3. Wait until every pressure task has run its full duration.
+    await asyncio.gather(*pressure_tasks)
+    print("所有客户端压测结束!")
+
+if __name__ == "__main__":
+    asyncio.run(main())
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp-server.service b/mcp_center/servers/oe_cli_mcp_server/mcp-server.service
new file mode 100644
index 00000000..16056ebe
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp-server.service
@@ -0,0 +1,29 @@
+[Unit]
+Description=MCP Tool Registration Service
+After=network.target
+After=multi-user.target
+# 新增:确保文件系统就绪(避免工作目录未挂载)
+RequiresMountsFor=/usr/lib/euler-copilot-framework/mcp_center/
+
+[Service]
+User=root
+Group=root
+# 工作目录:必须是项目根目录(已正确配置,确保 server.py 中相对路径生效)
+WorkingDirectory=/usr/lib/euler-copilot-framework/mcp_center/
+
+Environment="PATH=/usr/lib/euler-copilot-framework/mcp_center/servers/oe_cli_mcp_server/venv/global/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin"
+
+ExecStart=/usr/lib/euler-copilot-framework/mcp_center/servers/oe_cli_mcp_server/venv/global/bin/python /usr/lib/euler-copilot-framework/mcp_center/servers/oe_cli_mcp_server/server.py
+
+# 原有合理配置保留
+Restart=always
+RestartSec=5
+KillMode=control-group
+Environment="LANGUAGE=zh"
+Environment="LOG_LEVEL=INFO"
+StandardOutput=journal+console
+StandardError=journal+console
+
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_server/MCP_TOOLS.json b/mcp_center/servers/oe_cli_mcp_server/mcp_server/MCP_TOOLS.json
new file mode 100644
index 00000000..e69de29b
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli.py b/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli.py
new file mode 100755
index 00000000..dc0a65fc
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli.py
@@ -0,0 +1,50 @@
+#!/usr/lib/euler-copilot-framework/mcp_center/servers/oe_cli_mcp_server/venv/global/bin/python3
+import logging
+import sys
+with open("/etc/systemd/system/mcp-server.service", "r") as f:
+ for line in f:
+ if line.strip().startswith("WorkingDirectory="):
+ PROJECT_ROOT = line.strip().split("=", 1)[1]
+ break
+
+# 加入 sys.path
+sys.path.insert(0, PROJECT_ROOT)
+from servers.oe_cli_mcp_server.mcp_server.cli.parse_args import parse_args
+from servers.oe_cli_mcp_server.mcp_server.cli.handle import (
+ handle_add, handle_remove, handle_tool, handle_init,
+ handle_start, handle_log, handle_llm, handle_config, handle_stop,handle_restart
+)
+
+# 日志极简配置
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+
+def main():  # CLI entry point: dispatch exactly one flag to its handler, exit 0/1
+    args = parse_args()
+    success = False
+
+    # Command dispatch (direct flag → handler mapping).
+    if args.add:
+        success = handle_add(args.add)
+    elif args.remove:
+        success = handle_remove(args.remove)
+    elif args.tool:
+        success = handle_tool()
+    elif args.init:
+        success = handle_init()
+    elif args.start:
+        success = handle_start()
+    elif args.restart:
+        success = handle_restart()
+    elif args.log:
+        success = handle_log()
+    elif args.llm:
+        success = handle_llm(args.model, args.apikey, args.name)
+    elif args.stop:
+        success = handle_stop()
+    elif args.config:
+        success = handle_config(args.config)
+
+    raise SystemExit(0 if success else 1)  # exit status mirrors handler success
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli/__init__.py b/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli/handle.py b/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli/handle.py
new file mode 100644
index 00000000..000299cb
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli/handle.py
@@ -0,0 +1,203 @@
+import logging
+import os
+import subprocess
+import toml
+import requests # 新增:导入 requests 库(用于 HTTP 调用)
+
+
+from config.private.mcp_server.config_loader import McpServerConfig
+from servers.oe_cli_mcp_server.mcp_tools.tool_type import ToolType
+from servers.oe_cli_mcp_server.server import config
+from servers.oe_cli_mcp_server.util.test_llm_valid import is_llm_config_valid
+
+# 新增:FastAPI 服务地址(和 api_server.py 配置一致,端口 12556)
+
+FASTAPI_BASE_URL = f"http://127.0.0.1:{config.fastapi_port}"
+
+# 路径配置(直接硬编码,简化)
+PUBLIC_CONFIG_PATH = "config/public/public_config.toml"
+
+logger = logging.getLogger(__name__)
+
+# 新增:替代 send_socket_request 的 HTTP 调用函数
+def send_http_request(action: str, params: dict = None):
+ """调用 FastAPI 接口(替代原 Socket 调用)"""
+ try:
+ if action == "add":
+ url = f"{FASTAPI_BASE_URL}/tool/add"
+ response = requests.post(url, params=params)
+ elif action == "remove":
+ url = f"{FASTAPI_BASE_URL}/tool/remove"
+ response = requests.post(url, params=params)
+ elif action == "list":
+ url = f"{FASTAPI_BASE_URL}/tool/list"
+ response = requests.get(url)
+ elif action == "init":
+ url = f"{FASTAPI_BASE_URL}/tool/init"
+ response = requests.post(url)
+ else:
+ return {"success": False, "message": f"不支持的操作:{action}"}
+
+ response.raise_for_status() # 抛出 HTTP 错误(如 404、500)
+ return response.json()
+ except requests.exceptions.RequestException as e:
+ return {"success": False, "message": f"接口调用失败:{str(e)}"}
+
+# -------------------------- Tool-package operations --------------------------
+def handle_add(pkg_input):
+    """Handle -add: load a built-in package (by its Chinese alias) or a custom .zip package."""
+    type_map = {"智能运维": ToolType.BASE.value, "智算调优": ToolType.AI.value,
+                "通算调优": ToolType.CAL.value, "镜像运维": ToolType.MIRROR.value,
+                "个性化": ToolType.PERSONAL.value,"知识库": ToolType.RAG.value}
+
+    if pkg_input in type_map:
+        params = {"type": "system", "value": type_map[pkg_input]}
+    elif os.path.isfile(pkg_input) and pkg_input.endswith(".zip"):
+        params = {"type": "custom", "value": os.path.abspath(pkg_input)}
+    else:
+        print(f"❌ 不支持的包类型:{pkg_input}")
+        raise SystemExit(1)
+
+    # Ask the running FastAPI server to load the package.
+    result = send_http_request("add", params)
+    print(f"✅ {result['message']}" if result["success"] else f"❌ {result['message']}")
+    return result["success"]
+
+def handle_remove(pkg_input):
+ """处理 -remove 命令"""
+ type_map = {"智能运维": ToolType.BASE.value, "智算调优": ToolType.AI.value,
+ "通算调优": ToolType.CAL.value, "镜像运维": ToolType.MIRROR.value, "个性化": ToolType.PERSONAL.value}
+
+ params = {"type": "system" if pkg_input in type_map else "custom",
+ "value": type_map.get(pkg_input, pkg_input)}
+ # 替换:send_socket_request → send_http_request
+ result = send_http_request("remove", params)
+ print(f"✅ {result['message']}" if result["success"] else f"❌ {result['message']}")
+ handle_restart()
+
+ return result["success"]
+
+def handle_tool():
+    """Handle -tool: list the loaded tool packages and their functions."""
+    # Query the running server over HTTP.
+    result = send_http_request("list")
+    if not result["success"]:
+        print(f"❌ {result['message']}")
+        return False
+
+    # The FastAPI response nests payload under data.pkg_funcs / data.total_packages.
+    print(f"\n📋 当前已加载工具包(共{result['data']['total_packages']}个):")
+    for pkg, funcs in result["data"]["pkg_funcs"].items():
+        print(f"- {pkg}:{len(funcs)}个工具 → {', '.join(funcs)}")
+    return True
+
+def handle_init():
+    """Handle -init: reset the server to the base package set, then restart."""
+    # Ask the server to re-initialize, then restart so the change takes effect.
+    result = send_http_request("init")
+    print(f"✅ {result['message']}" if result["success"] else f"❌ {result['message']}")
+    handle_restart()
+
+    return result["success"]
+
+# -------------------------- Service operations --------------------------
+def handle_start():
+    """Handle -start: start the `mcp-server` systemd unit."""
+    try:
+        subprocess.run(["sudo", "systemctl", "start", "mcp-server"], check=True)
+        print("✅ 服务启动成功")
+        return True
+    except Exception as e:
+        print(f"❌ 启动失败:{str(e)}")
+        return False
+
+def handle_stop():
+    """Handle -stop: stop the `mcp-server` systemd unit."""
+    try:
+        subprocess.run(["sudo", "systemctl", "stop", "mcp-server"], check=True)
+        print("✅ 服务终止成功")
+        return True
+    except Exception as e:
+        print(f"❌ 终止失败:{str(e)}")
+        return False
+
+def handle_restart():
+    """Handle -restart: restart the `mcp-server` systemd unit."""
+    try:
+        subprocess.run(["sudo", "systemctl", "restart", "mcp-server"], check=True)
+        print("✅ 服务重启成功")
+        return True
+    except Exception as e:
+        print(f"❌ 重启失败:{str(e)}")
+        return False
+
+def handle_log():
+    """Handle -log: follow the service journal; Ctrl-C exits cleanly."""
+    try:
+        subprocess.run(["sudo", "journalctl", "-u", "mcp-server", "-f"], check=True)
+    except KeyboardInterrupt:
+        print("\n📌 日志查看退出")
+    except Exception as e:
+        print(f"❌ 查看失败:{str(e)}")
+    return True  # always "success": log viewing is interactive, not a state change
+
+# -------------------------- Configuration operations --------------------------
+def handle_llm(model, apikey, name):
+    """Handle -llm: validate and persist LLM settings, then restart the service."""
+    if not all([model, apikey, name]):
+        print("❌ 缺少参数:--model、--apikey、--name 必须同时指定")
+        return False
+
+    if not is_llm_config_valid(model, apikey, name):
+        print("❌ 大模型校验失败")
+        return False
+
+    try:
+        with open(PUBLIC_CONFIG_PATH, "r") as f:
+            config = toml.load(f)
+        config["llm_remote"] = model
+        config["llm_api_key"] = apikey
+        config["llm_model"] = name
+        with open(PUBLIC_CONFIG_PATH, "w") as f:
+            toml.dump(config, f)
+
+        subprocess.run(["sudo", "systemctl", "restart", "mcp-server"], check=True)
+        print(f"✅ 大模型配置成功(模型:{name})")
+        return True
+    except Exception as e:
+        print(f"❌ 配置失败:{str(e)}")
+        return False
+
+def handle_config(key_value):
+    """Handle -config: set one public-config key (format: key=value)."""
+    if "=" not in key_value:
+        print("❌ 格式错误:需为 键=值(如 -config port=8002)")
+        return False
+
+    key, value = key_value.split("=", 1)
+    supported = ["language", "max_tokens", "temperature", "port"]
+    if key not in supported:
+        print(f"❌ 不支持的键:{key}(支持:{', '.join(supported)})")
+        return False
+
+    try:
+        with open(PUBLIC_CONFIG_PATH, "r") as f:
+            config = toml.load(f)
+        # Coerce numeric keys (a ValueError falls through to the broad handler below).
+        if key in ["max_tokens", "port"]:
+            value = int(value)
+        elif key == "temperature":
+            value = float(value)
+        config[key] = value
+        with open(PUBLIC_CONFIG_PATH, "w") as f:
+            toml.dump(config, f)
+
+        if key == "port":
+            subprocess.run(["sudo", "systemctl", "restart", "mcp-server"], check=True)
+            print(f"✅ 配置 {key}={value}(已重启服务)")
+        else:
+            print(f"✅ 配置 {key}={value}(下次重启生效)")
+        return True
+    except Exception as e:
+        print(f"❌ 配置失败:{str(e)}")
+        return False
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli/parse_args.py b/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli/parse_args.py
new file mode 100644
index 00000000..d55c243e
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli/parse_args.py
@@ -0,0 +1,25 @@
+import argparse
+
+def parse_args():
+    """Parse CLI arguments (core flags only, mirroring the original command set)."""
+    parser = argparse.ArgumentParser(description="mcp-server 命令行工具")
+
+    # Mutually exclusive group: exactly one primary action per invocation.
+    command_group = parser.add_mutually_exclusive_group(required=True)
+    command_group.add_argument("-add", metavar="包名/zip路径", help="新增工具包(示例:-add 智算调优 或 -add ./custom.zip)")
+    command_group.add_argument("-remove", metavar="包名", help="删除工具包(示例:-remove 智算调优)")
+    command_group.add_argument("-tool", action="store_true", help="查看已加载工具包")
+    command_group.add_argument("-init", action="store_true", help="初始化服务(仅保留基础运维包)")
+    command_group.add_argument("-log", action="store_true", help="查看服务实时日志")
+    command_group.add_argument("-start", action="store_true", help="启动 mcp-server 服务")
+    command_group.add_argument("-stop", action="store_true", help="终止 mcp-server 服务")
+    command_group.add_argument("-restart", action="store_true", help="重启 mcp-server 服务")
+    command_group.add_argument("-llm", action="store_true", help="配置大模型(需配合 --model/--apikey/--name)")
+    command_group.add_argument("-config", metavar="键=值", help="修改公共配置(示例:-config language=en)")
+
+    # Companion options for -llm.
+    parser.add_argument("--model", help="大模型地址(如 http://127.0.0.1:8000)")
+    parser.add_argument("--apikey", help="大模型 API 密钥")
+    parser.add_argument("--name", help="大模型名称(如 qwen、gpt-3.5)")
+
+    return parser.parse_args()
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_server/dependency.py b/mcp_center/servers/oe_cli_mcp_server/mcp_server/dependency.py
new file mode 100644
index 00000000..d76f64af
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_server/dependency.py
@@ -0,0 +1,300 @@
+import os
+import subprocess
+import toml
+import sys
+import logging
+from typing import Any, Optional, Dict
+from pkg_resources import get_distribution, DistributionNotFound
+from servers.oe_cli_mcp_server.util.get_project_root import get_project_root
+
+logger = logging.getLogger(__name__)
+class DepVenvManager:
+    """
+    Dependency & virtual-environment manager.
+    Aggregates: venv lifecycle management + system/Python dependency install + conflict detection.
+    """
+    def __init__(self):
+        """Bind venv paths and the logger (shared attributes defined once here)."""
+
+        self.logger = logger
+        self.logger.setLevel(logging.INFO)
+
+        # 1. Venv layout (fixed convention, tailored for openEuler).
+        self.project_root = get_project_root()
+        if not self.project_root:
+            self.project_root = os.getcwd()
+            self.logger.warning(f"无法获取项目根目录,使用当前工作目录:{self.project_root}")
+
+        self.venv_root = os.path.join(self.project_root, "venv")
+        self.global_venv_path = os.path.join(self.venv_root, "global")
+        self.isolated_venv_root = os.path.join(self.venv_root, "isolated")
+        self._check_venv_integrity(self.global_venv_path, is_global=True)  # NOTE(review): runs before the makedirs below — confirm the ordering is intentional
+        # 2. Ensure the venv root directories exist.
+        os.makedirs(self.venv_root, exist_ok=True)
+        os.makedirs(self.isolated_venv_root, exist_ok=True)
+    def _check_venv_integrity(self, venv_path: str, is_global: bool = False) -> bool:
+        """
+        Verify that a virtual environment is complete (core checks).
+        Returns: True = intact, False = incomplete (the global env is auto-rebuilt).
+        """
+        self.logger.debug(f"检测虚拟环境完整性:{venv_path}")
+
+        # Check 1: does the venv directory exist at all?
+        if not os.path.exists(venv_path):
+            self.logger.warning(f"虚拟环境目录不存在:{venv_path}")
+            if is_global:
+                self.logger.info("全局环境缺失,将自动创建")
+            return False
+
+        # Check 2: is there an executable pip inside it?
+        pip_path = os.path.join(venv_path, "bin", "pip")
+        if not os.path.exists(pip_path) or not os.access(pip_path, os.X_OK):
+            self.logger.error(f"虚拟环境不完整:缺少可执行的 pip → {pip_path}")
+            if is_global:
+                self.logger.info("尝试重新创建全局虚拟环境...")
+                import shutil
+                shutil.rmtree(venv_path)  # remove the broken environment
+                self.create_global_venv()  # recreate from scratch
+                return self._check_venv_integrity(venv_path, is_global)  # re-check once rebuilt
+            return False
+
+        # Check 3: does site-packages exist (so installs land in the right place)?
+        site_packages = self._get_venv_site_packages(venv_path)
+        if not os.path.exists(site_packages):
+            self.logger.error(f"虚拟环境不完整:缺少 site-packages 目录 → {site_packages}")
+            return False
+
+        # All checks passed.
+        self.logger.debug(f"虚拟环境完整:{venv_path}")
+        return True
+
+ def _get_installed_packages(self, venv_path: str) -> Dict[str, str]:
+ """辅助方法:获取指定虚拟环境中已安装的Python包(包名→版本号)"""
+ pip_path = self.get_venv_pip(venv_path)
+ try:
+ # 用 pip list --format=json 获取结构化输出,解析效率高
+ result = subprocess.run(
+ [pip_path, "list", "--format=json"],
+ capture_output=True, text=True, check=True
+ )
+ packages = json.loads(result.stdout)
+ # 转为字典:{包名: 版本号}(忽略大小写,比如 requests → Requests)
+ return {pkg["name"].lower(): pkg["version"] for pkg in packages}
+ except Exception as e:
+ self.logger.error(f"获取已安装包失败:{str(e)}")
+ return {} # 异常时返回空字典,降级为全量安装(避免卡住)
+
+
+ def _get_venv_site_packages(self, venv_path: str) -> str:
+ """私有辅助方法:获取虚拟环境 site-packages 路径"""
+ python_version = f"python{sys.version_info.major}.{sys.version_info.minor}"
+ return os.path.join(venv_path, "lib", python_version, "site-packages")
+
+    # -------------------------- 1. Virtual-environment management --------------------------
+    def create_global_venv(self) -> str:
+        """Create the shared global virtual environment (idempotent); return its path."""
+        if os.path.exists(self.global_venv_path):
+            return self.global_venv_path
+
+        self.logger.info(f"创建全局虚拟环境:{self.global_venv_path}")
+        subprocess.run(
+            [sys.executable, "-m", "venv", self.global_venv_path],
+            check=True, capture_output=True, text=True
+        )
+        self.logger.info("全局虚拟环境创建完成")
+        return self.global_venv_path
+
+ def create_isolated_venv(self, tool_id: str) -> str:
+ """创建独立虚拟环境(用于依赖冲突的 tool)"""
+ venv_path = os.path.join(self.isolated_venv_root, tool_id)
+ if os.path.exists(venv_path):
+ return venv_path
+
+ self.logger.warning(f"依赖冲突,创建独立虚拟环境:{venv_path}")
+ subprocess.run(
+ [sys.executable, "-m", "venv", venv_path],
+ check=True, capture_output=True, text=True
+ )
+ self.logger.info("独立虚拟环境创建完成")
+ return venv_path
+
+    def delete_isolated_venv(self, tool_id: str) -> bool:
+        """Delete a tool's isolated venv (never touches the global env); True on success."""
+        venv_path = os.path.join(self.isolated_venv_root, tool_id)
+        if not os.path.exists(venv_path):
+            self.logger.debug(f"独立虚拟环境不存在:{venv_path}")
+            return True
+
+        try:
+            import shutil
+            shutil.rmtree(venv_path)
+            self.logger.info(f"删除独立虚拟环境:{venv_path}")
+            return True
+        except Exception as e:
+            self.logger.error(f"删除独立虚拟环境失败:{str(e)}")
+            return False
+
+ def get_venv_pip(self, venv_path: Optional[str] = None) -> str:
+ """获取指定虚拟环境的 pip 路径"""
+ # 优先使用指定环境,否则使用当前激活环境
+ target_venv = venv_path or os.getenv("VIRTUAL_ENV")
+ if not target_venv:
+ raise Exception("未激活虚拟环境,请先执行 source ./venv/global/bin/activate")
+ return os.path.join(target_venv, "bin", "pip")
+
+ # -------------------------- 2. 依赖冲突检测方法 --------------------------
+ def check_pip_compatibility(self, pip_deps: Dict[str, str], venv_path: str) -> bool:
+ """检测 Python 依赖与目标环境的兼容性 """
+ site_packages = self._get_venv_site_packages(venv_path)
+ sys.path.insert(0, site_packages)
+
+ try:
+ for dep_name, ver_constraint in pip_deps.items():
+ if not ver_constraint.strip():
+ continue
+
+ # 检查依赖是否已安装
+ try:
+ installed_ver = get_distribution(dep_name).version
+ except DistributionNotFound:
+ continue # 未安装,无冲突
+
+ # 版本约束校验(==/>=/<=)
+ if ver_constraint.startswith("==") and installed_ver != ver_constraint[2:].strip():
+ self.logger.debug(f"版本不兼容:{dep_name}(需{ver_constraint},当前{installed_ver})")
+ return False
+ if ver_constraint.startswith(">=") and installed_ver < ver_constraint[2:].strip():
+ self.logger.debug(f"版本过低:{dep_name}(需{ver_constraint},当前{installed_ver})")
+ return False
+ if ver_constraint.startswith("<=") and installed_ver > ver_constraint[2:].strip():
+ self.logger.debug(f"版本过高:{dep_name}(需{ver_constraint},当前{installed_ver})")
+ return False
+ return True
+ finally:
+ sys.path.remove(site_packages)
+
+ # -------------------------- 3. 依赖安装方法 --------------------------
+ def install_system_deps(self, system_deps: Dict[str, str]) -> Any:
+ """安装系统依赖"""
+ result = {"success": [], "failed": []}
+ if not system_deps:
+ return result
+
+ self.logger.info("=== 开始安装系统依赖(yum)===")
+ for dep_name, yum_cmd in system_deps.items():
+ # 检查是否已安装
+ verify_cmd = f"{dep_name} --version" if dep_name != "docker" else "docker --version"
+ if subprocess.run(verify_cmd, shell=True, capture_output=True).returncode == 0:
+ self.logger.debug(f"系统依赖[{dep_name}]已安装,跳过")
+ result["success"].append(dep_name)
+ continue
+
+ # 执行 yum 安装
+ try:
+ self.logger.info(f"安装:{dep_name} → 命令:{yum_cmd}")
+ subprocess.run(yum_cmd, shell=True, check=True, text=True)
+ result["success"].append(dep_name)
+ except subprocess.CalledProcessError as e:
+ err_msg = f"返回码{e.returncode}:{e.stderr.strip()}"
+ self.logger.error(f"系统依赖[{dep_name}]安装失败:{err_msg}")
+ result["failed"].append(f"{dep_name}({err_msg})")
+ return result
+
+    def install_pip_deps(self, pip_deps: Dict[str, str], venv_path: str, pip_index_url: Optional[str] = None) -> Any:
+        """Install Python deps into *venv_path*, skipping ones already present at the wanted version."""
+        self.logger.info(f"开始处理依赖 | 环境:{os.path.basename(venv_path)} | 需检查依赖:{list(pip_deps.keys())}")
+        result = {"success": [], "failed": [], "skipped": []}  # "skipped" records already-satisfied deps
+        if not pip_deps:
+            self.logger.warning("无需要安装的Python依赖")
+            return result
+
+        pip_path = self.get_venv_pip(venv_path)
+        # Step 1: snapshot what is already installed ({name: version}).
+        installed_pkgs = self._get_installed_packages(venv_path)
+
+        # Step 2: keep only deps that are missing or at the wrong version.
+        need_install = {}
+        for dep_name, ver_constraint in pip_deps.items():
+            # NOTE(review): lstrip("==") strips *characters*, not a prefix — fine for "==x", but ">=x" passes through unchanged and then always re-installs; consider startswith("==") + slicing.
+            ver_wanted = ver_constraint.strip().lstrip("==") if ver_constraint.strip() else None
+            if dep_name not in installed_pkgs:
+                need_install[dep_name] = ver_constraint  # missing → install
+                self.logger.info(f"包 {dep_name} 未安装,需安装版本:{ver_constraint}")
+            else:
+                ver_installed = installed_pkgs[dep_name]
+                if ver_wanted and ver_installed != ver_wanted:
+                    need_install[dep_name] = ver_constraint  # version mismatch → update
+                    self.logger.info(f"包 {dep_name} 已安装版本 {ver_installed},需更新为:{ver_wanted}")
+                else:
+                    result["skipped"].append(f"{dep_name}=={ver_installed}")  # already satisfied → skip
+                    self.logger.info(f"包 {dep_name}=={ver_installed} 已存在,跳过安装")
+
+        # Nothing to do → return early.
+        if not need_install:
+            self.logger.info("所有依赖均已安装,无需额外操作")
+            return result
+
+        # Step 3: install only the missing/mismatched dependencies.
+        self.logger.info(f"开始安装缺失/不匹配的依赖:{need_install}")
+        for dep_name, ver_constraint in need_install.items():
+            dep_spec = f"{dep_name}{ver_constraint.strip()}" if ver_constraint.strip() else dep_name
+            install_cmd = [pip_path, "install", "-q", "--no-cache-dir"]
+            if pip_index_url:
+                trusted_host = pip_index_url.split("://")[-1].split("/")[0]
+                install_cmd.extend(["--index-url", pip_index_url, "--trusted-host", trusted_host])
+            install_cmd.append(dep_spec)
+
+            try:
+                subprocess.run(install_cmd, check=True, capture_output=True, text=True)
+                result["success"].append(dep_spec)
+                self.logger.info(f"✅ 安装成功:{dep_spec}")
+            except subprocess.CalledProcessError as e:
+                err_msg = e.stderr.strip()
+                self.logger.error(f"❌ 安装失败:{dep_spec} | 错误:{err_msg[:100]}")
+                result["failed"].append(f"{dep_spec}({err_msg})")
+
+        self.logger.info(f"依赖处理完成 | 成功:{len(result['success'])} 个 | 失败:{len(result['failed'])} 个 | 跳过:{len(result['skipped'])} 个")
+        return result
+
+    def execute_deps_script(self, deps_script_path: str, venv_path: str) -> Any:
+        """Run a full deps.toml script (system + Python dependencies) against *venv_path*."""
+        if not os.path.exists(deps_script_path):
+            raise FileNotFoundError(f"依赖脚本不存在:{deps_script_path}")
+
+        # Load the TOML dependency manifest.
+        with open(deps_script_path, "r", encoding="utf-8") as f:
+            deps_data = toml.load(f)
+
+        # Optional custom pip index URL.
+        pip_index_url = deps_data.get("pip_config", {}).get("index_url")
+
+        # Install system deps first, then Python deps.
+        system_result = self.install_system_deps(deps_data.get("system", {}))
+        pip_result = self.install_pip_deps(deps_data.get("pip", {}), venv_path, pip_index_url)
+
+        # Summarize the combined outcome.
+        total_ok = len(system_result["success"]) + len(pip_result["success"])
+        total_fail = len(system_result["failed"]) + len(pip_result["failed"])
+        self.logger.info(f"=== 依赖脚本执行完成 == 成功:{total_ok} 个 | 失败:{total_fail} 个 ===")
+
+        return {"system": system_result, "pip": pip_result}
+
+    # -------------------------- 4. Tool execution-environment selection --------------------------
+    def select_venv_for_tool(self, tool_id: str, deps_script_path: Optional[str] = None) -> str:
+        """Pick a venv for a tool: global when compatible, otherwise an isolated one."""
+        # No dependency script → the global environment is always fine.
+        if not deps_script_path or not os.path.exists(deps_script_path):
+            self.logger.debug(f"tool[{tool_id}]无依赖,使用全局环境")
+            return self.create_global_venv()
+
+        # Has dependencies → check compatibility with the global env first.
+        with open(deps_script_path, "r", encoding="utf-8") as f:
+            pip_deps = toml.load(f).get("pip", {})
+
+        global_venv = self.create_global_venv()
+        if self.check_pip_compatibility(pip_deps, global_venv):
+            self.logger.debug(f"tool[{tool_id}]依赖与全局环境兼容")
+            return global_venv
+
+        # Incompatible → fall back to a dedicated isolated environment.
+        return self.create_isolated_venv(tool_id)
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/manager.py b/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/manager.py
new file mode 100644
index 00000000..d7d71351
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/manager.py
@@ -0,0 +1,322 @@
+# mcp_server/manager/manager.py
+"""
+Tool 全局管理器(对外统一API层)
+核心职责:封装内部组件,对外提供简洁、统一的Tool操作API
+- 角色:协调者(不包含业务逻辑,仅转发调用+参数校验)
+- 设计原则:对外透明、API语义化、兼容原有使用习惯、屏蔽内部实现
+- 依赖:PackageLoader(加载)、PackageUnloader(卸载)、ToolRepository(查询)
+"""
+import json
+import logging
+import os
+from typing import Dict, List, Optional, Any
+from servers.oe_cli_mcp_server.mcp_server.manager.tool_repository import ToolRepository, tool_repository as default_repo
+from servers.oe_cli_mcp_server.mcp_server.manager.package_loader import PackageLoader, package_loader as default_loader
+from servers.oe_cli_mcp_server.mcp_server.manager.package_unloader import PackageUnloader, package_unloader as default_unloader
+
+from servers.oe_cli_mcp_server.util.get_tool_state_path import get_tool_state_path
+
+logger = logging.getLogger(__name__)
+
+# 类型别名:与内部组件保持一致,提升API可读性
+ToolType = str
+PackageName = str
+PackageDir = str
+FuncName = str
+
+
class ToolManager:
    """Global tool manager: one-stop API for loading/unloading/querying tools.

    Pure coordinator: validates arguments, forwards the real work to the
    internal components (ToolRepository / PackageLoader / PackageUnloader),
    and persists the tool state after every successful mutation.
    """

    def __init__(self,
                 tool_repository: Optional[ToolRepository] = None,
                 package_loader: Optional[PackageLoader] = None,
                 package_unloader: Optional[PackageUnloader] = None):
        """Initialize the manager (dependency injection eases testing/replacement).

        :param tool_repository: data repository component (global singleton by default)
        :param package_loader: package loader component (global singleton by default)
        :param package_unloader: package unloader component (global singleton by default)
        """
        self._repo = tool_repository or default_repo
        self._loader = package_loader or default_loader
        self._unloader = package_unloader or default_unloader
        # File the in-memory tool state is persisted to / restored from.
        self._state_file_path = get_tool_state_path()
        logger.info("ToolManager 初始化完成 | 内部组件已就绪")

    # -------------------------------------------------------------------------
    # Load API (forwarded to PackageLoader)
    # -------------------------------------------------------------------------
    def load_package(self, package_dir: PackageDir) -> Optional[PackageName]:
        """Load a single package (smallest unit of work).

        :param package_dir: absolute path of the package directory
        :return: package name on success, None on failure
        """
        if not package_dir:
            logger.error("[Load Package Failed] 原因:包目录不能为空")
            return None
        result = self._loader.load_package(package_dir)
        if result:
            # Persist only after a successful load so the state file stays consistent.
            self.persist_tool_state()
        return result

    def load_tool_type(self, tool_type: ToolType) -> Dict[str, Any]:
        """Load every package under a tool type (batch load).

        :param tool_type: type name (plain string or Enum)
        :return: load statistics (total/success/failure counts, ...)
        """
        if not tool_type:
            logger.error("[Load ToolType Failed] 原因:分类名不能为空")
            return self._init_empty_result("分类名不能为空")
        result = self._loader.load_tool_type(tool_type)
        self.persist_tool_state()
        return result

    # -------------------------------------------------------------------------
    # Unload API (forwarded to PackageUnloader)
    # -------------------------------------------------------------------------
    def unload_package(self, package_name: PackageName, delete_env: bool = True) -> bool:
        """Unload a single package (smallest unit of work).

        :param package_name: name of the package to unload
        :param delete_env: also delete an isolated venv (default True; the
                           shared global venv is never deleted)
        :return: True on success, False on failure
        """
        if not package_name:
            logger.error("[Unload Package Failed] 原因:包名不能为空")
            return False
        result = self._unloader.unload_package(package_name, delete_env)
        if result:
            self.persist_tool_state()
        return result

    def unload_tool_type(self, tool_type: ToolType, delete_env: bool = True) -> bool:
        """Unload every package under a tool type (batch unload).

        :param tool_type: type name (plain string or Enum)
        :param delete_env: also delete isolated venvs (default True)
        :return: True only if every package unloaded successfully
        """
        if not tool_type:
            logger.error("[Unload ToolType Failed] 原因:分类名不能为空")
            return False

        result = self._unloader.unload_tool_type(tool_type, delete_env)
        if result:
            self.persist_tool_state()
        return result

    # -------------------------------------------------------------------------
    # Query API (forwarded to ToolRepository)
    # -------------------------------------------------------------------------
    def get_package_info(self, package_name: PackageName) -> Optional[Dict[str, Any]]:
        """Return full package info (environment, function list, ...).

        :param package_name: package name
        :return: info dict, or None if the package does not exist
        """
        if not package_name:
            logger.error("[Get Package Info Failed] 原因:包名不能为空")
            return None
        return self._repo.get_package(package_name)

    def get_func_info(self, func_name: FuncName) -> Optional[Dict[str, Any]]:
        """Return function details (owning package / type / environment, ...).

        :param func_name: function name (globally unique)
        :return: info dict, or None if the function does not exist
        """
        if not func_name:
            logger.error("[Get Func Info Failed] 原因:函数名不能为空")
            return None
        return self._repo.get_func(func_name)

    def get_tool_type_info(self, tool_type: ToolType) -> Optional[Dict[str, Any]]:
        """Return tool-type details (member packages, statistics, ...).

        :param tool_type: type name
        :return: info dict, or None if the type does not exist
        """
        if not tool_type:
            logger.error("[Get ToolType Info Failed] 原因:分类名不能为空")
            return None
        return self._repo.get_tool_type(tool_type)

    # -------------------------------------------------------------------------
    # List API (forwarded to ToolRepository)
    # -------------------------------------------------------------------------
    def list_packages(self, tool_type: Optional[ToolType] = None) -> List[PackageName]:
        """List package names, optionally filtered by tool type.

        :param tool_type: type name (None lists all packages)
        :return: package names ordered by insertion time
        """
        return self._repo.list_packages(tool_type)

    def list_funcs(self, package_name: Optional[PackageName] = None) -> List[FuncName]:
        """List function names, optionally filtered by package.

        :param package_name: package name (None lists all functions)
        :return: function names
        """
        return self._repo.list_funcs(package_name)

    def list_tool_types(self) -> List[ToolType]:
        """List all registered tool-type names."""
        return self._repo.list_tool_types()

    # -------------------------------------------------------------------------
    # Serialization API (forwarded to ToolRepository)
    # -------------------------------------------------------------------------
    def get_serializable_data(self) -> Dict[str, Any]:
        """Return serializable repository data (for persistent storage)."""
        data = self._repo.get_serializable_data()
        logger.debug(f"[Serializable Data Got] 包数:{len(data.get('serializable_packages', {}))}")
        return data

    def get_package_path(self, package_name: PackageName) -> str:
        """Return the on-disk directory of a loaded package.

        :param package_name: name of a currently loaded package
        :return: absolute package directory path
        :raises KeyError: if the package is not loaded
        """
        data = self._repo.get_serializable_data()
        packages_info = data.get('serializable_packages', {})
        if package_name not in packages_info:
            # Keep the original KeyError semantics, but name the package.
            raise KeyError(f"package not loaded: {package_name}")
        return packages_info[package_name]["package_dir"]

    def load_serializable_data(self, data: Dict[str, Any]) -> Dict[str, int]:
        """Load serialized data (restores associations after a restart).

        :param data: serialized data read from the persistence file
        :return: restore statistics
        """
        if not isinstance(data, dict):
            logger.error("[Load Serializable Data Failed] 原因:数据格式必须是字典")
            return {"total_package": 0, "success_package": 0, "fail_package": 0}
        return self._repo.load_serializable_data(data)

    def reload_package_functions(self) -> Dict[str, Any]:
        """Re-import the function objects of all loaded packages.

        Must run after restoring persisted state: the state file only stores
        metadata, so callables have to be re-imported from each package dir.
        Flow: repository package dirs -> PackageLoader re-import -> repository.

        :return: reload statistics
        """
        result = {
            "total_package": 0,
            "success_package": 0,
            "fail_package": [],
            "total_func": 0
        }

        # 1. All packages whose metadata was restored into the repository.
        package_names = self.list_packages()
        result["total_package"] = len(package_names)
        if result["total_package"] == 0:
            logger.info("[Reload Functions] 无已加载的包,跳过重新导入")
            return result

        # 2. Re-import functions package by package.
        for pkg_name in package_names:
            pkg_info = self.get_package_info(pkg_name)
            if not pkg_info or not pkg_info.get("package_dir"):
                logger.error(f"[Reload Functions Failed] 包 {pkg_name} 无有效目录信息")
                result["fail_package"].append(pkg_name)
                continue

            try:
                # load_package re-imports; PackageLoader tolerates already-known packages.
                loaded_pkg_name = self.load_package(pkg_info["package_dir"])

                if loaded_pkg_name:
                    result["success_package"] += 1
                    # Count the functions that came back for this package.
                    funcs = self.list_funcs(pkg_name)
                    result["total_func"] += len(funcs)
                    logger.info(f"[Reload Functions Success] 包 {pkg_name}:{len(funcs)} 个函数")
                else:
                    # None: package may exist but its function import failed.
                    logger.warning(f"[Reload Functions Warning] 包 {pkg_name} 未成功重新导入")
                    result["fail_package"].append(pkg_name)
            except Exception as e:
                logger.error(f"[Reload Functions Failed] 包 {pkg_name}:{str(e)}", exc_info=True)
                result["fail_package"].append(pkg_name)

        # Final summary for operators.
        logger.info(
            f"[Reload Functions Done] 总包数:{result['total_package']} | "
            f"成功:{result['success_package']} | 失败:{len(result['fail_package'])} | "
            f"总函数数:{result['total_func']}"
        )
        return result

    # -------------------------------------------------------------------------
    # Persistence API (public, supports manual triggering)
    # -------------------------------------------------------------------------
    def persist_tool_state(self) -> bool:
        """Persist the tool state (write the in-memory associations to file).

        :return: True on success, False on failure
        """
        try:
            # 1. Fetch serializable data from the repository.
            serializable_data = self._repo.get_serializable_data()
            # 2. Atomic write: write a temp file first, then replace the target.
            temp_file = f"{self._state_file_path}.tmp"
            with open(temp_file, "w", encoding="utf-8") as f:
                json.dump(serializable_data, f, ensure_ascii=False, indent=2)
            # 3. os.replace is atomic on POSIX, preventing a torn state file.
            os.replace(temp_file, self._state_file_path)
            logger.info(f"[Tool State Persisted] 成功写入持久化文件 | 包数:{len(serializable_data.get('serializable_packages', {}))}")
            return True
        except Exception as e:
            logger.error(f"[Tool State Persist Failed] 原因:{str(e)}", exc_info=True)
            # Best-effort cleanup of the temp file.
            if os.path.exists(f"{self._state_file_path}.tmp"):
                os.remove(f"{self._state_file_path}.tmp")
            return False

    def restore_tool_state(self) -> Dict[str, int]:
        """Restore the tool state (load the persistence file into memory).

        :return: restore statistics (total/success/failure counts)
        """
        # 1. Nothing to restore without a state file.
        if not os.path.exists(self._state_file_path):
            logger.warning(f"[Tool State Restore Failed] 原因:持久化文件不存在 - {self._state_file_path}")
            return {"total_package": 0, "success_package": 0, "fail_package": 0}
        try:
            # 2. Read the persisted JSON document.
            with open(self._state_file_path, "r", encoding="utf-8") as f:
                serializable_data = json.load(f)
            # 3. Hand it to the repository to rebuild the in-memory state.
            result = self._repo.load_serializable_data(serializable_data)
            logger.info(f"[Tool State Restored] 从持久化文件恢复 | 结果:{result}")
            return result
        except json.JSONDecodeError:
            logger.error(f"[Tool State Restore Failed] 原因:持久化文件格式错误 - {self._state_file_path}")
            # Keep the corrupt file around (renamed) for post-mortem debugging.
            if os.path.exists(self._state_file_path):
                backup_file = f"{self._state_file_path}.corrupt.{os.path.getmtime(self._state_file_path)}"
                os.rename(self._state_file_path, backup_file)
                logger.info(f"[Corrupt File Backed Up] 备份路径:{backup_file}")
            return {"total_package": 0, "success_package": 0, "fail_package": 0}
        except Exception as e:
            logger.error(f"[Tool State Restore Failed] 原因:{str(e)}", exc_info=True)
            return {"total_package": 0, "success_package": 0, "fail_package": 0}

    # -------------------------------------------------------------------------
    # Internal helpers (private, single responsibility)
    # -------------------------------------------------------------------------
    @staticmethod
    def _init_empty_result(fail_reason: str) -> Dict[str, Any]:
        """Build an empty load-result dict carrying only a failure reason."""
        return {
            "tool_type": "",
            "total_package": 0,
            "success_package": 0,
            "fail_package": [],
            "success_func": 0,
            "fail_reason": fail_reason
        }


# Global singleton (the single public entry point, keeps the original usage).
tool_manager = ToolManager()
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/package_loader.py b/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/package_loader.py
new file mode 100644
index 00000000..1c6100fd
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/package_loader.py
@@ -0,0 +1,340 @@
+# mcp_server/manager/package_loader.py
+"""
+包加载器(纯加载流程层)
+核心职责:仅负责「包/分类级」加载流程,最小操作单元为包
+- 流程:校验包合法性 → 准备环境 → 安装依赖 → 导入模块 → 校验函数 → 调用仓库存储
+- 设计原则:无状态、纯流程、依赖注入、结果导向
+- 依赖:DepVenvManager(环境/依赖管理)、ToolRepository(数据存储)
+"""
+
+import os
+import sys
+import json
+import logging
+from typing import Dict, List, Optional, Any
+from importlib.util import spec_from_file_location, module_from_spec
+
+from servers.oe_cli_mcp_server.mcp_server.dependency import DepVenvManager
+from servers.oe_cli_mcp_server.util.get_project_root import get_project_root
+from servers.oe_cli_mcp_server.util.tool_package_file_check import tool_package_file_check
+from servers.oe_cli_mcp_server.mcp_server.manager.tool_repository import ToolRepository, tool_repository as default_repo
+
+logger = logging.getLogger(__name__)
+
+# 类型别名:与ToolRepository保持一致
+ToolType = str
+PackageName = str
+PackageDir = str
+FuncName = str
+FuncDetail = Dict[str, Any]
+
+
class PackageLoader:
    """Package loader: package/type-level load workflow; storage is delegated.

    Flow per package: validate dir -> parse metadata -> prepare venv ->
    install deps -> import tool.py -> validate functions -> store in repo.
    """

    def __init__(self,
                 dep_manager: Optional[DepVenvManager] = None,
                 tool_repository: Optional[ToolRepository] = None):
        """Initialize the loader (dependency injection eases testing/replacement).

        :param dep_manager: env/dependency manager (auto-created by default)
        :param tool_repository: data repository (global singleton by default)
        """
        self._dep_manager = dep_manager or DepVenvManager()
        self._tool_repo = tool_repository or default_repo
        # Make sure the shared global venv exists before any package loads.
        self._dep_manager.create_global_venv()
        logger.info("PackageLoader 初始化完成 | 全局虚拟环境已就绪")

    # -------------------------------------------------------------------------
    # Public API
    # -------------------------------------------------------------------------
    def load_package(self, package_dir: PackageDir) -> Optional[PackageName]:
        """Load a single package (core workflow).

        :param package_dir: absolute path of the package directory
        :return: package name on success, None on failure
        """
        logger.info(f"[Package Load Start] 包目录:{package_dir}")

        # 1. Normalize the path and validate the package layout.
        normalized_dir = self._normalize_path(package_dir)
        if not self._validate_package_dir(normalized_dir):
            logger.error(f"[Package Load Failed] 包目录:{normalized_dir} | 原因:包合法性校验失败")
            return None

        # 2. Derive basic package metadata (name, type, function configs).
        package_name = self._get_package_name(normalized_dir)
        tool_type = self._get_tool_type_by_package_dir(normalized_dir)
        func_configs = self._load_func_configs(normalized_dir)
        if not func_configs:
            logger.error(f"[Package Load Failed] 包名:{package_name} | 原因:无有效函数配置")
            return None

        # 3. Environment: pick the global venv or an isolated one.
        env_result = self._prepare_package_env(package_name, normalized_dir)
        if not env_result:
            logger.error(f"[Package Load Failed] 包名:{package_name} | 原因:环境准备失败")
            return None
        venv_path, venv_type = env_result

        # 4. Dependencies: install only for packages not loaded yet
        #    (re-loading a known package skips the install step).
        if package_name not in self._tool_repo.list_packages():
            if not self._install_package_deps(package_name, normalized_dir, venv_path):
                logger.error(f"[Package Load Failed] 包名:{package_name} | 原因:依赖安装失败")
                return None

        # 5. Import tool.py with the venv's site-packages on sys.path.
        tool_module = self._load_tool_module(package_name, normalized_dir, venv_path)
        if not tool_module:
            logger.error(f"[Package Load Failed] 包名:{package_name} | 原因:模块导入失败")
            return None

        # 6. Validate callables and assemble the function detail dicts.
        valid_funcs = self._validate_and_assemble_funcs(package_name, tool_module, func_configs)
        if not valid_funcs:
            logger.error(f"[Package Load Failed] 包名:{package_name} | 原因:无有效可调用函数")
            return None

        # 7. Store package + function info through the repository.
        if self._tool_repo.add_package(
            package_name=package_name,
            tool_type=tool_type,
            package_dir=normalized_dir,
            venv_path=venv_path,
            venv_type=venv_type,
            funcs=valid_funcs
        ):
            logger.info(f"[Package Load Success] 包名:{package_name} | 分类:{tool_type} | 有效函数数:{len(valid_funcs)}")
            return package_name
        else:
            logger.error(f"[Package Load Failed] 包名:{package_name} | 原因:仓库存储失败")
            return None

    def load_tool_type(self, tool_type: ToolType) -> Dict[str, Any]:
        """Load every package under a tool type (batch load).

        :param tool_type: type name (plain string or Enum)
        :return: load statistics
        """
        # Normalize the type name (Enum values are unwrapped to strings).
        tool_type_str = self._normalize_tool_type(tool_type)
        result = self._init_load_result(tool_type_str)

        # Resolve the type's directory under mcp_tools/.
        type_dir = self._get_tool_type_dir(tool_type_str)
        if not os.path.isdir(type_dir):
            result["fail_reason"] = f"分类目录不存在:{type_dir}"
            logger.error(f"[ToolType Load Failed] 分类:{tool_type_str} | 原因:{result['fail_reason']}")
            return self._log_and_return_result(result)

        # Each subdirectory is a candidate package.
        for item in os.listdir(type_dir):
            item_path = os.path.join(type_dir, item)
            if not os.path.isdir(item_path):
                logger.debug(f"[ToolType Load Skip] 分类:{tool_type_str} | 原因:非目录 - {item_path}")
                continue

            result["total_package"] += 1
            package_name = self.load_package(item_path)

            if package_name:
                result["success_package"] += 1
                result["success_func"] += len(self._tool_repo.list_funcs(package_name))
            else:
                result["fail_package"].append(item)

        return self._log_and_return_result(result)

    # -------------------------------------------------------------------------
    # Internal helpers (private, single responsibility)
    # -------------------------------------------------------------------------
    @staticmethod
    def _normalize_path(path: str) -> PackageDir:
        """Return an absolute, normalized path (redundant separators removed)."""
        return os.path.abspath(os.path.normpath(path))

    @staticmethod
    def _normalize_tool_type(tool_type: ToolType) -> str:
        """Normalize a type name (unwraps Enum members via .value)."""
        return tool_type.value if hasattr(tool_type, "value") else str(tool_type)

    @staticmethod
    def _get_package_name(package_dir: PackageDir) -> PackageName:
        """Package name == the package directory's basename."""
        return os.path.basename(package_dir)

    @staticmethod
    def _get_tool_type_by_package_dir(package_dir: PackageDir) -> ToolType:
        """Tool type == the package directory's parent directory name."""
        return os.path.basename(os.path.dirname(package_dir))

    def _get_tool_type_dir(self, tool_type: str) -> PackageDir:
        """Resolve the directory holding all packages of a tool type."""
        root_dir = get_project_root()
        if not root_dir:
            logger.error("[ToolType Dir Get Failed] 原因:tool_package根目录未配置")
            return ""
        return os.path.join(self._normalize_path(root_dir), "mcp_tools", tool_type)

    def _validate_package_dir(self, package_dir: PackageDir) -> bool:
        """Validate the package layout (tool.py present + basic file checks)."""
        if not tool_package_file_check(package_dir):
            logger.error(f"[Package Validate Failed] 包目录:{package_dir} | 原因:包文件校验失败")
            return False
        return True

    def _load_func_configs(self, package_dir: PackageDir) -> Dict[FuncName, str]:
        """Load function configs from config.json (function name -> description)."""
        config_path = os.path.join(package_dir, "config.json")
        package_name = self._get_package_name(package_dir)

        try:
            with open(config_path, "r", encoding="utf-8") as f:
                config_data = json.load(f)
            # The "tools" node maps function names to descriptions.
            func_configs = config_data.get("tools", {})
            if not isinstance(func_configs, dict) or len(func_configs) == 0:
                logger.warning(f"[Func Config Load Empty] 包名:{package_name} | 原因:config.json中tools节点为空")
                return {}
            logger.debug(f"[Func Config Load Success] 包名:{package_name} | 加载函数配置数:{len(func_configs)}")
            return func_configs
        except FileNotFoundError:
            logger.error(f"[Func Config Load Failed] 包名:{package_name} | 原因:config.json不存在 - {config_path}")
            return {}
        except json.JSONDecodeError:
            logger.error(f"[Func Config Load Failed] 包名:{package_name} | 原因:config.json格式错误")
            return {}
        except Exception as e:
            logger.error(f"[Func Config Load Failed] 包名:{package_name} | 原因:{str(e)}", exc_info=True)
            return {}

    def _prepare_package_env(self, package_name: PackageName, package_dir: PackageDir) -> Optional[tuple[str, str]]:
        """Pick an environment for the package; returns (venv_path, venv_type) or None."""
        try:
            deps_path = os.path.join(package_dir, "deps.toml")
            deps_exists = os.path.exists(deps_path)

            # The env manager decides global vs isolated (package name = env id).
            venv_path = self._dep_manager.select_venv_for_tool(
                tool_id=package_name,
                deps_script_path=deps_path if deps_exists else None
            )
            venv_type = "global" if venv_path == self._dep_manager.global_venv_path else "isolated"
            logger.debug(f"[Package Env Prepared] 包名:{package_name} | 环境路径:{venv_path} | 环境类型:{venv_type}")
            return venv_path, venv_type
        except Exception as e:
            logger.error(f"[Package Env Prepare Failed] 包名:{package_name} | 原因:{str(e)}", exc_info=True)
            return None

    def _install_package_deps(self, package_name: PackageName, package_dir: PackageDir, venv_path: str) -> bool:
        """Install the package's dependencies (system + Python)."""
        deps_path = os.path.join(package_dir, "deps.toml")
        if not os.path.exists(deps_path):
            logger.debug(f"[Package Deps Skip] 包名:{package_name} | 原因:无deps.toml依赖配置")
            return True

        try:
            # Delegate the actual install to the env/dependency manager.
            logger.info("-------------正在安装相关依赖---------------")
            install_result = self._dep_manager.execute_deps_script(
                deps_script_path=deps_path,
                venv_path=venv_path
            )
            # Partial failures are warned about but do not abort the load.
            failed_deps = install_result["system"]["failed"] + install_result["pip"]["failed"]
            if failed_deps:
                logger.warning(f"[Package Deps Partial Failed] 包名:{package_name} | 失败依赖:{failed_deps}")
            logger.debug(f"[Package Deps Installed] 包名:{package_name} | 依赖安装完成")
            return True
        except Exception as e:
            logger.error(f"[Package Deps Install Failed] 包名:{package_name} | 原因:{str(e)}", exc_info=True)
            return False

    def _load_tool_module(self, package_name: PackageName, package_dir: PackageDir, venv_path: str) -> Optional[Any]:
        """Import tool.py with the env's site-packages temporarily on sys.path."""
        tool_py_path = os.path.join(package_dir, "tool.py")
        module_name = f"package_{package_name}_module"  # unique name, avoids clashes

        # NOTE(review): reaches into DepVenvManager's private helper — consider
        # exposing a public accessor on DepVenvManager instead.
        site_packages = self._dep_manager._get_venv_site_packages(venv_path)
        sys.path.insert(0, site_packages)

        try:
            # Drop any stale cached module before re-importing.
            if module_name in sys.modules:
                del sys.modules[module_name]

            # Dynamic import straight from the file path.
            spec = spec_from_file_location(module_name, tool_py_path)
            if not spec or not spec.loader:
                raise ValueError("模块规范无效,无法加载")

            module = module_from_spec(spec)
            sys.modules[module_name] = module
            spec.loader.exec_module(module)

            logger.debug(f"[Tool Module Loaded] 包名:{package_name} | 模块名:{module_name}")
            return module
        except Exception as e:
            logger.error(f"[Tool Module Load Failed] 包名:{package_name} | 原因:{str(e)}", exc_info=True)
            return None
        finally:
            # Always undo the sys.path injection to avoid global pollution.
            if site_packages in sys.path:
                sys.path.remove(site_packages)

    def _validate_and_assemble_funcs(self,
                                     package_name: PackageName,
                                     tool_module: Any,
                                     func_configs: Dict[FuncName, str]) -> Dict[FuncName, FuncDetail]:
        """Check each configured function is a callable attribute; build detail dicts."""
        valid_funcs = {}
        for func_name, description in func_configs.items():
            # The function must exist on the module and be callable.
            if not hasattr(tool_module, func_name):
                logger.warning(f"[Func Validate Skipped] 包名:{package_name} | 函数名:{func_name} | 原因:模块中不存在该函数")
                continue
            func_obj = getattr(tool_module, func_name)
            if not callable(func_obj):
                logger.warning(f"[Func Validate Skipped] 包名:{package_name} | 函数名:{func_name} | 原因:非可调用对象")
                continue

            # Fresh per-function dict; the callable itself is shared by design.
            valid_funcs[func_name] = {
                "func": func_obj,
                "description": description
            }

        logger.debug(f"[Func Validate Completed] 包名:{package_name} | 有效函数数:{len(valid_funcs)} | 总配置数:{len(func_configs)}")
        return valid_funcs

    @staticmethod
    def _init_load_result(tool_type: str) -> Dict[str, Any]:
        """Build an empty load-result statistics dict for a tool type."""
        return {
            "tool_type": tool_type,
            "total_package": 0,
            "success_package": 0,
            "fail_package": [],
            "success_func": 0,
            "fail_reason": ""
        }

    @staticmethod
    def _log_and_return_result(result: Dict[str, Any]) -> Dict[str, Any]:
        """Log the batch-load summary and pass the result through."""
        logger.info(
            f"[ToolType Load Completed] 分类:{result['tool_type']} | "
            f"总包数:{result['total_package']} | "
            f"成功包数:{result['success_package']} | "
            f"失败包数:{len(result['fail_package'])} | "
            f"成功函数数:{result['success_func']} | "
            f"失败原因:{result['fail_reason']}"
        )
        return result


# Module-level singleton (uses the default manager and repository).
package_loader = PackageLoader()
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/package_unloader.py b/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/package_unloader.py
new file mode 100644
index 00000000..81fd0b2e
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/package_unloader.py
@@ -0,0 +1,140 @@
+# mcp_server/manager/package_unloader.py
+"""
+包卸载器(纯卸载流程层)
+核心职责:仅负责「包/分类级」卸载流程,最小操作单元为包
+- 流程:查询包信息 → 清理函数关联 → 删除独立环境 → 调用仓库删除数据
+- 设计原则:无状态、纯流程、依赖注入、结果导向
+- 依赖:DepVenvManager(环境删除)、ToolRepository(数据查询/删除)
+"""
+
+import logging
+from typing import Optional
+
+from servers.oe_cli_mcp_server.mcp_server.dependency import DepVenvManager
+from servers.oe_cli_mcp_server.mcp_server.manager.tool_repository import ToolRepository, tool_repository as default_repo
+
+logger = logging.getLogger(__name__)
+
+# 类型别名:与ToolRepository、PackageLoader保持一致
+ToolType = str
+PackageName = str
+
+
class PackageUnloader:
    """Package unloader: package/type-level unload workflow; storage is delegated.

    Flow per package: look up package info -> optionally remove its isolated
    venv -> delete package data (and function/type links) via the repository.
    """

    def __init__(self,
                 dep_manager: Optional[DepVenvManager] = None,
                 tool_repository: Optional[ToolRepository] = None):
        """Wire up collaborators (injectable for tests and replacement).

        :param dep_manager: env/dependency manager (auto-created by default)
        :param tool_repository: data repository (global singleton by default)
        """
        self._dep_manager = dep_manager or DepVenvManager()
        self._tool_repo = tool_repository or default_repo
        logger.info("PackageUnloader 初始化完成")

    # -------------------------------------------------------------------------
    # Public API
    # -------------------------------------------------------------------------
    def unload_package(self, package_name: PackageName, delete_env: bool = True) -> bool:
        """Unload a single package (smallest unit of work).

        :param package_name: name of the package to unload
        :param delete_env: also delete an isolated venv (the global venv is kept)
        :return: True on success, False on failure
        """
        logger.info(f"[Package Unload Start] 包名:{package_name} | 删除环境:{delete_env}")

        # The package must be known to the repository before anything else.
        info = self._tool_repo.get_package(package_name)
        if not info:
            logger.error(f"[Package Unload Failed] 包名:{package_name} | 原因:包不存在")
            return False

        venv_type = info["venv_type"]
        venv_path = info["venv_path"]
        tool_type = info["tool_type"]

        # Only isolated envs are ever removed; the global venv is shared.
        if delete_env and venv_type == "isolated" and not self._delete_isolated_env(package_name, venv_path):
            # A failed env removal is a warning, not a reason to keep the package.
            logger.warning(f"[Package Env Delete Failed] 包名:{package_name} | 环境路径:{venv_path}")

        # Drop the package (and its function/type links) from the repository.
        if not self._tool_repo.delete_package(package_name):
            logger.error(f"[Package Unload Failed] 包名:{package_name} | 原因:仓库删除失败")
            return False
        logger.info(f"[Package Unload Success] 包名:{package_name} | 分类:{tool_type} | 环境类型:{venv_type}")
        return True

    def unload_tool_type(self, tool_type: ToolType, delete_env: bool = True) -> bool:
        """Unload every package under a tool type (batch unload).

        :param tool_type: type name (plain string or Enum)
        :param delete_env: also delete isolated venvs (default True)
        :return: True only if every member package unloaded successfully
        """
        # Enum members are unwrapped to their string value.
        tool_type_str = self._normalize_tool_type(tool_type)
        logger.info(f"[ToolType Unload Start] 分类:{tool_type_str} | 删除环境:{delete_env}")

        # The type must be registered, otherwise there is nothing to do.
        type_info = self._tool_repo.get_tool_type(tool_type_str)
        if not type_info:
            logger.error(f"[ToolType Unload Failed] 分类:{tool_type_str} | 原因:分类不存在")
            return False

        package_names = type_info["package_names"]
        if not package_names:
            logger.info(f"[ToolType Unload Completed] 分类:{tool_type_str} | 原因:无下属包")
            return True

        # Unload each member package, counting successes as we go.
        ok_count = 0
        for pkg_name in package_names:
            if self.unload_package(pkg_name, delete_env):
                ok_count += 1
            else:
                logger.error(f"[ToolType Unload Package Failed] 分类:{tool_type_str} | 包名:{pkg_name}")

        # Overall success requires every single package to have unloaded.
        total = len(package_names)
        all_success = ok_count == total
        logger.info(
            f"[ToolType Unload Completed] 分类:{tool_type_str} | "
            f"处理包数:{total} | "
            f"成功数:{ok_count} | "
            f"失败数:{total - ok_count} | "
            f"整体成功:{all_success}"
        )
        return all_success

    # -------------------------------------------------------------------------
    # Internal helpers (private, single responsibility)
    # -------------------------------------------------------------------------
    @staticmethod
    def _normalize_tool_type(tool_type: ToolType) -> str:
        """Normalize a type name (unwraps Enum members via .value)."""
        if hasattr(tool_type, "value"):
            return tool_type.value
        return str(tool_type)

    def _delete_isolated_env(self, package_name: PackageName, venv_path: str) -> bool:
        """Remove a package's isolated venv via the env manager; never raises."""
        try:
            removed = self._dep_manager.delete_isolated_venv(tool_id=package_name)
        except Exception as e:
            logger.error(
                f"[Isolated Env Delete Failed] 包名:{package_name} | 环境路径:{venv_path} | 原因:{str(e)}",
                exc_info=True
            )
            return False
        if removed:
            logger.debug(f"[Isolated Env Deleted] 包名:{package_name} | 环境路径:{venv_path}")
        return removed


# Module-level singleton (uses the default manager and repository).
package_unloader = PackageUnloader()
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/tool_repository.py b/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/tool_repository.py
new file mode 100644
index 00000000..5aadfeec
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_server/manager/tool_repository.py
@@ -0,0 +1,452 @@
+# mcp_server/manager/tool_repository.py
+"""
+Tool 数据仓库(纯数据操作层)
+核心职责:维护「分类→包→函数」三级关联数据,提供原子化增删改查方法
+- 核心操作单元:包(函数为包的附属资源,不支持单独操作)
+- 设计原则:纯数据处理、无业务逻辑、强数据一致性、API语义化
+- 数据隔离:对外暴露数据均为深拷贝,避免外部修改内部状态
+"""
+
import logging
from typing import Dict, List, Optional, Any
from datetime import datetime
from copy import deepcopy

logger = logging.getLogger(__name__)

# Type aliases: keep signatures readable and consistent across the manager layer.
ToolType = str
PackageName = str
PackageDir = str
FuncName = str
FuncDetail = Dict[str, Any]  # shape: {"func": callable, "description": str}


class ToolRepository:
    """In-memory store of the category -> package -> function hierarchy.

    Pure data layer with no business logic. The *package* is the unit of
    change (functions are attached resources and cannot be added/removed
    individually). All data handed to callers is copied so external
    mutation cannot corrupt internal state.
    """

    def __init__(self):
        """Initialize the three in-memory index structures (memory only)."""
        # 1. Package store: package name -> full package record (incl. functions).
        self._packages: Dict[PackageName, Dict[str, Any]] = {}
        # 2. Category -> package-name list (fast per-category listing).
        self._type_package_map: Dict[ToolType, List[PackageName]] = {}
        # 3. Function name -> owning package name (fast reverse lookup).
        self._func_package_map: Dict[FuncName, PackageName] = {}

    # -------------------------------------------------------------------------
    # Package-level core operations (add / delete / get / list)
    # -------------------------------------------------------------------------
    def add_package(self,
                    package_name: PackageName,
                    tool_type: ToolType,
                    package_dir: PackageDir,
                    venv_path: str,
                    venv_type: str,
                    funcs: Dict[FuncName, FuncDetail]) -> bool:
        """
        Atomically add a package (with its functions), keeping all three
        indexes in sync; on failure everything is rolled back.
        :param package_name: package name (globally unique by convention)
        :param tool_type: owning category (e.g. base/docker)
        :param package_dir: absolute path of the package directory
        :param venv_path: virtualenv path associated with the package
        :param venv_type: environment type (global/isolated)
        :param funcs: function details (key=function name, value=FuncDetail)
        :return: True on success, False on failure
        """
        # 1. Validate up front so invalid data never enters the store.
        if not self._validate_add_package_params(package_name, tool_type, package_dir, venv_path, venv_type, funcs):
            return False

        # 2. Apply all related updates; roll back as a unit on any error.
        try:
            # 2.1 Store the package record (deepcopy isolates us from the caller).
            self._packages[package_name] = {
                "package_name": package_name,
                "tool_type": tool_type,
                "package_dir": package_dir,
                "venv_path": venv_path,
                "venv_type": venv_type,
                "funcs": deepcopy(funcs),
                "create_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            }

            # 2.2 Maintain the category -> package index.
            self._add_to_type_package_map(tool_type, package_name)

            # 2.3 Maintain the function -> package index.
            self._add_to_func_package_map(funcs.keys(), package_name)

            logger.info(
                f"[Package Added] 包名:{package_name} | 分类:{tool_type} | 函数数:{len(funcs)} | 环境类型:{venv_type}"
            )
            return True
        except Exception as e:
            logger.error(f"[Package Add Failed] 包名:{package_name} | 原因:{str(e)}", exc_info=True)
            self._rollback_add_package(package_name, tool_type, funcs.keys())
            return False

    def delete_package(self, package_name: PackageName) -> bool:
        """
        Atomically delete a package, cascading to its functions and
        category membership.
        :param package_name: name of the package to delete
        :return: True on success, False on failure
        """
        # 1. The package must exist.
        if package_name not in self._packages:
            logger.warning(f"[Package Delete Failed] 包名:{package_name} | 原因:包不存在")
            return False

        # 2. Capture related data before mutation so the indexes can be cleaned.
        package = self._packages[package_name]
        tool_type = package["tool_type"]
        func_names = list(package["funcs"].keys())

        # 3. Remove the record and both index entries.
        try:
            # 3.1 Core record.
            del self._packages[package_name]

            # 3.2 Category index (empty categories are removed automatically).
            self._remove_from_type_package_map(tool_type, package_name)

            # 3.3 Function index.
            self._remove_from_func_package_map(func_names)

            logger.info(
                f"[Package Deleted] 包名:{package_name} | 分类:{tool_type} | 连带删除函数数:{len(func_names)}"
            )
            return True
        except Exception as e:
            logger.error(f"[Package Delete Failed] 包名:{package_name} | 原因:{str(e)}", exc_info=True)
            return False

    def get_package(self, package_name: PackageName) -> Optional[Dict[str, Any]]:
        """
        Return the full package record (including functions) as a deep copy
        so callers cannot mutate internal state.
        :param package_name: package name
        :return: package record dict, or None when the package does not exist
        """
        package = self._packages.get(package_name)
        return deepcopy(package) if package else None

    def list_packages(self, tool_type: Optional[ToolType] = None) -> List[PackageName]:
        """
        List package names, optionally filtered by category; the unfiltered
        list is sorted by creation time (ascending).
        :param tool_type: category name (None lists every package)
        :return: list of package names (a copy, safe to mutate)
        """
        if tool_type:
            # Filtered view: copy the internal list before handing it out.
            return self._type_package_map.get(tool_type, []).copy()
        # All packages, oldest first.
        return sorted(
            self._packages.keys(),
            key=lambda pkg_name: self._packages[pkg_name]["create_time"]
        )

    # -------------------------------------------------------------------------
    # Function-level queries (read-only; functions are managed via packages)
    # -------------------------------------------------------------------------
    def get_func(self, func_name: FuncName) -> Optional[Dict[str, Any]]:
        """
        Look up a function and return its details merged with the owning
        package/category metadata.
        :param func_name: function name (globally unique)
        :return: detail dict, or None when the function does not exist
        """
        # 1. Reverse index gives the owning package in O(1).
        package_name = self._func_package_map.get(func_name)
        if not package_name:
            logger.debug(f"[Func Query Failed] 函数名:{func_name} | 原因:函数不存在")
            return None

        # 2. Consistency check: a stale reverse-index entry is repaired here.
        package = self._packages.get(package_name)
        if not package or func_name not in package["funcs"]:
            logger.warning(f"[Data Inconsistent] 函数名:{func_name} | 原因:函数-包映射失效,已清理")
            self._remove_from_func_package_map([func_name])
            return None

        # 3. Assemble the combined view (deepcopy keeps internals isolated;
        #    note deepcopy leaves the callable itself shared, which is intended).
        func_detail = package["funcs"][func_name]
        return deepcopy({
            "func_name": func_name,
            "func": func_detail["func"],
            "description": func_detail["description"],
            "package_name": package_name,
            "tool_type": package["tool_type"],
            "package_dir": package["package_dir"],
            "venv_path": package["venv_path"],
            "venv_type": package["venv_type"]
        })

    def list_funcs(self, package_name: Optional[PackageName] = None) -> List[FuncName]:
        """
        List function names, optionally restricted to one package.
        :param package_name: package name (None lists every function)
        :return: list of function names (a copy)
        """
        if package_name:
            package = self._packages.get(package_name)
            return list(package["funcs"].keys()) if package else []
        # All functions, grouped by package creation-time order.
        sorted_packages = self.list_packages()
        all_funcs = []
        for pkg_name in sorted_packages:
            all_funcs.extend(self._packages[pkg_name]["funcs"].keys())
        return all_funcs

    # -------------------------------------------------------------------------
    # Category-level operations (aggregate query / delete)
    # -------------------------------------------------------------------------
    def get_tool_type(self, tool_type: ToolType) -> Optional[Dict[str, Any]]:
        """
        Return category details: member packages plus aggregate counts.
        :param tool_type: category name
        :return: detail dict, or None when the category does not exist
        """
        package_names = self._type_package_map.get(tool_type)
        if not package_names:
            logger.debug(f"[ToolType Query Failed] 分类名:{tool_type} | 原因:分类不存在")
            return None

        # Count only packages that still exist in the core store.
        valid_packages = [pkg_name for pkg_name in package_names if pkg_name in self._packages]
        total_funcs = sum(len(self._packages[pkg_name]["funcs"]) for pkg_name in valid_packages)

        return {
            "tool_type": tool_type,
            "package_names": valid_packages.copy(),
            "total_package": len(valid_packages),
            "total_func": total_funcs
        }

    def list_tool_types(self) -> List[ToolType]:
        """Return every registered category name."""
        return list(self._type_package_map.keys())

    def delete_tool_type(self, tool_type: ToolType) -> bool:
        """
        Delete a category, cascading to every member package.
        :param tool_type: category name
        :return: True when every member package was deleted, else False
        """
        # 1. The category must exist.
        if tool_type not in self._type_package_map:
            logger.warning(f"[ToolType Delete Failed] 分类名:{tool_type} | 原因:分类不存在")
            return False

        # 2. Snapshot the member list — it shrinks while we delete.
        package_names = self._type_package_map[tool_type].copy()
        if not package_names:
            logger.info(f"[ToolType Delete] 分类名:{tool_type} | 无下属包,直接删除分类")
            del self._type_package_map[tool_type]
            return True

        # 3. Delete each member package; overall success requires all to succeed.
        delete_results = [self.delete_package(pkg_name) for pkg_name in package_names]
        all_success = all(delete_results)

        logger.info(
            f"[ToolType Delete Completed] 分类名:{tool_type} | 处理包数:{len(package_names)} | 全部成功:{all_success}"
        )
        return all_success

    # -------------------------------------------------------------------------
    # Serialization (persistence and restart recovery)
    # -------------------------------------------------------------------------
    def get_serializable_data(self) -> Dict[str, Any]:
        """
        Return a serializable snapshot for persistence.
        Callable objects are stripped; only metadata is kept.
        """
        serializable_packages = {}

        for pkg_name, pkg_info in self._packages.items():
            serializable_packages[pkg_name] = {
                "package_name": pkg_name,
                "tool_type": pkg_info["tool_type"],
                "package_dir": pkg_info["package_dir"],
                "venv_path": pkg_info["venv_path"],
                "venv_type": pkg_info["venv_type"],
                "func_names": list(pkg_info["funcs"].keys()),
                "create_time": pkg_info["create_time"]
            }

        return {
            "serializable_packages": serializable_packages,
            # Fixed: deepcopy — the map values are mutable lists, so a shallow
            # .copy() let callers mutate internal state through the snapshot,
            # breaking this class's data-isolation guarantee.
            "type_package_map": deepcopy(self._type_package_map),
            # str -> str mapping: a shallow copy is already fully isolated.
            "func_package_map": self._func_package_map.copy(),
            "serialize_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        }

    def load_serializable_data(self, data: Dict[str, Any]) -> Dict[str, int]:
        """
        Restore metadata and index mappings from a persisted snapshot.
        Callable objects must be re-imported later via the package loader.
        :param data: snapshot produced by :meth:`get_serializable_data`
        :return: recovery statistics (total / success / fail package counts)
        """
        result = {
            "total_package": 0,
            "success_package": 0,
            "fail_package": 0
        }

        try:
            serializable_packages = data.get("serializable_packages", {})
            type_package_map = data.get("type_package_map", {})
            func_package_map = data.get("func_package_map", {})

            result["total_package"] = len(serializable_packages)

            # Restore package metadata (function objects filled in later).
            for pkg_name, pkg_meta in serializable_packages.items():
                self._packages[pkg_name] = {
                    "package_name": pkg_name,
                    "tool_type": pkg_meta["tool_type"],
                    "package_dir": pkg_meta["package_dir"],
                    "venv_path": pkg_meta["venv_path"],
                    "venv_type": pkg_meta["venv_type"],
                    "funcs": {},  # populated later by the package loader
                    "create_time": pkg_meta["create_time"]
                }
                result["success_package"] += 1

            # Restore both index mappings (deepcopy isolates from the caller).
            self._type_package_map = deepcopy(type_package_map)
            self._func_package_map = deepcopy(func_package_map)

            result["fail_package"] = result["total_package"] - result["success_package"]
            logger.info(
                f"[Serializable Data Loaded] 总包数:{result['total_package']} | "
                f"成功恢复:{result['success_package']} | 失败:{result['fail_package']}"
            )
            return result
        except Exception as e:
            logger.error(f"[Serializable Data Load Failed] 原因:{str(e)}", exc_info=True)
            # Wipe partially-restored data so no dirty state survives.
            self._reset_all_data()
            return result

    def update_package_functions(self, package_name: PackageName, funcs: Dict[FuncName, FuncDetail]):
        """
        Attach (or replace) a package's function objects after a restore.
        NOTE(review): get_func expects each value to be a FuncDetail dict
        ({"func": ..., "description": ...}) — confirm callers pass that shape,
        not bare callables.
        :param package_name: package name
        :param funcs: function name -> function detail mapping
        :raises ValueError: when the package is not present in the repository
        """
        if package_name not in self._packages:
            raise ValueError(f"包 {package_name} 不存在于仓库中")
        # Replace the (possibly empty) function table.
        self._packages[package_name]["funcs"] = funcs
        # Keep the reverse index in sync so get_func can resolve these names.
        for func_name in funcs.keys():
            self._func_package_map[func_name] = package_name

    # -------------------------------------------------------------------------
    # Internal helpers (private)
    # -------------------------------------------------------------------------
    def _validate_add_package_params(self,
                                     package_name: PackageName,
                                     tool_type: ToolType,
                                     package_dir: PackageDir,
                                     venv_path: str,
                                     venv_type: str,
                                     funcs: Dict[FuncName, FuncDetail]) -> bool:
        """Validate add_package arguments; returns False for hard failures."""
        # 1. Required fields must be non-empty.
        if not all([package_name, tool_type, package_dir, venv_path, venv_type]):
            logger.error(f"[Param Invalid] 包名:{package_name} | 原因:必填参数不能为空")
            return False

        # 2. Package-name uniqueness is deliberately a warning only: re-adding
        #    overwrites the package (reload flows rely on this).
        #    NOTE(review): if tool_type changed between loads, the old category
        #    entry can go stale — confirm this trade-off is intended.
        if package_name in self._packages:
            logger.warning(f"[Param Invalid] 包名:{package_name} | 原因:包名已存在")

        # 3. Environment type must be one of the two known kinds.
        if venv_type not in ["global", "isolated"]:
            logger.error(f"[Param Invalid] 包名:{package_name} | 原因:环境类型必须是 global/isolated")
            return False

        # 4. At least one function must be supplied.
        if not isinstance(funcs, dict) or len(funcs) == 0:
            logger.error(f"[Param Invalid] 包名:{package_name} | 原因:函数列表不能为空字典")
            return False

        # 5. Function-name collisions are also warning-only (see note above);
        #    the reverse index will be repointed to the new package.
        duplicate_funcs = [func_name for func_name in funcs if func_name in self._func_package_map]
        if duplicate_funcs:
            logger.warning(f"[Param Invalid] 包名:{package_name} | 原因:函数名重复 - {duplicate_funcs}")

        # 6. Each function entry must be a dict with a callable and a description.
        for func_name, func_detail in funcs.items():
            if not isinstance(func_detail, dict) or "func" not in func_detail or "description" not in func_detail:
                logger.error(f"[Param Invalid] 包名:{package_name} | 原因:函数[{func_name}]格式错误(需包含func和description)")
                return False
            if not callable(func_detail["func"]):
                logger.error(f"[Param Invalid] 包名:{package_name} | 原因:函数[{func_name}]必须是可调用对象")
                return False

        return True

    def _add_to_type_package_map(self, tool_type: ToolType, package_name: PackageName) -> None:
        """Register a package under its category (idempotent)."""
        if tool_type not in self._type_package_map:
            self._type_package_map[tool_type] = []
        if package_name not in self._type_package_map[tool_type]:
            self._type_package_map[tool_type].append(package_name)

    def _remove_from_type_package_map(self, tool_type: ToolType, package_name: PackageName) -> None:
        """Unregister a package from its category; empty categories are dropped."""
        if tool_type not in self._type_package_map:
            return
        if package_name in self._type_package_map[tool_type]:
            self._type_package_map[tool_type].remove(package_name)
        # Garbage-collect empty categories.
        if not self._type_package_map[tool_type]:
            del self._type_package_map[tool_type]
            logger.debug(f"[ToolType Cleaned] 分类名:{tool_type} | 原因:无下属包")

    def _add_to_func_package_map(self, func_names, package_name: PackageName) -> None:
        """Point each function name at its owning package."""
        for func_name in func_names:
            self._func_package_map[func_name] = package_name

    def _remove_from_func_package_map(self, func_names) -> None:
        """Drop reverse-index entries for the given function names."""
        for func_name in func_names:
            if func_name in self._func_package_map:
                del self._func_package_map[func_name]

    def _rollback_add_package(self,
                              package_name: PackageName,
                              tool_type: ToolType,
                              func_names) -> None:
        """Undo a partially-applied add_package so the store stays consistent."""
        logger.debug(f"[Add Package Rollback] 包名:{package_name} | 开始回滚数据")
        # Undo the core record.
        if package_name in self._packages:
            del self._packages[package_name]
        # Undo the category index entry.
        self._remove_from_type_package_map(tool_type, package_name)
        # Undo the reverse index entries.
        self._remove_from_func_package_map(func_names)
        logger.debug(f"[Add Package Rollback] 包名:{package_name} | 回滚完成")

    def _reset_all_data(self) -> None:
        """Clear every store (used to recover from a failed restore)."""
        self._packages.clear()
        self._type_package_map.clear()
        self._func_package_map.clear()
        logger.warning("[Data Reset] 所有存储数据已清空")


# Module-level singleton: one shared repository keeps data consistent.
tool_repository = ToolRepository()
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_server/mcp_manager.py b/mcp_center/servers/oe_cli_mcp_server/mcp_server/mcp_manager.py
new file mode 100644
index 00000000..229b6db8
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_server/mcp_manager.py
@@ -0,0 +1,380 @@
+# mcp_server/server.py
+import os.path
+import sys
+import threading
+import signal
+from functools import wraps
+from fastapi import FastAPI
+import uvicorn
+
+# 第三方/内部导入
+from mcp.server import FastMCP
+from config.private.mcp_server.config_loader import McpServerConfig
+from servers.oe_cli_mcp_server.mcp_server.manager.manager import ToolManager, logger
+from servers.oe_cli_mcp_server.mcp_tools.tool_type import ToolType
+from servers.oe_cli_mcp_server.util.get_project_root import get_project_root
+from servers.oe_cli_mcp_server.util.zip_tool_util import unzip_tool
+
# Global configuration, loaded once at import time.
PERSIST_FILE = os.path.join(get_project_root(), "data/tool_state.json")  # tool-state persistence file
CONFIG = McpServerConfig().get_config()
FASTAPI_PORT = CONFIG.private_config.fastapi_port  # management REST API port (config is expected to say 12556)
FASTMCP_PORT = CONFIG.private_config.port  # FastMCP/SSE port (expected 12555, kept distinct from FastAPI)
+
+# -------------------------- 线程安全单例装饰器(保留) --------------------------
def singleton(cls):
    """Wrap *cls* so construction always yields one shared, lazily-built instance.

    Thread safety comes from double-checked locking. The wrapper also gains a
    ``reset_instance()`` hook that calls ``release_resources()`` on the current
    instance (when present) and discards it, so the next call rebuilds fresh.
    """
    instance = None
    guard = threading.Lock()

    @wraps(cls)
    def get_instance(*args, **kwargs):
        nonlocal instance
        if instance is None:  # fast path: no lock once built
            with guard:
                if instance is None:  # re-check under the lock
                    instance = cls(*args, **kwargs)
        return instance

    def reset_instance():
        nonlocal instance
        with guard:
            if instance:
                instance.release_resources()  # free resources before dropping the reference
            instance = None

    get_instance.reset_instance = reset_instance
    return get_instance
+
+# -------------------------- 核心服务类(关键修复) --------------------------
@singleton
class McpServer(ToolManager):
    """MCP server facade: a FastMCP (SSE) endpoint plus a FastAPI management API.

    Thread-safe singleton. ``start()`` boots both services; tool packages are
    managed through the inherited :class:`ToolManager` API and re-registered
    immediately, so add/remove/init take effect without restarting the process.
    """

    def __init__(self, name="mcp_server", host="0.0.0.0", port=FASTMCP_PORT):
        super().__init__()
        # Basic settings.
        self.name = name
        self.host = host
        self.port = port  # FastMCP port (expected 12555)
        self.language = CONFIG.public_config.language
        # Core runtime objects (created lazily in start()/_reset()).
        self.mcp = None
        self.fastapi_app = None
        self.fastapi_thread = None
        self.uvicorn_server = None
        self._fastapi_started = False  # guards against starting FastAPI twice
        # Graceful shutdown on SIGTERM / SIGINT.
        signal.signal(signal.SIGTERM, self._handle_sigterm)
        signal.signal(signal.SIGINT, self._handle_sigterm)

    # -------------------------- resource teardown --------------------------
    def release_resources(self):
        """Release every held resource (singleton reset / process exit)."""
        logger.info("开始释放服务资源...")
        # 1. Ask uvicorn to exit and wait briefly for its worker thread.
        if self.uvicorn_server:
            try:
                self.uvicorn_server.should_exit = True
                # Give the FastAPI thread up to 3 seconds to wind down.
                if self.fastapi_thread and self.fastapi_thread.is_alive():
                    self.fastapi_thread.join(timeout=3)
                logger.info("FastAPI 服务已关闭")
            except Exception as e:
                logger.error(f"关闭 FastAPI 失败:{str(e)}")
        # 2. Close the FastMCP instance (API name differs across versions).
        if self.mcp:
            try:
                if hasattr(self.mcp, "close"):
                    self.mcp.close()
                elif hasattr(self.mcp, "stop"):
                    self.mcp.stop()
                logger.info("FastMCP 实例已关闭")
            except Exception as e:
                logger.error(f"关闭 FastMCP 失败:{str(e)}")
        # 3. Drop references so the GC can reclaim them.
        self.mcp = None
        self.fastapi_app = None
        self.fastapi_thread = None
        self.uvicorn_server = None
        self._fastapi_started = False  # allow a later restart
        logger.info("资源释放完成")

    # -------------------------- signal handling --------------------------
    def _handle_sigterm(self, signum, frame):
        """Clean up and exit on SIGTERM/SIGINT."""
        logger.info(f"收到终止信号({signum}),准备退出...")
        self.release_resources()
        sys.exit(0)

    # -------------------------- FastAPI management API --------------------------
    def _create_fastapi_app(self):
        """Build the FastAPI app exposing the tool-management endpoints."""
        app = FastAPI(title="MCP Tool API", version="1.0")

        @app.get("/tool/list", summary="查询所有已加载工具包")
        def list_tools():
            try:
                pkg_funcs = {pkg: self.list_funcs(pkg) for pkg in self.list_packages()}
                return {
                    "success": True,
                    "data": {"pkg_funcs": pkg_funcs, "total_packages": len(pkg_funcs)},
                    "message": "查询成功"
                }
            except Exception as e:
                logger.error(f"查询工具包失败:{str(e)}", exc_info=True)
                return {"success": False, "message": f"查询失败:{str(e)}"}

        @app.post("/tool/add", summary="添加工具包(system/custom)")
        def add_tool(type: str, value: str):
            # NOTE: the query-parameter name "type" shadows the builtin but is
            # part of the public API, so it is kept.
            try:
                if type not in ["system", "custom"]:
                    return {"success": False, "message": "type必须是system或custom"}
                if type == "system":
                    self.load(ToolType(value))
                else:
                    self.load(value)
                return {"success": True, "message": f"添加 {value} 成功(实时生效)"}
            except Exception as e:
                logger.error(f"添加工具包 {value} 失败:{str(e)}", exc_info=True)
                return {"success": False, "message": f"添加失败:{str(e)}"}

        @app.post("/tool/remove", summary="删除工具包(system/custom)")
        def remove_tool(type: str, value: str):
            try:
                if type not in ["system", "custom"]:
                    return {"success": False, "message": "type必须是system或custom"}
                target = ToolType(value) if type == "system" else value
                self.remove(target)
                return {"success": True, "message": f"删除 {value} 成功(实时生效)"}
            except Exception as e:
                logger.error(f"删除工具包 {value} 失败:{str(e)}", exc_info=True)
                return {"success": False, "message": f"删除失败:{str(e)}"}

        @app.post("/tool/init", summary="初始化工具包(仅保留基础包)")
        def init_tool():
            try:
                self.init()
                return {"success": True, "message": "初始化成功(仅保留基础运维包,实时生效)"}
            except Exception as e:
                logger.error(f"初始化失败:{str(e)}", exc_info=True)
                return {"success": False, "message": f"初始化失败:{str(e)}"}

        return app

    def _start_fastapi(self):
        """Start the FastAPI management server in a daemon thread (idempotent)."""
        # Never start a second server/thread.
        if self._fastapi_started or (self.fastapi_thread and self.fastapi_thread.is_alive()):
            logger.warning("FastAPI 服务已启动,无需重复启动")
            return

        self.fastapi_app = self._create_fastapi_app()

        def run_server():
            try:
                config = uvicorn.Config(
                    self.fastapi_app,
                    host="0.0.0.0",
                    port=FASTAPI_PORT,  # fixed management port (expected 12556)
                    log_level="warning",
                    access_log=False,
                    reload=False  # no hot reload: avoids duplicate processes
                )
                self.uvicorn_server = uvicorn.Server(config)
                self.uvicorn_server.run()  # synchronous run (no un-awaited coroutine)
            except Exception as e:
                logger.error(f"FastAPI 启动失败:{str(e)}", exc_info=True)
            finally:
                self._fastapi_started = False  # thread exited: allow restart

        self.fastapi_thread = threading.Thread(target=run_server, daemon=True)
        self.fastapi_thread.start()
        self._fastapi_started = True
        logger.info(f"FastAPI 服务启动:http://0.0.0.0:{FASTAPI_PORT}/docs")

    # -------------------------- core tool management --------------------------
    def _reset(self):
        """Rebuild the FastMCP instance and re-register everything currently loaded."""
        logger.info("重置 MCP 实例...")
        # 1. Close the old instance first so nothing leaks.
        if self.mcp:
            try:
                if hasattr(self.mcp, "close"):
                    self.mcp.close()
            except Exception as e:
                logger.error(f"关闭旧 FastMCP 实例失败:{str(e)}")
        # 2. Fresh FastMCP instance.
        self.mcp = FastMCP(self.name, host=self.host, port=self.port)
        # 3. Restore persisted state and re-import function objects
        #    (reload_package_functions already loads the packages).
        self.restore_tool_state()
        self.reload_package_functions()
        # 4. Register every tool exactly once on the fresh instance.
        self._register_all_tools()
        logger.info(f"MCP 实例重置完成,当前加载 {len(self.list_packages())} 个包")

    def _register_all_tools(self):
        """Register every loaded function on the FastMCP instance.

        FastMCP overwrites a tool registered under the same name, so no
        already-registered check is needed.
        """
        for pkg in self.list_packages():
            for func_name in self.list_funcs(pkg):
                func_info = self.get_func_info(func_name)
                # NOTE(review): assumes description is a {"zh": ..., "en": ...}
                # dict for every tool — confirm tool config.json files guarantee it.
                desc = func_info["description"]["zh"] if self.language == "zh" else func_info["description"]["en"]
                self.mcp.tool(name=func_name, description=desc)(func_info["func"])
        logger.info(f"工具注册完成:共注册 {sum(len(self.list_funcs(pkg)) for pkg in self.list_packages())} 个函数")

    def load(self, mcp_collection: ToolType | str):
        """Load a tool package (custom name/zip) or a system tool type, live.

        :param mcp_collection: ToolType for system packages, or a package
            name / path to a ``.zip`` archive for custom packages.
        """
        before_count = len(self.list_packages())
        try:
            packages_to_register = []
            if isinstance(mcp_collection, str):
                # Custom package: either a zip archive or a known package name.
                if mcp_collection.endswith(".zip"):
                    unzip_tool(mcp_collection)
                    pkg_name = os.path.basename(mcp_collection)[:-4]
                    pkg_dir = os.path.join(get_project_root(), "mcp_tools/personal_tools", pkg_name)
                else:
                    pkg_name = mcp_collection
                    pkg_dir = self.get_package_path(pkg_name)
                # Skip when already loaded or the path is unknown.
                if not pkg_dir or pkg_name in self.list_packages():
                    logger.warning(f"包 {pkg_name} 已加载或路径无效,跳过")
                    return
                # Delegate the actual load to ToolManager.
                if not self.load_package(pkg_dir):
                    logger.error(f"加载包目录 {pkg_dir} 失败")
                    return
                packages_to_register = [pkg_name]
            elif isinstance(mcp_collection, ToolType):
                tool_type = mcp_collection.value
                # Skip an already-loaded system type.
                if tool_type in self.list_tool_types():
                    logger.warning(f"系统包类型 {tool_type} 已加载,跳过")
                    return
                load_result = self.load_tool_type(tool_type)
                if load_result["success_package"] == 0:
                    logger.error(f"系统包类型 {tool_type} 加载失败:{load_result['fail_reason']}")
                    return
                # Only the packages added by this call need registering.
                packages_to_register = [
                    pkg for pkg in self.list_packages(tool_type)
                    if pkg not in self.list_packages()[:before_count]
                ]
            else:
                logger.error(f"不支持的加载类型:{type(mcp_collection)}")
                return

            # Register immediately so the change is live (no _reset needed).
            if packages_to_register:
                self._register_all_tools()
            logger.info(f"加载成功:新增 {len(packages_to_register)} 个包,当前共 {len(self.list_packages())} 个(实时生效)")
        except Exception as e:
            logger.error(f"加载 {mcp_collection} 失败:{str(e)}", exc_info=True)

    def remove(self, mcp_collection):
        """Unload a package (by name) or a whole tool type, live.

        Also purges the corresponding entries from ``sys.modules`` so the
        Python module cache does not keep the unloaded code alive.
        """
        try:
            pkg_names = []
            # 1. Update the ToolManager state.
            if isinstance(mcp_collection, ToolType):
                tool_type = mcp_collection.value
                pkg_names = self.list_packages(tool_type)  # every package of that type
                self.unload_tool_type(tool_type)
            else:
                pkg_name = mcp_collection
                pkg_names = [pkg_name]
                self.unload_package(pkg_name)

            # 2. Drop cached modules belonging to the removed packages.
            for pkg_name in pkg_names:
                for mod_name in list(sys.modules.keys()):
                    if mod_name.startswith(f"{pkg_name}.") or mod_name == pkg_name:
                        del sys.modules[mod_name]
                        logger.debug(f"清理模块缓存:{mod_name}")

            # 3. Rebuild the MCP instance so the registration set is current.
            self._reset()
            logger.info(f"删除成功:已卸载 {len(pkg_names)} 个包(实时生效)")
        except Exception as e:
            logger.error(f"卸载 {mcp_collection} 失败:{str(e)}", exc_info=True)

    def init(self):
        """Reset the tool set to only the base operations packages, live."""
        logger.info("开始初始化工具包...")
        # 1. Unload every currently-loaded tool type.
        all_types = self.list_tool_types()
        for tool_type in all_types:
            self.unload_tool_type(tool_type)
        # 2. Purge module caches for base tool packages so reloads are clean.
        #    Fixed: derive the directory from the project root (consistent with
        #    load()'s personal_tools path) instead of a hard-coded install path.
        base_tools_dir = os.path.join(get_project_root(), "mcp_tools/base_tools")
        if os.path.exists(base_tools_dir):
            # Collect sub-package names (directories only, skip hidden ones).
            module_prefixes = [
                dir_name for dir_name in os.listdir(base_tools_dir)
                if os.path.isdir(os.path.join(base_tools_dir, dir_name))
                and not dir_name.startswith(".")
            ]
            # Drop any cached module matching one of the prefixes.
            for mod_name in list(sys.modules.keys()):
                if any(
                    mod_name == prefix or mod_name.startswith(f"{prefix}.")
                    for prefix in module_prefixes
                ):
                    del sys.modules[mod_name]
                    logger.debug(f"清理模块缓存:{mod_name}")
        else:
            logger.warning(f"base_tools 目录不存在:{base_tools_dir},跳过模块缓存清理")
        # 3. Reload configuration so base packages see current settings.
        self.reload_config()
        # 4. Rebuild the MCP instance from a clean slate.
        self.release_resources()
        self.mcp = FastMCP(self.name, host=self.host, port=self.port)
        # 5. Load the base packages (registered immediately).
        self.load(ToolType.BASE)
        logger.info("初始化完成:仅保留基础运维包(实时生效)")

    def reload_config(self):
        """Re-read configuration and refresh the fields derived from it."""
        global CONFIG
        CONFIG = McpServerConfig().get_config()
        self.language = CONFIG.public_config.language
        self.port = CONFIG.private_config.port

    def restart(self):
        """Full restart: release everything, then boot again."""
        logger.info("重启 MCP 服务...")
        self.release_resources()
        self.start()

    # -------------------------- service entry point --------------------------
    def start(self):
        """Start both services; blocks on the FastMCP (SSE) run loop."""
        logger.info("启动 MCP 服务...")
        # 1. Ensure a FastMCP instance exists.
        if not self.mcp:
            self.mcp = FastMCP(self.name, host=self.host, port=self.port)
        # 2. Restore persisted tool state and re-import function objects.
        self.restore_tool_state()
        self.reload_package_functions()
        # 3. Fall back to the base packages when nothing is loaded.
        if not self.list_packages():
            self.load(ToolType.BASE)
        # 4. Register every tool exactly once.
        self._register_all_tools()
        # 5. Start the management API (idempotent).
        self._start_fastapi()
        # 6. Run FastMCP on the main thread (blocking).
        logger.info(f"FastMCP 服务启动:{self.host}:{self.port}(持久化文件:{PERSIST_FILE})")
        try:
            self.mcp.run(transport='sse')
        except Exception as e:
            logger.error(f"FastMCP 服务运行失败:{str(e)}", exc_info=True)
            self.release_resources()
            sys.exit(1)
+
+# -------------------------- 启动入口 --------------------------
if __name__ == "__main__":
    # Build the singleton server and block on its run loop.
    McpServer().start()
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/base.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/base.py
new file mode 100644
index 00000000..d0ba531e
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/base.py
@@ -0,0 +1,159 @@
import logging
import subprocess
from typing import Any, Dict, Optional

import paramiko

from servers.oe_cli_mcp_server.config.base_config_loader import LanguageEnum
+
+
+def _format_gpu_info(raw_info: Dict[str, Any],
+ host: str, include_processes: bool, lang: Optional[LanguageEnum]) -> Dict[str, Any]:
+ """格式化输出(双语适配描述)"""
+ no_process_msg = "未开启进程查询(需设置include_processes=True)" if lang == LanguageEnum.ZH else "Process query not enabled (set include_processes=True)"
+ return {
+ "host": host,
+ "gpu_count": len(raw_info["gpu"]),
+ "gpu_details": raw_info["gpu"],
+ "include_processes": include_processes,
+ "process_details": raw_info["processes"] if include_processes else no_process_msg
+ }
+
def _get_local_gpu_status(gpu_index: Optional[int], include_processes: bool, lang: Optional[LanguageEnum]) -> Dict[str, Any]:
    """Query local GPU state via nvidia-smi (bilingual error messages).

    :param gpu_index: 0-based GPU index to query; ``None`` queries all GPUs.
    :param include_processes: also query compute processes when True.
    :param lang: language used for warning/error messages.
    :return: {"gpu": [...], "processes": [...]} parsed from nvidia-smi CSV output.
    :raises RuntimeError: when nvidia-smi is missing or a query command fails.
    """
    try:
        # Base query: one CSV row per GPU, no header/units for easy parsing.
        cmd = "/usr/bin/nvidia-smi --query-gpu=index,name,memory.used,memory.total,utilization.gpu --format=csv,noheader,nounits"
        if gpu_index is not None:
            cmd += f" -i {gpu_index}"

        result = subprocess.run(
            cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8"
        )

        # Parse one dict per GPU row.
        gpu_info = []
        for line in result.stdout.strip().split('\n'):
            if not line:
                continue
            try:
                idx, name, mem_used, mem_total, gpu_util = line.split(', ')
                gpu_info.append({
                    "index": int(idx),
                    "name": name.strip(),
                    "memory_used": int(mem_used),
                    "memory_total": int(mem_total),
                    "gpu_utilization": int(gpu_util)
                })
            except ValueError:
                # Malformed row: warn and keep parsing the rest.
                # Fixed: route through logging instead of print (library code).
                warn_msg = f"跳过无效的GPU信息行: {line}" if lang == LanguageEnum.ZH else f"Skipping invalid GPU info line: {line}"
                logging.getLogger(__name__).warning(warn_msg)
                continue

        # Optional second query: compute processes currently using the GPUs.
        proc_info = []
        if include_processes:
            proc_cmd = "/usr/bin/nvidia-smi --query-compute-apps=pid,name,used_memory --format=csv,noheader,nounits"
            try:
                proc_result = subprocess.run(
                    proc_cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8"
                )
                for line in proc_result.stdout.strip().split('\n'):
                    if not line:
                        continue
                    try:
                        pid, proc_name, used_mem = line.split(', ')
                        proc_info.append({
                            "pid": int(pid),
                            "name": proc_name.strip(),
                            "used_memory": int(used_mem)
                        })
                    except ValueError:
                        # Malformed process row: warn and continue.
                        warn_msg = f"跳过无效的进程信息行: {line}" if lang == LanguageEnum.ZH else f"Skipping invalid process info line: {line}"
                        logging.getLogger(__name__).warning(warn_msg)
                        continue
            except subprocess.CalledProcessError as e:
                # Process query itself failed.
                err_msg = f"查询GPU进程信息失败: {e.stderr}" if lang == LanguageEnum.ZH else f"Failed to query GPU process info: {e.stderr}"
                raise RuntimeError(err_msg)

        return {"gpu": gpu_info, "processes": proc_info}

    except FileNotFoundError:
        # nvidia-smi binary not present on this machine.
        err_msg = "未找到nvidia-smi命令,请确认已安装NVIDIA驱动" if lang == LanguageEnum.ZH else "/usr/bin/nvidia-smi command not found, please ensure NVIDIA driver is installed"
        raise RuntimeError(err_msg)
    except subprocess.CalledProcessError as e:
        # Base GPU query failed (non-zero exit).
        err_msg = f"执行GPU查询命令失败: {e.stderr}" if lang == LanguageEnum.ZH else f"Failed to execute GPU query command: {e.stderr}"
        raise RuntimeError(err_msg)
+
+
def _get_remote_gpu_status_via_ssh(ssh: paramiko.SSHClient, gpu_index: Optional[int],
                                   include_processes: bool, lang: Optional[LanguageEnum]) -> Dict[str, Any]:
    """Query GPU state on a remote host over an established SSH connection.

    :param ssh: connected :class:`paramiko.SSHClient`.
    :param gpu_index: 0-based GPU index to query; ``None`` queries all GPUs.
    :param include_processes: also query compute processes when True.
    :param lang: language used for warning/error messages.
    :return: {"gpu": [...], "processes": [...]} parsed from nvidia-smi CSV output.
    :raises RuntimeError: when the remote command fails or nvidia-smi is missing.
    """
    # 1. Base GPU query (same CSV format as the local path).
    cmd = "/usr/bin/nvidia-smi --query-gpu=index,name,memory.used,memory.total,utilization.gpu --format=csv,noheader,nounits"
    if gpu_index is not None:
        cmd += f" -i {gpu_index}"

    _, stdout, stderr = ssh.exec_command(cmd)
    exit_status = stdout.channel.recv_exit_status()
    error = stderr.read().decode("utf-8").strip()

    if exit_status != 0:
        # Distinguish a missing binary from a failed query.
        if "command not found" in error:
            err_msg = "远程主机未找到nvidia-smi命令,可能未安装NVIDIA驱动" if lang == LanguageEnum.ZH else "/usr/bin/nvidia-smi command not found on remote host, possibly no NVIDIA driver installed"
        else:
            err_msg = f"远程GPU查询命令执行失败: {error}" if lang == LanguageEnum.ZH else f"Remote GPU query command failed: {error}"
        raise RuntimeError(err_msg)

    # Parse one dict per GPU row.
    gpu_info = []
    for line in stdout.read().decode("utf-8").strip().split('\n'):
        if not line:
            continue
        try:
            idx, name, mem_used, mem_total, gpu_util = line.split(', ')
            gpu_info.append({
                "index": int(idx),
                "name": name.strip(),
                "memory_used": int(mem_used),
                "memory_total": int(mem_total),
                "gpu_utilization": int(gpu_util)
            })
        except ValueError:
            # Fixed: route warnings through logging instead of print (library code).
            warn_msg = f"跳过远程无效的GPU信息行: {line}" if lang == LanguageEnum.ZH else f"Skipping invalid remote GPU info line: {line}"
            logging.getLogger(__name__).warning(warn_msg)
            continue

    # 2. Optional process query.
    proc_info = []
    if include_processes:
        proc_cmd = "/usr/bin/nvidia-smi --query-compute-apps=pid,name,used_memory --format=csv,noheader,nounits"
        _, stdout_proc, stderr_proc = ssh.exec_command(proc_cmd)
        exit_status_proc = stdout_proc.channel.recv_exit_status()
        error_proc = stderr_proc.read().decode("utf-8").strip()

        if exit_status_proc != 0:
            err_msg = f"远程GPU进程查询失败: {error_proc}" if lang == LanguageEnum.ZH else f"Remote GPU process query failed: {error_proc}"
            raise RuntimeError(err_msg)

        for line in stdout_proc.read().decode("utf-8").strip().split('\n'):
            if not line:
                continue
            try:
                pid, proc_name, used_mem = line.split(', ')
                proc_info.append({
                    "pid": int(pid),
                    "name": proc_name.strip(),
                    "used_memory": int(used_mem)
                })
            except ValueError:
                warn_msg = f"跳过远程无效的进程信息行: {line}" if lang == LanguageEnum.ZH else f"Skipping invalid remote process info line: {line}"
                logging.getLogger(__name__).warning(warn_msg)
                continue

    return {"gpu": gpu_info, "processes": proc_info}
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/config.json b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/config.json
new file mode 100644
index 00000000..d6a9ef52
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/config.json
@@ -0,0 +1,8 @@
+{
+ "tools": {
+ "nvidia_smi_status": {
+ "zh": "使用nvidia-smi获取本地或远程服务器的GPU状态信息(远程需提供SSH信息)。返回GPU的利用率、显存使用量、温度等关键指标。\n支持本地和远程的GPU状态查询工具\n\n本地查询:不填host、username、password即可\n远程查询:必须提供host、username、password(port可选,默认22)\n\n1. 输入值如下:\n - host:远程主机IP或hostname,不填则查询本地\n - port:SSH端口,默认22\n - username:SSH用户名,远程查询时必填\n - password:SSH密码,远程查询时必填\n - gpu_index:GPU索引(0-based,可选,不填则查询所有GPU)\n - include_processes:是否包含占用GPU的进程信息(默认False)\n\n2. 返回值为包含查询结果的字典\n - success:布尔值,表示查询是否成功\n - message:字符串,描述查询结果(成功信息或错误原因)\n - data:字典,包含GPU状态详细信息\n - host:查询的主机(本地为\"localhost\")\n - gpus:列表,每个元素为GPU信息字典\n - index:GPU索引(整数)\n - name:GPU型号名称\n - utilization_gpu:GPU利用率(百分比)\n - utilization_memory:显存利用率(百分比)\n - temperature:温度(摄氏度)\n - memory_total:总显存(MB)\n - memory_used:已用显存(MB)\n - memory_free:空闲显存(MB)\n - processes:占用进程列表(仅当include_processes=True时返回)\n - pid:进程ID\n - name:进程名称\n - memory_used:进程占用显存(MB)",
+ "en": "GPU status query tool supporting local and remote servers using nvidia-smi\n\nLocal query: Leave host, username, password empty\nRemote query: Must provide host, username, password (port is optional, default 22)\n\n1. Input values are as follows:\n - host: Remote host IP or hostname, leave empty for local query\n - port: SSH port, default 22\n - username: SSH username, required for remote query\n - password: SSH password, required for remote query\n - gpu_index: GPU index (0-based, optional, all GPUs if not specified)\n - include_processes: Whether to include GPU-using processes (default False)\n\n2. Return value is a dictionary containing query results\n - success: Boolean, indicating whether the query was successful\n - message: String, describing the query result (success information or error reason)\n - data: Dictionary, containing detailed GPU status information\n - host: Queried host (\"localhost\" for local)\n - gpus: List, each element is a GPU information dictionary\n - index: GPU index (integer)\n - name: GPU model name\n - utilization_gpu: GPU utilization (percentage)\n - utilization_memory: Memory utilization (percentage)\n - temperature: Temperature (celsius)\n - memory_total: Total memory (MB)\n - memory_used: Used memory (MB)\n - memory_free: Free memory (MB)\n - processes: List of using processes (returned only if include_processes=True)\n - pid: Process ID\n - name: Process name\n - memory_used: Memory used by process (MB)"
+ }
+ }
+}
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/deps.toml b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/deps.toml
new file mode 100644
index 00000000..debad42b
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/deps.toml
@@ -0,0 +1,23 @@
+# deps.toml 简化版(仅保留 Python+系统依赖,文档2-64节核心需求)
+# 说明:用于自定义mcptool,定义运行必需的系统级工具和Python库
+
+
+# ===================== 1. 系统依赖(system_deps)=====================
+# 用途:安装mcptool依赖的系统级工具(需通过yum/apt安装)
+# 格式:{依赖名 = ["openEuler/CentOS安装命令(yum)", "Ubuntu/Debian安装命令(apt)"]}
+# 备注:无需手动验证是否已安装,venv_util.py会自动检查并跳过已安装项
+
+[system]
+
+# 示例3:若mcptool是智算调优类(如GPU监控工具)
+#nvidia_driver = [
+# "yum install -y nvidia-driver-latest-dkms" # openEuler 安装NVIDIA驱动
+#]
+
+# ===================== 2. Python依赖(pip_deps)=====================
+# 用途:安装mcptool依赖的Python库(会安装到mcp虚拟环境,文档2-144节)
+# 格式:{依赖名 = "版本约束"}(版本约束可选,如"==2.31.0"或">=2.25.0")
+
+[pip]
+# 示例1:基础网络请求(若mcptool需调用远程API)
+requests = "==2.31.0" # 固定版本,避免版本冲突
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/tool.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/tool.py
new file mode 100644
index 00000000..c7642286
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/AI_tools/nvidia_tool/tool.py
@@ -0,0 +1,81 @@
+from typing import Any, Dict, Optional, Union
+import paramiko
+from servers.oe_cli_mcp_server.config.base_config_loader import LanguageEnum,BaseConfig
+from servers.oe_cli_mcp_server.mcp_tools.AI_tools.nvidia_tool.base import _format_gpu_info, _get_local_gpu_status, _get_remote_gpu_status_via_ssh
+
+
async def nvidia_smi_status(
    host: Union[str, None] = None,
    gpu_index: Optional[int] = None,
    include_processes: bool = False,
    lang: Optional[LanguageEnum] = LanguageEnum.ZH,
    config: Optional[Any] = None
    ) -> Dict[str, Any]:
    """Query GPU status via nvidia-smi, locally or on a configured remote host.

    :param host: remote host name or IP that must match an entry in the
        configured ``remote_hosts``; ``None`` queries the local machine
    :param gpu_index: 0-based GPU index; ``None`` queries all GPUs
    :param include_processes: also return the processes occupying each GPU
    :param lang: language for user-facing messages (ZH/EN)
    :param config: reserved for compatibility, unused
    :return: dict with ``success`` / ``message`` / ``data`` keys
    :raises ValueError: when ``host`` matches no configured remote host
    """
    result = {
        "success": False,
        "message": "",
        "data": {}
    }

    # 1. Local branch: no host given.
    if host is None:
        try:
            raw_info = _get_local_gpu_status(gpu_index, include_processes, lang)
            formatted_data = _format_gpu_info(raw_info, "localhost", include_processes, lang)

            result["success"] = True
            result["message"] = "成功获取本地主机的GPU状态信息" if lang == LanguageEnum.ZH else "Successfully obtained GPU status information for the local host"
            result["data"] = formatted_data
            return result
        except Exception as e:
            error_msg = f"获取本地GPU状态信息失败: {str(e)}" if lang == LanguageEnum.ZH else f"Failed to obtain local GPU status information: {str(e)}"
            result["message"] = error_msg
            return result

    # 2. Remote branch: look up SSH credentials from the configuration.
    for host_config in BaseConfig().get_config().public_config.remote_hosts:
        if host == host_config.name or host == host_config.host:
            ssh = None
            try:
                ssh = paramiko.SSHClient()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                ssh.connect(
                    hostname=host_config.host,
                    port=host_config.port,
                    username=host_config.username,
                    password=host_config.password
                )

                # Query and format the remote GPU status.
                raw_info = _get_remote_gpu_status_via_ssh(ssh, gpu_index, include_processes, lang)
                formatted_data = _format_gpu_info(raw_info, host_config.host, include_processes, lang)
                result["success"] = True
                result["message"] = f"成功获取远程主机 {host_config.host} 的GPU状态信息" if lang == LanguageEnum.ZH else f"Successfully obtained GPU status information for remote host {host_config.host}"
                result["data"] = formatted_data
                return result

            except paramiko.AuthenticationException:
                err_msg = "SSH认证失败,请检查用户名和密码" if lang == LanguageEnum.ZH else "SSH authentication failed, please check username and password"
                result["message"] = err_msg
                return result
            except Exception as e:
                err_msg = f"远程主机 {host_config.host} 查询异常: {str(e)}" if lang == LanguageEnum.ZH else f"Remote host {host_config.host} query error: {str(e)}"
                result["message"] = err_msg
                return result
            finally:
                # Always release the SSH connection, on success and failure
                # alike (the original relied on a fragile `'ssh' in locals()`
                # check plus duplicated close() calls).
                if ssh is not None:
                    ssh.close()

    # No matching remote host found in the configuration.
    if lang == LanguageEnum.ZH:
        raise ValueError(f"未找到远程主机配置: {host}")
    else:
        raise ValueError(f"Remote host configuration not found: {host}")
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/base.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/base.py
new file mode 100644
index 00000000..1a84c497
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/base.py
@@ -0,0 +1,3 @@
from config.public.base_config_loader import BaseConfig

# Configured language setting resolved once at import time; the cmd executor
# tool in this package reuses it to pick Chinese or English messages.
lang = BaseConfig().get_config().public_config.language
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/config.json b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/config.json
new file mode 100644
index 00000000..8a104bd8
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/config.json
@@ -0,0 +1,8 @@
+{
+ "tools": {
+ "cmd_executor_tool": {
+ "zh": "【统一命令执行工具】\n功能:支持本地执行shell命令/Shell脚本(openEuler操作系统命令),支持按指令类型自动设置超时时间,超时自动终止执行(纯Python实现,兼容常见命令与脚本),返回结构化多语言结果\n\n【核心提示】\n1. 命令参数(command)为必填项,且为openEuler操作系统的命令,不可是其他操作系统,不可传入空值;\n2. 超时参数(timeout)为可选项,若不传入则按指令类型自动匹配默认超时时间,传入时需为正整数(单位:秒),非正整数将自动使用15秒默认值;\n4. 指令类型与默认超时时间映射规则(优先级:用户指定timeout > Shell脚本指令 > 普通指令 > 全局默认):\n - Shell脚本指令(包含.sh、以bash / sh 开头):默认600秒;\n - 长耗时指令(yum/apt/docker/scp):分别为300/300/600/600秒;\n - 中等耗时指令(ping/curl):均为30秒;\n - 快速指令(ls/pwd/echo/cat/grep):分别为5/5/5/10/10秒;\n - 未匹配指令:全局默认15秒;\n5. \n6. 命令格式要求:支持标准shell命令与Shell脚本路径(如\"/tmp/run.sh\"、\"bash /root/install.sh\"),且必须是openEuler系统可用的,避免非法命令导致执行失败。\n\n【枚举类定义(必须遵守)】\n- CommandTimeoutEnum(指令超时枚举):LS=5 / PWD=5 / ECHO=5 / CAT=10 / GREP=10 / PING=30 / CURL=30 / YUM=300 / APT=300 / DOCKER=600 / SCP=600 / SHELL_SCRIPT=600 / DEFAULT=15\n- LanguageEnum(语言枚举):ZH / EN\n\n【参数详情】\n- host:远程主机标识(兼容保留项,可选,无实际作用)\n- command:需要执行的shell命令/Shell脚本(必填,标准shell格式)\n- timeout:执行超时时间(可选,正整数,单位:秒,默认按指令匹配)\n- lang:语言类型(可选,枚举值:ZH/EN,默认读取配置文件)\n\n【返回值说明】\n- success:执行结果(True=成功,False=失败)\n- message:执行信息/错误提示(多语言,如命令成功、超时、执行失败等)\n- result:命令执行结果(成功时返回命令输出内容,失败/超时返回空字符串)\n- target:执行目标(固定为127.0.0.1,本地执行)\n- timeout_used:实际使用的超时时间(单位:秒,便于排查超时问题)",
+ "en": "【Unified Command Execution Tool】\nFunction: Supports executing shell commands/Shell scripts locally, automatically sets timeout time by command type, terminates execution automatically when timed out (Python native implementation, compatible with common commands and scripts), returns structured multilingual results\n\n【Core Guidelines】\n1. Command parameter (command) is required, empty value is not allowed;\n2. Timeout parameter (timeout) is optional, if not passed, the default timeout time is automatically matched by command type; if passed, it must be a positive integer (unit: seconds), non-positive integers will automatically use the default value of 15 seconds;\n3. Language parameter (lang) is optional, if not passed, the default language in the configuration file is read; if passed, it must be a LanguageEnum enum value (ZH/EN);\n4. Mapping rules for command types and default timeout time (priority: user-specified timeout > Shell script command > normal command > global default):\n - Shell script commands (containing .sh, starting with bash / sh ): default 600 seconds;\n - Long-time-consuming commands (yum/apt/docker/scp): 300/300/600/600 seconds respectively;\n - Medium-time-consuming commands (ping/curl): 30 seconds each;\n - Fast commands (ls/pwd/echo/cat/grep): 5/5/5/10/10 seconds respectively;\n - Unmatched commands: global default 15 seconds;\n5. The host parameter is a compatibility reserved item, the current version only supports local execution (target fixed as 127.0.0.1), and passing it does not affect the execution logic;\n6. 
Command format requirement: Supports standard shell commands and Shell script paths (e.g.\"/tmp/run.sh\", \"bash /root/install.sh\"), avoid execution failure caused by illegal commands.\n\n【Enum Class Definition (Must Follow)】\n- CommandTimeoutEnum (Command Timeout Enum): LS=5 / PWD=5 / ECHO=5 / CAT=10 / GREP=10 / PING=30 / CURL=30 / YUM=300 / APT=300 / DOCKER=600 / SCP=600 / SHELL_SCRIPT=600 / DEFAULT=15\n- LanguageEnum (Language Enum): ZH / EN\n\n【Parameter Details】\n- host: Remote host identifier (compatibility reserved item, optional, no actual effect)\n- command: Shell command/Shell script to execute (required, standard shell format)\n- timeout: Execution timeout time (optional, positive integer, unit: seconds, default matched by command)\n- lang: Language type (optional, enum values: ZH/EN, default read from configuration file)\n\n【Return Value Explanation】\n- success: Execution result (True=success, False=failure)\n- message: Execution info/error prompt (multilingual, such as success, timeout, execution failure, etc.)\n- result: Command execution result (returns command output content on success, empty string on failure/timeout)\n- target: Execution target (fixed as 127.0.0.1, local execution)\n- timeout_used: The actual timeout time used (unit: seconds, convenient for troubleshooting timeout issues)"
+ }
+ }
+}
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/deps.toml.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/deps.toml.py
new file mode 100644
index 00000000..e69de29b
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/tool.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/tool.py
new file mode 100644
index 00000000..a55fc51d
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/cmd_executor_tool/tool.py
@@ -0,0 +1,129 @@
+import asyncio
+from typing import Union, Optional
+import subprocess
+from config.public.base_config_loader import BaseConfig,LanguageEnum
+
+
+# Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
+
+
async def cmd_executor_tool(
    command: str = "",
    timeout: Optional[int] = None,
) -> dict:
    """
    Execute a shell command/script locally with per-command default timeouts.

    Fixes over the previous implementation:
    * command-to-timeout matching compares whole tokens, not substrings
      (the old ``"ls" in cmd`` check made e.g. ``./tools`` hit the 5s entry);
    * the timeout is enforced by ``subprocess.run`` itself, which actually
      kills the child process on expiry — ``asyncio.wait_for`` alone only
      abandoned the worker thread while the command kept running.

    :param command: shell command or script invocation to run (required)
    :param timeout: explicit timeout in seconds (optional, positive integer;
        invalid values fall back to the 15s default)
    :return: structured dict with success, message, result, target, timeout_used
    """
    # Language for user-facing messages comes from the server configuration.
    lang = BaseConfig().get_config().public_config.language

    # Uniform response skeleton.
    response = {
        "success": False,       # True on success / False on failure
        "message": "",          # bilingual status message
        "result": "",           # command output on success, "" otherwise
        "target": "127.0.0.1",  # execution target, local only
        "timeout_used": 0       # timeout actually applied (seconds)
    }

    # Reject empty commands early.
    if not command:
        response["message"] = "请提供需要执行的命令" if lang == LanguageEnum.ZH else "please give me the command to execute"
        return response

    # Default timeouts (seconds) keyed by executable name.
    cmd_timeout_map = {
        # fast commands: short timeout
        "ls": 5,
        "pwd": 5,
        "echo": 5,
        "cat": 10,
        "grep": 10,
        # medium-duration commands
        "ping": 30,
        "curl": 30,
        # long-running commands
        "yum": 300,
        "apt": 300,
        "docker": 600,
        "scp": 600,
    }
    # Default timeout for shell-script invocations (seconds).
    SHELL_SCRIPT_DEFAULT_TIMEOUT = 600

    def get_final_timeout(cmd: str) -> int:
        """Resolve the timeout: user value > shell script > per-command > 15s."""
        # Priority 1: user-specified timeout (validated; bad values -> 15s).
        if timeout is not None:
            try:
                t = int(timeout)
                return t if t > 0 else 15
            except (ValueError, TypeError):
                return 15
        # Priority 2: shell-script invocations get their own default.
        cmd_lower = cmd.lower()
        if ".sh" in cmd_lower or cmd_lower.startswith("bash ") or cmd_lower.startswith("sh "):
            return SHELL_SCRIPT_DEFAULT_TIMEOUT
        # Priority 3: match whole tokens (basename of each) so wrappers like
        # "sudo yum install" still resolve, while "./tools" no longer
        # accidentally matches the "ls" entry.
        for token in cmd_lower.split():
            name = token.rsplit("/", 1)[-1]
            if name in cmd_timeout_map:
                return cmd_timeout_map[name]
        # Priority 4: global default.
        return 15

    final_timeout = get_final_timeout(command)
    response["timeout_used"] = final_timeout  # record the applied timeout

    def local_exec_sync():
        """Run the command synchronously; return (status, stdout, stderr)."""
        try:
            result = subprocess.run(
                command,
                shell=True,
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                timeout=final_timeout  # kills the child process on expiry
            )
            return "ok", result.stdout.strip(), ""
        except subprocess.TimeoutExpired:
            return "timeout", "", ""
        except subprocess.CalledProcessError as e:
            # Non-zero exit code: surface stderr.
            return "error", "", e.stderr.strip()
        except Exception as e:
            return "error", "", str(e)

    try:
        loop = asyncio.get_running_loop()
        # Run the blocking call in the default thread pool. wait_for is kept
        # only as a backstop with a small grace period, in case the
        # subprocess-level timeout fails to fire.
        status, exec_result, exec_error = await asyncio.wait_for(
            loop.run_in_executor(None, local_exec_sync),
            timeout=final_timeout + 5
        )

        if status == "ok":
            response["success"] = True
            response["message"] = "命令执行成功" if lang == LanguageEnum.ZH else "Command executed successfully"
            response["result"] = exec_result
        elif status == "timeout":
            response["message"] = f"本地执行命令超时({final_timeout}秒),已终止执行" if lang == LanguageEnum.ZH else f"Local command execution timed out ({final_timeout} seconds), terminated"
        else:
            response["message"] = f"命令执行出错:{exec_error}" if lang == LanguageEnum.ZH else f"Command execution failed: {exec_error}"

    except asyncio.TimeoutError:
        # Backstop path: the thread did not come back within the grace period.
        response["message"] = f"本地执行命令超时({final_timeout}秒),已终止执行" if lang == LanguageEnum.ZH else f"Local command execution timed out ({final_timeout} seconds), terminated"
    except Exception as e:
        # Other execution failures (e.g. executor errors).
        response["message"] = f"本地执行命令出错:{str(e)}" if lang == LanguageEnum.ZH else f"Local command execution failed: {str(e)}"

    return response
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/base.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/base.py
new file mode 100644
index 00000000..2cc858ff
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/base.py
@@ -0,0 +1,253 @@
+import logging
+import os
+import shutil
+from enum import Enum
+from typing import Dict, List, Optional
+from pydantic import Field
+from config.public.base_config_loader import BaseConfig, LanguageEnum
+
# Module-level logger for the file tool.
logger = logging.getLogger("file_tool")
logger.setLevel(logging.INFO)
# Configured message language, shared with tool.py via import.
lang = BaseConfig().get_config().public_config.language
# ========== Enum definitions (improve LLM recognizability) ==========
class FileActionEnum(str, Enum):
    """File operation types accepted by the file tool."""
    LS = "ls"          # list file/directory
    CAT = "cat"        # read file content
    ADD = "add"        # create a new file (empty or with content)
    APPEND = "append"  # append content
    EDIT = "edit"      # overwrite content
    RENAME = "rename"  # rename/move
    CHMOD = "chmod"    # change permissions
    DELETE = "delete"  # delete file/directory
+
class FileEncodingEnum(str, Enum):
    """Supported file encodings for read/write operations."""
    UTF8 = "utf-8"
    GBK = "gbk"
    GB2312 = "gb2312"
    ASCII = "ascii"
+
class CacheTypeEnum(str, Enum):
    """Cache type enum (reserved for future extension; not referenced here)."""
    ALL = "all"
    PACKAGES = "packages"
    METADATA = "metadata"
+
+# ========== 通用工具函数 ==========
def get_language() -> bool:
    """Return True when the configured language is Chinese, False for English."""
    configured = BaseConfig().get_config().public_config.language
    return configured == LanguageEnum.ZH
+
def init_result_dict(
    target_host: str = "127.0.0.1",
    result_type: str = "list",
    include_file_path: bool = True
) -> Dict:
    """Build the standard result skeleton returned by file-tool operations.

    :param target_host: host the operation targets (local-only today)
    :param result_type: "list" -> ``result`` starts as []; any other value -> ""
    :param include_file_path: include the ``file_path`` key. The original
        accepted this flag but ignored it (``file_path`` was always added);
        it is honored now — the default preserves the old behavior.
    :return: dict with success/message/result/target (+ file_path when requested)
    """
    result: Dict = {
        "success": False,
        "message": "",
        "result": [] if result_type == "list" else "",
        "target": target_host,
    }
    if include_file_path:
        result["file_path"] = ""
    return result
+
+# ========== 文件管理核心类 ==========
class FileManager:
    """Local file management core (pure Python, no shell dependency).

    Methods raise on failure (FileNotFoundError, ValueError, ...) and log a
    bilingual message on success; file_tool converts raised exceptions into
    the structured result dict.

    Fixes over the original implementation:
    * ``__init__`` stored the LanguageEnum itself in ``self.is_zh``; since the
      enum value is always truthy, ``_get_error_msg`` could never select the
      English branch. It now stores a real boolean.
    * pydantic ``Field(...)`` objects were used as defaults of plain (non
      pydantic-validated) methods, so omitting an argument handed the method a
      FieldInfo instance instead of the documented default. Replaced with real
      default values (required Field(...) parameters became truly required).
    """

    def __init__(self, lang: LanguageEnum = LanguageEnum.ZH):
        # Compare explicitly instead of relying on the enum's truthiness.
        self.is_zh = lang == LanguageEnum.ZH

    def _get_error_msg(self, zh_msg: str, en_msg: str) -> str:
        """Return the message matching the configured language."""
        return zh_msg if self.is_zh else en_msg

    @staticmethod
    def _stat_entry(full_path: str, name: str) -> Dict:
        """Build one ls entry (name/path/size/mtime/mode/type) from os.stat."""
        stat = os.stat(full_path)
        return {
            "name": name,
            "path": full_path,
            "size": stat.st_size,
            "mtime": stat.st_mtime,
            "mode": oct(stat.st_mode)[-3:],  # last 3 octal digits, e.g. "755"
            "type": "file" if os.path.isfile(full_path) else "dir",
        }

    def ls(self, file_path: str, detail: bool = False) -> List[Dict]:
        """
        List a file or directory (Python implementation of ls).

        :param file_path: file/directory path (required)
        :param detail: reserved flag, currently unused by the implementation
        :return: structured list of file-info dicts
        :raises FileNotFoundError: if the path does not exist
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(self._get_error_msg(f"路径不存在:{file_path}", f"Path not found: {file_path}"))

        if os.path.isfile(file_path):
            # Single file: one entry.
            return [self._stat_entry(file_path, os.path.basename(file_path))]
        # Directory: one entry per immediate child.
        return [
            self._stat_entry(os.path.join(file_path, item), item)
            for item in os.listdir(file_path)
        ]

    def cat(self,
            file_path: str,
            encoding: FileEncodingEnum = FileEncodingEnum.UTF8) -> List[str]:
        """
        Read file content (Python implementation of cat).

        :param file_path: file path (required)
        :param encoding: file encoding (default utf-8)
        :return: content split into lines, trailing newlines stripped
        :raises FileNotFoundError: if the file does not exist
        :raises IsADirectoryError: if the path is a directory
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(self._get_error_msg(f"文件不存在:{file_path}", f"File not found: {file_path}"))
        if os.path.isdir(file_path):
            raise IsADirectoryError(self._get_error_msg(f"路径是目录,无法读取内容:{file_path}", f"Path is directory, cannot read content: {file_path}"))

        # errors="ignore": skip undecodable bytes instead of failing the read.
        with open(file_path, "r", encoding=encoding.value, errors="ignore") as f:
            return [line.rstrip("\n") for line in f]

    def add(self,
            file_path: str,
            overwrite: bool = False,
            content: Optional[str] = None,
            encoding: str = FileEncodingEnum.UTF8.value
            ) -> None:
        """
        Create a file, optionally with initial content (touch-like).

        :param file_path: absolute file path (required)
        :param overwrite: overwrite an existing file (default False)
        :param content: content to write; None creates an empty file
        :param encoding: file encoding used when writing content
        """
        # 1. Existing file with overwrite disabled: skip silently (log only).
        if os.path.exists(file_path) and not overwrite:
            logger.info(self._get_error_msg(f"文件已存在,跳过创建:{file_path}", f"File exists, skip creation: {file_path}"))
            return

        # 2. Make sure the parent directory exists.
        parent_dir = os.path.dirname(file_path)
        if parent_dir and not os.path.exists(parent_dir):
            os.makedirs(parent_dir, exist_ok=True)

        # 3. Write the content, or just create/truncate an empty file.
        with open(file_path, "w", encoding=encoding) as f:
            if content is not None:
                f.write(content)

        # 4. Log, distinguishing empty vs. content-bearing creation.
        if content is None:
            logger.info(self._get_error_msg(f"空文件创建成功:{file_path}", f"Empty file created successfully: {file_path}"))
        else:
            logger.info(self._get_error_msg(f"文件创建并写入内容成功:{file_path}", f"File created and content written successfully: {file_path}"))

    def append(self,
               file_path: str,
               content: str,
               encoding: FileEncodingEnum = FileEncodingEnum.UTF8) -> None:
        """
        Append content plus a trailing newline to a file (created if missing).

        :param file_path: file path (required)
        :param content: content to append (required, non-empty)
        :param encoding: file encoding (default utf-8)
        :raises ValueError: if content is empty
        """
        if not content:
            raise ValueError(self._get_error_msg("追加内容不能为空", "Append content cannot be empty"))

        parent_dir = os.path.dirname(file_path)
        if parent_dir and not os.path.exists(parent_dir):
            os.makedirs(parent_dir, exist_ok=True)

        with open(file_path, "a", encoding=encoding.value) as f:
            f.write(content + "\n")
        logger.info(self._get_error_msg(f"内容追加成功:{file_path}", f"Content appended successfully: {file_path}"))

    def edit(self,
             file_path: str,
             content: str,
             encoding: FileEncodingEnum = FileEncodingEnum.UTF8) -> None:
        """
        Overwrite a file with new content.

        :param file_path: file path (required)
        :param content: replacement content (required, non-empty)
        :param encoding: file encoding (default utf-8)
        :raises ValueError: if content is empty
        """
        if not content:
            raise ValueError(self._get_error_msg("覆盖内容不能为空", "Edit content cannot be empty"))

        parent_dir = os.path.dirname(file_path)
        if parent_dir and not os.path.exists(parent_dir):
            os.makedirs(parent_dir, exist_ok=True)

        with open(file_path, "w", encoding=encoding.value) as f:
            f.write(content)
        logger.info(self._get_error_msg(f"文件内容覆盖成功:{file_path}", f"File content overwritten successfully: {file_path}"))

    def rename(self,
               old_path: str,
               new_path: str) -> None:
        """
        Rename/move a file or directory.

        :param old_path: existing path (required)
        :param new_path: destination path (required, must not exist)
        :raises FileNotFoundError: if old_path does not exist
        :raises FileExistsError: if new_path already exists
        """
        if not os.path.exists(old_path):
            raise FileNotFoundError(self._get_error_msg(f"原路径不存在:{old_path}", f"Old path not found: {old_path}"))
        if os.path.exists(new_path):
            raise FileExistsError(self._get_error_msg(f"新路径已存在:{new_path}", f"New path already exists: {new_path}"))

        shutil.move(old_path, new_path)
        logger.info(self._get_error_msg(f"文件重命名成功:{old_path} → {new_path}", f"File renamed successfully: {old_path} → {new_path}"))

    def chmod(self,
              file_path: str,
              mode: str) -> None:
        """
        Change file permissions.

        :param file_path: file path (required)
        :param mode: octal permission string such as "755" (required)
        :raises FileNotFoundError: if the file does not exist
        :raises ValueError: if mode is not a valid octal string
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(self._get_error_msg(f"文件不存在:{file_path}", f"File not found: {file_path}"))

        try:
            numeric_mode = int(mode, 8)
            os.chmod(file_path, numeric_mode)
            logger.info(self._get_error_msg(f"权限修改成功:{file_path} → {mode}", f"Permission modified successfully: {file_path} → {mode}"))
        except ValueError:
            raise ValueError(self._get_error_msg(f"权限格式错误(需为8进制,如755):{mode}", f"Invalid mode format (must be octal, e.g.755): {mode}"))

    def delete(self,
               file_path: str,
               recursive: bool = False) -> None:
        """
        Delete a file or directory.

        :param file_path: file/directory path (required)
        :param recursive: allow recursive directory deletion (default False)
        :raises FileNotFoundError: if the path does not exist
        :raises IsADirectoryError: if a directory is given without recursive=True
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(self._get_error_msg(f"路径不存在:{file_path}", f"Path not found: {file_path}"))

        if os.path.isfile(file_path):
            os.remove(file_path)
        else:
            if not recursive:
                raise IsADirectoryError(self._get_error_msg(f"路径是目录,需开启recursive=True递归删除:{file_path}", f"Path is directory, set recursive=True to delete: {file_path}"))
            shutil.rmtree(file_path)
        logger.info(self._get_error_msg(f"路径删除成功:{file_path}", f"Path deleted successfully: {file_path}"))
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/config.json b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/config.json
new file mode 100644
index 00000000..d9688dac
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/config.json
@@ -0,0 +1,8 @@
+{
+ "tools": {
+ "file_tool": {
+ "zh": "【统一文件管理工具】\n功能:支持本地文件/目录的查、增、改、删全操作(纯Python实现,无shell依赖),add操作支持创建空文件或写入指定内容\n\n【核心提示】\n1. 操作类型(action)必须从以下枚举值中选择:ls/cat/add/append/edit/rename/chmod/delete,不可传入其他值;\n2. 文件编码(encoding)必须从以下枚举值中选择:utf-8/gbk/gb2312/ascii,默认值为utf-8;\n3. 不同操作类型(action)对应不同必填参数,未满足则执行失败,请严格遵守:\n - ls(查看列表)/cat(读取内容)/delete(删除):仅需传入 file_path(文件/目录路径);\n - add(新建文件):需传入 file_path,可选传入 overwrite(是否覆盖已存在文件,默认False)、content(创建文件时写入的内容,默认None)、encoding(文件编码,默认utf-8);\n - append(追加内容)/edit(覆盖内容):必须传入 file_path + content(写入/追加的内容,不能为空);\n - rename(重命名):必须传入 file_path(原路径) + new_path(新路径);\n - chmod(修改权限):必须传入 file_path + mode(权限模式,如755/644,需为8进制格式);\n4. 非必填参数默认值:detail(ls是否显示详细信息)=False,recursive(删除目录是否递归)=False;\n5. 路径格式要求:file_path/new_path需传入绝对路径(如\"/tmp/test.txt\"),避免相对路径导致的找不到文件问题。\n\n【枚举类定义(必须遵守)】\n- FileActionEnum(操作类型枚举):ls / cat / add / append / edit / rename / chmod / delete\n- FileEncodingEnum(文件编码枚举):utf-8 / gbk / gb2312 / ascii\n\n【参数详情】\n- action:操作类型(必填,枚举值见上方)\n- file_path:目标文件/目录路径(必填,绝对路径)\n- content:写入/追加内容(edit/append操作必填,add操作可选,默认None)\n- new_path:重命名后的新路径(rename操作必填,绝对路径)\n- mode:权限模式(chmod操作必填,如755)\n- detail:ls操作是否显示详细信息(默认False)\n- overwrite:add操作是否覆盖已存在文件(默认False)\n- recursive:delete操作是否递归删除目录(默认False)\n- encoding:文件编码(枚举值见上方,默认utf-8)\n【返回值说明】\n- success:执行结果(True=成功,False=失败)\n- message:执行信息/错误提示(多语言,add操作区分空文件和带内容文件创建)\n- result:操作结果(ls/cat返回结构化列表,其他操作返回空列表)\n- file_path:操作的文件路径(rename后自动更新为新路径)\n- target:执行目标(固定为127.0.0.1,本地执行)",
+ "en": "【Unified File Management Tool】\nFunction: Supports query/add/edit/delete operations for local files/directories (Python native implementation, no shell dependency), the add operation supports creating empty files or writing specified content\n\n【Core Guidelines】\n1. Operation type (action) must be selected from the following enum values: ls/cat/add/append/edit/rename/chmod/delete, other values are not allowed;\n2. File encoding (encoding) must be selected from the following enum values: utf-8/gbk/gb2312/ascii, default value is utf-8;\n3. Different action types correspond to different required parameters, execution will fail if not met, please strictly follow:\n - ls (list files)/cat (read content)/delete (delete): Only need to pass file_path (file/directory path);\n - add (create file): Need to pass file_path, optionally pass overwrite (whether to overwrite existing file, default False), content (content to write when creating file, default None), encoding (file encoding, default utf-8);\n - append (append content)/edit (overwrite content): Must pass file_path + content (content to write/append, cannot be empty);\n - rename (rename file/directory): Must pass file_path (old path) + new_path (new path);\n - chmod (modify permission): Must pass file_path + mode (permission mode, e.g.755/644, must be octal format);\n4. Default values for optional parameters: detail (whether to show detailed info for ls)=False, recursive (whether to delete directory recursively)=False;\n5. 
Path format requirement: file_path/new_path must be absolute path (e.g.\"/tmp/test.txt\"), avoid file not found due to relative path.\n\n【Enum Class Definition (Must Follow)】\n- FileActionEnum (Operation Type Enum): ls / cat / add / append / edit / rename / chmod / delete\n- FileEncodingEnum (File Encoding Enum): utf-8 / gbk / gb2312 / ascii\n\n【Parameter Details】\n- action: Operation type (required, enum values see above)\n- file_path: Target file/directory path (required, absolute path)\n- content: Content to write/append (required for edit/append, optional for add, default None)\n- new_path: New path after rename (required for rename, absolute path)\n- mode: Permission mode (required for chmod, e.g.755)\n- detail: Whether to show detailed info for ls (default False)\n- overwrite: Whether to overwrite existing file for add (default False)\n- recursive: Whether to delete directory recursively for delete (default False)\n- encoding: File encoding (enum values see above, default utf-8)\n【Return Value Explanation】\n- success: Execution result (True=success, False=failure)\n- message: Execution info/error prompt (multilingual, the add operation distinguishes between empty file and file with content creation)\n- result: Operation result (structured list for ls/cat, empty list for others)\n- file_path: Operated file path (automatically updated to new path after rename)\n- target: Execution target (fixed as 127.0.0.1, local execution)"
+ }
+ }
+}
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/deps.toml b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/deps.toml
new file mode 100644
index 00000000..e69de29b
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/tool.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/tool.py
new file mode 100644
index 00000000..b62cfd18
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/file_tool/tool.py
@@ -0,0 +1,107 @@
+from typing import Dict, Optional
+from pydantic import Field
+from config.public.base_config_loader import LanguageEnum
+from servers.oe_cli_mcp_server.mcp_tools.base_tools.file_tool.base import (
+ init_result_dict,
+ FileManager,
+ FileActionEnum,
+ FileEncodingEnum,
+ logger, lang
+)
+
def file_tool(
    action: FileActionEnum = Field(..., description="操作类型(枚举:ls/cat/add/append/edit/rename/chmod/delete)"),
    file_path: str = Field(..., description="文件/目录路径(必填)"),
    content: Optional[str] = Field(None, description="写入/追加内容(add/edit/append必填)"),
    new_path: Optional[str] = Field(None, description="新路径(rename必填)"),
    mode: Optional[str] = Field(None, description="权限模式(chmod必填,如755)"),
    detail: bool = Field(False, description="ls是否显示详细信息"),
    overwrite: bool = Field(False, description="add是否覆盖已存在文件"),
    recursive: bool = Field(False, description="delete是否递归删除目录"),
    encoding: FileEncodingEnum = Field(FileEncodingEnum.UTF8, description="文件编码(默认utf-8)"),
) -> Dict:
    """
    Unified file management tool (compact enum-driven parameters for LLM use).

    Dispatches on ``action`` to the corresponding FileManager method
    (ls/cat/add/append/edit/rename/chmod/delete) and returns the standard
    result dict (success/message/result/target/file_path). Failures are
    reported through the ``message`` field; exceptions are not propagated.

    NOTE(review): the pydantic ``Field(...)`` defaults are presumably consumed
    by the MCP framework when registering the tool schema; if this function is
    ever called directly without arguments, the defaults would be FieldInfo
    objects, not usable values — confirm against the tool loader.
    """
    # Initialize the result dict and record the target path.
    result = init_result_dict()
    result["file_path"] = file_path.strip()
    fm = FileManager(lang=lang)
    is_zh = lang == LanguageEnum.ZH

    # 1. Core parameter validation.
    if not file_path.strip():
        result["message"] = "文件路径不能为空" if is_zh else "File path cannot be empty"
        return result

    # 2. Dispatch on the enum action type.
    try:
        if action == FileActionEnum.LS:
            result["result"] = fm.ls(file_path.strip(), detail=detail)
            result["success"] = True
            result["message"] = f"本地文件列表查询完成(路径:{file_path})" if is_zh else f"Local file list queried (path: {file_path})"

        elif action == FileActionEnum.CAT:
            result["result"] = fm.cat(file_path.strip(), encoding=encoding)
            result["success"] = True
            result["message"] = f"本地文件内容读取完成(路径:{file_path})" if is_zh else f"Local file content read (path: {file_path})"

        elif action == FileActionEnum.ADD:
            add_content = content.strip() if content is not None else None
            fm.add(
                file_path=file_path.strip(),
                overwrite=overwrite,
                content=add_content,
                encoding=encoding.value  # pass the encoding (enum value -> str)
            )
            result["success"] = True
            # Distinguish whether content was written.
            if add_content:
                result["message"] = f"本地文件创建并写入内容成功(路径:{file_path})" if is_zh else f"Local file created and content written (path: {file_path})"
            else:
                result["message"] = f"本地文件创建成功(路径:{file_path})" if is_zh else f"Local file created (path: {file_path})"


        elif action == FileActionEnum.APPEND:
            if not content:
                result["message"] = "追加内容不能为空" if is_zh else "Append content cannot be empty"
                return result
            fm.append(file_path.strip(), content.strip(), encoding=encoding)
            result["success"] = True
            result["message"] = f"本地文件内容追加成功(路径:{file_path})" if is_zh else f"Local file content appended (path: {file_path})"

        elif action == FileActionEnum.EDIT:
            if not content:
                result["message"] = "覆盖内容不能为空" if is_zh else "Edit content cannot be empty"
                return result
            fm.edit(file_path.strip(), content.strip(), encoding=encoding)
            result["success"] = True
            result["message"] = f"本地文件内容覆盖成功(路径:{file_path})" if is_zh else f"Local file content overwritten (path: {file_path})"

        elif action == FileActionEnum.RENAME:
            if not new_path:
                result["message"] = "新路径不能为空" if is_zh else "New path cannot be empty"
                return result
            fm.rename(file_path.strip(), new_path.strip())
            result["success"] = True
            result["file_path"] = new_path.strip()  # report the new location
            result["message"] = f"本地文件重命名成功({file_path} → {new_path})" if is_zh else f"Local file renamed ({file_path} → {new_path})"

        elif action == FileActionEnum.CHMOD:
            if not mode:
                result["message"] = "权限模式不能为空(如755)" if is_zh else "Permission mode cannot be empty (e.g.755)"
                return result
            fm.chmod(file_path.strip(), mode.strip())
            result["success"] = True
            result["message"] = f"本地文件权限修改成功(路径:{file_path},权限:{mode})" if is_zh else f"Local file permission modified (path: {file_path}, mode: {mode})"

        elif action == FileActionEnum.DELETE:
            fm.delete(file_path.strip(), recursive=recursive)
            result["success"] = True
            result["message"] = f"本地文件删除成功(路径:{file_path})" if is_zh else f"Local file deleted (path: {file_path})"

    except Exception as e:
        # Any FileManager exception becomes a failure message in the result.
        result["message"] = f"操作失败:{str(e)}" if is_zh else f"Operation failed: {str(e)}"
        logger.error(f"File manager error: {str(e)}")

    return result
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/base.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/base.py
new file mode 100644
index 00000000..ac29b6ca
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/base.py
@@ -0,0 +1,224 @@
+import logging
+import os
+import subprocess
+from typing import Dict, Optional, List
+
+import paramiko
+
+from config.public.base_config_loader import BaseConfig, LanguageEnum
+
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
def get_language() -> bool:
    """Return True when the globally configured language is Chinese, else False."""
    current = BaseConfig().get_config().public_config.language
    return current == LanguageEnum.ZH
+
+
def get_remote_auth(ip: str) -> Optional[Dict]:
    """Look up SSH connection settings for *ip*.

    *ip* is matched against both the configured host address and its
    symbolic name. Returns a dict with host/port/username/password, or
    None when no configured remote host matches.
    """
    for cfg in BaseConfig().get_config().public_config.remote_hosts:
        if ip not in (cfg.host, cfg.name):
            continue
        return {
            "host": cfg.host,
            "port": cfg.port,
            "username": cfg.username,
            "password": cfg.password,
        }
    return None
+
+
def init_result(target_host: str) -> Dict:
    """Create the empty result envelope shared by the network tools."""
    envelope: Dict = {"success": False, "message": ""}
    envelope["result"] = []
    envelope["target"] = target_host
    return envelope
+
+
def _open_ssh(remote_auth: Dict) -> paramiko.SSHClient:
    """Open an SSH session from an auth dict (host/port/username/password)."""
    client = paramiko.SSHClient()
    # Unknown host keys are accepted automatically (managed-fleet assumption).
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(
        hostname=remote_auth["host"],
        port=remote_auth["port"],
        username=remote_auth["username"],
        password=remote_auth["password"],
        timeout=10,
        banner_timeout=10,
    )
    return client
+
+
+def _find_cmd_absolute_path(cmd: str) -> Optional[str]:
+ """查找命令的绝对路径(兼容特殊情况)"""
+ common_paths = ["/usr/bin", "/bin", "/usr/sbin", "/sbin", "/usr/local/bin"]
+ for path in common_paths:
+ cmd_path = os.path.join(path, cmd)
+ if os.path.exists(cmd_path) and os.access(cmd_path, os.X_OK):
+ return cmd_path
+ return None
+
+
def _build_fix_command(iface: str = "ens3") -> str:
    """Build the shell command that fixes BOOTPROTO and restarts NetworkManager.

    Generalized: the interface name is now a parameter (default "ens3", so
    existing callers are unaffected). The command does exactly three things:
    1. verify /etc/sysconfig/network-scripts/ifcfg-<iface> exists
    2. force BOOTPROTO=dhcp (replace an existing line, or append one)
    3. systemctl restart NetworkManager

    Args:
        iface: network interface whose ifcfg file is edited.

    Returns:
        A single `&&`-joined shell command string.
    """
    # Resolve absolute paths so the command works under restricted PATHs.
    sed_path = _find_cmd_absolute_path("sed") or "sed"
    grep_path = _find_cmd_absolute_path("grep") or "grep"
    systemctl_path = _find_cmd_absolute_path("systemctl") or "systemctl"

    ifcfg_path = f"/etc/sysconfig/network-scripts/ifcfg-{iface}"
    parts: List[str] = []
    # 1. The config file must exist; abort with a clear message otherwise.
    parts.append(
        f"IFCFG='{ifcfg_path}'; "
        "if [ ! -f \"$IFCFG\" ]; then "
        f"echo 'ifcfg-{iface} not found'; "
        "exit 1; "
        "fi"
    )
    # 2a. Normalize an existing (possibly commented) BOOTPROTO line to dhcp.
    parts.append(
        f"{sed_path} -i -e 's/^[#]*[[:space:]]*BOOTPROTO=.*/BOOTPROTO=dhcp/' \"$IFCFG\""
    )
    # 2b. If no BOOTPROTO line exists at all, append one.
    parts.append(
        f"{grep_path} -q '^[[:space:]]*BOOTPROTO=' \"$IFCFG\" || echo 'BOOTPROTO=dhcp' >> \"$IFCFG\""
    )
    # 3. Restart NetworkManager so the new setting takes effect.
    parts.append(f"{systemctl_path} restart NetworkManager")
    return " && ".join(parts)
+
+
def fix_network_bootproto_issue(target: Optional[str] = None) -> Dict:
    """Fix "NetworkManager does not auto-acquire an IP" on a local or remote host.

    Edits /etc/sysconfig/network-scripts/ifcfg-ens3 so that BOOTPROTO=dhcp
    and restarts the NetworkManager service (command built by
    _build_fix_command).

    Args:
        target: host IP or configured host name; None or "127.0.0.1" means
            the local machine.

    Returns:
        Dict with keys: success (bool), message (str, localized), target
        (str), result (list of step/log strings).
    """
    # Non-string or empty targets fall back to localhost.
    target_host = target.strip() if (target and isinstance(target, str)) else "127.0.0.1"
    is_zh = get_language()
    result = init_result(target_host)
    steps: List[str] = []

    fix_cmd = _build_fix_command()

    # --- Local path: run the fix command through a shell on this machine ---
    if target_host == "127.0.0.1":
        try:
            steps.append(
                "开始修复本机 ifcfg-ens3 配置并重启 NetworkManager"
                if is_zh
                else "Start fixing local ifcfg-ens3 and restart NetworkManager"
            )
            # shell=True is required: fix_cmd is a compound `&&` pipeline.
            completed = subprocess.run(
                fix_cmd,
                shell=True,
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )
            stdout = completed.stdout.strip()
            stderr = completed.stderr.strip()
            # Both streams are appended to the step log for transparency.
            if stdout:
                steps.extend(stdout.splitlines())
            if stderr:
                steps.extend(stderr.splitlines())

            result["success"] = True
            result["message"] = (
                "网络配置已修复:BOOTPROTO=dhcp,NetworkManager 已重启"
                if is_zh
                else "Network configuration fixed: BOOTPROTO=dhcp and NetworkManager restarted"
            )
        except subprocess.CalledProcessError as e:
            # check=True raised: surface stderr (or the exception text) to the caller.
            err = e.stderr.strip() if e.stderr else str(e)
            result["message"] = (
                f"修复网络配置失败:{err}"
                if is_zh
                else f"Failed to fix network configuration: {err}"
            )
            steps.append(result["message"])

        result["result"] = steps
        return result

    # --- Remote path: same command, executed over SSH ---
    remote_auth = get_remote_auth(target_host)
    if not remote_auth or not (remote_auth["username"] and remote_auth["password"]):
        result["message"] = (
            f"未找到远程主机({target_host})的认证配置"
            if is_zh
            else f"Authentication config for remote host ({target_host}) not found"
        )
        result["result"] = [result["message"]]
        return result

    ssh: Optional[paramiko.SSHClient] = None
    try:
        ssh = _open_ssh(remote_auth)
        steps.append(
            f"开始修复远程主机({target_host})的 ifcfg-ens3 配置并重启 NetworkManager"
            if is_zh
            else f"Start fixing remote host ({target_host}) ifcfg-ens3 and restart NetworkManager"
        )
        stdin, stdout, stderr = ssh.exec_command(fix_cmd)
        out = stdout.read().decode("utf-8", errors="replace").strip()
        err = stderr.read().decode("utf-8", errors="replace").strip()

        if out:
            steps.extend(out.splitlines())
        if err:
            steps.extend(err.splitlines())

        # recv_exit_status() blocks until the remote command finishes.
        exit_status = stdout.channel.recv_exit_status()
        if exit_status == 0:
            result["success"] = True
            result["message"] = (
                f"远程主机({target_host})网络配置已修复:BOOTPROTO=dhcp,NetworkManager 已重启"
                if is_zh
                else f"Remote host ({target_host}) network configuration fixed: BOOTPROTO=dhcp and NetworkManager restarted"
            )
        else:
            result["message"] = (
                f"远程修复网络配置失败:{err}"
                if is_zh
                else f"Failed to fix remote network configuration: {err}"
            )
            steps.append(result["message"])
    except paramiko.AuthenticationException:
        result["message"] = (
            "SSH 认证失败,请检查用户名和密码"
            if is_zh
            else "SSH authentication failed, check username and password"
        )
        steps.append(result["message"])
    except Exception as e:
        # Catch-all boundary: any other SSH/network failure is reported, not raised.
        result["message"] = (
            f"远程修复网络配置异常:{str(e)}"
            if is_zh
            else f"Remote network fix exception: {str(e)}"
        )
        steps.append(result["message"])
    finally:
        if ssh:
            ssh.close()

    result["result"] = steps
    return result
+
+
+
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/config.json b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/config.json
new file mode 100644
index 00000000..55100bcd
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/config.json
@@ -0,0 +1,10 @@
+{
+ "tools": {
+ "network_fix_bootproto_tool": {
+ "zh": "网络自动修复工具:解决NetworkManager启动后未自动获取IP的问题。\n\n该工具会自动执行以下步骤:\n1. 编辑网卡配置文件 /etc/sysconfig/network-scripts/ifcfg-ens3\n2. 检查并修正 BOOTPROTO 字段,确保其值为 \"dhcp\"(全小写,无拼写错误)\n3. 保存修改并重启 NetworkManager 服务(systemctl restart NetworkManager)\n\n参数:\n - target: 目标主机IP或主机名,None或\"127.0.0.1\"表示本机\n - lang: 语言设置(可选,当前根据全局配置自动切换中英文)\n\n返回:\n - success: 是否修复成功(True/False)\n - message: 执行结果说明(整体修复结果)\n - target: 目标主机\n - result: 执行过程中的详细步骤信息列表\n\n使用场景:\n - openEuler 24.03 系统中 NetworkManager 正常运行但 ens3 未自动获取IP\n - ifcfg-ens3 中 BOOTPROTO 拼写错误导致 DHCP 未生效\n - 需要一键修复网卡DHCP配置并恢复自动获取IP功能",
+ "en": "Network auto-fix tool: resolve the issue where NetworkManager does not automatically obtain an IP address.\n\nThe tool performs the following steps:\n1. Edit NIC config file /etc/sysconfig/network-scripts/ifcfg-ens3\n2. Check and correct the BOOTPROTO field to \"dhcp\" (all lowercase, no typos)\n3. Save the change and restart NetworkManager (systemctl restart NetworkManager)\n\nParameters:\n - target: Target host IP or hostname, None or \"127.0.0.1\" for localhost\n - lang: Language setting (optional, currently determined by global configuration)\n\nReturns:\n - success: Whether the fix succeeded (True/False)\n - message: Overall result description\n - target: Target host\n - result: List of detailed step messages during execution\n\nUse cases:\n - On openEuler 24.03, NetworkManager is running but ens3 does not get an IP via DHCP\n - BOOTPROTO in ifcfg-ens3 is misspelled, causing DHCP not to be triggered\n - Need a one-click fix for NIC DHCP configuration and restore automatic IP acquisition"
+ }
+ }
+}
+
+
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/deps.toml b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/deps.toml
new file mode 100644
index 00000000..3dbbbbb9
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/deps.toml
@@ -0,0 +1,11 @@
+[system]
+# NetworkManager / 网卡配置相关的系统工具(通常系统已自带,此处仅用于需要时安装)
+#network_tools = [
+# "yum install -y NetworkManager",
+# "apt-get update && apt-get install -y network-manager"
+#]
+
+[pip]
+# Python 依赖(与其他 mcptool 保持一致的基础依赖)
+
+
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/tool.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/tool.py
new file mode 100644
index 00000000..94fb8a15
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/network_tools/tool.py
@@ -0,0 +1,24 @@
+from typing import Dict, Optional
+
+from config.public.base_config_loader import LanguageEnum
+
+from mcp_tools.base_tools.network_tools.base import fix_network_bootproto_issue
+
+
def network_fix_bootproto_tool(
    target: Optional[str] = None,
    lang: Optional[LanguageEnum] = LanguageEnum.ZH,
) -> Dict:
    """Network auto-fix tool for the "NetworkManager obtains no IP" issue.

    Performs, via the base helper:
    1. edit /etc/sysconfig/network-scripts/ifcfg-ens3
    2. force BOOTPROTO to "dhcp" (all lowercase)
    3. systemctl restart NetworkManager

    Args:
        target: host IP/name; None or "127.0.0.1" means localhost.
        lang: accepted only for interface parity with the other tools —
            the output language follows the global configuration.
    """
    return fix_network_bootproto_issue(target=target)
+
+
+
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/base.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/base.py
new file mode 100644
index 00000000..12a5afed
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/base.py
@@ -0,0 +1,273 @@
+import logging
+import os
+import subprocess
+import re
+from enum import Enum
+from typing import Dict, List, Optional
+from pydantic import Field
+from config.public.base_config_loader import BaseConfig, LanguageEnum
+
+# 初始化日志(保留基础配置)
+logger = logging.getLogger("pkg_tool")
+logger.setLevel(logging.INFO)
+lang = BaseConfig().get_config().public_config.language
+
+# ========== 枚举类定义(不变) ==========
class PkgActionEnum(str, Enum):
    # Supported package-management operations (values are the wire strings).
    LIST = "list"  # list installed packages
    INFO = "info"  # query package details
    INSTALL = "install"  # online install
    LOCAL_INSTALL = "local-install"  # offline RPM install
    UPDATE = "update"  # update one package / all packages
    UPDATE_SEC = "update-sec"  # security patches only
    REMOVE = "remove"  # uninstall a package
    CLEAN = "clean"  # clean dnf caches
+
class PkgCacheTypeEnum(str, Enum):
    # Cache categories accepted by `dnf clean <value>`.
    ALL = "all"  # clean everything
    PACKAGES = "packages"  # cached package files only
    METADATA = "metadata"  # repository metadata only
+
+# ========== 通用工具函数(精简) ==========
def get_language() -> bool:
    """Return True when the global language is Chinese, False for English."""
    configured = BaseConfig().get_config().public_config.language
    return configured == LanguageEnum.ZH
+
def init_result_dict(
    target_host: str = "127.0.0.1",
    result_type: str = "list",
    include_pkg_name: bool = True
) -> Dict:
    """Build the common result skeleton returned by pkg_tool operations.

    Args:
        target_host: execution target (local host by default).
        result_type: "list" makes the "result" field a list; any other
            value makes it an empty string.
        include_pkg_name: when False, the "pkg_name" field is omitted.
            (Bug fix: the flag was previously accepted but ignored —
            default behavior is unchanged.)

    Returns:
        Dict with success/message/result/target and, optionally, pkg_name.
    """
    result = {
        "success": False,
        "message": "",
        "result": [] if result_type == "list" else "",
        "target": target_host,
    }
    if include_pkg_name:
        result["pkg_name"] = ""
    return result
+
def parse_pkg_action(action_str: str) -> PkgActionEnum:
    """Convert a raw string into a PkgActionEnum (case/whitespace tolerant).

    Raises:
        ValueError: when the string matches no enum value.
    """
    normalized = action_str.strip().lower()
    try:
        return PkgActionEnum(normalized)
    except ValueError:
        valid_values = [e.value for e in PkgActionEnum]
        raise ValueError(f"无效的操作类型,可选枚举值:{','.join(valid_values)}")
+
def parse_cache_type(cache_type_str: str) -> PkgCacheTypeEnum:
    """Convert a raw string into a PkgCacheTypeEnum (case/whitespace tolerant).

    Raises:
        ValueError: when the string matches no enum value.
    """
    normalized = cache_type_str.strip().lower()
    try:
        return PkgCacheTypeEnum(normalized)
    except ValueError:
        valid_values = [e.value for e in PkgCacheTypeEnum]
        raise ValueError(f"无效的缓存类型,可选枚举值:{','.join(valid_values)}")
+
def safe_split_log(log):
    """Normalize a command log into a list of lines.

    Strings are split on line boundaries; any other value (dict, None, ...)
    is wrapped into a one-element list, with a placeholder for falsy values.
    """
    if isinstance(log, str):
        return log.splitlines()
    return [str(log) if log else "无执行日志"]
+
+# ========== 核心命令工具(极简实现,解决命令不存在问题) ==========
def get_cmd_path(cmd: str) -> Optional[str]:
    """Return the absolute path of *cmd* from common OpenEuler directories.

    Returns None (after a warning log) when the command is not found.
    """
    for directory in ("/usr/bin", "/bin", "/usr/sbin", "/sbin"):
        candidate = os.path.join(directory, cmd)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
    logger.warning(f"命令不存在:{cmd}")
    return None
+
+# ========== 包管理核心类(极简修复,易读易维护) ==========
class PackageManager:
    """OpenEuler package manager wrapper around dnf/yum and rpm.

    Command binaries are resolved to absolute paths once at construction;
    every public method returns a plain dict (or list of dicts) where
    failures are reported under an "error" key.

    Fixes vs. the previous version:
    - `is_zh` is now a real bool (the enum object is always truthy, so
      `_msg` previously returned Chinese for every language);
    - pydantic `Field(...)` objects are no longer used as plain-function
      defaults (a bare FieldInfo default is truthy and breaks no-arg calls);
    - `_run_cmd` no longer mutates the caller's argv list and guards
      `TimeoutExpired.stderr`, which may be None or bytes.
    """
    def __init__(self, lang: LanguageEnum = LanguageEnum.ZH):
        # Explicit comparison — storing the enum itself was always truthy.
        self.is_zh = lang == LanguageEnum.ZH
        # Resolve once up-front; yum is kept as a fallback for older systems.
        self.dnf_path = get_cmd_path("dnf") or get_cmd_path("yum")
        self.rpm_path = get_cmd_path("rpm")

    def _msg(self, zh: str, en: str) -> str:
        """Return the message variant matching the configured language."""
        return zh if self.is_zh else en

    def _run_cmd(self, cmd: List[str]) -> Optional[str]:
        """Execute *cmd* and return its textual outcome (never a dict).

        Returns stdout (or a success placeholder) on success, an error
        description string on failure, and None for an empty command.
        """
        if not cmd:
            logger.error(self._msg("命令不能为空", "Command cannot be empty"))
            return None

        # Work on a copy so the caller's list is never mutated.
        cmd = list(cmd)
        if cmd[0] == "dnf" and self.dnf_path:
            cmd[0] = self.dnf_path
        elif cmd[0] == "rpm" and self.rpm_path:
            cmd[0] = self.rpm_path

        try:
            logger.info(f"执行命令:{' '.join(cmd)}")
            result = subprocess.run(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                check=True,
                timeout=30
            )
            # Always hand back a string so callers can splitlines() safely.
            return result.stdout.strip() or "操作执行成功"
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
            # TimeoutExpired.stderr may be None or bytes; guard before strip().
            stderr = e.stderr
            if isinstance(stderr, bytes):
                stderr = stderr.decode("utf-8", errors="replace")
            err_msg = (stderr or "").strip() or str(e)
            logger.error(f"命令执行失败:{err_msg}")
            return f"命令执行失败:{err_msg}"
        except Exception as e:
            logger.error(f"系统调用异常:{str(e)}")
            return f"系统调用异常:{str(e)}"

    # ---------- core operations ----------
    def list(self, filter_key: Optional[str] = None) -> List[Dict]:
        """List installed packages via rpm -qa.

        Args:
            filter_key: optional substring filter on the package name.
        """
        if not self.rpm_path:
            return [{"error": self._msg("未找到rpm命令,无法列出包", "rpm command not found")}]

        cmd = [self.rpm_path, "-qa", "--queryformat", "%{NAME}\t%{VERSION}\t%{RELEASE}\t%{ARCH}\n"]
        output = self._run_cmd(cmd)
        if not output:
            return [{"error": self._msg("列出包失败", "Failed to list packages")}]

        pkg_list = []
        for line in output.splitlines():
            if not line.strip():
                continue
            # queryformat guarantees exactly four tab-separated fields.
            name, version, release, arch = line.split("\t", 3)
            if filter_key and filter_key.lower() not in name.lower():
                continue
            pkg_list.append({
                "name": name,
                "version": f"{version}-{release}",
                "arch": arch,
                "full_name": f"{name}-{version}-{release}.{arch}"
            })
        return pkg_list

    def info(self, pkg_name: str) -> Dict:
        """Query details of *pkg_name* via `dnf info` and parse the key fields."""
        if not pkg_name.strip():
            return {"error": self._msg("包名不能为空", "Package name cannot be empty")}
        if not self.dnf_path:
            return {"error": self._msg("未找到dnf/yum命令,无法查询包详情", "dnf/yum command not found")}

        cmd = [self.dnf_path, "info", pkg_name.strip()]
        output = self._run_cmd(cmd)
        if not output:
            return {"error": self._msg(f"查询包{pkg_name}详情失败", f"Failed to get info for {pkg_name}")}

        # Field extraction patterns (parsing logic unchanged).
        info_patterns = {
            "name": r"Name\s*:\s*(.+)",
            "version": r"Version\s*:\s*(.+)",
            "release": r"Release\s*:\s*(.+)",
            "arch": r"Architecture\s*:\s*(.+)",
            "installed_size": r"Installed Size\s*:\s*(.+)",
            "repo": r"From Repository\s*:\s*(.+)",
            "summary": r"Summary\s*:\s*(.+)",
            "license": r"License\s*:\s*(.+)",
            "url": r"URL\s*:\s*(.+)"
        }
        pkg_info = {}
        for key, pattern in info_patterns.items():
            match = re.search(pattern, output, re.IGNORECASE)
            if match:
                pkg_info[key] = match.group(1).strip()

        return pkg_info if pkg_info else {"error": self._msg(f"未找到包{pkg_name}的信息", f"No info found for {pkg_name}")}

    def install(self, pkg_name: str, yes: bool = True) -> Dict:
        """Install *pkg_name* online via dnf; *yes* appends -y (auto-confirm)."""
        if not pkg_name.strip():
            return {"error": self._msg("包名不能为空", "Package name cannot be empty")}
        if not self.dnf_path:
            return {"error": self._msg("未找到dnf/yum命令,无法安装包", "dnf/yum command not found")}

        cmd = [self.dnf_path, "install", pkg_name.strip()]
        if yes:
            cmd.append("-y")
        output = self._run_cmd(cmd)
        return {"message": output} if output else {"error": self._msg(f"安装包{pkg_name}失败", f"Failed to install {pkg_name}")}

    def local_install(self, rpm_path: str, yes: bool = True) -> Dict:
        """Install a local RPM file (absolute path expected by callers)."""
        if not rpm_path.strip():
            return {"error": self._msg("RPM路径不能为空", "RPM path cannot be empty")}
        if not os.path.exists(rpm_path.strip()):
            return {"error": self._msg(f"RPM文件不存在:{rpm_path}", f"RPM file not found: {rpm_path}")}
        if not self.dnf_path:
            return {"error": self._msg("未找到dnf/yum命令,无法安装RPM", "dnf/yum command not found")}

        cmd = [self.dnf_path, "localinstall", rpm_path.strip()]
        if yes:
            cmd.append("-y")
        output = self._run_cmd(cmd)
        return {"message": output} if output else {"error": self._msg("安装RPM失败", "Failed to install RPM")}

    def update(self, pkg_name: Optional[str] = None, yes: bool = True) -> Dict:
        """Update one package (or everything when *pkg_name* is None)."""
        if not self.dnf_path:
            return {"error": self._msg("未找到dnf/yum命令,无法更新包", "dnf/yum command not found")}

        cmd = [self.dnf_path, "update"]
        if pkg_name:
            cmd.append(pkg_name.strip())
        if yes:
            cmd.append("-y")
        output = self._run_cmd(cmd)
        return {"message": output} if output else {"error": self._msg("更新包失败", "Failed to update package")}

    def update_sec(self, yes: bool = True) -> Dict:
        """Apply security updates only (`dnf update --security`)."""
        if not self.dnf_path:
            return {"error": self._msg("未找到dnf/yum命令,无法更新安全补丁", "dnf/yum command not found")}

        cmd = [self.dnf_path, "update", "--security"]
        if yes:
            cmd.append("-y")
        output = self._run_cmd(cmd)
        return {"message": output} if output else {"error": self._msg("更新安全补丁失败", "Failed to update security patches")}

    def remove(self, pkg_name: str, yes: bool = True) -> Dict:
        """Uninstall *pkg_name* via dnf remove."""
        if not pkg_name.strip():
            return {"error": self._msg("包名不能为空", "Package name cannot be empty")}
        if not self.dnf_path:
            return {"error": self._msg("未找到dnf/yum命令,无法卸载包", "dnf/yum command not found")}

        cmd = [self.dnf_path, "remove", pkg_name.strip()]
        if yes:
            cmd.append("-y")
        output = self._run_cmd(cmd)
        return {"message": output} if output else {"error": self._msg(f"卸载包{pkg_name}失败", f"Failed to remove {pkg_name}")}

    def clean(self, cache_type: PkgCacheTypeEnum = PkgCacheTypeEnum.ALL) -> Dict:
        """Clean dnf caches of the given type (all/packages/metadata)."""
        if not self.dnf_path:
            return {"error": self._msg("未找到dnf/yum命令,无法清理缓存", "dnf/yum command not found")}

        cmd = [self.dnf_path, "clean", cache_type.value]
        output = self._run_cmd(cmd)  # string on success/failure, None for empty cmd

        # NOTE(review): failure detection relies on the "失败" marker that
        # _run_cmd embeds in error strings — fragile but behavior-preserving.
        if output and "失败" not in output:
            return {"message": self._msg(f"缓存清理成功(类型:{cache_type.value})", f"Cache cleaned successfully (type: {cache_type.value})")}
        else:
            return {"error": self._msg(f"缓存清理失败:{output or '未知错误'}", f"Failed to clean cache: {output or 'Unknown error'}")}
+
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/config.json b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/config.json
new file mode 100644
index 00000000..3b15b022
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/config.json
@@ -0,0 +1,8 @@
+{
+ "tools": {
+ "pkg_tool": {
+ "zh": "【OpenEuler软件包管理工具】\n功能:支持OpenEuler系统软件包的查、装、更、卸、缓存清理(基于dnf/rpm命令,安全调用)\n\n【核心提示】\n1. 操作类型(action)必须从以下枚举值中选择:list/info/install/local-install/update/update-sec/remove/clean,不可传入其他值;\n2. 缓存类型(cache_type)必须从以下枚举值中选择:all/packages/metadata,clean操作默认值为all;\n3. 不同操作类型(action)对应不同必填参数,未满足则执行失败,请严格遵守:\n - list(列出已安装包):可选传入filter_key(包名过滤关键词),无必填参数;\n - info(查询包详情)/install(在线安装)/update(更新包)/remove(卸载包):必须传入pkg_name(包名);\n - local-install(离线安装):必须传入rpm_path(RPM文件绝对路径);\n - update-sec(仅更新安全补丁)/clean(清理缓存):无额外必填参数;\n4. 非必填参数默认值:yes(自动确认)=True,lang(语言)=ZH;\n5. 路径格式要求:rpm_path需传入绝对路径(如\"/tmp/nginx-1.24.0-1.el9.x86_64.rpm\"),避免相对路径导致的找不到文件问题;\n6. 权限说明:安装/更新/卸载/清理缓存操作需root权限,否则会执行失败。\n\n【枚举类定义(必须遵守)】\n- PkgActionEnum(操作类型枚举):list / info / install / local-install / update / update-sec / remove / clean\n- PkgCacheTypeEnum(缓存类型枚举):all / packages / metadata\n\n【参数详情】\n- action:操作类型(必填,枚举值见上方)\n- pkg_name:包名(info/install/update/remove操作必填)\n- filter_key:包名过滤关键词(list操作可选,模糊匹配)\n- rpm_path:RPM文件路径(local-install操作必填,绝对路径)\n- cache_type:缓存类型(clean操作可选,枚举值见上方,默认all)\n- yes:自动确认操作(install/update/remove/clean默认True,无需手动输入y)\n\n【返回值说明】\n- success:执行结果(True=成功,False=失败)\n- message:执行信息/错误提示(多语言)\n- result:操作结果(list/info返回结构化列表,install/update等返回命令日志列表)\n- pkg_name:操作的包名(无包名操作返回空字符串)\n- target:执行目标(固定为127.0.0.1,本地执行)",
+ "en": "【OpenEuler Package Management Tool】\nFunction: Supports query/install/update/remove/cache clean for OpenEuler packages (based on dnf/rpm commands, safe call)\n\n【Core Guidelines】\n1. Operation type (action) must be selected from the following enum values: list/info/install/local-install/update/update-sec/remove/clean, other values are not allowed;\n2. Cache type (cache_type) must be selected from the following enum values: all/packages/metadata, default value for clean is all;\n3. Different action types correspond to different required parameters, execution will fail if not met, please strictly follow:\n - list (list installed packages): Optionally pass filter_key (package name filter), no required parameters;\n - info (query package details)/install (online install)/update (update package)/remove (uninstall package): Must pass pkg_name (package name);\n - local-install (offline RPM install): Must pass rpm_path (absolute path of RPM file);\n - update-sec (update security patches only)/clean (clean cache): No additional required parameters;\n4. Default values for optional parameters: yes (auto confirm)=True, lang (language)=ZH;\n5. Path format requirement: rpm_path must be absolute path (e.g.\"/tmp/nginx-1.24.0-1.el9.x86_64.rpm\"), avoid file not found due to relative path;\n6. 
Permission note: Install/update/remove/clean operations require root privileges, otherwise execution will fail.\n\n【Enum Class Definition (Must Follow)】\n- PkgActionEnum (Operation Type Enum): list / info / install / local-install / update / update-sec / remove / clean\n- PkgCacheTypeEnum (Cache Type Enum): all / packages / metadata\n\n【Parameter Details】\n- action: Operation type (required, enum values see above)\n- pkg_name: Package name (required for info/install/update/remove)\n- filter_key: Package name filter (optional for list, fuzzy match)\n- rpm_path: RPM file path (required for local-install, absolute path)\n- cache_type: Cache type (optional for clean, enum values see above, default all)\n- yes: Auto confirm operation (default True for install/update/remove/clean, no need to input y manually)\n \n【Return Value Explanation】\n- success: Execution result (True=success, False=failure)\n- message: Execution info/error prompt (multilingual)\n- result: Operation result (structured list for list/info, command log list for install/update etc.)\n- pkg_name: Operated package name (empty string for operations without package name)\n- target: Execution target (fixed as 127.0.0.1, local execution)"
+ }
+ }
+}
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/deps.toml b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/deps.toml
new file mode 100644
index 00000000..e69de29b
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/tool.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/tool.py
new file mode 100644
index 00000000..ae22fd96
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/pkg_tool/tool.py
@@ -0,0 +1,115 @@
+from typing import Dict, Optional
+from pydantic import Field
+from config.public.base_config_loader import LanguageEnum
+from servers.oe_cli_mcp_server.mcp_tools.base_tools.pkg_tool.base import (
+ init_result_dict,
+ PackageManager,
+ PkgActionEnum,
+ PkgCacheTypeEnum,
+ parse_pkg_action,
+ parse_cache_type,
+ logger, lang, safe_split_log
+)
+
def pkg_tool(
    action: str = Field(
        ...,
        description="操作类型(枚举值:list/info/install/local-install/update/update-sec/remove/clean)"
    ),
    pkg_name: Optional[str] = Field(None, description="包名(info/install/update/remove必填)"),
    filter_key: Optional[str] = Field(None, description="包名过滤关键词(list操作可选)"),
    rpm_path: Optional[str] = Field(None, description="RPM文件路径(local-install必填)"),
    cache_type: str = Field(
        PkgCacheTypeEnum.ALL.value,
        description="缓存类型(枚举值:all/packages/metadata,clean操作默认all)"
    ),
    yes: bool = Field(True, description="自动确认(install/update/remove默认True)"),
) -> Dict:
    """OpenEuler package-management tool entry point (enum-typed parameters).

    Bug fix: `success` now reflects whether the underlying PackageManager
    call actually succeeded — previously every branch forced success=True
    even when the manager returned {"error": ...}.

    Returns:
        Dict with success/message/result/pkg_name/target (see config.json).
    """
    result = init_result_dict()
    result["pkg_name"] = pkg_name.strip() if pkg_name else ""
    pm = PackageManager(lang=lang)
    is_zh = lang == LanguageEnum.ZH

    def _error_of(log):
        # PackageManager reports failure as {"error": ...} (or a list of such dicts).
        if isinstance(log, dict) and "error" in log:
            return log["error"]
        if isinstance(log, list):
            for item in log:
                if isinstance(item, dict) and "error" in item:
                    return item["error"]
        return None

    # 1. Parse the enum-valued string parameters (LLM callers send strings).
    try:
        action_enum = parse_pkg_action(action)
        cache_type_enum = parse_cache_type(cache_type)
    except ValueError as e:
        result["message"] = str(e)
        return result

    # 2. Dispatch on the requested action; required-parameter checks per action.
    try:
        if action_enum == PkgActionEnum.LIST:
            listing = pm.list(filter_key=filter_key)
            result["result"] = listing
            err = _error_of(listing)
            result["success"] = err is None
            if err is None:
                filter_desc = (filter_key or "无") if is_zh else (filter_key or "none")
                result["message"] = f"已安装包列表查询完成(过滤关键词:{filter_desc})" if is_zh else f"Installed packages listed (filter: {filter_desc})"
            else:
                result["message"] = err

        elif action_enum == PkgActionEnum.INFO:
            if not pkg_name:
                raise ValueError("包名不能为空" if is_zh else "Package name cannot be empty")
            pkg_info = pm.info(pkg_name.strip())
            result["result"] = [pkg_info]  # keep the list shape for consistency
            err = _error_of(pkg_info)
            result["success"] = err is None
            result["message"] = err if err else (f"包{pkg_name}详情查询完成" if is_zh else f"Package {pkg_name} info queried")

        elif action_enum == PkgActionEnum.INSTALL:
            if not pkg_name:
                raise ValueError("包名不能为空" if is_zh else "Package name cannot be empty")
            log = pm.install(pkg_name.strip(), yes=yes)
            result["result"] = safe_split_log(log)
            err = _error_of(log)
            result["success"] = err is None
            result["message"] = err if err else (f"包{pkg_name}在线安装成功" if is_zh else f"Package {pkg_name} installed successfully")

        elif action_enum == PkgActionEnum.LOCAL_INSTALL:
            if not rpm_path:
                raise ValueError("RPM路径不能为空" if is_zh else "RPM path cannot be empty")
            log = pm.local_install(rpm_path.strip(), yes=yes)
            result["result"] = safe_split_log(log)
            err = _error_of(log)
            result["success"] = err is None
            result["message"] = err if err else (f"RPM包{rpm_path}安装成功" if is_zh else f"RPM package {rpm_path} installed successfully")

        elif action_enum == PkgActionEnum.UPDATE:
            log = pm.update(pkg_name=pkg_name.strip() if pkg_name else None, yes=yes)
            result["result"] = safe_split_log(log)
            err = _error_of(log)
            result["success"] = err is None
            target = (pkg_name or "所有包") if is_zh else (pkg_name or "all packages")
            result["message"] = err if err else (f"{target}更新完成" if is_zh else f"{target} updated successfully")

        elif action_enum == PkgActionEnum.UPDATE_SEC:
            log = pm.update_sec(yes=yes)
            result["result"] = safe_split_log(log)
            err = _error_of(log)
            result["success"] = err is None
            result["message"] = err if err else ("安全补丁更新完成" if is_zh else "Security patches updated successfully")

        elif action_enum == PkgActionEnum.REMOVE:
            if not pkg_name:
                raise ValueError("包名不能为空" if is_zh else "Package name cannot be empty")
            log = pm.remove(pkg_name.strip(), yes=yes)
            result["result"] = safe_split_log(log)
            err = _error_of(log)
            result["success"] = err is None
            result["message"] = err if err else (f"包{pkg_name}卸载完成" if is_zh else f"Package {pkg_name} removed successfully")

        elif action_enum == PkgActionEnum.CLEAN:
            log = pm.clean(cache_type=cache_type_enum)
            result["result"] = safe_split_log(log)
            err = _error_of(log)
            result["success"] = err is None
            result["message"] = err if err else (f"{cache_type_enum.value}缓存清理完成" if is_zh else f"{cache_type_enum.value} cache cleaned successfully")

    except Exception as e:
        result["success"] = False
        result["message"] = f"操作失败:{str(e)}" if is_zh else f"Operation failed: {str(e)}"
        logger.error(f"Package manager error: {str(e)}")

    return result
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/base.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/base.py
new file mode 100644
index 00000000..7b3c1e23
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/base.py
@@ -0,0 +1,240 @@
+import logging
+import os
+import subprocess
+import re
+from enum import Enum
+from typing import Dict, List, Optional, Union
+from config.public.base_config_loader import LanguageEnum, BaseConfig
+
+# 初始化日志(仅保留基础配置)
+logger = logging.getLogger("proc_tool")
+logger.setLevel(logging.INFO)
+
+# ========== 枚举类定义(不变,保留核心操作) ==========
class ProcActionEnum(str, Enum):
    # Supported process-tool operations (values are the wire strings).
    LIST = "list"  # list all processes
    FIND = "find"  # find by name/PID
    STAT = "stat"  # per-process resource usage
    START = "start"  # start a systemd service
    RESTART = "restart"  # restart a systemd service
    STOP = "stop"  # stop a systemd service
    KILL = "kill"  # force-kill a process
+
+# ========== 通用工具函数(精简,保留必要功能) ==========
def get_language() -> LanguageEnum:
    """Return the globally configured language enum."""
    config = BaseConfig().get_config()
    return config.public_config.language
+
def is_zh() -> bool:
    """True when the configured language is Chinese."""
    return LanguageEnum.ZH == get_language()
+
def init_result_dict(target_host: str = "127.0.0.1") -> Dict:
    """Build the shared result envelope for process-tool responses."""
    envelope: Dict = {"success": False, "message": ""}
    envelope["result"] = {}
    envelope["target"] = target_host
    envelope["proc_actions"] = []
    return envelope
+
def parse_proc_actions(action_list: List[str]) -> List[ProcActionEnum]:
    """Validate raw action strings and convert them to ProcActionEnum members.

    Raises:
        ValueError: when the list is empty or contains an unknown action.
    """
    valid_values = [e.value for e in ProcActionEnum]
    if not action_list:
        msg = f"进程操作列表不能为空,可选值:{','.join(valid_values)}"
        raise ValueError(msg)

    parsed: List[ProcActionEnum] = []
    for raw in action_list:
        normalized = raw.strip().lower()
        if normalized not in valid_values:
            msg = f"无效操作:{normalized},可选值:{','.join(valid_values)}"
            raise ValueError(msg)
        parsed.append(ProcActionEnum(normalized))
    return parsed
+
+# ========== 核心工具函数(精简,无复杂嵌套) ==========
def get_cmd_path(cmd: str) -> Optional[str]:
    """Resolve *cmd* against the standard OpenEuler binary directories.

    Returns the absolute path, or None (after a warning log) when not found.
    """
    for directory in ("/usr/bin", "/bin", "/usr/sbin", "/sbin"):
        candidate = os.path.join(directory, cmd)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
    logger.warning(f"命令不存在:{cmd}(未在常见路径中找到)")
    return None
+
def run_cmd(cmd: List[str]) -> Optional[str]:
    """Execute *cmd* (argv list) and return stripped stdout, or None on failure.

    The first element is resolved to an absolute path; the caller's list is
    left untouched (previously it was mutated in place).

    Fixes: `TimeoutExpired.stderr` may be None or bytes, so the old
    `e.stderr.strip()` could raise inside the except handler.
    """
    cmd_path = get_cmd_path(cmd[0])
    if not cmd_path:
        return None  # command not installed
    cmd = [cmd_path] + list(cmd[1:])  # copy: never mutate the caller's argv

    try:
        logger.info(f"执行命令:{' '.join(cmd)}")
        result = subprocess.run(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            text=True, check=True, timeout=10
        )
        return result.stdout.strip()
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
        stderr = e.stderr
        if isinstance(stderr, bytes):
            stderr = stderr.decode("utf-8", errors="replace")
        logger.error(f"命令执行失败:{(stderr or '').strip() or str(e)}")
        return None
    except Exception as e:
        logger.error(f"系统调用异常:{str(e)}")
        return None
+
+# ========== 进程管理核心类(极简实现,易阅读) ==========
+class ProcessManager:
    def __init__(self):
        # Resolve the language once up-front to avoid repeated config reads.
        self.zh = is_zh()
+
+ def _msg(self, zh: str, en: str) -> str:
+ """简洁多语言提示(直接返回对应语言)"""
+ return zh if self.zh else en
+
+ # ---------- 查:进程查询(核心功能,无多余嵌套) ----------
+ def list_all_procs(self) -> List[Dict]:
+ """列出所有进程(精简解析逻辑)"""
+ output = run_cmd(["ps", "aux"])
+ if not output:
+ return [{"error": self._msg("ps命令执行失败", "ps command failed")}]
+
+ lines = [line.strip() for line in output.splitlines() if line.strip()]
+ if len(lines) < 2:
+ return [{"error": self._msg("无有效进程信息", "No valid process info")}]
+
+ # 解析表头和数据
+ headers = [h.lower() for h in lines[0].split()]
+ procs = []
+ for line in lines[1:]:
+ parts = re.split(r"\s+", line, maxsplit=len(headers)-1)
+ proc = dict(zip(headers, parts))
+ # 只转换关键数字字段(避免复杂处理)
+ for key in ["pid", "cpu", "mem"]:
+ if key in proc:
+ proc[key] = float(proc[key]) if "." in proc[key] else int(proc[key])
+ procs.append(proc)
+ return procs
+
    def find_proc(self, name: Optional[str] = None, pid: Optional[int] = None) -> List[Dict]:
        """Find processes by name substring or exact PID (no shell pipelines).

        Args:
            name: case-insensitive substring matched against the command line.
            pid: exact process id; takes precedence over name when both given.

        Returns:
            List of process dicts, or a one-element list with an "error" key.
        """
        if not (name or pid):
            return [{"error": self._msg("必须指定进程名称或PID", "Must specify proc name or PID")}]

        # PID lookup.
        # NOTE(review): `ps aux -p` mixes BSD and UNIX option styles — behavior
        # varies across procps versions; verify on the target platform.
        # `_parse_ps_output` is not defined in the visible part of this class;
        # presumably implemented further down — confirm it exists.
        if pid:
            output = run_cmd(["ps", "aux", "-p", str(pid)])
            return self._parse_ps_output(output) if output else []
        # Name lookup: fetch all processes, then filter in Python (avoids grep pipes).
        else:
            output = run_cmd(["ps", "aux"])
            if not output:
                return [{"error": self._msg("查询进程失败", "Failed to find process")}]
            procs = self._parse_ps_output(output)
            # Filter by substring; exclude any grep helper processes.
            return [p for p in procs if name.lower() in p.get("command", "").lower() and "grep" not in p.get("command", "")]
+
+ def get_proc_stat(self, pid: int) -> Dict:
+ """获取进程资源占用(精简参数校验和解析)"""
+ if not isinstance(pid, int) or pid <= 0:
+ return {"error": self._msg("PID必须为正整数", "PID must be positive integer")}
+
+ output = run_cmd(["ps", "-p", str(pid), "-o", "pid,%cpu,%mem,rss,vsz,etime"])
+ if not output:
+ return {"error": self._msg(f"PID {pid} 不存在或查询失败", f"PID {pid} not found or query failed")}
+
+ lines = [line.strip() for line in output.splitlines() if line.strip()]
+ if len(lines) < 2:
+ return {"error": self._msg(f"PID {pid} 不存在", f"PID {pid} does not exist")}
+
+ # 解析状态信息
+ headers = [h.lower().replace("%", "pct") for h in lines[0].split()]
+ parts = re.split(r"\s+", lines[1])
+ stat = dict(zip(headers, parts))
+ # 转换数字字段
+ for key in ["pid", "cpupct", "mempct", "rss", "vsz"]:
+ if key in stat:
+ stat[key] = float(stat[key]) if "." in stat[key] else int(stat[key])
+ return stat
+
+ # ---------- 启/停/重启:systemd服务操作(简洁实现) ----------
+ def _service_op(self, action: str, service_name: str) -> Dict:
+ """统一处理systemd服务操作(减少重复代码)"""
+ if not service_name:
+ return {"error": self._msg("必须指定服务名称", "Must specify service name")}
+
+ output = run_cmd(["systemctl", action, service_name])
+ if output is not None:
+ return {"message": self._msg(f"服务 {service_name} {action} 成功", f"Service {service_name} {action} success")}
+ else:
+ return {"error": self._msg(f"服务 {service_name} {action} 失败", f"Service {service_name} {action} failed")}
+
+ def start_proc(self, service_name: str) -> Dict:
+ return self._service_op("start", service_name)
+
+ def restart_proc(self, service_name: str) -> Dict:
+ return self._service_op("restart", service_name)
+
+ def stop_proc(self, service_name: str) -> Dict:
+ return self._service_op("stop", service_name)
+
+ # ---------- 强制终止进程(简洁实现) ----------
+ def kill_proc(self, pid: int) -> Dict:
+ if not isinstance(pid, int) or pid <= 0:
+ return {"error": self._msg("PID必须为正整数", "PID must be positive integer")}
+
+ # 先尝试正常终止,失败则强制终止(无嵌套try)
+ output = run_cmd(["kill", str(pid)])
+ if output is not None:
+ return {"message": self._msg(f"已向PID {pid} 发送终止信号", f"Termination signal sent to PID {pid}")}
+
+ output = run_cmd(["kill", "-9", str(pid)])
+ if output is not None:
+ return {"message": self._msg(f"已强制终止PID {pid}", f"Forcibly terminated PID {pid}")}
+ else:
+ return {"error": self._msg(f"终止PID {pid} 失败", f"Failed to terminate PID {pid}")}
+
+ # ---------- 辅助函数(精简) ----------
+ def _parse_ps_output(self, output: str) -> List[Dict]:
+ """解析ps输出(无多余逻辑)"""
+ if not output:
+ return []
+ lines = [line.strip() for line in output.splitlines() if line.strip()]
+ if len(lines) < 2:
+ return []
+ headers = [h.lower() for h in lines[0].split()]
+ return [dict(zip(headers, re.split(r"\s+", line, maxsplit=len(headers)-1))) for line in lines[1:]]
+
+ # ---------- 批量执行(精简校验和执行逻辑) ----------
+ def exec_batch(
+ self,
+ actions: List[ProcActionEnum],
+ proc_name: Optional[str] = None,
+ pid: Optional[int] = None,
+ service_name: Optional[str] = None
+ ) -> Dict[str, Union[List[Dict], Dict]]:
+ batch_result = {}
+ # 操作映射(减少if-else)
+ action_map = {
+ ProcActionEnum.LIST: self.list_all_procs,
+ ProcActionEnum.FIND: lambda: self.find_proc(name=proc_name, pid=pid),
+ ProcActionEnum.STAT: lambda: self.get_proc_stat(pid=pid),
+ ProcActionEnum.START: lambda: self.start_proc(service_name=service_name),
+ ProcActionEnum.RESTART: lambda: self.restart_proc(service_name=service_name),
+ ProcActionEnum.STOP: lambda: self.stop_proc(service_name=service_name),
+ ProcActionEnum.KILL: lambda: self.kill_proc(pid=pid)
+ }
+
+ # 执行每个操作(无复杂嵌套)
+ for action in actions:
+ try:
+ batch_result[action.value] = action_map[action]()
+ except Exception as e:
+ batch_result[action.value] = {"error": str(e)}
+ logger.error(f"操作 {action.value} 失败:{str(e)}")
+
+ return batch_result
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/config.json b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/config.json
new file mode 100644
index 00000000..978cd9ae
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/config.json
@@ -0,0 +1,8 @@
+{
+ "tools": {
+ "proc_tool": {
+ "zh": "【进程管理工具】\n功能:支持进程的查看、启动、停止、强制终止等管理操作(基于ps/systemctl/kill命令)\n\n【核心提示】\n1. 操作类型(proc_actions)为枚举值列表,支持多选,可选枚举值及对应参数:\n - 查类:\n · list:所有进程(无需额外参数)\n · find:按名称/PID查询(需传proc_name或pid)\n · stat:进程资源占用(需传pid)\n - 启类:\n · start:启动服务(需传service_name,对应systemd服务名)\n · restart:重启服务(需传service_name)\n - 停类:\n · stop:停止服务(需传service_name)\n · kill:强制终止进程(需传pid)\n2. 输入格式示例:\n - 批量查询:[\"list\", \"find\"] + proc_name=\"nginx\"\n - 启停服务:[\"stop\", \"start\"] + service_name=\"nginx\"\n3. 语言配置:自动读取系统全局配置,不提供外部接口;\n4. 权限说明:启停/终止进程需root权限。\n\n【枚举类定义(必须遵守)】\n- ProcActionEnum(进程操作枚举):list / find / stat / start / restart / stop / kill\n\n【参数详情】\n- proc_actions:进程操作列表(必填,枚举值见上方)\n- proc_name:进程名称(find操作必填)\n- pid:进程PID(find/stat/kill操作必填,正整数)\n- service_name:服务名称(start/restart/stop操作必填,对应systemd服务名)\n\n【返回值说明】\n- success:操作结果(True=成功,False=失败)\n- message:操作信息/错误提示(自动切换中英文)\n- result:操作结果(查类返回结构化列表/字典,启/停类返回命令日志)\n- target:执行目标(固定127.0.0.1)\n- proc_actions:已执行的操作列表",
+ "en": "【Process Management Tool】\nFunction: Supports process management (view/start/stop/force terminate) via ps/systemctl/kill commands\n\n【Core Guidelines】\n1. Proc actions (proc_actions) is an enum list, supports multiple selection. Enum values and required parameters:\n - Query:\n · list: All processes (no extra params)\n · find: Query by name/PID (requires proc_name or pid)\n · stat: Process resource usage (requires pid)\n - Start:\n · start: Start service (requires service_name, systemd service name)\n · restart: Restart service (requires service_name)\n - Stop:\n · stop: Stop service (requires service_name)\n · kill: Force terminate process (requires pid)\n2. Input examples:\n - Batch query: [\"list\", \"find\"] + proc_name=\"nginx\"\n - Start/stop service: [\"stop\", \"start\"] + service_name=\"nginx\"\n3. Language config: Auto-read global system config, no external interface;\n4. Permission note: Start/stop/terminate requires root privileges.\n\n【Enum Class Definition (Must Follow)】\n- ProcActionEnum (Proc Action Enum): list / find / stat / start / restart / stop / kill\n\n【Parameter Details】\n- proc_actions: Proc action list (required, enum values see above)\n- proc_name: Proc name (required for find)\n- pid: Proc PID (required for find/stat/kill, positive integer)\n- service_name: Service name (required for start/restart/stop, systemd service name)\n\n【Return Value Explanation】\n- success: Operation result (True=success, False=failure)\n- message: Operation info/error prompt (auto-switch between Chinese/English)\n- result: Operation result (query returns structured list/dict, start/stop returns command log)\n- target: Execution target (fixed 127.0.0.1)\n- proc_actions: Executed action list"
+ }
+ }
+}
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/deps.toml b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/deps.toml
new file mode 100644
index 00000000..e69de29b
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/tool.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/tool.py
new file mode 100644
index 00000000..82ccbc68
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/proc_tool/tool.py
@@ -0,0 +1,56 @@
+from typing import Dict, List, Optional
+from pydantic import Field
+from servers.oe_cli_mcp_server.mcp_tools.base_tools.proc_tool.base import (
+ init_result_dict,
+ ProcessManager,
+ parse_proc_actions,
+ is_zh,
+ logger
+)
+
def proc_tool(
    proc_actions: List[str] = Field(
        ...,
        description="进程操作列表(枚举值:list/find/stat/start/restart/stop/kill,支持多选)"
    ),
    proc_name: Optional[str] = Field(None, description="进程名称(find操作必填)"),
    pid: Optional[int] = Field(None, description="进程PID(find/stat/kill操作必填)"),
    service_name: Optional[str] = Field(None, description="服务名称(start/restart/stop操作必填)")
) -> Dict:
    """Process management tool supporting batched query/start/stop actions.

    The language setting is read from the global BaseConfig; no external
    override is exposed.
    """
    result = init_result_dict()
    manager = ProcessManager()
    zh = is_zh()

    # Validate and normalize the requested action names first.
    try:
        actions = parse_proc_actions(proc_actions)
    except ValueError as exc:
        result["message"] = str(exc)
        return result
    result["proc_actions"] = [action.value for action in actions]

    # Dispatch the whole batch through the manager.
    try:
        result["result"] = manager.exec_batch(
            actions=actions,
            proc_name=proc_name,
            pid=pid,
            service_name=service_name,
        )
    except Exception as exc:
        result["message"] = (
            f"进程操作失败:{str(exc)}" if zh else f"Proc action failed: {str(exc)}"
        )
        logger.error(f"Proc manager error: {str(exc)}")
        return result

    result["success"] = True
    joined = ",".join(result["proc_actions"])
    result["message"] = (
        f"以下进程操作执行完成:{joined}" if zh else f"Proc actions executed: {joined}"
    )
    return result
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/base.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/base.py
new file mode 100644
index 00000000..48f254de
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/base.py
@@ -0,0 +1,385 @@
+import logging
+import os
+import socket
+import subprocess
+from typing import Dict, Optional, List
+
+import paramiko
+
+from config.public.base_config_loader import BaseConfig, LanguageEnum
+
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
def get_language() -> bool:
    """Return True when the global language config is Chinese, False otherwise."""
    config = BaseConfig().get_config()
    return config.public_config.language == LanguageEnum.ZH
+
+
def get_remote_auth(ip: str) -> Optional[Dict]:
    """Look up SSH credentials for *ip* (matched against host address or name).

    Returns a dict with host/port/username/password, or None when no
    configured remote host matches.
    """
    remote_hosts = BaseConfig().get_config().public_config.remote_hosts
    for entry in remote_hosts:
        if ip not in (entry.host, entry.name):
            continue
        return {
            "host": entry.host,
            "port": entry.port,
            "username": entry.username,
            "password": entry.password,
        }
    return None
+
+
def init_result(target_host: str) -> Dict:
    """Build the uniform result skeleton shared by all ssh_fix helpers."""
    skeleton: Dict = {"success": False, "message": ""}
    skeleton["result"] = []
    skeleton["target"] = target_host
    return skeleton
+
+
def ssh_port_ping(target: str, port: int = 22, timeout: int = 5) -> Dict:
    """Probe SSH reachability by opening a plain TCP connection to *target*:*port*."""
    zh = get_language()
    result = init_result(target)
    try:
        conn = socket.create_connection((target, port), timeout=timeout)
        conn.close()
    except Exception as exc:
        result["message"] = (
            f"SSH 端口 {port} 不可达:{str(exc)}"
            if zh
            else f"SSH port {port} is unreachable: {str(exc)}"
        )
    else:
        result["success"] = True
        result["message"] = (
            f"SSH 端口 {port} 可达" if zh else f"SSH port {port} is reachable"
        )
    # Mirror the message into the result list for uniform output.
    result["result"] = [result["message"]]
    return result
+
+
+def _find_cmd_absolute_path(cmd: str) -> Optional[str]:
+ """查找命令的绝对路径(兼容特殊情况)"""
+ common_paths = ["/usr/bin", "/bin", "/usr/sbin", "/sbin", "/usr/local/bin"]
+ for path in common_paths:
+ cmd_path = os.path.join(path, cmd)
+ if os.path.exists(cmd_path) and os.access(cmd_path, os.X_OK):
+ return cmd_path
+ return None
+
+
def _run_local(cmd: List[str]) -> subprocess.CompletedProcess:
    """Run a local command, resolving argv[0] to an absolute path first.

    Raises FileNotFoundError when the command is not found in the common
    system bin directories, and subprocess.CalledProcessError on a
    non-zero exit.  The caller's *cmd* list is NOT modified.
    """
    cmd_name = cmd[0]
    cmd_path = _find_cmd_absolute_path(cmd_name)
    if not cmd_path:
        raise FileNotFoundError(f"命令不存在:{cmd_name}")

    # Build a fresh argv instead of mutating the caller's list in place.
    argv = [cmd_path] + list(cmd[1:])
    return subprocess.run(
        argv,
        check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
+
+
def _open_ssh(remote_auth: Dict) -> paramiko.SSHClient:
    """Open a password-authenticated SSH session from a remote_auth mapping.

    *remote_auth* must provide host/port/username/password keys (see
    get_remote_auth).  Unknown host keys are auto-accepted; connect and
    banner timeouts are 10 seconds.  The caller must close the returned
    client.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(
        hostname=remote_auth["host"],
        port=remote_auth["port"],
        username=remote_auth["username"],
        password=remote_auth["password"],
        timeout=10,
        banner_timeout=10,
    )
    return ssh
+
+
def check_sshd_status(target: Optional[str]) -> Dict:
    """Run `systemctl status sshd` locally or on a remote host and report it.

    *target* of None/""/"127.0.0.1" selects localhost; other values are
    looked up in the remote-host config and queried through paramiko.
    Returns the standard result dict with the status lines in "result".
    """
    target_host = target.strip() if target else "127.0.0.1"
    result = init_result(target_host)
    is_zh = get_language()

    # Pass the bare command name: _run_local resolves the absolute path
    # itself.  (The previous hard-coded "/usr/bin/systemctl " contained a
    # trailing space, so path resolution failed on every local call.)
    cmd = ["systemctl", "status", "sshd"]

    # Local host
    if target_host == "127.0.0.1":
        try:
            cp = _run_local(cmd)
            result["success"] = True
            result["message"] = (
                "本地 sshd 状态获取成功" if is_zh else "Local sshd status fetched"
            )
            result["result"] = cp.stdout.strip().splitlines()
        except subprocess.CalledProcessError as e:
            result["message"] = (
                f"本地 sshd 状态获取失败:{e.stderr.strip()}"
                if is_zh
                else f"Failed to get local sshd status: {e.stderr.strip()}"
            )
        except FileNotFoundError as e:
            # systemctl missing from the common bin dirs -- report it
            # instead of letting the lookup error propagate to the caller.
            result["message"] = str(e)
        return result

    # Remote host
    remote_auth = get_remote_auth(target_host)
    if not remote_auth or not (remote_auth["username"] and remote_auth["password"]):
        result["message"] = (
            "远程认证配置缺失" if is_zh else "Remote auth config missing"
        )
        return result

    ssh: Optional[paramiko.SSHClient] = None
    try:
        ssh = _open_ssh(remote_auth)
        # Best-effort: resolve systemctl locally and assume the same path
        # remotely; fall back to the bare name.
        systemctl_path = _find_cmd_absolute_path("systemctl") or "systemctl"
        stdin, stdout, stderr = ssh.exec_command(f"{systemctl_path} status sshd")
        out = stdout.read().decode("utf-8", errors="replace").strip()
        err = stderr.read().decode("utf-8", errors="replace").strip()
        if err and "Active:" not in out:
            result["message"] = (
                f"远程 sshd 状态获取失败:{err}"
                if is_zh
                else f"Failed to get remote sshd status: {err}"
            )
        else:
            result["success"] = True
            result["message"] = (
                "远程 sshd 状态获取成功" if is_zh else "Remote sshd status fetched"
            )
            result["result"] = out.splitlines()
    except paramiko.AuthenticationException:
        result["message"] = (
            "SSH 认证失败,请检查用户名和密码"
            if is_zh
            else "SSH authentication failed, check username and password"
        )
    except Exception as e:
        result["message"] = (
            f"远程 sshd 状态检查异常:{str(e)}"
            if is_zh
            else f"Remote sshd status check exception: {str(e)}"
        )
    finally:
        if ssh:
            ssh.close()
    return result
+
+
def fix_sshd_config_and_restart(target: Optional[str]) -> Dict:
    """
    Fix Port/PermitRootLogin/PasswordAuthentication in /etc/ssh/sshd_config
    and run `systemctl restart sshd`.

    *target* of None/""/"127.0.0.1" means localhost; other values are
    fixed over SSH using credentials from the remote-host config.
    Returns the standard result dict with command output in "result".
    """
    target_host = target.strip() if target else "127.0.0.1"
    result = init_result(target_host)
    is_zh = get_language()

    # Config lines that must be present after the fix.
    desired_lines = {
        "Port": "Port 22",
        "PermitRootLogin": "PermitRootLogin yes",
        "PasswordAuthentication": "PasswordAuthentication yes",
    }

    def build_sed_commands() -> str:
        """
        Build one shell command string (sed + grep + restart) for one-shot
        execution:
        - first replace/uncomment each key
        - append the desired line when the key is absent
        - finally restart sshd
        """
        # Resolve absolute command paths; fall back to bare names.
        sed_path = _find_cmd_absolute_path("sed") or "sed"
        grep_path = _find_cmd_absolute_path("grep") or "grep"
        systemctl_path = _find_cmd_absolute_path("systemctl") or "systemctl"

        sed_parts = []
        # Replace or uncomment the three settings in place.
        sed_parts.append(
            f"{sed_path} -i -e 's/^[#]*[[:space:]]*Port[[:space:]].*/Port 22/' "
            f"-e 's/^[#]*[[:space:]]*PermitRootLogin[[:space:]].*/PermitRootLogin yes/' "
            f"-e 's/^[#]*[[:space:]]*PasswordAuthentication[[:space:]].*/PasswordAuthentication yes/' "
            f"/etc/ssh/sshd_config"
        )
        # Append each key that is still missing.
        for k, line in desired_lines.items():
            sed_parts.append(
                f"{grep_path} -q '^{k}[[:space:]]' /etc/ssh/sshd_config || "
                f"echo '{line}' >> /etc/ssh/sshd_config"
            )
        # Restart sshd last.
        sed_parts.append(f"{systemctl_path} restart sshd")
        # Combine into a single shell command.
        return " && ".join(sed_parts)

    # Local fix
    if target_host == "127.0.0.1":
        messages: List[str] = []
        try:
            # Run the same shell logic used for remote hosts.
            shell_cmd = build_sed_commands()
            cp = subprocess.run(
                shell_cmd,
                shell=True,
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )
            if cp.stdout.strip():
                messages.extend(cp.stdout.strip().splitlines())
            if cp.stderr.strip():
                messages.extend(cp.stderr.strip().splitlines())
            result["success"] = True
            result["message"] = (
                "本地 sshd 配置已修复并重启"
                if is_zh
                else "Local sshd config fixed and service restarted"
            )
            if not messages:
                messages.append(result["message"])
            result["result"] = messages
        except subprocess.CalledProcessError as e:
            result["message"] = (
                f"本地 sshd 修复失败:{e.stderr.strip()}"
                if is_zh
                else f"Failed to fix local sshd: {e.stderr.strip()}"
            )
            result["result"] = [result["message"]]
        return result

    # Remote fix
    remote_auth = get_remote_auth(target_host)
    if not remote_auth or not (remote_auth["username"] and remote_auth["password"]):
        result["message"] = (
            "远程认证配置缺失" if is_zh else "Remote auth missing"
        )
        return result

    ssh: Optional[paramiko.SSHClient] = None
    try:
        ssh = _open_ssh(remote_auth)
        shell_cmd = build_sed_commands()
        stdin, stdout, stderr = ssh.exec_command(shell_cmd)
        out = stdout.read().decode("utf-8", errors="replace").strip()
        err = stderr.read().decode("utf-8", errors="replace").strip()

        messages: List[str] = []
        if out:
            messages.extend(out.splitlines())
        if err:
            messages.extend(err.splitlines())

        # Success is decided by the remote command's exit status.
        exit_status = stdout.channel.recv_exit_status()
        if exit_status == 0:
            result["success"] = True
            result["message"] = (
                "远程 sshd 配置已修复并重启"
                if is_zh
                else "Remote sshd config fixed and service restarted"
            )
        else:
            result["message"] = (
                f"远程 sshd 修复失败:{err}"
                if is_zh
                else f"Failed to fix remote sshd: {err}"
            )
        if not messages:
            messages.append(result["message"])
        result["result"] = messages
    except paramiko.AuthenticationException:
        result["message"] = (
            "SSH 认证失败,请检查用户名和密码"
            if is_zh
            else "SSH authentication failed, check username and password"
        )
        result["result"] = [result["message"]]
    except Exception as e:
        result["message"] = (
            f"远程 sshd 修复异常:{str(e)}"
            if is_zh
            else f"Remote sshd fix exception: {str(e)}"
        )
        result["result"] = [result["message"]]
    finally:
        if ssh:
            ssh.close()

    return result
+
+
def fix_sshd_issue(target: Optional[str], port: int = 22) -> Dict:
    """
    End-to-end SSH troubleshooting tool; runs these steps in order:
    1. check SSH port connectivity (TCP ping)
    2. check sshd service status (systemctl status sshd)
    3. fix key sshd_config settings and restart sshd
    Overall success mirrors the fix step; all step output is collected
    into "result".
    """
    target_host = target.strip() if (target and isinstance(target, str)) else "127.0.0.1"
    is_zh = get_language()
    result = init_result(target_host)

    steps: List[str] = []

    # Step 1: port connectivity check
    steps.append(
        "=== 步骤1: 检查 SSH 端口连通性 ==="
        if is_zh
        else "=== Step 1: Check SSH port connectivity ==="
    )
    ping_res = ssh_port_ping(target_host, port=port)
    steps.extend(ping_res.get("result", []) or [ping_res.get("message", "")])

    # Step 2: sshd status check
    steps.append(
        "=== 步骤2: 检查 sshd 服务状态 ==="
        if is_zh
        else "=== Step 2: Check sshd service status ==="
    )
    status_res = check_sshd_status(target_host)
    if status_res.get("message"):
        steps.append(status_res["message"])
    # Show only the first few lines to keep the output short.
    status_lines = status_res.get("result") or []
    if status_lines:
        steps.extend(status_lines[:5])

    # Step 3: fix config and restart sshd
    steps.append(
        "=== 步骤3: 修复 sshd 配置并重启服务 ==="
        if is_zh
        else "=== Step 3: Fix sshd config and restart service ==="
    )
    fix_res = fix_sshd_config_and_restart(target_host)
    if fix_res.get("message"):
        steps.append(fix_res["message"])
    fix_lines = fix_res.get("result") or []
    if fix_lines:
        steps.extend(fix_lines)

    # Overall verdict: the fix step decides success.
    result["success"] = bool(fix_res.get("success"))
    result["message"] = (
        "SSH 问题处理完成" if is_zh else "SSH issue handling finished"
    )
    if not result["success"] and fix_res.get("message"):
        # On failure, surface the more specific fix-step error.
        result["message"] = fix_res["message"]

    result["result"] = steps
    return result
+
+
+
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/config.json b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/config.json
new file mode 100644
index 00000000..1d26e727
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/config.json
@@ -0,0 +1,10 @@
+{
+ "tools": {
+ "ssh_fix_tool": {
+ "zh": "整合的SSH修复工具:解决openEuler系统SSH连接失败问题。\n\n该工具按顺序自动执行以下步骤:\n1. ping SSH端口检查连通性(检测端口22是否可达)\n2. 检查sshd服务状态(执行systemctl status sshd)\n3. 修复/etc/ssh/sshd_config配置文件:\n - 确保Port 22已启用(若不存在则追加,若被注释则解注释并改为22)\n - 设置PermitRootLogin yes(允许root登录)\n - 设置PasswordAuthentication yes(允许密码认证)\n4. 重启sshd服务(执行systemctl restart sshd)\n\n参数:\n - target: 目标主机IP或主机名,None或\"127.0.0.1\"表示本机(必填)\n - port: SSH端口,默认22\n - lang: 语言设置(可选,保留以保持接口一致性)\n\n返回:\n - success: 是否修复成功(True/False)\n - message: 执行结果说明(总体修复结果)\n - target: 目标主机\n - result: 各步骤执行结果列表(包含步骤1-4的详细输出)\n\n使用场景:\n - openEuler 24.03 SP2系统SSH连接失败问题\n - sshd服务未正确配置导致无法连接\n - 需要快速诊断和修复SSH服务配置",
+ "en": "Integrated SSH fix tool: Resolve SSH connection failures on openEuler systems.\n\nThe tool automatically performs the following steps in sequence:\n1. Ping SSH port to check connectivity (check if port 22 is reachable)\n2. Check sshd service status (execute systemctl status sshd)\n3. Fix /etc/ssh/sshd_config configuration:\n - Ensure Port 22 is enabled (add if missing, uncomment and change to 22 if commented)\n - Set PermitRootLogin yes (allow root login)\n - Set PasswordAuthentication yes (allow password authentication)\n4. Restart sshd service (execute systemctl restart sshd)\n\nParameters:\n - target: Target host IP or hostname, None or \"127.0.0.1\" for localhost (required)\n - port: SSH port, default 22\n - lang: Language setting (optional, kept for interface consistency)\n\nReturns:\n - success: Whether the fix was successful (True/False)\n - message: Result description (overall fix result)\n - target: Target host\n - result: List of execution results for each step (detailed output for steps 1-4)\n\nUse cases:\n - SSH connection failures on openEuler 24.03 SP2 systems\n - sshd service misconfiguration preventing connections\n - Quick diagnosis and fix of SSH service configuration"
+ }
+ }
+}
+
+
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/deps.toml b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/deps.toml
new file mode 100644
index 00000000..23eff894
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/deps.toml
@@ -0,0 +1,11 @@
+[system]
+# SSH/sshd 相关系统工具依赖(通常系统已自带,这里仅列出以便需要时安装)
+#ssh_tools = [
+# "yum install -y openssh-clients openssh-server",
+# "apt-get update && apt-get install -y openssh-client openssh-server"
+#]
+
+[pip]
+# Python 依赖(与其他 mcptool 保持一致的基础依赖)
+
+
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/tool.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/tool.py
new file mode 100644
index 00000000..d8a04562
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/ssh_fix_tool/tool.py
@@ -0,0 +1,28 @@
+from typing import Dict, Optional
+
+from config.public.base_config_loader import LanguageEnum
+
+from mcp_tools.base_tools.ssh_fix_tool.base import fix_sshd_issue
+
+
def ssh_fix_tool(
    target: Optional[str] = None,
    port: int = 22,
    lang: Optional[LanguageEnum] = LanguageEnum.ZH,
) -> Dict:
    """
    Integrated SSH fix tool: resolves SSH connection failures.

    Steps, in order:
    1. ping the SSH port to check connectivity
    2. check sshd service status (systemctl status sshd)
    3. fix /etc/ssh/sshd_config:
       - ensure Port 22 is enabled
       - set PermitRootLogin yes
       - set PasswordAuthentication yes
    4. restart the sshd service (systemctl restart sshd)

    NOTE: *lang* is accepted only for interface consistency and is not
    forwarded; the language actually used comes from the global config
    (see base.fix_sshd_issue).
    """
    return fix_sshd_issue(target, port=port)
+
+
+
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/base.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/base.py
new file mode 100644
index 00000000..c7c34450
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/base.py
@@ -0,0 +1,419 @@
+import logging
+import os
+import platform
+import subprocess
+import re
+from enum import Enum
+from typing import Dict, List, Optional
+from config.public.base_config_loader import LanguageEnum, BaseConfig
+
+# 初始化日志
+logger = logging.getLogger("sys_info_tool")
+logger.setLevel(logging.INFO)
+
+# ========== 枚举类定义(不变) ==========
class InfoTypeEnum(str, Enum):
    """Information-type enum (the three requirement categories)."""
    # System
    OS = "os" # version / kernel
    LOAD = "load" # system load
    UPTIME = "uptime" # uptime
    # Hardware
    CPU = "cpu" # CPU info
    MEM = "mem" # memory usage
    DISK = "disk" # disk partitions / usage
    GPU = "gpu" # GPU status
    NET = "net" # NICs / IP
    # Security
    SELINUX = "selinux"# SELinux status
    FIREWALL = "firewall" # firewall rules
+
+# ========== 通用工具函数(不变) ==========
def get_language() -> LanguageEnum:
    """Return the language configured in the global BaseConfig."""
    config = BaseConfig().get_config()
    return config.public_config.language
+
def is_zh() -> bool:
    """True when the configured language is Chinese."""
    return LanguageEnum.ZH == get_language()
+
def init_result_dict(
    target_host: str = "127.0.0.1",
    result_type: str = "dict"
) -> Dict:
    """Create the standard result envelope for sys_info_tool responses.

    *result_type* is accepted for interface compatibility but does not
    alter the returned structure.  "result" is a dict keyed by info type
    (e.g. {"cpu": {...}, "mem": {...}}) and "info_types" records which
    types were collected.
    """
    envelope: Dict = {"success": False, "message": ""}
    envelope["result"] = {}
    envelope["target"] = target_host
    envelope["info_types"] = []
    return envelope
+
def parse_info_types(info_type_list: List[str]) -> List[InfoTypeEnum]:
    """Convert raw strings (model input) into InfoTypeEnum members.

    Raises ValueError (localized) for an empty list or an unknown value.
    """
    valid_values = [e.value for e in InfoTypeEnum]
    if not info_type_list:
        zh_msg = f"信息类型列表不能为空,可选枚举值:{','.join(valid_values)}"
        en_msg = f"Info type list cannot be empty, optional enum values: {','.join(valid_values)}"
        raise ValueError(zh_msg if is_zh() else en_msg)

    members: List[InfoTypeEnum] = []
    for raw in info_type_list:
        try:
            members.append(InfoTypeEnum(raw.strip().lower()))
        except ValueError:
            zh_msg = f"无效的信息类型:{raw},可选枚举值:{','.join(valid_values)}"
            en_msg = f"Invalid info type: {raw}, optional enum values: {','.join(valid_values)}"
            raise ValueError(zh_msg if is_zh() else en_msg)
    return members
+
+# ========== 系统信息采集核心类(修复命令不存在问题) ==========
+class SystemInfoCollector:
+ """系统/硬件/安全信息采集类(优先Python原生,必要时调用系统命令)"""
    def __init__(self):
        # Cache the configured language once per collector instance.
        self.lang = get_language()
+
    def _get_msg(self, zh_msg: str, en_msg: str) -> str:
        """Pick the Chinese or English message based on the configured language."""
        return zh_msg if self.lang == LanguageEnum.ZH else en_msg
+
+ def _run_cmd_safe(self, cmd: List[str]) -> Optional[str]:
+ """安全执行系统命令(命令不存在/执行失败返回None,不抛出异常)"""
+ # 检查命令是否存在(获取绝对路径)
+ cmd_name = cmd[0]
+ cmd_path = self._find_cmd_absolute_path(cmd_name)
+ if not cmd_path:
+ logger.warning(self._get_msg(f"命令不存在:{cmd_name}", f"Command not found: {cmd_name}"))
+ return None
+
+ # 替换为绝对路径执行
+ cmd[0] = cmd_path
+ try:
+ logger.info(f"执行系统命令:{' '.join(cmd)}")
+ result = subprocess.run(
+ cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ text=True,
+ check=True,
+ timeout=10 # 超时保护:10秒
+ )
+ return result.stdout.strip()
+ except subprocess.CalledProcessError as e:
+ error_msg = e.stderr.strip() or e.stdout.strip()
+ logger.error(self._get_msg(f"命令执行失败:{cmd_name},错误:{error_msg}", f"Command failed: {cmd_name}, error: {error_msg}"))
+ return None
+ except subprocess.TimeoutExpired:
+ logger.error(self._get_msg(f"命令执行超时:{cmd_name}", f"Command timeout: {cmd_name}"))
+ return None
+ except Exception as e:
+ logger.error(self._get_msg(f"系统调用异常:{cmd_name},错误:{str(e)}", f"System call exception: {cmd_name}, error: {str(e)}"))
+ return None
+
+ def _find_cmd_absolute_path(self, cmd: str) -> Optional[str]:
+ """辅助函数:查找命令的绝对路径(兼容特殊情况)"""
+ common_paths = ["/usr/bin", "/bin", "/usr/sbin", "/sbin", "/usr/local/bin"]
+ for path in common_paths:
+ cmd_path = os.path.join(path, cmd)
+ if os.path.exists(cmd_path) and os.access(cmd_path, os.X_OK):
+ return cmd_path
+ return None
+
+ # ---------- 系统类信息 ----------
    def get_os_info(self) -> Dict:
        """Return OS release/kernel info via platform (no external commands)."""
        os_info = {
            "system": platform.system(),
            "release": platform.release(),
            "version": platform.version(),
            "kernel": platform.uname().release,
            "architecture": platform.machine()
        }
        # Enrich with distro info (e.g. openEuler) from /etc/os-release.
        if os.path.exists("/etc/os-release"):
            try:
                with open("/etc/os-release", "r", encoding="utf-8") as f:
                    for line in f:
                        if line.startswith("PRETTY_NAME="):
                            os_info["pretty_name"] = line.strip().split("=")[1].strip('"')
                        elif line.startswith("VERSION_ID="):
                            os_info["version_id"] = line.strip().split("=")[1].strip('"')
            except Exception as e:
                logger.error(self._get_msg(f"读取os-release失败:{str(e)}", f"Failed to read os-release: {str(e)}"))
        return os_info
+
+ def get_load_info(self) -> Dict:
+ """获取系统负载(无命令依赖,不变)"""
+ try:
+ load_avg = os.getloadavg()
+ return {
+ "1min": load_avg[0],
+ "5min": load_avg[1],
+ "15min": load_avg[2],
+ "cpu_count": os.cpu_count() or 0
+ }
+ except Exception as e:
+ logger.error(self._get_msg(f"获取系统负载失败:{str(e)}", f"Failed to get load avg: {str(e)}"))
+ return {"error": self._get_msg("获取系统负载失败", "Failed to get load average")}
+
    def get_uptime_info(self) -> Dict:
        """Return human-readable uptime; falls back to /proc/uptime when
        the `uptime` command is unavailable or fails."""
        uptime_output = self._run_cmd_safe(["uptime", "-p"])
        if uptime_output:
            return {"uptime": uptime_output}
        else:
            # Command missing/failed: compute from /proc/uptime instead
            # (pure-Python path, no external binary needed).
            try:
                with open("/proc/uptime", "r", encoding="utf-8") as f:
                    total_seconds = float(f.readline().split()[0])
                    hours = int(total_seconds // 3600)
                    minutes = int((total_seconds % 3600) // 60)
                    uptime_str = self._get_msg(f"运行 {hours} 小时 {minutes} 分钟", f"up {hours} hours, {minutes} minutes")
                    return {"uptime": uptime_str, "note": self._get_msg("基于/proc/uptime计算", "Calculated from /proc/uptime")}
            except Exception as e:
                logger.error(self._get_msg(f"获取运行时间失败:{str(e)}", f"Failed to get uptime: {str(e)}"))
                return {"error": self._get_msg("获取运行时间失败", "Failed to get uptime")}
+
+ # ---------- 硬件类信息 ----------
    def get_cpu_info(self) -> Dict:
        """Return the CPU model and core count (no external commands)."""
        cpu_info = {}
        try:
            # Read the model name from /proc/cpuinfo (first occurrence).
            if os.path.exists("/proc/cpuinfo"):
                with open("/proc/cpuinfo", "r", encoding="utf-8") as f:
                    for line in f:
                        if line.startswith("model name"):
                            cpu_info["model"] = line.strip().split(":")[1].strip()
                            break
            # NOTE(review): os.cpu_count() reports *logical* CPUs, so the
            # "physical_cores" key name is misleading -- kept for interface
            # compatibility.
            cpu_info["physical_cores"] = os.cpu_count() or 0
            return cpu_info
        except Exception as e:
            logger.error(self._get_msg(f"获取CPU信息失败:{str(e)}", f"Failed to get CPU info: {str(e)}"))
            return {"error": self._get_msg("获取CPU信息失败", "Failed to get CPU information")}
+
    def get_mem_info(self) -> Dict:
        """Return memory usage (MB) parsed from /proc/meminfo (no commands)."""
        mem_info = {}
        try:
            if os.path.exists("/proc/meminfo"):
                with open("/proc/meminfo", "r", encoding="utf-8") as f:
                    # /proc/meminfo values are in kB; convert to MB.
                    for line in f:
                        if line.startswith("MemTotal:"):
                            mem_info["total_mb"] = int(line.strip().split()[1]) // 1024
                        elif line.startswith("MemFree:"):
                            mem_info["free_mb"] = int(line.strip().split()[1]) // 1024
                        elif line.startswith("MemAvailable:"):
                            mem_info["available_mb"] = int(line.strip().split()[1]) // 1024
                # Derive used amount and percentage when both inputs exist.
                if "total_mb" in mem_info and "free_mb" in mem_info:
                    mem_info["used_mb"] = mem_info["total_mb"] - mem_info["free_mb"]
                    mem_info["used_percent"] = round(mem_info["used_mb"] / mem_info["total_mb"] * 100, 2)
            return mem_info
        except Exception as e:
            logger.error(self._get_msg(f"获取内存信息失败:{str(e)}", f"Failed to get memory info: {str(e)}"))
            return {"error": self._get_msg("获取内存信息失败", "Failed to get memory information")}
+
    def get_disk_info(self) -> List[Dict]:
        """Return disk partition/usage info parsed from `df -h -T`."""
        disk_list = []
        # Resolve the absolute path of df up front (typical openEuler layout).
        df_cmd = self._find_cmd_absolute_path("df")
        if not df_cmd:
            logger.error(self._get_msg("未找到df命令,无法获取磁盘信息", "df command not found, cannot get disk info"))
            return [{"error": self._get_msg("未找到df命令,请安装coreutils包", "df command not found, please install coreutils")}]

        try:
            # Run df via the absolute path (avoids PATH issues).
            disk_output = self._run_cmd_safe([df_cmd, "-h", "-T"])

            if not disk_output:
                return [{"error": self._get_msg("df命令执行失败,无法获取磁盘信息", "df command execution failed")}]

            # Parse header + data rows.
            lines = [line.strip() for line in disk_output.splitlines() if line.strip()]
            if len(lines) < 2:
                logger.warning(self._get_msg("df未返回有效磁盘信息", "df returned no valid disk info"))
                return [{"error": self._get_msg("df未返回有效磁盘信息", "df returned no valid disk information")}]

            for line in lines[1:]:
                # df -h -T yields 7 columns; mount point may contain spaces,
                # hence maxsplit=6.
                parts = re.split(r"\s+", line, maxsplit=6)
                if len(parts) != 7:
                    logger.debug(f"跳过无效行:{line}")
                    continue
                used_percent = parts[5].strip("%") if "%" in parts[5] else parts[5]
                disk_list.append({
                    "device": parts[0],
                    "fstype": parts[1],
                    "size": parts[2],
                    "used": parts[3],
                    "avail": parts[4],
                    "used_percent": used_percent,
                    "mountpoint": parts[6]
                })
            return disk_list if disk_list else [{"error": self._get_msg("未检测到有效磁盘分区", "No valid disk partitions detected")}]
        except Exception as e:
            logger.error(self._get_msg(f"获取磁盘信息失败:{str(e)}", f"Failed to get disk info: {str(e)}"))
            return [{"error": self._get_msg("获取磁盘信息失败", "Failed to get disk information")}]
+
+ def get_gpu_info(self) -> List[Dict]:
+ """获取显卡状态(修复nvidia-smi命令不存在问题)"""
+ gpu_list = []
+ # 检查nvidia-smi是否存在
+ nvidia_smi_path = self._find_cmd_absolute_path("nvidia-smi")
+ if not nvidia_smi_path:
+ logger.warning(self._get_msg("未检测到nvidia-smi命令,跳过GPU信息采集", "nvidia-smi not found, skip GPU info collection"))
+ return [{"note": self._get_msg("未检测到nvidia-smi命令(无NVIDIA显卡或未安装驱动)", "nvidia-smi not found (no NVIDIA GPU or driver)")}]
+
+ # 执行命令
+ gpu_output = self._run_cmd_safe([
+ nvidia_smi_path,
+ "--query-gpu=name,memory.total,memory.used,utilization.gpu",
+ "--format=csv,noheader,nounits"
+ ])
+ if gpu_output:
+ for line in gpu_output.splitlines():
+ if not line.strip():
+ continue
+ parts = line.strip().split(", ")
+ if len(parts) != 4:
+ logger.debug(f"跳过无效GPU行:{line}")
+ continue
+ name, mem_total, mem_used, util = parts
+ gpu_list.append({
+ "name": name.strip(),
+ "memory_total_mb": int(mem_total) if mem_total.isdigit() else 0,
+ "memory_used_mb": int(mem_used) if mem_used.isdigit() else 0,
+ "utilization_percent": int(util) if util.isdigit() else 0
+ })
+ return gpu_list if gpu_list else [{"note": self._get_msg("未检测到可用GPU", "No available GPU detected")}]
+ else:
+ return [{"error": self._get_msg("GPU信息采集失败", "Failed to collect GPU information")}]
+
+ def get_net_info(self) -> List[Dict]:
+ """获取网卡/IP信息(修复ip命令不存在问题)"""
+ # 检查ip命令是否存在
+ ip_cmd_path = self._find_cmd_absolute_path("ip")
+ if not ip_cmd_path:
+ logger.warning(self._get_msg("未找到ip命令,尝试通过/proc/net/dev获取网卡信息", "ip command not found, try to get net info from /proc/net/dev"))
+ # 降级方案:通过/proc/net/dev获取网卡名称(无IP信息)
+ net_list = []
+ try:
+ with open("/proc/net/dev", "r", encoding="utf-8") as f:
+ lines = [line.strip() for line in f.readlines() if line.strip() and not line.startswith("Inter-|") and not line.startswith(" face |")]
+ for line in lines:
+ if ":" in line:
+ iface = line.split(":")[0].strip()
+ net_list.append({
+ "interface": iface,
+ "ips": [{"note": self._get_msg("ip命令不存在,无法获取IP信息", "ip command not found, cannot get IP info")}]
+ })
+ return net_list
+ except Exception as e:
+ logger.error(self._get_msg(f"获取网络信息失败:{str(e)}", f"Failed to get net info: {str(e)}"))
+ return [{"error": self._get_msg("获取网络信息失败(ip命令不存在)", "Failed to get network information (ip command not found)")}]
+
+ # ip命令存在,正常采集
+ net_output = self._run_cmd_safe([ip_cmd_path, "addr", "show"])
+ if not net_output:
+ return [{"error": self._get_msg("ip命令执行失败,无法获取网络信息", "ip command execution failed")}]
+
+ net_list = []
+ current_iface = None
+ for line in net_output.splitlines():
+ line = line.strip()
+ if line.startswith(("1:", "2:", "3:", "4:", "5:")): # 网卡名称行
+ current_iface = line.split(":")[1].strip()
+ net_list.append({"interface": current_iface, "ips": []})
+ elif current_iface and line.startswith("inet "): # IPv4地址
+ ip_part = line.split()[1]
+ net_list[-1]["ips"].append({"type": "ipv4", "address": ip_part})
+ elif current_iface and line.startswith("inet6 "): # IPv6地址
+ ip_part = line.split()[1]
+ net_list[-1]["ips"].append({"type": "ipv6", "address": ip_part})
+ return net_list if net_list else [{"error": self._get_msg("未检测到有效网卡信息", "No valid network interface detected")}]
+
+ # ---------- 安全类信息 ----------
+ def get_selinux_info(self) -> Dict:
+ """获取SELinux状态(修复getenforce命令不存在问题)"""
+ # 检查getenforce命令是否存在
+ getenforce_path = self._find_cmd_absolute_path("getenforce")
+ if getenforce_path:
+ selinux_output = self._run_cmd_safe([getenforce_path])
+ if selinux_output:
+ return {"status": selinux_output.strip()}
+
+ # 命令不存在时,通过配置文件判断
+ logger.warning(self._get_msg("未找到getenforce命令,尝试通过配置文件判断SELinux状态", "getenforce not found, try to judge SELinux status from config"))
+ try:
+ with open("/etc/selinux/config", "r", encoding="utf-8") as f:
+ for line in f:
+ line = line.strip()
+ if line.startswith("SELINUX=") and not line.startswith("#"):
+ selinux_mode = line.split("=")[1].strip()
+ return {
+ "status": selinux_mode.upper(),
+ "note": self._get_msg("基于/etc/selinux/config配置判断", "Judged from /etc/selinux/config")
+ }
+ return {"status": self._get_msg("未知", "Unknown"), "note": self._get_msg("未找到SELinux配置", "SELinux config not found")}
+ except Exception as e:
+ logger.error(self._get_msg(f"获取SELinux状态失败:{str(e)}", f"Failed to get SELinux status: {str(e)}"))
+ return {"error": self._get_msg("获取SELinux状态失败", "Failed to get SELinux status")}
+
+ def get_firewall_info(self) -> Dict:
+ """获取防火墙规则(修复firewalld相关命令不存在问题)"""
+ firewall_info = {}
+ # 检查systemctl命令是否存在
+ systemctl_path = self._find_cmd_absolute_path("systemctl")
+ if not systemctl_path:
+ logger.warning(self._get_msg("未找到systemctl命令,无法检查防火墙状态", "systemctl not found, cannot check firewall status"))
+ firewall_info["status"] = self._get_msg("未知(无systemctl)", "Unknown (systemctl not found)")
+ return firewall_info
+
+ # 检查firewalld状态
+ status_output = self._run_cmd_safe([systemctl_path, "is-active", "firewalld"])
+ if status_output == "active":
+ firewall_info["status"] = "active"
+ # 检查firewall-cmd命令是否存在
+ firewall_cmd_path = self._find_cmd_absolute_path("firewall-cmd")
+ if firewall_cmd_path:
+ ports_output = self._run_cmd_safe([firewall_cmd_path, "--list-ports"])
+ firewall_info["open_ports"] = ports_output.strip().split() if ports_output and ports_output.strip() else []
+ else:
+ firewall_info["open_ports"] = [self._get_msg("firewall-cmd命令不存在,无法获取开放端口", "firewall-cmd not found, cannot get open ports")]
+ elif status_output == "inactive":
+ firewall_info["status"] = "inactive"
+ else:
+ # 未安装firewalld或命令执行失败
+ firewall_info["status"] = self._get_msg("未安装firewalld或状态未知", "firewalld not installed or status unknown")
+ return firewall_info
+
+ # ---------- 批量采集多类型信息 ----------
+ def collect_batch(self, info_types: List[InfoTypeEnum]) -> Dict[str, Dict]:
+ """
+ 批量采集多类信息
+ :param info_types: InfoTypeEnum列表
+ :return: 结构化结果(key为信息类型字符串,value为对应采集结果)
+ """
+ batch_result = {}
+ info_type_map = {
+ InfoTypeEnum.OS: self.get_os_info,
+ InfoTypeEnum.LOAD: self.get_load_info,
+ InfoTypeEnum.UPTIME: self.get_uptime_info,
+ InfoTypeEnum.CPU: self.get_cpu_info,
+ InfoTypeEnum.MEM: self.get_mem_info,
+ InfoTypeEnum.DISK: self.get_disk_info,
+ InfoTypeEnum.GPU: self.get_gpu_info,
+ InfoTypeEnum.NET: self.get_net_info,
+ InfoTypeEnum.SELINUX: self.get_selinux_info,
+ InfoTypeEnum.FIREWALL: self.get_firewall_info
+ }
+ for info_type in info_types:
+ try:
+ batch_result[info_type.value] = info_type_map[info_type]()
+ except Exception as e:
+ logger.error(self._get_msg(f"采集{info_type.value}信息失败:{str(e)}", f"Failed to collect {info_type.value} info: {str(e)}"))
+ batch_result[info_type.value] = {"error": self._get_msg(f"采集{info_type.value}信息失败", f"Failed to collect {info_type.value} information")}
+ return batch_result
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/config.json b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/config.json
new file mode 100644
index 00000000..56df6d52
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/config.json
@@ -0,0 +1,8 @@
+{
+ "tools": {
+ "sys_info_tool": {
+ "zh": "【系统/硬件/安全信息采集工具】\n功能:批量采集OpenEuler系统的系统、硬件、安全类信息(纯Python+系统命令安全调用)\n\n【核心提示】\n1. 信息类型(info_types)为枚举值列表,支持多选,可选枚举值如下:\n - 系统类:os(版本/内核)、load(负载)、uptime(运行时间)\n - 硬件类:cpu(型号/核心)、mem(内存占用)、disk(分区/使用率)、gpu(显卡状态)、net(网卡/IP)\n - 安全类:selinux(状态)、firewall(防火墙规则)\n2. 输入格式示例:[\"cpu\", \"mem\", \"disk\"](必须为列表,单个类型也需用列表包裹,如[\"os\"]);\n3. 无额外参数,仅需传入info_types列表即可批量采集对应信息;\n4. 语言配置:自动读取系统全局配置(中文/英文),不提供外部设置接口;\n5. 依赖说明:GPU信息采集需安装nvidia-smi(仅支持NVIDIA显卡),防火墙信息需安装firewalld。\n\n【枚举类定义(必须遵守)】\n- InfoTypeEnum(信息类型枚举):os / load / uptime / cpu / mem / disk / gpu / net / selinux / firewall\n\n【参数详情】\n- info_types:信息类型列表(必填,枚举值见上方,支持多选,格式为列表)\n\n【返回值说明】\n- success:采集结果(True=成功,False=失败)\n- message:采集信息/错误提示(根据全局语言配置自动切换)\n- result:采集结果(结构化嵌套字典,key为信息类型,value为对应采集数据)\n- target:执行目标(固定为127.0.0.1,本地执行)\n- info_types:已采集的信息类型列表",
+ "en": "【System/Hardware/Security Info Collector】\nFunction: Batch collect system, hardware, security info of OpenEuler (Python native + safe system command call)\n\n【Core Guidelines】\n1. Info types (info_types) is an enum value list, supports multiple selection, optional enum values:\n - System: os (version/kernel), load (system load), uptime (system uptime)\n - Hardware: cpu (model/core), mem (memory usage), disk (partition/usage), gpu (GPU status), net (network card/IP)\n - Security: selinux (SELinux status), firewall (firewall rules)\n2. Input format example: [\"cpu\", \"mem\", \"disk\"] (must be a list, single type also needs to be wrapped in a list, e.g. [\"os\"]);\n3. No additional parameters, just pass info_types list to batch collect corresponding info;\n4. Language configuration: Automatically read global system configuration (Chinese/English), no external setting interface;\n5. Dependency note: GPU info requires nvidia-smi (NVIDIA GPU only), firewall info requires firewalld.\n\n【Enum Class Definition (Must Follow)】\n- InfoTypeEnum (Info Type Enum): os / load / uptime / cpu / mem / disk / gpu / net / selinux / firewall\n\n【Parameter Details】\n- info_types: Info type list (required, enum values see above, supports multiple selection, format is list)\n\n【Return Value Explanation】\n- success: Collection result (True=success, False=failure)\n- message: Collection info/error prompt (automatically switch based on global config)\n- result: Collected info (structured nested dict, key is info type, value is corresponding collected data)\n- target: Execution target (fixed as 127.0.0.1, local execution)\n- info_types: Collected info type list"
+ }
+ }
+}
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/deps.toml b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/deps.toml
new file mode 100644
index 00000000..e69de29b
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/tool.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/tool.py
new file mode 100644
index 00000000..626027ff
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/base_tools/sys_info_tool/tool.py
@@ -0,0 +1,50 @@
+from typing import Dict, List
+from pydantic import Field
+
+from servers.oe_cli_mcp_server.mcp_tools.base_tools.sys_info_tool.base import (
+ init_result_dict,
+ SystemInfoCollector,
+ InfoTypeEnum,
+ parse_info_types,
+ is_zh,
+ logger
+)
+
def sys_info_tool(
    info_types: List[str] = Field(
        ...,
        description="信息类型列表(枚举值可选:os/load/uptime/cpu/mem/disk/gpu/net/selinux/firewall,支持多选)"
    )
) -> Dict:
    """
    System / hardware / security info collection tool (batch mode).

    The output language follows the global BaseConfig; there is no external
    switch. Returns the standard result dict from init_result_dict(), with
    "result" holding the per-type collected data.
    """
    result = init_result_dict()
    collector = SystemInfoCollector()
    use_zh = is_zh()

    # Validate and convert the raw string list into InfoTypeEnum members.
    try:
        enum_types = parse_info_types(info_types)
    except ValueError as e:
        result["message"] = str(e)
        return result
    result["info_types"] = [t.value for t in enum_types]

    try:
        result["result"] = collector.collect_batch(enum_types)
        result["success"] = True
        joined = ",".join(result["info_types"])
        result["message"] = f"以下信息采集完成:{joined}" if use_zh else f"Collected info types: {joined}"
    except Exception as e:
        result["message"] = f"信息采集失败:{str(e)}" if use_zh else f"Info collection failed: {str(e)}"
        logger.error(f"System info batch collection error: {str(e)}")

    return result
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/mcp_tools/tool_type.py b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/tool_type.py
new file mode 100644
index 00000000..cafb249c
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/mcp_tools/tool_type.py
@@ -0,0 +1,10 @@
+from enum import Enum
+
+
class ToolType(Enum):
    """Categories of MCP tool packages; each value is the package's directory name."""

    BASE = "base_tools"
    PERSONAL = "personal_tools"
    AI = "AI_tools"
    MIRROR = "mirror_tools"
    CAL = "cal_tools"
    RAG = "rag_tools"
diff --git a/mcp_center/servers/oe_cli_mcp_server/run.sh b/mcp_center/servers/oe_cli_mcp_server/run.sh
new file mode 100755
index 00000000..6b1bc659
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/run.sh
@@ -0,0 +1,41 @@
#!/bin/bash
set -e

# Key path (the only value that should need editing).
VENV_PATH="/usr/lib/euler-copilot-framework/mcp_center/servers/oe_cli_mcp_server/venv/global"


# Install venv tooling. Best-effort: under `set -e` a failing yum (output
# fully silenced) would otherwise abort the whole script with no message,
# so a failure here must not be fatal.
echo "=== 安装 python3-venv ==="
yum install -y python3-venv --skip-broken >/dev/null 2>&1 || echo "WARN: yum install python3-venv failed; assuming venv support is already present"

# Create the venv's parent directory. FIX: quote the command substitution
# so a path containing spaces does not word-split (ShellCheck SC2046).
mkdir -p "$(dirname "$VENV_PATH")" >/dev/null 2>&1

# 1. Create the virtual environment when missing or incomplete
#    (--system-site-packages inherits RPM-installed Python dependencies).
if [ ! -d "$VENV_PATH" ] || [ ! -f "$VENV_PATH/bin/activate" ]; then
    echo "=== 未找到虚拟环境或环境不完整,创建并继承系统 RPM 依赖 ==="
    rm -rf "$VENV_PATH"            # drop any broken leftovers first
    python3 -m venv "$VENV_PATH" --system-site-packages
    chmod -R 755 "$VENV_PATH"      # ensure the venv's executables are runnable
    echo "虚拟环境创建成功:$VENV_PATH"
else
    echo "=== 虚拟环境已存在且完整:$VENV_PATH ==="
fi

# 2. Activate the virtual environment.
source "$VENV_PATH/bin/activate"
echo "=== 虚拟环境激活成功:$VIRTUAL_ENV ==="


# 3. Deploy the systemd service, pointing ExecStart at the venv's python.
cp /usr/lib/euler-copilot-framework/mcp_center/servers/oe_cli_mcp_server/mcp-server.service /etc/systemd/system/
sed -i "s|ExecStart=.*python|ExecStart=$VENV_PATH/bin/python|" /etc/systemd/system/mcp-server.service
systemctl daemon-reload
systemctl enable mcp-server --now

# 4. Global command symlink.
chmod +x /usr/lib/euler-copilot-framework/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli.py
rm -f /usr/local/bin/mcp-server
ln -s /usr/lib/euler-copilot-framework/mcp_center/servers/oe_cli_mcp_server/mcp_server/cli.py /usr/local/bin/mcp-server
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/server.py b/mcp_center/servers/oe_cli_mcp_server/server.py
new file mode 100644
index 00000000..61c33a45
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/server.py
@@ -0,0 +1,15 @@
# server.py
# Entry point for the oe_cli MCP server.
import sys
from pathlib import Path
# Make the mcp_center package root importable BEFORE the project imports
# below run. NOTE: the number of .parent hops must match this file's depth
# in the tree — adjust it if the file moves.
mcp_center_dir = Path(__file__).parent.parent.parent
sys.path.append(str(mcp_center_dir))
from config.private.mcp_server.config_loader import McpServerConfig
from servers.oe_cli_mcp_server.mcp_server.mcp_manager import McpServer

# Load the server's private configuration (provides the listening port).
config = McpServerConfig().get_config().private_config



if __name__ == "__main__":
    # Bind on all interfaces with the configured port and serve until stopped.
    server = McpServer("mcp实例", host="0.0.0.0", port=config.port)
    server.start()
diff --git a/mcp_center/servers/oe_cli_mcp_server/setup.py b/mcp_center/servers/oe_cli_mcp_server/setup.py
new file mode 100644
index 00000000..59149dfe
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/setup.py
@@ -0,0 +1,14 @@
from setuptools import setup, find_packages

setup(
    name="mcp-server",  # command / distribution name
    version="0.1.0",
    packages=find_packages(),  # auto-discovers the mcp_server package
    entry_points={
        "console_scripts": [
            # Maps the `mcp-server` command to cli.py's main().
            # NOTE(review): this path expects a module mcp_server/cli/cli.py;
            # the tree also ships mcp_server/cli.py — confirm which one holds main().
            "mcp-server = mcp_server.cli.cli:main",
        ],
    },
    python_requires=">=3.9",  # minimum supported Python version
)
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/util/get_project_root.py b/mcp_center/servers/oe_cli_mcp_server/util/get_project_root.py
new file mode 100644
index 00000000..f5e68f58
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/util/get_project_root.py
@@ -0,0 +1,20 @@
+import os
+
def get_project_root(marker: str = "mcp_server") -> str:
    """
    Walk upward from this file until a directory containing *marker* is found.

    FIX: the original docstring claimed the marker was a ``.git`` folder,
    but the code actually searches for a ``mcp_server`` directory; the
    docs now match the code, and the marker is a (backward-compatible)
    parameter instead of a hard-coded constant.

    :param marker: directory name that identifies the project root
    :return: absolute path of the project root
    :raises FileNotFoundError: when no ancestor directory contains *marker*
    """
    current_path = os.path.abspath(__file__)

    while not os.path.exists(os.path.join(current_path, marker)):
        parent_path = os.path.dirname(current_path)
        # Stop at the filesystem root (e.g. "/" or "C:\") to avoid looping forever.
        if parent_path == current_path:
            raise FileNotFoundError("未找到项目根目录(未发现 mcp_server 文件夹)")
        current_path = parent_path

    return current_path
diff --git a/mcp_center/servers/oe_cli_mcp_server/util/get_tool_state_path.py b/mcp_center/servers/oe_cli_mcp_server/util/get_tool_state_path.py
new file mode 100644
index 00000000..b8d782b7
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/util/get_tool_state_path.py
@@ -0,0 +1,12 @@
+import os
+
+from servers.oe_cli_mcp_server.util.get_project_root import get_project_root
+
+
def get_tool_state_path() -> str:
    """Return the tool-state persistence file path, creating its parent directory."""
    state_file = os.path.join(get_project_root(), "data", "tool_state.json")
    # Guarantee data/ exists so callers can write the file immediately.
    os.makedirs(os.path.dirname(state_file), exist_ok=True)
    return state_file
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/util/get_type.py b/mcp_center/servers/oe_cli_mcp_server/util/get_type.py
new file mode 100644
index 00000000..0744eb0f
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/util/get_type.py
@@ -0,0 +1,14 @@
+from mcp_tools.tool_type import ToolType
+
+
def get_type(package : str):
    """
    Map a tool-package directory name to its ToolType category.

    BUGFIX: the original table mapped every known package name to
    ToolType.BASE, which made all other ToolType members unreachable;
    each name now maps to its corresponding member.

    :param package: package directory name, e.g. "base_tools"
    :return: matching ToolType member (ToolType.BASE for unknown names)
    """
    type_map = {
        "base_tools": ToolType.BASE,
        "personal_tools": ToolType.PERSONAL,
        "AI_tools": ToolType.AI,
        "mirror_tools": ToolType.MIRROR,
        "cal_tools": ToolType.CAL,
        "rag_tools": ToolType.RAG,
    }
    # Unknown packages fall back to the base category (original behavior).
    return type_map.get(package, ToolType.BASE)
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/util/test_llm_valid.py b/mcp_center/servers/oe_cli_mcp_server/util/test_llm_valid.py
new file mode 100644
index 00000000..0d04fba4
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/util/test_llm_valid.py
@@ -0,0 +1,35 @@
+import requests
+
def is_llm_config_valid(API_URL: str, API_KEY: str = "", MODEL_NAME: str = "") -> bool:
    """
    Minimal reachability probe for an OpenAI-style chat-completions endpoint.

    :param API_URL: model API address
    :param API_KEY: optional API key (sent as a Bearer token when non-empty)
    :param MODEL_NAME: model name
    :return: True if the endpoint answers HTTP 200 with a "choices" field
    """
    try:
        # Smallest request body the API will accept, tuned for a fast answer.
        payload = {
            "model": MODEL_NAME,
            "messages": [{"role": "user", "content": "hi"}],
            "max_tokens": 10,
            "temperature": 0.0
        }
        headers = {"Content-Type": "application/json"}
        if API_KEY:
            headers["Authorization"] = f"Bearer {API_KEY}"
        # 5s timeout so an unreachable endpoint fails fast.
        # SECURITY NOTE: verify=False disables TLS certificate checking —
        # tolerable for a connectivity probe, never for real traffic.
        response = requests.post(
            API_URL,
            json=payload,
            headers=headers,
            timeout=5,
            verify=False
        )
        # HTTP 200 plus a "choices" key counts as reachable.
        return response.status_code == 200 and "choices" in response.json()
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Any request/parse failure means "not reachable".
        return False
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/util/tool_package_file_check.py b/mcp_center/servers/oe_cli_mcp_server/util/tool_package_file_check.py
new file mode 100644
index 00000000..526bb6f7
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/util/tool_package_file_check.py
@@ -0,0 +1,17 @@
+import os
+
+
def tool_package_file_check(path: str) -> bool:
    """
    Verify that a tool-package directory contains its required files.

    :param path: tool-package directory
    :return: True when both config.json and tool.py are present
    """
    required = ("config.json", "tool.py")
    return all(os.path.exists(os.path.join(path, name)) for name in required)
+
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/util/venv_util.py b/mcp_center/servers/oe_cli_mcp_server/util/venv_util.py
new file mode 100644
index 00000000..65169950
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/util/venv_util.py
@@ -0,0 +1,66 @@
+# util/venv_util.py(适配 openEuler 系统,仅保留 yum 逻辑)
+import logging
+import os
+import subprocess
+import toml
+
def get_current_venv_pip() -> str:
    """
    Locate pip inside the currently activated virtual environment
    (Linux/openEuler layout only).

    :return: absolute path of the venv's pip executable
    :raises Exception: when no virtual environment is active
    """
    # An activated venv exports VIRTUAL_ENV; without it pip cannot be located.
    venv_path = os.getenv("VIRTUAL_ENV")
    if venv_path:
        return os.path.join(venv_path, "bin", "pip")
    raise Exception("未激活mcp虚拟环境,请先执行 source ./venv/global/bin/activate(文档2-142节)")
+
def execute_simple_deps_script(deps_script_path: str):
    """
    Run a simplified deps.toml install script.

    Install order: system packages first (yum), then Python packages into
    the currently activated virtual environment's pip.

    :param deps_script_path: path of the simplified deps.toml
    :raises FileNotFoundError: when the deps script is missing
    :raises subprocess.CalledProcessError: when an install command fails
    """
    # 1. Load the deps.toml file.
    if not os.path.exists(deps_script_path):
        raise FileNotFoundError(f"依赖脚本不存在:{deps_script_path}")
    with open(deps_script_path, "r", encoding="utf-8") as fh:
        deps_data = toml.load(fh)

    # 2. System dependencies via yum (openEuler).
    system_deps = deps_data.get("system_deps", {})
    if system_deps:
        logging.info("=== 开始安装系统依赖(openEuler yum)===")
        for dep_name, yum_cmd in system_deps.items():
            # Probe with `<dep> --version`; exit code 0 means already installed.
            verify_cmd = f"{dep_name} --version" if dep_name != "docker" else "docker --version"
            probe = subprocess.run(verify_cmd, shell=True, capture_output=True, text=True)
            if probe.returncode == 0:
                logging.info(f"系统依赖[{dep_name}]已安装,跳过")
                continue

            logging.info(f"正在安装系统依赖[{dep_name}]:{yum_cmd}")
            # check=True surfaces failures to the caller as CalledProcessError.
            subprocess.run(yum_cmd, shell=True, check=True, text=True)
            logging.info(f"系统依赖[{dep_name}]安装完成\n")

    # 3. Python dependencies via the active venv's pip.
    pip_deps = deps_data.get("pip_deps", {})
    if pip_deps:
        logging.info("=== 开始安装Python依赖(当前虚拟环境)===")
        pip_path = get_current_venv_pip()
        for dep_name, version in pip_deps.items():
            install_cmd = [pip_path, "install", "-q", f"{dep_name}{version}"]  # -q: quiet install
            logging.info(f"正在安装Python依赖[{dep_name}]:{' '.join(install_cmd)}")
            subprocess.run(install_cmd, check=True, text=True)
            logging.info(f"Python依赖[{dep_name}]安装完成\n")

    logging.info(f"所有依赖安装完成(依赖脚本:{deps_script_path})")
\ No newline at end of file
diff --git a/mcp_center/servers/oe_cli_mcp_server/util/zip_tool_util.py b/mcp_center/servers/oe_cli_mcp_server/util/zip_tool_util.py
new file mode 100644
index 00000000..b00f9c3e
--- /dev/null
+++ b/mcp_center/servers/oe_cli_mcp_server/util/zip_tool_util.py
@@ -0,0 +1,118 @@
+import os
+import zipfile
+import logging
+from typing import Optional
+import shutil
+
+from servers.oe_cli_mcp_server.util.get_project_root import get_project_root
+
# Global extraction target directory (absolute path, derived from the
# project root, to avoid relative-path confusion).
target_dir = os.path.join(get_project_root(),"mcp_tools/personal_tools/")
+
def clean_zip_extract_dir(dir_path: str) -> None:
    """Recursively delete *dir_path* (no-op when it does not exist)."""
    if not os.path.exists(dir_path):
        return
    try:
        shutil.rmtree(dir_path)
    except Exception as e:
        logging.error(f"清理目录 {dir_path} 失败:{e}")
        # Propagate: a failed cleanup must abort the extraction relying on it.
        raise
    logging.info(f"已清理旧目录:{dir_path}")
+
def unzip_tool(zip_path: str, extract_to: Optional[str] = None) -> bool:
    """
    Extract a tool-package zip into the target directory, flattening the
    archive's single top-level folder (final layout: <root>/<zip-name>/...).

    :param zip_path: path of the tool-package zip (relative or absolute)
    :param extract_to: optional extraction root; defaults to target_dir
    :return: True on success, False on any failure

    Fixes over the original:
      * zip member paths always use "/" (per the zip format), so they are
        split on "/" instead of os.sep, which broke on Windows;
      * explicit directory entries inside the archive are skipped instead
        of being opened as files (which raised and aborted the extraction).
    """
    # 1. The archive must exist.
    if not os.path.exists(zip_path):
        logging.error(f"错误:工具包文件 {zip_path} 不存在。")
        return False

    # 2. Resolve the extraction root (default: global target_dir).
    extract_root_dir = os.path.abspath(extract_to) if extract_to else target_dir

    # 3. The zip's base name (without extension) becomes the single folder level.
    zip_filename = os.path.basename(zip_path)
    final_extract_dir = os.path.join(extract_root_dir, os.path.splitext(zip_filename)[0])

    # 4. Remove a pre-existing directory to avoid stale-file conflicts.
    if os.path.exists(final_extract_dir):
        clean_zip_extract_dir(final_extract_dir)

    # 5. Create the destination directory (parents included).
    try:
        os.makedirs(final_extract_dir, exist_ok=True)
        logging.info(f"已创建解压目录:{final_extract_dir}")
    except PermissionError as e:
        logging.error(f"错误:创建目录 {final_extract_dir} 权限不足:{e}")
        return False
    except Exception as e:
        logging.error(f"错误:创建目录 {final_extract_dir} 失败:{e}")
        return False

    # 6. Extract, stripping the archive's top-level folder when it has one.
    try:
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_file_list = zip_ref.namelist()
            if not zip_file_list:
                logging.error(f"错误:ZIP文件 {zip_path} 为空。")
                clean_zip_extract_dir(final_extract_dir)
                return False

            # Member paths inside a zip always use "/" regardless of platform.
            top_level_dirs = {p.split("/")[0] for p in zip_file_list if p.split("/")[0]}

            if len(top_level_dirs) == 1:
                # Single top-level folder: copy its contents up one level.
                top_dir = next(iter(top_level_dirs)) + "/"
                for member in zip_file_list:
                    # Skip the top-level folder itself and any explicit
                    # directory entries; the makedirs below creates the
                    # directories their files need.
                    if member == top_dir or member.endswith("/"):
                        continue
                    target_file_path = os.path.join(final_extract_dir, member[len(top_dir):])
                    os.makedirs(os.path.dirname(target_file_path), exist_ok=True)
                    with zip_ref.open(member) as source, open(target_file_path, 'wb') as target:
                        shutil.copyfileobj(source, target)
                    logging.debug(f"已提取:{member} → {target_file_path}")
            else:
                # No common top-level folder: extract everything as-is.
                zip_ref.extractall(path=final_extract_dir)
                logging.debug(f"ZIP无统一顶层目录,直接提取所有内容到 {final_extract_dir}")

        logging.info(f"成功:文件 {zip_path} 已解压到 {final_extract_dir}(仅一层路径)")
        return True

    except zipfile.BadZipFile:
        logging.error(f"错误:文件 {zip_path} 不是有效的ZIP文件。")
    except PermissionError as e:
        logging.error(f"错误:解压权限不足(目标目录不可写或文件被占用):{e}")
    except Exception as e:
        logging.error(f"错误:解压文件 {zip_path} 时发生异常:{e}")

    # Extraction failed: leave no partial directory behind.
    clean_zip_extract_dir(final_extract_dir)
    return False
+
# Example invocation (manual test only).
if __name__ == "__main__":
    # Configure logging (adjust the level as needed).
    logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

    # Test: extract nvidia_tool.zip; final layout is mcp_tools/personal_tools/<name>/...
    result = unzip_tool(zip_path="/home/tsn/cli_mcp_server/nvidia_tool.zip")
    print(f"解压结果:{'成功' if result else '失败'}")
\ No newline at end of file
diff --git a/mcp_center/servers/rag/README.en.md b/mcp_center/servers/rag/README.en.md
new file mode 100644
index 00000000..487a230c
--- /dev/null
+++ b/mcp_center/servers/rag/README.en.md
@@ -0,0 +1,136 @@
+# RAG Knowledge Base Management MCP (Model Context Protocol) Specification Document
+
+## 1. Service Introduction
+
+This service is a comprehensive RAG (Retrieval-Augmented Generation) knowledge base management MCP (Management Control Program) based on SQLite database, FTS5 full-text search, and sqlite-vec vector search technologies. It provides full lifecycle management functions for knowledge bases including **creation, deletion, selection, document import, hybrid search, document management, and database import/export**. It supports multiple document formats such as TXT, DOCX, DOC, and PDF, uses asynchronous batch vectorization processing, and combines keyword search and vector search hybrid search strategies to provide a complete solution for knowledge base construction and intelligent retrieval. The service is adapted to both Chinese and English configurations to meet the needs of different scenarios.
+
+
+## 2. Core Tool Information
+
+| Tool Name | Tool Function | Core Input Parameters | Key Return Content |
+| ---- | ---- | ---- | ---- |
+| `create_knowledge_base` | Creates a new knowledge base. A knowledge base is a container for documents, and each knowledge base can have its own chunk_size and embedding configuration | - `kb_name`: Knowledge base name (required, must be unique)
- `chunk_size`: Chunk size in tokens (required, e.g., 512, 1024)
- `embedding_model`: Embedding model name (optional)
- `embedding_endpoint`: Embedding service endpoint URL (optional)
- `embedding_api_key`: Embedding service API Key (optional) | Creation result dictionary (including `kb_id` knowledge base ID, `kb_name` knowledge base name, `chunk_size` chunk size) |
+| `delete_knowledge_base` | Deletes the specified knowledge base. Cannot delete the currently active knowledge base. Deleting a knowledge base will cascade delete all documents and chunks under it | - `kb_name`: Knowledge base name (required) | Deletion result dictionary (including `kb_name` deleted knowledge base name) |
+| `list_knowledge_bases` | Lists all available knowledge bases. Returns detailed information about all knowledge bases, including the currently selected one | No parameters | Knowledge base list dictionary (including `knowledge_bases` list, `count` number of knowledge bases, `current_kb_id` currently selected knowledge base ID. Each knowledge base contains id, name, chunk_size, embedding_model, created_at, is_current, etc.) |
+| `select_knowledge_base` | Selects a knowledge base as the currently active one. After selection, subsequent operations such as document import and search will be performed in this knowledge base | - `kb_name`: Knowledge base name (required) | Selection result dictionary (including `kb_id` knowledge base ID, `kb_name` knowledge base name, `document_count` number of documents in this knowledge base) |
+| `import_document` | Imports documents into the currently selected knowledge base. Supports concurrent import of multiple files. Supports TXT, DOCX, DOC, and PDF formats. Documents will be parsed, split into chunks, and vectors will be generated asynchronously in batch and stored in the database | - `file_paths`: List of file paths (absolute paths), supports 1~n files (required)
- `chunk_size`: Chunk size in tokens (optional, defaults to knowledge base's chunk_size) | Import result dictionary (including `total` total number of files, `success_count` number of successfully imported files, `failed_count` number of failed files, `success_files` list of successfully imported files, `failed_files` list of failed files) |
+| `search` | Performs hybrid search in the currently selected knowledge base. Combines keyword search (FTS5) and vector search (sqlite-vec), merges results using weighted approach (keyword weight 0.3, vector weight 0.7), deduplicates, reranks using Jaccard similarity, and returns the top-k most relevant results | - `query`: Query text (required)
- `top_k`: Number of results to return (optional, default from config, usually 5) | Search result dictionary (including `chunks` chunk list, `count` number of results. Each chunk contains id, doc_id, content, tokens, chunk_index, doc_name, score, etc.) |
+| `list_documents` | Lists all documents in the currently selected knowledge base. Returns detailed information about the documents | No parameters | Document list dictionary (including `documents` document list, `count` number of documents. Each document contains id, name, file_path, file_type, chunk_size, created_at, updated_at, etc.) |
+| `delete_document` | Deletes the specified document from the currently selected knowledge base. Deleting a document will cascade delete all chunks of that document | - `doc_name`: Document name (required) | Deletion result dictionary (including `doc_name` deleted document name) |
+| `update_document` | Updates the document's chunk_size and re-parses the document. Will delete existing chunks, re-split the document using the new chunk_size, and asynchronously generate new vectors in batch | - `doc_name`: Document name (required)
- `chunk_size`: New chunk size in tokens (required) | Update result dictionary (including `doc_id` document ID, `doc_name` document name, `chunk_count` new number of chunks, `chunk_size` new chunk size) |
+| `export_database` | Exports the entire kb.db database file to the specified path | - `export_path`: Export path (absolute path, required) | Export result dictionary (including `source_path` source database path, `export_path` export path) |
+| `import_database` | Imports a .db database file and merges its contents into kb.db. Import will automatically handle name conflicts by adding timestamps to knowledge base and document names | - `source_db_path`: Source database file path (absolute path, required) | Import result dictionary (including `source_path` source database path, `imported_kb_count` number of imported knowledge bases, `imported_doc_count` number of imported documents) |
+
+
+## 3. `rag-server` CLI Usage Guide
+
+`rag-server` is a command-line wrapper around the core RAG tools, suitable for managing knowledge bases, importing documents, and performing searches directly from the terminal. After installation, you can run `rag-server [options]` anywhere.
+
+### 3.1 Basic Usage
+
+- Show help:
+
+```bash
+rag-server --help
+```
+
+- General format:
+
+```bash
+rag-server [--option value...]
+```
+
+### 3.2 Knowledge Base Management Commands
+
+- **Create knowledge base**
+
+```bash
+rag-server create_kb --kb_name <name> --chunk_size <size> [--embedding_model model] [--embedding_endpoint URL] [--embedding_api_key KEY]
+```
+
+Description:
+- `--kb_name`: Knowledge base name (required, must be unique)
+- `--chunk_size`: Chunk size in tokens (required, e.g., 512, 1024)
+- `--embedding_model`: Embedding model name (optional)
+- `--embedding_endpoint`: Embedding service endpoint URL (optional)
+- `--embedding_api_key`: Embedding service API Key (optional)
+
+- **Delete knowledge base**
+
+```bash
+rag-server delete_kb --kb_name <name>
+```
+
+- **List knowledge bases**
+
+```bash
+rag-server list_kb
+```
+
+- **Select current knowledge base**
+
+```bash
+rag-server select_kb --kb_name <name>
+```
+
+Note: After selection, the current knowledge base ID is persisted to `database/state.json`, so different `rag-server` invocations will share the same selected KB state.
+
+### 3.3 Document Management Commands
+
+- **Import documents**
+
+```bash
+rag-server import_doc --file_paths /abs/path/doc1.txt [/abs/path/doc2.txt ...] [--chunk_size <size>]
+```
+
+Description:
+- `--file_paths`: One or more **absolute file paths** (required, at least one)
+- `--chunk_size`: Override default chunk size (optional)
+- You must call `select_kb` first to choose an active knowledge base.
+
+- **List documents**
+
+```bash
+rag-server list_doc
+```
+
+- **Delete document**
+
+```bash
+rag-server delete_doc --doc_name <name>
+```
+
+- **Update document chunk size and rebuild vectors**
+
+```bash
+rag-server update_doc --doc_name <name> --chunk_size <new_size>
+```
+
+### 3.4 Search Commands
+
+- **Search**
+
+```bash
+rag-server search --query "<query text>" [--top_k <count>]
+```
+
+Description:
+- `--query`: Query text (required)
+- `--top_k`: Number of results to return (optional, default from config, usually 5)
+
+### 3.5 Database Import/Export Commands
+
+- **Export database**
+
+```bash
+rag-server export_db --export_path /abs/path/kb_export.db
+```
+
+- **Import database**
+
+```bash
+rag-server import_db --source_db_path /abs/path/other_kb.db
+```
+
+Description: During import, name conflicts of knowledge bases and documents will be automatically resolved by appending timestamps.
+
diff --git a/mcp_center/servers/rag/README.md b/mcp_center/servers/rag/README.md
new file mode 100644
index 00000000..ac743822
--- /dev/null
+++ b/mcp_center/servers/rag/README.md
@@ -0,0 +1,136 @@
+# RAG知识库管理MCP(Model Context Protocol)规范文档
+
+## 一、服务介绍
+
+本服务是一款功能完整的RAG(检索增强生成)知识库管理MCP(管理控制程序),基于SQLite数据库、FTS5全文检索、sqlite-vec向量检索等技术,实现对知识库的**创建、删除、选择、文档导入、混合检索、文档管理、数据库导入导出**等全生命周期管理功能。支持TXT、DOCX、DOC、PDF等多种文档格式,采用异步批量向量化处理,结合关键词检索和向量检索的混合搜索策略,为知识库构建和智能检索提供完整的解决方案,适配中文与英文双语言配置,满足不同场景下的使用需求。
+
+
+## 二、核心工具信息
+
+| 工具名称 | 工具功能 | 核心输入参数 | 关键返回内容 |
+| ---- | ---- | ---- | ---- |
+| `create_knowledge_base` | 创建一个新的知识库,知识库是文档的容器,每个知识库可以有自己的chunk_size和embedding配置 | - `kb_name`:知识库名称(必填,必须唯一)
- `chunk_size`:chunk大小,单位token(必填,例如512、1024)
- `embedding_model`:向量化模型名称(可选)
- `embedding_endpoint`:向量化服务端点URL(可选)
- `embedding_api_key`:向量化服务API Key(可选) | 创建结果字典(含`kb_id`知识库ID、`kb_name`知识库名称、`chunk_size`chunk大小) |
+| `delete_knowledge_base` | 删除指定的知识库,不能删除当前正在使用的知识库,删除知识库会级联删除该知识库下的所有文档和chunks | - `kb_name`:知识库名称(必填) | 删除结果字典(含`kb_name`已删除的知识库名称) |
+| `list_knowledge_bases` | 列出所有可用的知识库,返回所有知识库的详细信息,包括当前选中的知识库 | 无参数 | 知识库列表字典(含`knowledge_bases`知识库列表、`count`知识库数量、`current_kb_id`当前选中的知识库ID,每个知识库包含id、name、chunk_size、embedding_model、created_at、is_current等字段) |
+| `select_knowledge_base` | 选择一个知识库作为当前使用的知识库,选择后,后续的文档导入、查询等操作都会在该知识库中进行 | - `kb_name`:知识库名称(必填) | 选择结果字典(含`kb_id`知识库ID、`kb_name`知识库名称、`document_count`该知识库下的文档数量) |
+| `import_document` | 导入文档到当前选中的知识库,支持多文件并发导入,支持TXT、DOCX、DOC、PDF格式,文档会被解析、切分为chunks,并异步批量生成向量存储到数据库中 | - `file_paths`:文件路径列表(绝对路径),支持1~n个文件,为list形式(必填)
- `chunk_size`:chunk大小,单位token(可选,默认使用知识库的chunk_size) | 导入结果字典(含`total`总文件数、`success_count`成功导入的文件数、`failed_count`失败的文件数、`success_files`成功导入的文件列表、`failed_files`失败的文件列表) |
+| `search` | 在当前选中的知识库中进行混合检索,结合关键词检索(FTS5)和向量检索(sqlite-vec),使用加权方式合并结果(关键词权重0.3,向量权重0.7),去重后使用Jaccard相似度重排序,返回最相关的top-k个结果 | - `query`:查询文本(必填)
- `top_k`:返回数量(可选,默认从配置读取,通常为5) | 检索结果字典(含`chunks`chunk列表、`count`结果数量,每个chunk包含id、doc_id、content、tokens、chunk_index、doc_name、score等字段) |
+| `list_documents` | 查看当前选中的知识库下的所有文档列表,返回文档的详细信息 | 无参数 | 文档列表字典(含`documents`文档列表、`count`文档数量,每个文档包含id、name、file_path、file_type、chunk_size、created_at、updated_at等字段) |
+| `delete_document` | 删除当前选中的知识库下的指定文档,删除文档会级联删除该文档的所有chunks | - `doc_name`:文档名称(必填) | 删除结果字典(含`doc_name`已删除的文档名称) |
+| `update_document` | 修改文档的chunk_size并重新解析文档,会删除原有的chunks,使用新的chunk_size重新切分文档,并异步批量生成新的向量 | - `doc_name`:文档名称(必填)
- `chunk_size`:新的chunk大小,单位token(必填) | 修改结果字典(含`doc_id`文档ID、`doc_name`文档名称、`chunk_count`新的chunk数量、`chunk_size`新的chunk大小) |
+| `export_database` | 导出整个kb.db数据库文件到指定路径 | - `export_path`:导出路径(绝对路径,必填) | 导出结果字典(含`source_path`源数据库路径、`export_path`导出路径) |
+| `import_database` | 导入一个.db数据库文件,将其中的内容合并到kb.db中,导入时会自动处理重名冲突,为知识库和文档名称添加时间戳 | - `source_db_path`:源数据库文件路径(绝对路径,必填) | 导入结果字典(含`source_path`源数据库路径、`imported_kb_count`导入的知识库数量、`imported_doc_count`导入的文档数量) |
+
+
+## 三、`rag-server` 命令行使用指南
+
+`rag-server` 是对 RAG 核心工具的命令行封装,适合在服务器上以 CLI 方式直接管理知识库、导入文档和执行检索。安装完成后,可直接在终端中使用 `rag-server <子命令> [参数]`。
+
+### 1. 基本用法
+
+- 查看帮助:
+
+```bash
+rag-server --help
+```
+
+- 命令通用格式:
+
+```bash
+rag-server [--参数名 参数值...]
+```
+
+### 2. 知识库管理相关命令
+
+- **创建知识库**
+
+```bash
+rag-server create_kb --kb_name <名称> --chunk_size <大小> [--embedding_model 模型名] [--embedding_endpoint URL] [--embedding_api_key KEY]
+```
+
+说明:
+- `--kb_name`:知识库名称(必填,必须唯一)
+- `--chunk_size`:chunk 大小(必填,单位 token,例如 512、1024)
+- `--embedding_model`:向量化模型名称(可选)
+- `--embedding_endpoint`:向量化服务端点 URL(可选)
+- `--embedding_api_key`:向量化服务 API Key(可选)
+
+- **删除知识库**
+
+```bash
+rag-server delete_kb --kb_name <名称>
+```
+
+- **列出知识库**
+
+```bash
+rag-server list_kb
+```
+
+- **选择当前知识库**
+
+```bash
+rag-server select_kb --kb_name <名称>
+```
+
+说明:选择成功后,当前知识库 ID 会持久化到 `database/state.json` 中,不同次 `rag-server` 调用会自动复用该状态。
+
+### 3. 文档管理相关命令
+
+- **导入文档**
+
+```bash
+rag-server import_doc --file_paths /abs/path/doc1.txt [/abs/path/doc2.txt ...] [--chunk_size <大小>]
+```
+
+说明:
+- `--file_paths`:一个或多个**绝对路径**的文件列表(必填,至少 1 个)
+- `--chunk_size`:覆盖默认的 chunk 大小(可选)
+- 在调用前需要先通过 `select_kb` 选择当前知识库
+
+- **列出文档**
+
+```bash
+rag-server list_doc
+```
+
+- **删除文档**
+
+```bash
+rag-server delete_doc --doc_name <文档名称>
+```
+
+- **更新文档 chunk 大小并重建向量**
+
+```bash
+rag-server update_doc --doc_name <文档名称> --chunk_size <新的chunk大小>
+```
+
+### 4. 检索相关命令
+
+- **搜索**
+
+```bash
+rag-server search --query "<查询文本>" [--top_k <返回数量>]
+```
+
+说明:
+- `--query`:查询文本(必填)
+- `--top_k`:返回结果数量(可选,默认从配置中读取,通常为 5)
+
+### 5. 数据库导入导出相关命令
+
+- **导出数据库**
+
+```bash
+rag-server export_db --export_path /abs/path/kb_export.db
+```
+
+- **导入数据库**
+
+```bash
+rag-server import_db --source_db_path /abs/path/other_kb.db
+```
+
+说明:导入时会自动处理知识库和文档的重名冲突,为名称添加时间戳。
+
diff --git a/mcp_center/servers/rag/run.sh b/mcp_center/servers/rag/run.sh
new file mode 100644
index 00000000..37122dac
--- /dev/null
+++ b/mcp_center/servers/rag/run.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# RAG service deployment script:
+# installs the systemd unit, installs Python dependencies, enables and
+# starts the service, and links the CLI entry point as /usr/local/bin/rag-server.
+# NOTE(review): requires root (systemctl, /etc, /usr/local/bin) — confirm callers run it with sudo.
+
+# Installation paths
+RAG_DIR="/usr/lib/euler-copilot-framework/mcp_center/servers/rag"
+SERVICE_FILE="/usr/lib/euler-copilot-framework/mcp_center/service/rag.service"
+
+# Copy the systemd service file into place
+if [ -f "$SERVICE_FILE" ]; then
+    cp "$SERVICE_FILE" /etc/systemd/system/
+    echo "✅ Service 文件已复制"
+else
+    echo "⚠️ 警告:未找到 service 文件:$SERVICE_FILE"
+fi
+
+
+# Install Python dependencies (uses the Tsinghua PyPI mirror)
+if [ -f "$RAG_DIR/src/requirements.txt" ]; then
+    python3 -m pip install -r "$RAG_DIR/src/requirements.txt" -i https://pypi.tuna.tsinghua.edu.cn/simple
+    echo "✅ 依赖安装完成"
+fi
+
+# Reload systemd so the new unit file is picked up
+systemctl daemon-reload
+
+# Enable the service at boot
+systemctl enable rag.service
+echo "✅ 服务已启用"
+
+# Start the service now
+systemctl start rag.service
+echo "✅ 服务已启动"
+
+# Show the service status for the operator
+systemctl status rag.service
+
+# Make the CLI executable and (re)create its symlink
+chmod +x "$RAG_DIR/src/cli.py"
+rm -f /usr/local/bin/rag-server
+ln -s "$RAG_DIR/src/cli.py" /usr/local/bin/rag-server
+echo "✅ CLI 工具已安装:rag-server"
+
+echo ""
+echo "安装完成!可以使用以下命令:"
+echo " rag-server --help # 查看帮助"
+echo " rag-server list_kb # 列出知识库"
+echo " rag-server import_doc --file_paths /path/to/file.txt # 导入文档"
+
diff --git a/mcp_center/servers/rag/src/base/config.py b/mcp_center/servers/rag/src/base/config.py
new file mode 100644
index 00000000..7c889abb
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/config.py
@@ -0,0 +1,174 @@
+import os
+import json
+import logging
+from typing import Optional, Dict, Any
+
+logger = logging.getLogger(__name__)
+
+# 配置缓存
+_config: Optional[Dict[str, Any]] = None
+_config_file_path = "rag_config.json"
+
+
+def _load_config() -> Dict[str, Any]:
+    """
+    Load the configuration, caching it in the module-level ``_config``.
+
+    Resolution order: built-in defaults, overridden by ``rag_config.json``
+    (when present and parseable), overridden by environment variables
+    (highest priority, applied by ``_apply_env_overrides``).
+    :return: the effective configuration dictionary
+    """
+    global _config
+
+    # Serve the cached configuration on repeat calls.
+    if _config is not None:
+        return _config
+
+    default_config = {
+        "embedding": {
+            "type": "openai",
+            "api_key": "",
+            "endpoint": "",
+            "model_name": "text-embedding-ada-002",
+            "timeout": 30,
+            "vector_dimension": 1024
+        },
+        "token": {
+            "model": "gpt-4",
+            "max_tokens": 8192,
+            "default_chunk_size": 1024
+        },
+        "search": {
+            "default_top_k": 5,
+            "max_top_k": 100
+        }
+    }
+
+    config_file = _get_config_file_path()
+    if os.path.exists(config_file):
+        try:
+            with open(config_file, 'r', encoding='utf-8') as f:
+                file_config = json.load(f)
+            # Merge configurations (file values take precedence over defaults).
+            _config = _merge_config(default_config, file_config)
+        except Exception as e:
+            # Fall back to defaults when the file is unreadable or invalid JSON.
+            logger.warning(f"[Config] 加载配置文件失败: {e}")
+            _config = default_config
+    else:
+        _config = default_config
+
+    # Environment variables override both file values and defaults.
+    _apply_env_overrides(_config)
+
+    return _config
+
+
+def _cfg() -> Dict[str, Any]:
+    """Shorthand accessor for the lazily loaded configuration dict."""
+    return _load_config()
+
+
+def _get_config_file_path() -> str:
+ """
+ 获取配置文件路径
+ 优先使用项目根目录下的 rag_config.json
+ :return: 配置文件路径
+ """
+ # 尝试从项目根目录查找
+ current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ config_file = os.path.join(current_dir, _config_file_path)
+ if os.path.exists(config_file):
+ return config_file
+
+ # 尝试从当前工作目录查找
+ cwd_config = os.path.join(os.getcwd(), _config_file_path)
+ if os.path.exists(cwd_config):
+ return cwd_config
+
+ # 返回项目根目录路径(即使文件不存在)
+ return config_file
+
+
+def _merge_config(default: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ 合并配置字典(递归合并)
+ :param default: 默认配置
+ :param override: 覆盖配置
+ :return: 合并后的配置
+ """
+ result = default.copy()
+ for key, value in override.items():
+ if key in result and isinstance(result[key], dict) and isinstance(value, dict):
+ result[key] = _merge_config(result[key], value)
+ else:
+ result[key] = value
+ return result
+
+
+def _apply_env_overrides(config: Dict[str, Any]):
+ """
+ 应用环境变量覆盖(优先级最高)
+ :param config: 配置字典
+ """
+ if os.getenv("EMBEDDING_TYPE"):
+ config["embedding"]["type"] = os.getenv("EMBEDDING_TYPE")
+ if os.getenv("EMBEDDING_API_KEY"):
+ config["embedding"]["api_key"] = os.getenv("EMBEDDING_API_KEY")
+ if os.getenv("EMBEDDING_ENDPOINT"):
+ config["embedding"]["endpoint"] = os.getenv("EMBEDDING_ENDPOINT")
+ if os.getenv("EMBEDDING_MODEL_NAME"):
+ config["embedding"]["model_name"] = os.getenv("EMBEDDING_MODEL_NAME")
+
+ if os.getenv("TOKEN_MODEL"):
+ config["token"]["model"] = os.getenv("TOKEN_MODEL")
+ if os.getenv("MAX_TOKENS"):
+ try:
+ config["token"]["max_tokens"] = int(os.getenv("MAX_TOKENS"))
+ except ValueError:
+ pass
+ if os.getenv("DEFAULT_CHUNK_SIZE"):
+ try:
+ config["token"]["default_chunk_size"] = int(os.getenv("DEFAULT_CHUNK_SIZE"))
+ except ValueError:
+ pass
+
+
+def get_embedding_type() -> str:
+    """Return the embedding backend type (e.g. ``openai`` or ``mindie``)."""
+    return _cfg()["embedding"]["type"]
+
+
+def get_embedding_api_key() -> str:
+    """Return the API key for the embedding service (may be empty)."""
+    return _cfg()["embedding"]["api_key"]
+
+
+def get_embedding_endpoint() -> str:
+    """Return the embedding service endpoint URL (may be empty)."""
+    return _cfg()["embedding"]["endpoint"]
+
+
+def get_embedding_model_name() -> str:
+    """Return the configured embedding model name."""
+    return _cfg()["embedding"]["model_name"]
+
+
+def get_embedding_timeout() -> int:
+    """Return the embedding request timeout in seconds."""
+    return _cfg()["embedding"]["timeout"]
+
+
+def get_embedding_vector_dimension() -> int:
+    """Return the configured embedding vector dimension."""
+    return _cfg()["embedding"]["vector_dimension"]
+
+
+def get_token_model() -> str:
+    """Return the model name from the ``token`` section."""
+    return _cfg()["token"]["model"]
+
+
+def get_max_tokens() -> int:
+    """Return the configured maximum token count."""
+    return _cfg()["token"]["max_tokens"]
+
+
+def get_default_chunk_size() -> int:
+    """Return the default chunk size in tokens."""
+    return _cfg()["token"]["default_chunk_size"]
+
+
+def get_default_top_k() -> int:
+    """Return the default number of search results."""
+    return _cfg()["search"]["default_top_k"]
+
+
+def reload_config():
+    """Drop the cached config so the next access re-reads rag_config.json."""
+    global _config
+    _config = None
+
diff --git a/mcp_center/servers/rag/src/base/embedding.py b/mcp_center/servers/rag/src/base/embedding.py
new file mode 100644
index 00000000..0a23bd19
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/embedding.py
@@ -0,0 +1,145 @@
+import json
+import logging
+import asyncio
+import aiohttp
+from typing import Optional, List
+from base.config import (
+ get_embedding_type,
+ get_embedding_api_key,
+ get_embedding_endpoint,
+ get_embedding_model_name,
+ get_embedding_timeout,
+ get_embedding_vector_dimension
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Embedding:
+    """Async client for the configured embedding service (OpenAI- or MindIE-style API)."""
+
+    @staticmethod
+    def _get_config():
+        """Snapshot the embedding-related settings (lazily loaded from config)."""
+        return {
+            "type": get_embedding_type(),
+            "api_key": get_embedding_api_key(),
+            "endpoint": get_embedding_endpoint(),
+            "model_name": get_embedding_model_name(),
+            "timeout": get_embedding_timeout(),
+            "vector_dimension": get_embedding_vector_dimension()
+        }
+
+    @staticmethod
+    def is_configured() -> bool:
+        # Usable only when both an API key and an endpoint are configured.
+        config = Embedding._get_config()
+        return bool(config["api_key"] and config["endpoint"])
+
+    @staticmethod
+    async def vectorize_embedding(text: str, session: Optional[aiohttp.ClientSession] = None) -> Optional[List[float]]:
+        """
+        Vectorize a single text (async).
+        :param text: text content
+        :param session: optional shared aiohttp session; when omitted, a
+            temporary session is created here and closed in the finally block
+        :return: embedding vector, or None on any failure
+        """
+        config = Embedding._get_config()
+        vector = None
+        should_close_session = False
+
+        # Create a session if the caller did not provide one.
+        if session is None:
+            timeout = aiohttp.ClientTimeout(total=config["timeout"])
+            connector = aiohttp.TCPConnector(ssl=False)
+            session = aiohttp.ClientSession(timeout=timeout, connector=connector)
+            should_close_session = True
+
+        try:
+            if config["type"] == "openai":
+                headers = {
+                    "Authorization": f"Bearer {config['api_key']}"
+                }
+                data = {
+                    "input": text,
+                    "model": config["model_name"],
+                    "encoding_format": "float"
+                }
+                try:
+                    async with session.post(
+                        url=config["endpoint"],
+                        headers=headers,
+                        json=data
+                    ) as res:
+                        if res.status != 200:
+                            return None
+                        result = await res.json()
+                        vector = result['data'][0]['embedding']
+                except Exception:
+                    # Best-effort: network/parse errors yield None.
+                    return None
+            elif config["type"] == "mindie":
+                try:
+                    data = {
+                        "inputs": text,
+                    }
+                    async with session.post(
+                        url=config["endpoint"],
+                        json=data
+                    ) as res:
+                        if res.status != 200:
+                            return None
+                        text_result = await res.text()
+                        vector = json.loads(text_result)[0]
+                except Exception:
+                    return None
+            else:
+                # Unknown backend type: nothing to call.
+                return None
+
+            # Normalize the vector to the configured dimension:
+            # pad with zeros when short, truncate when long.
+            if vector:
+                vector_dim = config["vector_dimension"]
+                while len(vector) < vector_dim:
+                    vector.append(0.0)
+                return vector[:vector_dim]
+            return None
+        finally:
+            # Only close sessions this call created.
+            if should_close_session:
+                await session.close()
+
+    @staticmethod
+    async def vectorize_embeddings_batch(texts: List[str], max_concurrent: int = 5) -> List[Optional[List[float]]]:
+        """
+        Vectorize texts in batch with bounded concurrency.
+        :param texts: list of texts
+        :param max_concurrent: maximum number of in-flight requests
+        :return: vectors in the same order as the input texts (None on failure)
+        """
+        config = Embedding._get_config()
+        if not config["api_key"] or not config["endpoint"]:
+            return [None] * len(texts)
+
+        # Share one aiohttp session so connections are reused across requests.
+        timeout = aiohttp.ClientTimeout(total=config["timeout"])
+        connector = aiohttp.TCPConnector(ssl=False)
+        async with aiohttp.ClientSession(timeout=timeout, connector=connector) as session:
+            # A semaphore caps concurrency.
+            semaphore = asyncio.Semaphore(max_concurrent)
+
+            async def vectorize_with_semaphore(text: str, index: int) -> tuple:
+                async with semaphore:
+                    vector = await Embedding.vectorize_embedding(text, session=session)
+                    return index, vector
+
+            tasks = [vectorize_with_semaphore(text, i) for i, text in enumerate(texts)]
+
+            results = await asyncio.gather(*tasks, return_exceptions=True)
+
+            # Reassemble results by original index; failed tasks stay None.
+            vectors = [None] * len(texts)
+            for result in results:
+                if isinstance(result, Exception):
+                    continue
+                index, vector = result
+                vectors[index] = vector
+
+            return vectors
+
+
diff --git a/mcp_center/servers/rag/src/base/manager/database_manager.py b/mcp_center/servers/rag/src/base/manager/database_manager.py
new file mode 100644
index 00000000..e77808c4
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/manager/database_manager.py
@@ -0,0 +1,257 @@
+"""
+数据库操作类 - 使用 SQLAlchemy ORM
+"""
+import os
+import struct
+import uuid
+from typing import List, Optional, Dict, Any
+from datetime import datetime
+import logging
+from sqlalchemy import create_engine, text, inspect
+from sqlalchemy.orm import sessionmaker, Session
+from sqlalchemy.exc import SQLAlchemyError
+
+from base.models import Base, KnowledgeBase, Document, Chunk
+from base.config import get_embedding_vector_dimension
+from base.manager.document_manager import DocumentManager
+import sqlite_vec
+
+logger = logging.getLogger(__name__)
+
+
+class Database:
+    """SQLite access layer built on SQLAlchemy ORM.
+
+    Besides the ORM-managed tables, it provisions the FTS5 full-text table
+    (``chunks_fts``) and the sqlite-vec vector table (``vec_index``).
+    """
+
+    def __init__(self, db_path: str = "knowledge_base.db"):
+        """
+        Initialize the database connection.
+        :param db_path: database file path (parent directories are created)
+        """
+        db_dir = os.path.dirname(os.path.abspath(db_path))
+        if db_dir and not os.path.exists(db_dir):
+            os.makedirs(db_dir, exist_ok=True)
+
+        self.db_path = os.path.abspath(db_path)
+        self.engine = create_engine(
+            f'sqlite:///{self.db_path}',
+            echo=False,
+            # Allow the SQLite connection to be used from multiple threads.
+            connect_args={'check_same_thread': False}
+        )
+        self.SessionLocal = sessionmaker(bind=self.engine, autocommit=False, autoflush=False)
+        self._init_database()
+
+    def _init_database(self):
+        """Create the ORM tables plus the FTS5 and vec_index virtual tables."""
+        try:
+            # Create all ORM-declared tables.
+            Base.metadata.create_all(self.engine)
+
+            # Load the sqlite-vec extension and create FTS5 / vec_index tables.
+            with self.engine.begin() as conn:
+                # FTS5 virtual tables must be created with raw SQL.
+                conn.execute(text("""
+                    CREATE VIRTUAL TABLE IF NOT EXISTS chunks_fts USING fts5(
+                        id UNINDEXED,
+                        content,
+                        content_rowid=id
+                    )
+                """))
+
+                # Load the sqlite-vec extension on the raw DBAPI connection.
+                try:
+                    raw_conn = conn.connection.dbapi_connection
+                    raw_conn.enable_load_extension(True)
+                    sqlite_vec.load(raw_conn)
+                    raw_conn.enable_load_extension(False)
+                except Exception as e:
+                    logger.warning(f"加载 sqlite-vec 扩展失败: {e}")
+
+                # Create the vec_index virtual table (requires sqlite-vec above).
+                try:
+                    vector_dim = get_embedding_vector_dimension()
+                    conn.execute(text(f"""
+                        CREATE VIRTUAL TABLE IF NOT EXISTS vec_index USING vec0(
+                            embedding float[{vector_dim}]
+                        )
+                    """))
+                except Exception as e:
+                    logger.warning(f"创建 vec_index 表失败: {e}")
+        except Exception as e:
+            logger.exception(f"[Database] 初始化数据库失败: {e}")
+            raise e
+
+    def get_session(self) -> Session:
+        """Return a new ORM session; the caller is responsible for closing it."""
+        return self.SessionLocal()
+
+    def get_connection(self):
+        """
+        Return a raw engine connection (for FTS5 / vec_index operations).
+        Kept for compatibility with existing code; prefer get_session().
+        The returned object is a context manager and closes on exit.
+        """
+        return self.engine.connect()
+
+    def add_knowledge_base(self, kb_id: str, name: str, chunk_size: int,
+                           embedding_model: Optional[str] = None,
+                           embedding_endpoint: Optional[str] = None,
+                           embedding_api_key: Optional[str] = None) -> bool:
+        """Insert a knowledge-base row; returns True on success, False on DB error."""
+        session = self.get_session()
+        try:
+            kb = KnowledgeBase(
+                id=kb_id,
+                name=name,
+                chunk_size=chunk_size,
+                embedding_model=embedding_model,
+                embedding_endpoint=embedding_endpoint,
+                embedding_api_key=embedding_api_key
+            )
+            session.add(kb)
+            session.commit()
+            return True
+        except SQLAlchemyError as e:
+            logger.exception(f"[Database] 添加知识库失败: {e}")
+            session.rollback()
+            return False
+        finally:
+            session.close()
+
+    def get_knowledge_base(self, kb_name: str) -> Optional[KnowledgeBase]:
+        """Fetch a knowledge base by name, or None.
+
+        NOTE(review): the session is closed before returning, so the result is a
+        detached instance — lazy-loading relationships on it would fail; confirm
+        callers only read plain column attributes.
+        """
+        session = self.get_session()
+        try:
+            return session.query(KnowledgeBase).filter_by(name=kb_name).first()
+        finally:
+            session.close()
+
+    def delete_knowledge_base(self, kb_id: str) -> bool:
+        """Delete a knowledge base by id; returns True if a row was deleted."""
+        session = self.get_session()
+        try:
+            kb = session.query(KnowledgeBase).filter_by(id=kb_id).first()
+            if kb:
+                session.delete(kb)
+                session.commit()
+                return True
+            return False
+        except SQLAlchemyError as e:
+            logger.exception(f"[Database] 删除知识库失败: {e}")
+            session.rollback()
+            return False
+        finally:
+            session.close()
+
+    def list_knowledge_bases(self) -> List[KnowledgeBase]:
+        """List all knowledge bases, newest first (detached instances)."""
+        session = self.get_session()
+        try:
+            return session.query(KnowledgeBase).order_by(KnowledgeBase.created_at.desc()).all()
+        finally:
+            session.close()
+
+    def import_database(self, source_db_path: str) -> tuple[int, int]:
+        """
+        Merge the contents of another database file into this one.
+
+        Name clashes for knowledge bases and documents are resolved by
+        appending a timestamp (plus a counter while still not unique).
+        :param source_db_path: source database file path
+        :return: (imported_kb_count, imported_doc_count)
+        """
+        source_db = Database(source_db_path)
+        source_session = source_db.get_session()
+
+        try:
+            # Read the knowledge bases from the source database.
+            source_kbs = source_session.query(KnowledgeBase).all()
+            if not source_kbs:
+                return 0, 0
+
+            # Read the documents from the source database.
+            source_docs = source_session.query(Document).all()
+
+            # Merge into the current database.
+            target_session = self.get_session()
+
+            try:
+                imported_kb_count = 0
+                imported_doc_count = 0
+
+                for source_kb in source_kbs:
+                    # If a KB with this name already exists, generate a unique name.
+                    kb_name = source_kb.name
+                    existing_kb = self.get_knowledge_base(kb_name)
+                    if existing_kb:
+                        # Append a timestamp, then a counter until unique.
+                        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+                        counter = 1
+                        unique_kb_name = f"{kb_name}_{timestamp}"
+                        while self.get_knowledge_base(unique_kb_name):
+                            unique_kb_name = f"{kb_name}_{timestamp}_{counter}"
+                            counter += 1
+                        kb_name = unique_kb_name
+
+                    # Import the knowledge base under a fresh id.
+                    new_kb_id = str(uuid.uuid4())
+                    if self.add_knowledge_base(new_kb_id, kb_name, source_kb.chunk_size,
+                                               source_kb.embedding_model, source_kb.embedding_endpoint,
+                                               source_kb.embedding_api_key):
+                        imported_kb_count += 1
+
+                    # Import the documents belonging to this knowledge base.
+                    kb_docs = [doc for doc in source_docs if doc.kb_id == source_kb.id]
+                    manager = DocumentManager(target_session)
+
+                    for source_doc in kb_docs:
+                        # If a document with this name exists, generate a unique name.
+                        doc_name = source_doc.name
+                        existing_doc = manager.get_document(new_kb_id, doc_name)
+                        if existing_doc:
+                            # Append a timestamp before the extension.
+                            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+                            # Split the file name and its extension.
+                            if '.' in doc_name:
+                                name_part, ext_part = doc_name.rsplit('.', 1)
+                                unique_doc_name = f"{name_part}_{timestamp}.{ext_part}"
+                            else:
+                                unique_doc_name = f"{doc_name}_{timestamp}"
+
+                            # Keep appending a counter while the name still exists.
+                            counter = 1
+                            final_doc_name = unique_doc_name
+                            while manager.get_document(new_kb_id, final_doc_name):
+                                if '.' in doc_name:
+                                    name_part, ext_part = doc_name.rsplit('.', 1)
+                                    final_doc_name = f"{name_part}_{timestamp}_{counter}.{ext_part}"
+                                else:
+                                    final_doc_name = f"{doc_name}_{timestamp}_{counter}"
+                                counter += 1
+                            doc_name = final_doc_name
+
+                        # Import the document under a fresh id.
+                        new_doc_id = str(uuid.uuid4())
+                        if manager.add_document(new_doc_id, new_kb_id, doc_name,
+                                                source_doc.file_path, source_doc.file_type,
+                                                source_doc.content, source_doc.chunk_size):
+                            imported_doc_count += 1
+
+                        # Import the chunks (including stored embedding blobs).
+                        source_chunks = source_session.query(Chunk).filter_by(doc_id=source_doc.id).all()
+                        for source_chunk in source_chunks:
+                            new_chunk_id = str(uuid.uuid4())
+                            # Decode the embedding blob back into a list of floats, if present.
+                            embedding = None
+                            if source_chunk.embedding:
+                                embedding_bytes = source_chunk.embedding
+                                if len(embedding_bytes) > 0 and len(embedding_bytes) % 4 == 0:
+                                    embedding = list(struct.unpack(f'{len(embedding_bytes)//4}f', embedding_bytes))
+
+                            manager.add_chunk(new_chunk_id, new_doc_id, source_chunk.content,
+                                              source_chunk.tokens, source_chunk.chunk_index, embedding)
+                return imported_kb_count, imported_doc_count
+            finally:
+                target_session.close()
+        finally:
+            source_session.close()
+            source_db = None
+
diff --git a/mcp_center/servers/rag/src/base/manager/document_manager.py b/mcp_center/servers/rag/src/base/manager/document_manager.py
new file mode 100644
index 00000000..5186ee24
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/manager/document_manager.py
@@ -0,0 +1,394 @@
+"""
+文档操作模块 - 使用 SQLAlchemy ORM
+"""
+import os
+import struct
+import uuid
+import asyncio
+from typing import List, Optional, Tuple
+from datetime import datetime
+import logging
+from sqlalchemy import text
+from sqlalchemy.orm import Session
+from sqlalchemy.exc import SQLAlchemyError
+
+from base.models import Document, Chunk
+from base.embedding import Embedding
+from base.parser.parser import Parser
+from base.token_tool import TokenTool
+import jieba
+
+logger = logging.getLogger(__name__)
+
+
class DocumentManager:
    """
    Document-level operations on top of a SQLAlchemy session.

    Besides the ORM tables (Document, Chunk) this class maintains two sidecar
    indexes that live outside the ORM and therefore need raw SQL: the FTS5
    table ``chunks_fts`` (keyword search) and the optional ``vec_index``
    virtual table (vector search).
    """

    def __init__(self, session: Session):
        """
        Initialize the document manager.
        :param session: database session (lifecycle owned by the caller)
        """
        self.session = session

    def add_document(self, doc_id: str, kb_id: str, name: str, file_path: str,
                     file_type: str, content: Optional[str] = None, chunk_size: Optional[int] = None) -> bool:
        """
        Insert a Document row.
        :return: True on success, False after logging and rolling back
        """
        try:
            document = Document(
                id=doc_id,
                kb_id=kb_id,
                name=name,
                file_path=file_path,
                file_type=file_type,
                content=content,
                chunk_size=chunk_size,
                updated_at=datetime.now()
            )
            self.session.add(document)
            self.session.commit()
            return True
        except SQLAlchemyError as e:
            logger.exception(f"[DocumentManager] 添加文档失败: {e}")
            self.session.rollback()
            return False

    def delete_document(self, kb_id: str, doc_name: str) -> bool:
        """
        Delete a document together with all of its chunks.
        :return: True when the document existed and was deleted, else False
        """
        try:
            doc = self.session.query(Document).filter_by(kb_id=kb_id, name=doc_name).first()
            if doc:
                # Bug fix: remove chunks_fts / vec_index entries explicitly.
                # The ORM cascade only deletes chunk rows and would leave
                # orphaned entries in both sidecar indexes.
                self.delete_document_chunks(doc.id)
                self.session.delete(doc)
                self.session.commit()
                return True
            return False
        except SQLAlchemyError as e:
            logger.exception(f"[DocumentManager] 删除文档失败: {e}")
            self.session.rollback()
            return False

    def get_document(self, kb_id: str, doc_name: str) -> Optional[Document]:
        """Return the document named *doc_name* in knowledge base *kb_id*, or None."""
        return self.session.query(Document).filter_by(kb_id=kb_id, name=doc_name).first()

    def list_documents_by_kb(self, kb_id: str) -> List[Document]:
        """List all documents of a knowledge base, newest first."""
        return self.session.query(Document).filter_by(kb_id=kb_id).order_by(Document.created_at.desc()).all()

    def _vec_index_exists(self, conn) -> bool:
        """Return True when the optional ``vec_index`` virtual table exists."""
        result = conn.execute(text("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name='vec_index'
        """))
        return result.fetchone() is not None

    def _upsert_vec_index(self, conn, chunk_id: str, embedding_bytes: bytes) -> None:
        """
        Write *embedding_bytes* for *chunk_id* into vec_index (delete + insert).

        No-op when vec_index is absent or the chunk row cannot be resolved
        to a rowid.
        """
        if not self._vec_index_exists(conn):
            return
        result = conn.execute(text("""
            SELECT rowid FROM chunks WHERE id = :chunk_id
        """), {"chunk_id": chunk_id})
        row = result.fetchone()
        if not row:
            return
        vec_rowid = row[0]
        # Delete any stale entry first to avoid a UNIQUE constraint violation.
        conn.execute(text("""
            DELETE FROM vec_index WHERE rowid = :rowid
        """), {"rowid": vec_rowid})
        conn.execute(text("""
            INSERT INTO vec_index(rowid, embedding)
            VALUES (:rowid, :embedding)
        """), {"rowid": vec_rowid, "embedding": embedding_bytes})

    def add_chunk(self, chunk_id: str, doc_id: str, content: str, tokens: int, chunk_index: int,
                  embedding: Optional[List[float]] = None) -> bool:
        """
        Insert a chunk row plus its FTS5 entry and (optionally) its vector.

        :param embedding: optional embedding; packed as float32 bytes via struct
        :return: True on success, False after logging and rolling back
        """
        try:
            embedding_bytes = None
            if embedding:
                embedding_bytes = struct.pack(f'{len(embedding)}f', *embedding)

            chunk = Chunk(
                id=chunk_id,
                doc_id=doc_id,
                content=content,
                tokens=tokens,
                chunk_index=chunk_index,
                embedding=embedding_bytes
            )
            self.session.add(chunk)
            # Flush so the chunk's rowid exists for the index writes below.
            self.session.flush()

            # FTS5 entry (jieba-tokenized content; virtual table needs raw SQL).
            fts_content = self._prepare_fts_content(content)
            self.session.execute(text("""
                INSERT INTO chunks_fts (id, content)
                VALUES (:chunk_id, :content)
            """), {"chunk_id": chunk_id, "content": fts_content})

            if embedding_bytes:
                self._upsert_vec_index(self.session.connection(), chunk_id, embedding_bytes)

            self.session.commit()
            return True
        except SQLAlchemyError as e:
            logger.exception(f"[DocumentManager] 添加chunk失败: {e}")
            self.session.rollback()
            return False

    def _prepare_fts_content(self, content: str) -> str:
        """
        Prepare content for FTS5 indexing: jieba-tokenize and join with
        spaces so Chinese text becomes word-searchable.

        :param content: raw content
        :return: space-joined tokens; falls back to the raw content on failure
        """
        try:
            words = jieba.cut(content)
            words = [word.strip() for word in words if word.strip()]
            return ' '.join(words)
        except Exception:
            return content

    def update_chunk_embedding(self, chunk_id: str, embedding: List[float]) -> bool:
        """
        Replace a chunk's embedding (both the ORM column and vec_index).

        :return: True on success, False when the chunk is missing or on error
        """
        try:
            embedding_bytes = struct.pack(f'{len(embedding)}f', *embedding)

            chunk = self.session.query(Chunk).filter_by(id=chunk_id).first()
            if not chunk:
                return False

            chunk.embedding = embedding_bytes
            self.session.flush()

            self._upsert_vec_index(self.session.connection(), chunk_id, embedding_bytes)

            self.session.commit()
            return True
        except SQLAlchemyError as e:
            logger.exception(f"[DocumentManager] 更新chunk向量失败: {e}")
            self.session.rollback()
            return False

    def delete_document_chunks(self, doc_id: str) -> None:
        """Delete all chunks of a document, including FTS5 and vec_index entries."""
        chunks = self.session.query(Chunk).filter_by(doc_id=doc_id).all()
        conn = self.session.connection()
        # Bug fix: guard on vec_index existence like add_chunk does; a plain
        # DELETE would raise when vector search has never been set up.
        has_vec_index = self._vec_index_exists(conn)
        for chunk in chunks:
            # Drop the FTS5 entry for this chunk.
            conn.execute(text("""
                DELETE FROM chunks_fts WHERE id = :chunk_id
            """), {"chunk_id": chunk.id})
            # Drop the vector entry, if this chunk had one.
            if chunk.embedding and has_vec_index:
                result = conn.execute(text("""
                    SELECT rowid FROM chunks WHERE id = :chunk_id
                """), {"chunk_id": chunk.id})
                row = result.fetchone()
                if row:
                    conn.execute(text("""
                        DELETE FROM vec_index WHERE rowid = :rowid
                    """), {"rowid": row[0]})
            self.session.delete(chunk)
        self.session.commit()

    def update_document_content(self, doc_id: str, content: str, chunk_size: int) -> None:
        """Update a document's stored content, chunk_size and updated_at."""
        doc = self.session.query(Document).filter_by(id=doc_id).first()
        if doc:
            doc.chunk_size = chunk_size
            doc.content = content
            doc.updated_at = datetime.now()
            self.session.commit()
+
+
+def _generate_unique_name(base_name: str, check_exists_func) -> str:
+ """
+ 生成唯一名称,如果已存在则添加时间戳
+
+ :param base_name: 基础名称
+ :param check_exists_func: 检查是否存在的函数,接受名称参数,返回是否存在
+ :return: 唯一名称
+ """
+ if not check_exists_func(base_name):
+ return base_name
+
+ # 如果已存在,添加时间戳
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+
+ # 分离文件名和扩展名
+ if '.' in base_name:
+ name_part, ext_part = base_name.rsplit('.', 1)
+ new_name = f"{name_part}_{timestamp}.{ext_part}"
+ else:
+ new_name = f"{base_name}_{timestamp}"
+
+ # 如果新名称仍然存在,继续添加后缀
+ counter = 1
+ final_name = new_name
+ while check_exists_func(final_name):
+ if '.' in base_name:
+ name_part, ext_part = base_name.rsplit('.', 1)
+ final_name = f"{name_part}_{timestamp}_{counter}.{ext_part}"
+ else:
+ final_name = f"{base_name}_{timestamp}_{counter}"
+ counter += 1
+
+ return final_name
+
+
async def import_document(session: Session, kb_id: str, file_path: str,
                          chunk_size: int) -> Tuple[bool, str, Optional[dict]]:
    """
    Import a document into a knowledge base (async).

    Parses the file, splits it into token-bounded chunks, optionally embeds
    the chunks in one batch, and persists everything through DocumentManager.

    :param session: database session
    :param kb_id: knowledge base id
    :param file_path: path of the file to import
    :param chunk_size: chunk size (tokens)
    :return: (success, message, data)
    """
    try:
        original_name = os.path.basename(file_path)

        parsed = Parser.parse(file_path)
        if not parsed:
            return False, "文档解析失败", None

        pieces = TokenTool.split_content_to_chunks(parsed, chunk_size)
        if not pieces:
            return False, "文档内容为空", None

        manager = DocumentManager(session)

        # Collision-proof the document name within this knowledge base.
        doc_name = _generate_unique_name(
            original_name,
            lambda name: manager.get_document(kb_id, name) is not None,
        )

        doc_id = str(uuid.uuid4())
        file_type = file_path.lower().split('.')[-1]

        if not manager.add_document(doc_id, kb_id, doc_name, file_path, file_type, parsed, chunk_size):
            return False, "添加文档失败", None

        # Stage every chunk first so vectors can be generated in one batch call.
        staged = [
            (str(uuid.uuid4()), piece, TokenTool.get_tokens(piece), index)
            for index, piece in enumerate(pieces)
        ]

        vectors = [None] * len(staged)
        if Embedding.is_configured() and staged:
            try:
                vectors = await Embedding.vectorize_embeddings_batch(
                    [piece for _, piece, _, _ in staged], max_concurrent=5)
            except Exception as e:
                logger.warning(f"批量生成向量失败: {e}")

        # Persist chunks together with their vectors (when available).
        stored_ids = []
        for (chunk_id, piece, tokens, index), vector in zip(staged, vectors):
            if manager.add_chunk(chunk_id, doc_id, piece, tokens, index, vector):
                stored_ids.append(chunk_id)

        return True, f"成功导入文档,共 {len(stored_ids)} 个 chunks", {
            "doc_id": doc_id,
            "doc_name": doc_name,
            "original_name": original_name if doc_name != original_name else None,
            "chunk_count": len(stored_ids),
            "file_path": file_path
        }
    except Exception as e:
        logger.exception(f"[import_document] 导入文档失败: {e}")
        return False, "导入文档失败", None
+
+
async def update_document(session: Session, kb_id: str, doc_name: str, chunk_size: int) -> Tuple[bool, str, Optional[dict]]:
    """
    Re-chunk an existing document with a new chunk size (async).

    Drops the document's current chunks, re-parses the source file, splits
    it with *chunk_size*, re-embeds in one batch, and updates the stored
    content and chunk size.

    :param session: database session
    :param kb_id: knowledge base id
    :param doc_name: document name
    :param chunk_size: new chunk size (tokens)
    :return: (success, message, data)
    """
    try:
        manager = DocumentManager(session)
        doc = manager.get_document(kb_id, doc_name)
        if not doc:
            return False, f"文档 '{doc_name}' 不存在", None

        # Purge existing chunks before rebuilding them.
        manager.delete_document_chunks(doc.id)

        if not doc.file_path or not os.path.exists(doc.file_path):
            return False, "文档文件不存在", None

        parsed = Parser.parse(doc.file_path)
        if not parsed:
            return False, "文档解析失败", None

        pieces = TokenTool.split_content_to_chunks(parsed, chunk_size)
        if not pieces:
            return False, "文档内容为空", None

        # Stage every chunk first so vectors can be generated in one batch call.
        staged = [
            (str(uuid.uuid4()), piece, TokenTool.get_tokens(piece), index)
            for index, piece in enumerate(pieces)
        ]

        vectors = [None] * len(staged)
        if Embedding.is_configured() and staged:
            try:
                vectors = await Embedding.vectorize_embeddings_batch(
                    [piece for _, piece, _, _ in staged], max_concurrent=5)
            except Exception as e:
                logger.warning(f"批量生成向量失败: {e}")

        # Persist chunks together with their vectors (when available).
        stored_ids = []
        for (chunk_id, piece, tokens, index), vector in zip(staged, vectors):
            if manager.add_chunk(chunk_id, doc.id, piece, tokens, index, vector):
                stored_ids.append(chunk_id)

        # Record the new content and chunk size on the document row.
        manager.update_document_content(doc.id, parsed, chunk_size)

        return True, f"成功修改文档,共 {len(stored_ids)} 个 chunks", {
            "doc_id": doc.id,
            "doc_name": doc_name,
            "chunk_count": len(stored_ids),
            "chunk_size": chunk_size
        }
    except Exception as e:
        logger.exception(f"[update_document] 修改文档失败: {e}")
        return False, "修改文档失败", None
+
diff --git a/mcp_center/servers/rag/src/base/models.py b/mcp_center/servers/rag/src/base/models.py
new file mode 100644
index 00000000..4b197d44
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/models.py
@@ -0,0 +1,79 @@
"""SQLAlchemy ORM models for the RAG store: knowledge bases, documents, chunks."""
from sqlalchemy import (
    Column, String, Integer, Text, DateTime, ForeignKey,
    LargeBinary, Index, func
)
# Fix: declarative_base lives in sqlalchemy.orm since SQLAlchemy 1.4; the
# sqlalchemy.ext.declarative path is deprecated and emits MovedIn20Warning.
from sqlalchemy.orm import declarative_base, relationship
from datetime import datetime

Base = declarative_base()
+
+
class KnowledgeBase(Base):
    """Knowledge base table: one row per named knowledge base.

    Embedding configuration is stored per knowledge base so different bases
    can use different embedding models/endpoints.
    """
    __tablename__ = 'knowledge_bases'

    id = Column(String, primary_key=True)
    name = Column(String, nullable=False, unique=True)  # display name, unique across bases
    chunk_size = Column(Integer, nullable=False)  # default chunk size (tokens) for documents
    embedding_model = Column(Text)
    embedding_endpoint = Column(Text)
    embedding_api_key = Column(Text)  # NOTE(review): stored in plain text — confirm this is acceptable
    created_at = Column(DateTime, default=datetime.now, server_default=func.current_timestamp())
    updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now, server_default=func.current_timestamp())

    # Relationships: deleting a knowledge base removes its documents.
    documents = relationship("Document", back_populates="knowledge_base", cascade="all, delete-orphan")

    # Indexes
    __table_args__ = (
        Index('idx_kb_name', 'name'),
    )
+
+
class Document(Base):
    """Document table: one source file imported into a knowledge base."""
    __tablename__ = 'documents'

    id = Column(String, primary_key=True)
    kb_id = Column(String, ForeignKey('knowledge_bases.id', ondelete='CASCADE'), nullable=False)
    name = Column(String, nullable=False)  # kept unique per knowledge base by application code
    file_path = Column(Text)
    file_type = Column(String)  # lowercased file extension, e.g. 'pdf', 'docx'
    content = Column(Text)  # full parsed document text
    chunk_size = Column(Integer)  # chunk size (tokens) used at import/update time
    created_at = Column(DateTime, default=datetime.now, server_default=func.current_timestamp())
    updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now, server_default=func.current_timestamp())

    # Relationships: deleting a document removes its chunks.
    knowledge_base = relationship("KnowledgeBase", back_populates="documents")
    chunks = relationship("Chunk", back_populates="document", cascade="all, delete-orphan")

    # Indexes
    __table_args__ = (
        Index('idx_doc_kb_id', 'kb_id'),
        Index('idx_doc_name', 'name'),
    )
+
+
class Chunk(Base):
    """Chunk table: one token-bounded slice of a document's text."""
    __tablename__ = 'chunks'

    id = Column(String, primary_key=True)
    doc_id = Column(String, ForeignKey('documents.id', ondelete='CASCADE'), nullable=False)
    content = Column(Text, nullable=False)
    tokens = Column(Integer)  # token count of content
    chunk_index = Column(Integer)  # 0-based position within the document
    embedding = Column(LargeBinary)  # embedding packed as float32 bytes (struct 'f' format); NULL when not embedded
    created_at = Column(DateTime, default=datetime.now, server_default=func.current_timestamp())

    # Relationships
    document = relationship("Document", back_populates="chunks")

    # Indexes
    __table_args__ = (
        Index('idx_chunk_doc_id', 'doc_id'),
        Index('idx_chunk_index', 'chunk_index'),
    )
+
diff --git a/mcp_center/servers/rag/src/base/parser/doc.py b/mcp_center/servers/rag/src/base/parser/doc.py
new file mode 100644
index 00000000..7f5cf90f
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/parser/doc.py
@@ -0,0 +1,61 @@
+import logging
+from typing import Optional
+from docx import Document as DocxDocument
+
+logger = logging.getLogger(__name__)
+
+
def parse_docx(file_path: str) -> Optional[str]:
    """
    Parse a DOCX file: body paragraphs first, then every non-empty table cell.

    :param file_path: file path
    :return: extracted text joined by newlines, or None on failure
    """
    try:
        document = DocxDocument(file_path)
        if not document:
            logger.error("[DocParser] 无法打开docx文件")
            return None

        pieces = [paragraph.text for paragraph in document.paragraphs if paragraph.text.strip()]

        for table in document.tables:
            pieces.extend(
                cell.text
                for row in table.rows
                for cell in row.cells
                if cell.text.strip()
            )

        return '\n'.join(pieces)
    except Exception as e:
        logger.exception(f"[DocParser] 解析DOCX文件失败: {e}")
        return None
+
+
def parse_doc(file_path: str) -> Optional[str]:
    """
    Parse a legacy DOC file (old binary Word format) — best effort.

    python-docx only reads the OOXML (.docx) container, so this delegates to
    parse_docx; true binary DOC files will fail to open and yield None. Kept
    as a separate entry point so a real DOC backend (e.g. external converter)
    can be slotted in later.

    :param file_path: file path
    :return: file content, or None when the file cannot be parsed
    """
    # Deduplicated: the previous body was a verbatim copy of parse_docx.
    content = parse_docx(file_path)
    if content is None:
        logger.warning("[DocParser] python-docx 不支持 DOC 格式,尝试其他方法")
        logger.warning("[DocParser] DOC 格式解析需要额外工具,当前仅支持 DOCX")
    return content
+
diff --git a/mcp_center/servers/rag/src/base/parser/parser.py b/mcp_center/servers/rag/src/base/parser/parser.py
new file mode 100644
index 00000000..5fba0955
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/parser/parser.py
@@ -0,0 +1,59 @@
+"""
+文档解析器模块
+"""
+import logging
+from typing import Optional, Dict
+
+logger = logging.getLogger(__name__)
+
+from base.parser.txt import parse_txt
+from base.parser.doc import parse_docx, parse_doc
+from base.parser.pdf import parse_pdf
+
+_parsers: Dict[str, callable] = {}
+
+
def register_parser(file_ext: str, parser_func: callable):
    """
    Register a parser for a file extension.

    :param file_ext: extension such as 'txt' or 'docx' (stored lowercased)
    :param parser_func: callable taking a file path and returning Optional[str]
    """
    key = file_ext.lower()
    _parsers[key] = parser_func
    logger.debug(f"[Parser] 注册解析器: {file_ext}")
+
+
def parse(file_path: str) -> Optional[str]:
    """
    Dispatch to the parser registered for the file's extension.

    :param file_path: file path
    :return: file content, or None when the type is unsupported or parsing fails
    """
    extension = file_path.lower().split('.')[-1]

    parser_func = _parsers.get(extension)
    if parser_func is None:
        logger.error(f"[Parser] 不支持的文件类型: {extension}")
        return None

    try:
        return parser_func(file_path)
    except Exception as e:
        logger.exception(f"[Parser] 解析文件失败: {file_path}, {e}")
        return None
+
+
# Register the built-in parsers (file extension -> parse function).
register_parser('txt', parse_txt)
register_parser('docx', parse_docx)
register_parser('doc', parse_doc)
register_parser('pdf', parse_pdf)
+
+
class Parser:
    """Class-style facade over the module-level parse() dispatcher."""

    @staticmethod
    def parse(file_path: str) -> Optional[str]:
        """Parse *file_path* using the parser registered for its extension."""
        return parse(file_path)
+
diff --git a/mcp_center/servers/rag/src/base/parser/pdf.py b/mcp_center/servers/rag/src/base/parser/pdf.py
new file mode 100644
index 00000000..a3549c4e
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/parser/pdf.py
@@ -0,0 +1,80 @@
+"""
+PDF 文件解析器
+使用 PyMuPDF (fitz) 提取 PDF 中的文本内容
+"""
+import logging
+from typing import Optional
+import fitz
+
+logger = logging.getLogger(__name__)
+
+
def parse_pdf(file_path: str) -> Optional[str]:
    """
    Parse a PDF file and extract its text content.

    Collects every text block with its position, sorts blocks top-to-bottom
    then left-to-right, and inserts a blank line between blocks separated by
    a large vertical gap.

    :param file_path: PDF file path
    :return: extracted text, or None on failure / no text content
    """
    try:
        pdf_doc = fitz.open(file_path)

        if not pdf_doc:
            logger.error("[PdfParser] 无法打开 PDF 文件")
            return None

        text_blocks = []
        try:
            for page_num in range(len(pdf_doc)):
                page = pdf_doc.load_page(page_num)

                # Block tuple layout: (x0, y0, x1, y1, text, block_no, block_type).
                for block in page.get_text("blocks"):
                    if block[6] == 0:  # type 0 = text block (images are type 1)
                        text = block[4].strip()
                        if text:
                            # Keep position info for the later reading-order sort.
                            bbox = block[:4]
                            text_blocks.append({
                                'text': text,
                                'y0': bbox[1],  # top edge, primary sort key
                                'x0': bbox[0]   # left edge, secondary sort key
                            })
        finally:
            # Bug fix: always release the document, even when extraction raises.
            pdf_doc.close()

        if not text_blocks:
            logger.warning("[PdfParser] PDF 文件中没有找到文本内容")
            return None

        # Reading order: top-to-bottom, then left-to-right.
        text_blocks.sort(key=lambda x: (x['y0'], x['x0']))

        paragraphs = []
        prev_y0 = None

        for block in text_blocks:
            text = block['text']
            y0 = block['y0']

            # A large vertical gap (> 10 px) is treated as a paragraph break.
            if prev_y0 is not None and y0 - prev_y0 > 10:
                paragraphs.append('')

            paragraphs.append(text)
            prev_y0 = y0

        return '\n'.join(paragraphs)

    except Exception as e:
        logger.exception(f"[PdfParser] 解析 PDF 文件失败: {e}")
        return None
+
diff --git a/mcp_center/servers/rag/src/base/parser/txt.py b/mcp_center/servers/rag/src/base/parser/txt.py
new file mode 100644
index 00000000..6ed639ff
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/parser/txt.py
@@ -0,0 +1,30 @@
+import chardet
+import logging
+from typing import Optional
+
+logger = logging.getLogger(__name__)
+
def detect_encoding(file_path: str) -> str:
    """
    Detect the text encoding of a file with chardet.

    Reads at most 64 KiB — enough for a reliable guess while avoiding
    loading arbitrarily large files into memory just for detection.

    :param file_path: file to probe
    :return: detected encoding name, falling back to 'utf-8'
    """
    try:
        with open(file_path, 'rb') as file:
            raw_data = file.read(65536)
        result = chardet.detect(raw_data)
        encoding = result['encoding']
        if encoding is None:
            encoding = 'utf-8'
        return encoding
    except Exception as e:
        logger.exception(f"[TxtParser] 检测编码失败: {e}")
        return 'utf-8'
+
+
def parse_txt(file_path: str) -> Optional[str]:
    """
    Read a plain-text file, decoding with the detected encoding.

    :param file_path: file path
    :return: file content, or None on failure
    """
    try:
        charset = detect_encoding(file_path)
        with open(file_path, 'r', encoding=charset, errors='ignore') as handle:
            return handle.read()
    except Exception as e:
        logger.exception(f"[TxtParser] 解析TXT文件失败: {e}")
        return None
+
diff --git a/mcp_center/servers/rag/src/base/rerank.py b/mcp_center/servers/rag/src/base/rerank.py
new file mode 100644
index 00000000..1e004ed0
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/rerank.py
@@ -0,0 +1,64 @@
+import jieba
+import logging
+from typing import List, Dict, Any
+
+logger = logging.getLogger(__name__)
+
class Rerank:
    """Lexical reranker based on Jaccard similarity over jieba tokens."""

    # Common Chinese stopwords excluded from similarity computation.
    stopwords = set(['的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一', '一个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有', '看', '好', '自己', '这'])

    @staticmethod
    def split_words(content: str) -> List[str]:
        """Tokenize *content* with jieba; empty list on any failure."""
        try:
            return list(jieba.cut(str(content)))
        except Exception:
            return []

    @staticmethod
    def cal_jaccard(str1: str, str2: str) -> float:
        """
        Jaccard similarity (0–100) between the stopword-filtered token sets
        of *str1* and *str2*. Two empty strings score 100; failures score 0.
        """
        try:
            if len(str1) == 0 and len(str2) == 0:
                return 100.0

            tokens_a = Rerank.split_words(str1)
            tokens_b = Rerank.split_words(str2)

            filtered_a = {w for w in tokens_a if w not in Rerank.stopwords and w.strip()}
            filtered_b = {w for w in tokens_b if w not in Rerank.stopwords and w.strip()}

            if not filtered_a or not filtered_b:
                return 0.0

            overlap = len(filtered_a & filtered_b)
            total = len(filtered_a | filtered_b)
            if total == 0:
                return 0.0

            return overlap / total * 100.0
        except Exception:
            return 0.0

    @staticmethod
    def rerank_chunks(chunks: List[Dict[str, Any]], query: str) -> List[Dict[str, Any]]:
        """
        Sort *chunks* by descending Jaccard similarity to *query*.

        Annotates each chunk with a 'jaccard_score' key. On any failure the
        input list is returned unchanged.
        """
        try:
            scored = []
            for chunk in chunks:
                similarity = Rerank.cal_jaccard(chunk.get('content', ''), query)
                chunk['jaccard_score'] = similarity
                scored.append((similarity, chunk))

            # Stable sort: ties keep their incoming order.
            scored.sort(key=lambda pair: pair[0], reverse=True)
            return [chunk for _, chunk in scored]
        except Exception:
            return chunks
+
diff --git a/mcp_center/servers/rag/src/base/search/keyword.py b/mcp_center/servers/rag/src/base/search/keyword.py
new file mode 100644
index 00000000..d1994d0b
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/search/keyword.py
@@ -0,0 +1,92 @@
+"""
+关键词检索模块 - 使用 SQLAlchemy
+"""
+import logging
+from typing import List, Dict, Any, Optional
+from sqlalchemy import text
+import jieba
+
+logger = logging.getLogger(__name__)
+
+
+def _prepare_fts_query(query: str) -> str:
+ """
+ 准备 FTS5 查询
+ :param query: 原始查询文本
+ :return: FTS5 查询字符串
+ """
+ def escape_fts_word(word: str) -> str:
+ # 包含以下任意字符时,整体作为短语用双引号包裹,避免触发 FTS5 语法解析
+ # 特别是 '%' 在 FTS5 MATCH 语法中会导致 "syntax error near '%'"
+ special_chars = [
+ '"', "'", '(', ')', '*', ':', '?', '+', '-', '|', '&',
+ '{', '}', '[', ']', '^', '$', '\\', '/', '!', '~', ';',
+ ',', '.', ' ', '%'
+ ]
+ if any(char in word for char in special_chars):
+ escaped_word = word.replace('"', '""')
+ return f'"{escaped_word}"'
+ return word
+
+ try:
+ words = jieba.cut(query)
+ words = [word.strip() for word in words if word.strip()]
+ if not words:
+ return escape_fts_word(query)
+
+ escaped_words = [escape_fts_word(word) for word in words]
+ fts_query = ' OR '.join(escaped_words)
+ return fts_query
+ except Exception:
+ return escape_fts_word(query)
+
+
def search_by_keyword(conn, query: str, top_k: int = 5, doc_ids: Optional[List[str]] = None) -> List[Dict[str, Any]]:
    """
    Keyword search over the FTS5 index (query is jieba-tokenized first).

    :param conn: database connection (SQLAlchemy Connection)
    :param query: query text
    :param top_k: number of results to return
    :param doc_ids: optional list of document ids to restrict the search to
    :return: list of chunk dicts (id, doc_id, content, tokens, chunk_index,
             doc_name, score); empty list on failure
    """
    try:
        fts_query = _prepare_fts_query(query)

        params = {"fts_query": fts_query, "top_k": top_k}
        where_clause = "WHERE chunks_fts MATCH :fts_query"

        # Optional document filter: bind each id as its own named parameter.
        if doc_ids:
            placeholders = ','.join([f':doc_id_{i}' for i in range(len(doc_ids))])
            for i, doc_id in enumerate(doc_ids):
                params[f'doc_id_{i}'] = doc_id
            where_clause += f" AND c.doc_id IN ({placeholders})"

        # FTS5 rank is smaller-is-better, so ORDER BY rank yields best first.
        sql = f"""
            SELECT c.id, c.doc_id, c.content, c.tokens, c.chunk_index,
                   d.name as doc_name,
                   chunks_fts.rank
            FROM chunks_fts
            JOIN chunks c ON c.id = chunks_fts.id
            JOIN documents d ON d.id = c.doc_id
            {where_clause}
            ORDER BY chunks_fts.rank
            LIMIT :top_k
        """
        result = conn.execute(text(sql), params)

        results = []
        for row in result:
            results.append({
                'id': row.id,
                'doc_id': row.doc_id,
                'content': row.content,
                'tokens': row.tokens,
                'chunk_index': row.chunk_index,
                'doc_name': row.doc_name,
                'score': row.rank if row.rank is not None else 0.0  # raw FTS5 rank (lower = better)
            })
        return results
    except Exception as e:
        logger.exception(f"[KeywordSearch] 关键词检索失败: {e}")
        return []
diff --git a/mcp_center/servers/rag/src/base/search/vector.py b/mcp_center/servers/rag/src/base/search/vector.py
new file mode 100644
index 00000000..179423ca
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/search/vector.py
@@ -0,0 +1,67 @@
+"""
+向量检索模块 - 使用 SQLAlchemy
+"""
+import logging
+import struct
+from typing import List, Dict, Any, Optional
+from sqlalchemy import text
+
+logger = logging.getLogger(__name__)
+
+
def search_by_vector(conn, query_vector: List[float], top_k: int = 5, doc_ids: Optional[List[str]] = None) -> List[Dict[str, Any]]:
    """
    Vector (KNN) search against the vec_index virtual table.

    :param conn: database connection (SQLAlchemy Connection)
    :param query_vector: query embedding
    :param top_k: number of results to return
    :param doc_ids: optional list of document ids to restrict the search to
    :return: list of chunk dicts with a 'score' distance (lower = closer);
             empty list when vec_index is absent or on failure
    """
    try:
        # Vector search is optional: bail out quietly when the vec_index
        # virtual table has never been created.
        result = conn.execute(text("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name='vec_index'
        """))
        if not result.fetchone():
            return []

        # Pack the query as float32 bytes, matching the stored chunk format.
        query_vector_bytes = struct.pack(f'{len(query_vector)}f', *query_vector)

        params = {"query_vector": query_vector_bytes, "top_k": top_k}
        # NOTE(review): "embedding MATCH :vec AND k = :top_k" looks like the
        # sqlite-vec KNN query syntax — confirm which vector extension is used.
        where_clause = "WHERE v.embedding MATCH :query_vector AND k = :top_k"

        if doc_ids:
            placeholders = ','.join([f':doc_id_{i}' for i in range(len(doc_ids))])
            for i, doc_id in enumerate(doc_ids):
                params[f'doc_id_{i}'] = doc_id
            where_clause += f" AND c.doc_id IN ({placeholders})"

        sql = f"""
            SELECT c.id, c.doc_id, c.content, c.tokens, c.chunk_index,
                   d.name as doc_name,
                   distance
            FROM vec_index v
            JOIN chunks c ON c.rowid = v.rowid
            JOIN documents d ON d.id = c.doc_id
            {where_clause}
            ORDER BY distance
        """
        result = conn.execute(text(sql), params)

        results = []
        for row in result:
            results.append({
                'id': row.id,
                'doc_id': row.doc_id,
                'content': row.content,
                'tokens': row.tokens,
                'chunk_index': row.chunk_index,
                'doc_name': row.doc_name,
                'score': row.distance  # raw distance: smaller means more similar
            })
        return results
    except Exception as e:
        logger.exception(f"[VectorSearch] 向量检索失败: {e}")
        return []
diff --git a/mcp_center/servers/rag/src/base/search/weighted_keyword_and_vector_search.py b/mcp_center/servers/rag/src/base/search/weighted_keyword_and_vector_search.py
new file mode 100644
index 00000000..f8241514
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/search/weighted_keyword_and_vector_search.py
@@ -0,0 +1,122 @@
+import logging
+import asyncio
+from typing import List, Dict, Any, Optional
+from base.search.keyword import search_by_keyword as keyword_search
+from base.search.vector import search_by_vector as vector_search
+from base.embedding import Embedding
+from base.rerank import Rerank
+
+logger = logging.getLogger(__name__)
+
+
def _merge_weighted(merged: Dict[str, Dict[str, Any]],
                    chunks: List[Dict[str, Any]], weight: float) -> None:
    """
    Min-max normalize raw retrieval scores and accumulate them into *merged*.

    Both retrievers return lower-is-better scores (FTS5 rank / vector
    distance), so normalization flips them to a 0-1 higher-is-better scale
    before applying *weight*. Chunks already present in *merged* (same id)
    get the weighted score added; new chunks are copied in.
    """
    scores = [chunk.get('score', 0.0) for chunk in chunks if chunk.get('score') is not None]
    if not scores:
        return

    lowest = min(scores)
    highest = max(scores)
    span = highest - lowest

    for chunk in chunks:
        raw = chunk.get('score', 0.0)
        # Flip to higher-is-better, normalized to 0-1 (degenerate range -> 1.0).
        if span > 0:
            normalized = 1.0 - ((raw - lowest) / span)
        else:
            normalized = 1.0
        weighted = normalized * weight

        entry = merged.get(chunk['id'])
        if entry is None:
            entry = chunk.copy()
            entry['score'] = weighted
            merged[chunk['id']] = entry
        else:
            entry['score'] += weighted


async def weighted_keyword_and_vector_search(
    conn,
    query: str,
    top_k: int = 5,
    weight_keyword: float = 0.3,
    weight_vector: float = 0.7,
    doc_ids: Optional[List[str]] = None
) -> List[Dict[str, Any]]:
    """
    Weighted hybrid keyword + vector retrieval (async).

    :param conn: database connection (SQLAlchemy Connection)
    :param query: query text
    :param top_k: number of results to return
    :param weight_keyword: weight of the keyword (FTS5) score
    :param weight_vector: weight of the vector (embedding) score
    :param doc_ids: optional list of document ids to restrict the search to
    :return: merged and reranked chunk list; empty list on failure
    """
    try:
        keyword_chunks: List[Dict[str, Any]] = []
        vector_chunks: List[Dict[str, Any]] = []

        # Over-fetch 2*top_k candidates from each retriever; each leg is
        # best-effort so one failing backend does not kill the whole search.
        try:
            keyword_chunks = keyword_search(conn, query, 2 * top_k, doc_ids)
        except Exception as e:
            logger.warning(f"[WeightedSearch] 关键词检索失败: {e}")

        # Vector search requires a configured embedding backend.
        if Embedding.is_configured():
            try:
                query_vector = await Embedding.vectorize_embedding(query)
                if query_vector:
                    vector_chunks = vector_search(conn, query_vector, 2 * top_k, doc_ids)
            except Exception as e:
                logger.warning(f"[WeightedSearch] 向量检索失败: {e}")

        if not keyword_chunks and not vector_chunks:
            return []

        # Normalize both result sets and merge them, deduplicated by chunk id
        # (keyword results first, mirroring their weights' intent).
        merged_chunks: Dict[str, Dict[str, Any]] = {}
        _merge_weighted(merged_chunks, keyword_chunks, weight_keyword)
        _merge_weighted(merged_chunks, vector_chunks, weight_vector)

        merged_list = list(merged_chunks.values())
        merged_list.sort(key=lambda x: x.get('score', 0.0), reverse=True)

        # Final lexical rerank, then truncate to top_k.
        reranked_chunks = Rerank.rerank_chunks(merged_list, query)
        return reranked_chunks[:top_k]

    except Exception as e:
        logger.exception(f"[WeightedSearch] 混合检索失败: {e}")
        return []
+
diff --git a/mcp_center/servers/rag/src/base/token_tool.py b/mcp_center/servers/rag/src/base/token_tool.py
new file mode 100644
index 00000000..66bce6f1
--- /dev/null
+++ b/mcp_center/servers/rag/src/base/token_tool.py
@@ -0,0 +1,157 @@
+import tiktoken
+import logging
+import re
+import uuid
+from typing import List
+from base.config import get_token_model, get_max_tokens
+
+logger = logging.getLogger(__name__)
+
+
class TokenTool:
    """Token counting and token-aware text chunking utilities (tiktoken-based)."""

    # Cache of tiktoken encoders keyed by model name.
    _encoding_cache = {}

    @staticmethod
    def _get_encoding():
        """
        Return the tiktoken encoder for the configured model (cached).

        Falls back to the cl100k_base encoding when the model name is
        unknown to tiktoken.
        """
        model = get_token_model()
        if model not in TokenTool._encoding_cache:
            try:
                TokenTool._encoding_cache[model] = tiktoken.encoding_for_model(model)
            except Exception:
                TokenTool._encoding_cache[model] = tiktoken.get_encoding("cl100k_base")
        return TokenTool._encoding_cache[model]

    @staticmethod
    def get_tokens(content: str) -> int:
        """
        Count the tokens in *content*.

        :param content: text content
        :return: token count; 0 on any failure
        """
        try:
            enc = TokenTool._get_encoding()
            return len(enc.encode(str(content)))
        except Exception:
            return 0

    @staticmethod
    def get_k_tokens_words_from_content(content: str, k: int = 1024) -> str:
        """
        Return the longest prefix of *content* that fits within *k* tokens.

        :param content: text content
        :param k: token budget
        :return: truncated prefix; "" on failure
        """
        try:
            if TokenTool.get_tokens(content) <= k:
                return content

            # Binary search over character length for the longest prefix
            # whose token count stays within the budget.
            lo = 0
            hi = len(content)
            while lo + 1 < hi:
                mid = (lo + hi) // 2
                if TokenTool.get_tokens(content[:mid]) <= k:
                    lo = mid
                else:
                    hi = mid
            return content[:lo]
        except Exception:
            return ""

    @staticmethod
    def content_to_sentences(content: str) -> List[str]:
        """
        Split *content* into sentences.

        Protects common abbreviations (e.g., i.e., Dr., ...) with unique
        placeholders so they do not trigger a split, splits on Chinese and
        ASCII sentence punctuation, then restores the protected phrases.

        :param content: text content
        :return: list of sentences
        """
        protected_phrases = [
            'e.g.', 'i.e.', 'U.S.', 'U.K.', 'A.M.', 'P.M.', 'a.m.', 'p.m.',
            'Inc.', 'Ltd.', 'No.', 'vs.', 'approx.', 'Dr.', 'Mr.', 'Ms.', 'Prof.',
        ]

        placeholder_map = {}
        for phrase in protected_phrases:
            placeholder = f"__PROTECTED_{uuid.uuid4().hex}__"
            placeholder_map[placeholder] = phrase
            content = content.replace(phrase, placeholder)

        # Sentence-boundary pattern (zero-width split points).
        # NOTE(review): right_quotes mixes quote characters so adjacent string
        # literals may concatenate here — verify the resulting character class
        # matches the intended closing-quote set.
        chinese_punct = r'[。!?!?;;]'
        right_quotes = r'["'""'】】》〕〉)\\]]'
        pattern = re.compile(
            rf'(?<={chinese_punct}{right_quotes})'
            rf'|(?<={chinese_punct})(?=[^{right_quotes}])'
            r'|(?<=[\.\?!;])(?=\s|$)'
        )

        # Split, then restore protected phrases inside each sentence.
        sentences = []
        for segment in pattern.split(content):
            segment = segment.strip()
            if not segment:
                continue
            for placeholder, original in placeholder_map.items():
                segment = segment.replace(placeholder, original)
            sentences.append(segment)

        return sentences

    @staticmethod
    def split_content_to_chunks(content: str, chunk_size: int = 1024) -> List[str]:
        """
        Split *content* into chunks of at most *chunk_size* tokens.

        Sentences are packed greedily; a sentence longer than chunk_size is
        hard-split by token count.

        :param content: text content
        :param chunk_size: chunk budget in tokens
        :return: list of chunks; [] on failure
        """
        try:
            sentences = TokenTool.content_to_sentences(content)
            chunks = []
            current_chunk = ""
            current_tokens = 0

            for sentence in sentences:
                sentence_tokens = TokenTool.get_tokens(sentence)

                if sentence_tokens > chunk_size:
                    # Flush whatever is pending, then hard-split the long sentence.
                    if current_chunk:
                        chunks.append(current_chunk)
                        current_chunk = ""
                        current_tokens = 0

                    sub_content = sentence
                    while TokenTool.get_tokens(sub_content) > chunk_size:
                        sub_chunk = TokenTool.get_k_tokens_words_from_content(sub_content, chunk_size)
                        if not sub_chunk:
                            # Bug fix: a single character can exceed chunk_size
                            # tokens, making the prefix empty — previously this
                            # looped forever. Emit one character to guarantee
                            # forward progress.
                            sub_chunk = sub_content[0]
                        chunks.append(sub_chunk)
                        sub_content = sub_content[len(sub_chunk):]

                    if sub_content:
                        current_chunk = sub_content
                        current_tokens = TokenTool.get_tokens(sub_content)
                else:
                    if current_tokens + sentence_tokens > chunk_size:
                        if current_chunk:
                            chunks.append(current_chunk)
                        current_chunk = sentence
                        current_tokens = sentence_tokens
                    else:
                        current_chunk += sentence
                        current_tokens += sentence_tokens

            if current_chunk:
                chunks.append(current_chunk)

            return chunks
        except Exception:
            return []
+
diff --git a/mcp_center/servers/rag/src/cli.py b/mcp_center/servers/rag/src/cli.py
new file mode 100644
index 00000000..e89e0832
--- /dev/null
+++ b/mcp_center/servers/rag/src/cli.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+"""
+RAG Server CLI 工具
+用于直接调用 RAG 工具函数的命令行接口
+"""
import os
import sys

# Resolve the project root: prefer the WorkingDirectory= configured in the
# systemd unit (so the CLI works from any CWD), falling back to the default
# install path when the unit file is absent or unreadable.
SERVICE_FILE = "/etc/systemd/system/rag.service"
PROJECT_ROOT = "/usr/lib/euler-copilot-framework/mcp_center"
if os.path.exists(SERVICE_FILE):
    try:
        # Explicit encoding: do not depend on the locale's default codec.
        with open(SERVICE_FILE, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip().startswith("WorkingDirectory="):
                    PROJECT_ROOT = line.strip().split("=", 1)[1]
                    break
    except OSError:
        # Best effort: an unreadable unit file keeps the default path.
        pass

# Make the project root importable.
if PROJECT_ROOT not in sys.path:
    sys.path.insert(0, PROJECT_ROOT)

# Make this script's own directory importable (for the local cli package).
current_dir = os.path.dirname(os.path.abspath(__file__))
if current_dir not in sys.path:
    sys.path.insert(0, current_dir)
+
+# 导入 CLI 模块
+from cli.parse_args import parse_args
+from cli.handle import (
+ handle_create_kb,
+ handle_delete_kb,
+ handle_list_kb,
+ handle_select_kb,
+ handle_import_doc,
+ handle_list_doc,
+ handle_delete_doc,
+ handle_update_doc,
+ handle_search,
+ handle_export_db,
+ handle_import_db
+)
+
def main():
    """CLI entry point: dispatch the parsed sub-command to its handler.

    Exits with status 0 on success, 1 on failure or missing command.
    """
    args = parse_args()

    if not args.command:
        print("❌ 请指定命令,使用 --help 查看帮助")
        sys.exit(1)

    # Table-driven dispatch; an unknown command simply yields failure,
    # matching the original if/elif chain's fall-through behavior.
    handlers = {
        "create_kb": handle_create_kb,
        "delete_kb": handle_delete_kb,
        "list_kb": handle_list_kb,
        "select_kb": handle_select_kb,
        "import_doc": handle_import_doc,
        "list_doc": handle_list_doc,
        "delete_doc": handle_delete_doc,
        "update_doc": handle_update_doc,
        "search": handle_search,
        "export_db": handle_export_db,
        "import_db": handle_import_db,
    }
    handler = handlers.get(args.command)
    success = handler(args) if handler else False

    sys.exit(0 if success else 1)

if __name__ == "__main__":
    main()
+
diff --git a/mcp_center/servers/rag/src/cli/__init__.py b/mcp_center/servers/rag/src/cli/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/mcp_center/servers/rag/src/cli/handle.py b/mcp_center/servers/rag/src/cli/handle.py
new file mode 100644
index 00000000..2872d4fd
--- /dev/null
+++ b/mcp_center/servers/rag/src/cli/handle.py
@@ -0,0 +1,168 @@
+import os
+import sys
+import asyncio
+import json
+from typing import Dict, Any
+
# Make the src/ directory (parent of this cli/ package) and the mcp_center
# root importable so the `from tool import ...` below resolves.
current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
mcp_center_dir = os.path.abspath(os.path.join(current_dir, '../../..'))
for _path in (current_dir, mcp_center_dir):
    if _path not in sys.path:
        sys.path.insert(0, _path)
+
+from tool import (
+ create_knowledge_base,
+ delete_knowledge_base,
+ list_knowledge_bases,
+ select_knowledge_base,
+ import_document,
+ search,
+ list_documents,
+ delete_document,
+ update_document,
+ export_database,
+ import_database
+)
+
def print_result(result: Dict[str, Any]):
    """Pretty-print a tool result dict to stdout.

    Shows a ✅/❌ line with the message, plus indented JSON for any data.
    """
    if result.get("success"):
        print(f"✅ {result.get('message', '操作成功')}")
        if result.get("data"):
            print(json.dumps(result["data"], ensure_ascii=False, indent=2))
    else:
        print(f"❌ {result.get('message', '操作失败')}")
+
def handle_create_kb(args):
    """Create a knowledge base from the parsed CLI arguments."""
    # Both the name and the chunk size are mandatory.
    if not (args.kb_name and args.chunk_size):
        print("❌ 缺少参数:--kb_name 和 --chunk_size 必填")
        return False

    outcome = create_knowledge_base(
        kb_name=args.kb_name,
        chunk_size=args.chunk_size,
        embedding_model=args.embedding_model,
        embedding_endpoint=args.embedding_endpoint,
        embedding_api_key=args.embedding_api_key,
    )
    print_result(outcome)
    return outcome.get("success", False)
+
def handle_delete_kb(args):
    """Delete a knowledge base by name."""
    if not args.kb_name:
        print("❌ 缺少参数:--kb_name 必填")
        return False

    outcome = delete_knowledge_base(args.kb_name)
    print_result(outcome)
    return outcome.get("success", False)
+
def handle_list_kb(args):
    """List every knowledge base (takes no extra arguments)."""
    outcome = list_knowledge_bases()
    print_result(outcome)
    return outcome.get("success", False)
+
def handle_select_kb(args):
    """Select the active knowledge base by name."""
    if not args.kb_name:
        print("❌ 缺少参数:--kb_name 必填")
        return False

    outcome = select_knowledge_base(args.kb_name)
    print_result(outcome)
    return outcome.get("success", False)
+
async def handle_import_doc_async(args):
    """Import one or more documents into the active knowledge base (async)."""
    if not args.file_paths:
        print("❌ 缺少参数:--file_paths 必填(文件路径列表)")
        return False

    outcome = await import_document(
        file_paths=args.file_paths,
        chunk_size=args.chunk_size,
    )
    print_result(outcome)
    return outcome.get("success", False)
+
def handle_import_doc(args):
    """Synchronous CLI wrapper: run the async import handler to completion."""
    return asyncio.run(handle_import_doc_async(args))
+
def handle_list_doc(args):
    """List documents in the active knowledge base (takes no extra arguments)."""
    outcome = list_documents()
    print_result(outcome)
    return outcome.get("success", False)
+
def handle_delete_doc(args):
    """Delete a document from the active knowledge base by name."""
    if not args.doc_name:
        print("❌ 缺少参数:--doc_name 必填")
        return False

    outcome = delete_document(args.doc_name)
    print_result(outcome)
    return outcome.get("success", False)
+
async def handle_update_doc_async(args):
    """Re-chunk an existing document with a new chunk size (async)."""
    if not (args.doc_name and args.chunk_size):
        print("❌ 缺少参数:--doc_name 和 --chunk_size 必填")
        return False

    outcome = await update_document(
        doc_name=args.doc_name,
        chunk_size=args.chunk_size,
    )
    print_result(outcome)
    return outcome.get("success", False)
+
def handle_update_doc(args):
    """Synchronous CLI wrapper: run the async update handler to completion."""
    return asyncio.run(handle_update_doc_async(args))
+
async def handle_search_async(args):
    """Search the active knowledge base (async)."""
    if not args.query:
        print("❌ 缺少参数:--query 必填")
        return False

    outcome = await search(
        query=args.query,
        top_k=args.top_k,
    )
    print_result(outcome)
    return outcome.get("success", False)
+
def handle_search(args):
    """Synchronous CLI wrapper: run the async search handler to completion."""
    return asyncio.run(handle_search_async(args))
+
def handle_export_db(args):
    """Export the kb.db database file to an absolute path."""
    if not args.export_path:
        print("❌ 缺少参数:--export_path 必填(绝对路径)")
        return False

    outcome = export_database(args.export_path)
    print_result(outcome)
    return outcome.get("success", False)
+
def handle_import_db(args):
    """Merge an external .db file into kb.db."""
    if not args.source_db_path:
        print("❌ 缺少参数:--source_db_path 必填(绝对路径)")
        return False

    outcome = import_database(args.source_db_path)
    print_result(outcome)
    return outcome.get("success", False)
+
diff --git a/mcp_center/servers/rag/src/cli/parse_args.py b/mcp_center/servers/rag/src/cli/parse_args.py
new file mode 100644
index 00000000..ae4b3f72
--- /dev/null
+++ b/mcp_center/servers/rag/src/cli/parse_args.py
@@ -0,0 +1,58 @@
+import argparse
+
def parse_args():
    """Build the rag-server CLI argument parser and parse sys.argv.

    Each sub-command carries its own flags; the chosen command name is
    exposed on the ``command`` attribute of the returned Namespace.
    """
    parser = argparse.ArgumentParser(description="rag-server 命令行工具")
    commands = parser.add_subparsers(dest="command", help="可用命令")

    # -- knowledge-base management ------------------------------------
    p = commands.add_parser("create_kb", help="创建知识库")
    p.add_argument("--kb_name", required=True, help="知识库名称")
    p.add_argument("--chunk_size", type=int, required=True, help="chunk大小(token数)")
    p.add_argument("--embedding_model", help="向量化模型名称")
    p.add_argument("--embedding_endpoint", help="向量化服务端点URL")
    p.add_argument("--embedding_api_key", help="向量化服务API Key")

    p = commands.add_parser("delete_kb", help="删除知识库")
    p.add_argument("--kb_name", required=True, help="知识库名称")

    commands.add_parser("list_kb", help="列出所有知识库")

    p = commands.add_parser("select_kb", help="选择知识库")
    p.add_argument("--kb_name", required=True, help="知识库名称")

    # -- document management ------------------------------------------
    p = commands.add_parser("import_doc", help="导入文档")
    p.add_argument("--file_paths", nargs="+", required=True, help="文件路径列表(绝对路径)")
    p.add_argument("--chunk_size", type=int, help="chunk大小(可选,默认使用知识库的chunk_size)")

    commands.add_parser("list_doc", help="列出文档")

    p = commands.add_parser("delete_doc", help="删除文档")
    p.add_argument("--doc_name", required=True, help="文档名称")

    p = commands.add_parser("update_doc", help="更新文档")
    p.add_argument("--doc_name", required=True, help="文档名称")
    p.add_argument("--chunk_size", type=int, required=True, help="新的chunk大小(token数)")

    # -- search & database transfer -----------------------------------
    p = commands.add_parser("search", help="搜索文档")
    p.add_argument("--query", required=True, help="查询文本")
    p.add_argument("--top_k", type=int, help="返回数量(可选,默认5)")

    p = commands.add_parser("export_db", help="导出数据库")
    p.add_argument("--export_path", required=True, help="导出路径(绝对路径)")

    p = commands.add_parser("import_db", help="导入数据库")
    p.add_argument("--source_db_path", required=True, help="源数据库文件路径(绝对路径)")

    return parser.parse_args()
+
diff --git a/mcp_center/servers/rag/src/config.json b/mcp_center/servers/rag/src/config.json
new file mode 100644
index 00000000..776f4c25
--- /dev/null
+++ b/mcp_center/servers/rag/src/config.json
@@ -0,0 +1,48 @@
+{
+ "tools": {
+ "create_knowledge_base": {
+ "zh": "创建一个新的知识库。知识库是文档的容器,每个知识库可以有自己的chunk_size和embedding配置。创建后需要调用select_knowledge_base来选择该知识库才能使用。\n\n参数说明:\n- kb_name:知识库名称(必填,必须唯一)\n- chunk_size:chunk大小,单位token(必填,例如512、1024)\n- embedding_model:向量化模型名称(可选,例如text-embedding-ada-002)\n- embedding_endpoint:向量化服务端点URL(可选)\n- embedding_api_key:向量化服务API Key(可选)\n\n返回值:\n- success:布尔值,表示是否成功\n- message:字符串,描述操作结果\n- data:字典,包含创建结果\n - kb_id:知识库ID\n - kb_name:知识库名称\n - chunk_size:chunk大小",
+ "en": "Create a new knowledge base. A knowledge base is a container for documents, and each knowledge base can have its own chunk_size and embedding configuration. After creation, you need to call select_knowledge_base to select this knowledge base before using it.\n\nParameters:\n- kb_name: Knowledge base name (required, must be unique)\n- chunk_size: Chunk size in tokens (required, e.g., 512, 1024)\n- embedding_model: Embedding model name (optional, e.g., text-embedding-ada-002)\n- embedding_endpoint: Embedding service endpoint URL (optional)\n- embedding_api_key: Embedding service API Key (optional)\n\nReturn value:\n- success: Boolean, indicating whether the operation was successful\n- message: String, describing the operation result\n- data: Dictionary containing creation results\n - kb_id: Knowledge base ID\n - kb_name: Knowledge base name\n - chunk_size: Chunk size"
+ },
+ "delete_knowledge_base": {
+ "zh": "删除指定的知识库。不能删除当前正在使用的知识库。删除知识库会级联删除该知识库下的所有文档和chunks。\n\n参数说明:\n- kb_name:知识库名称(必填)\n\n返回值:\n- success:布尔值,表示是否成功\n- message:字符串,描述操作结果\n- data:字典,包含删除结果\n - kb_name:已删除的知识库名称",
+ "en": "Delete the specified knowledge base. Cannot delete the currently active knowledge base. Deleting a knowledge base will cascade delete all documents and chunks under it.\n\nParameters:\n- kb_name: Knowledge base name (required)\n\nReturn value:\n- success: Boolean, indicating whether the operation was successful\n- message: String, describing the operation result\n- data: Dictionary containing deletion results\n - kb_name: Deleted knowledge base name"
+ },
+ "list_knowledge_bases": {
+ "zh": "列出所有可用的知识库。返回所有知识库的详细信息,包括当前选中的知识库。\n\n参数说明:\n无参数\n\n返回值:\n- success:布尔值,表示是否成功\n- message:字符串,描述操作结果\n- data:字典,包含知识库列表\n - knowledge_bases:知识库列表,每个知识库包含:\n - id:知识库ID\n - name:知识库名称\n - chunk_size:chunk大小\n - embedding_model:向量化模型\n - created_at:创建时间\n - is_current:是否为当前选中的知识库\n - count:知识库数量\n - current_kb_id:当前选中的知识库ID",
+ "en": "List all available knowledge bases. Returns detailed information about all knowledge bases, including the currently selected one.\n\nParameters:\nNo parameters\n\nReturn value:\n- success: Boolean, indicating whether the operation was successful\n- message: String, describing the operation result\n- data: Dictionary containing knowledge base list\n - knowledge_bases: List of knowledge bases, each containing:\n - id: Knowledge base ID\n - name: Knowledge base name\n - chunk_size: Chunk size\n - embedding_model: Embedding model\n - created_at: Creation time\n - is_current: Whether this is the currently selected knowledge base\n - count: Number of knowledge bases\n - current_kb_id: Currently selected knowledge base ID"
+ },
+ "select_knowledge_base": {
+ "zh": "选择一个知识库作为当前使用的知识库。选择后,后续的文档导入、查询等操作都会在该知识库中进行。\n\n参数说明:\n- kb_name:知识库名称(必填)\n\n返回值:\n- success:布尔值,表示是否成功\n- message:字符串,描述操作结果\n- data:字典,包含选择结果\n - kb_id:知识库ID\n - kb_name:知识库名称\n - document_count:该知识库下的文档数量",
+ "en": "Select a knowledge base as the currently active one. After selection, subsequent operations such as document import and search will be performed in this knowledge base.\n\nParameters:\n- kb_name: Knowledge base name (required)\n\nReturn value:\n- success: Boolean, indicating whether the operation was successful\n- message: String, describing the operation result\n- data: Dictionary containing selection results\n - kb_id: Knowledge base ID\n - kb_name: Knowledge base name\n - document_count: Number of documents in this knowledge base"
+ },
+ "import_document": {
+ "zh": "导入文档到当前选中的知识库(支持多文件并发导入)。支持TXT、DOCX、DOC格式。文档会被解析、切分为chunks,并异步批量生成向量存储到数据库中。多个文档会并发处理,提高导入效率。如果文档名称已存在,会自动添加时间戳避免冲突。\n\n参数说明:\n- file_paths:文件路径列表(绝对路径),支持1~n个文件(必填)\n- chunk_size:chunk大小,单位token(可选,默认使用知识库的chunk_size)\n\n返回值:\n- success:布尔值,表示是否成功(只要有文件成功导入即为true)\n- message:字符串,描述操作结果(包含成功和失败的数量)\n- data:字典,包含导入结果\n - total:总文件数\n - success_count:成功导入的文件数\n - failed_count:失败的文件数\n - success_files:成功导入的文件列表,每个包含:\n - file_path:文件路径\n - doc_name:文档名称\n - chunk_count:chunk数量\n - failed_files:失败的文件列表,每个包含:\n - file_path:文件路径\n - error:错误信息",
+ "en": "Import documents into the currently selected knowledge base (supports concurrent import of multiple files). Supports TXT, DOCX, and DOC formats. Documents will be parsed, split into chunks, and vectors will be generated asynchronously in batch and stored in the database. Multiple documents are processed concurrently to improve import efficiency. If the document name already exists, a timestamp will be automatically added to avoid conflicts.\n\nParameters:\n- file_paths: List of file paths (absolute paths), supports 1~n files (required)\n- chunk_size: Chunk size in tokens (optional, defaults to knowledge base's chunk_size)\n\nReturn value:\n- success: Boolean, indicating whether the operation was successful (true if any file was successfully imported)\n- message: String, describing the operation result (includes counts of successful and failed imports)\n- data: Dictionary containing import results\n - total: Total number of files\n - success_count: Number of successfully imported files\n - failed_count: Number of failed files\n - success_files: List of successfully imported files, each containing:\n - file_path: File path\n - doc_name: Document name\n - chunk_count: Number of chunks\n - failed_files: List of failed files, each containing:\n - file_path: File path\n - error: Error message"
+ },
+ "search": {
+ "zh": "在当前选中的知识库中进行混合检索。结合关键词检索(FTS5)和向量检索(sqlite-vec),使用加权方式合并结果(关键词权重0.3,向量权重0.7),去重后使用Jaccard相似度重排序,返回最相关的top-k个结果。\n\n参数说明:\n- query:查询文本(必填)\n- top_k:返回数量(可选,默认从配置读取,通常为5)\n\n返回值:\n- success:布尔值,表示是否成功\n- message:字符串,描述检索结果\n- data:字典,包含检索结果\n - chunks:chunk列表,每个chunk包含:\n - id:chunk ID\n - doc_id:文档ID\n - content:chunk内容\n - tokens:token数量\n - chunk_index:chunk索引\n - doc_name:文档名称\n - score:综合检索分数\n - count:结果数量",
+ "en": "Perform hybrid search in the currently selected knowledge base. Combines keyword search (FTS5) and vector search (sqlite-vec), merges results using weighted approach (keyword weight 0.3, vector weight 0.7), deduplicates, reranks using Jaccard similarity, and returns the top-k most relevant results.\n\nParameters:\n- query: Query text (required)\n- top_k: Number of results to return (optional, default from config, usually 5)\n\nReturn value:\n- success: Boolean, indicating whether the search was successful\n- message: String, describing the search result\n- data: Dictionary containing search results\n - chunks: List of chunks, each containing:\n - id: Chunk ID\n - doc_id: Document ID\n - content: Chunk content\n - tokens: Number of tokens\n - chunk_index: Chunk index\n - doc_name: Document name\n - score: Combined search score\n - count: Number of results"
+ },
+ "list_documents": {
+ "zh": "查看当前选中的知识库下的所有文档列表。返回文档的详细信息。\n\n参数说明:\n无参数\n\n返回值:\n- success:布尔值,表示是否成功\n- message:字符串,描述操作结果\n- data:字典,包含文档列表\n - documents:文档列表,每个文档包含:\n - id:文档ID\n - name:文档名称\n - file_path:文件路径\n - file_type:文件类型\n - chunk_size:chunk大小\n - created_at:创建时间\n - updated_at:更新时间\n - count:文档数量",
+ "en": "List all documents in the currently selected knowledge base. Returns detailed information about the documents.\n\nParameters:\nNo parameters\n\nReturn value:\n- success: Boolean, indicating whether the operation was successful\n- message: String, describing the operation result\n- data: Dictionary containing document list\n - documents: List of documents, each containing:\n - id: Document ID\n - name: Document name\n - file_path: File path\n - file_type: File type\n - chunk_size: Chunk size\n - created_at: Creation time\n - updated_at: Update time\n - count: Number of documents"
+ },
+ "delete_document": {
+ "zh": "删除当前选中的知识库下的指定文档。删除文档会级联删除该文档的所有chunks。\n\n参数说明:\n- doc_name:文档名称(必填)\n\n返回值:\n- success:布尔值,表示是否成功\n- message:字符串,描述操作结果\n- data:字典,包含删除结果\n - doc_name:已删除的文档名称",
+ "en": "Delete the specified document from the currently selected knowledge base. Deleting a document will cascade delete all chunks of that document.\n\nParameters:\n- doc_name: Document name (required)\n\nReturn value:\n- success: Boolean, indicating whether the operation was successful\n- message: String, describing the operation result\n- data: Dictionary containing deletion results\n - doc_name: Deleted document name"
+ },
+ "update_document": {
+ "zh": "修改文档的chunk_size并重新解析文档。会删除原有的chunks,使用新的chunk_size重新切分文档,并异步批量生成新的向量。\n\n参数说明:\n- doc_name:文档名称(必填)\n- chunk_size:新的chunk大小,单位token(必填)\n\n返回值:\n- success:布尔值,表示是否成功\n- message:字符串,描述操作结果\n- data:字典,包含修改结果\n - doc_id:文档ID\n - doc_name:文档名称\n - chunk_count:新的chunk数量\n - chunk_size:新的chunk大小",
+ "en": "Update the document's chunk_size and re-parse the document. Will delete existing chunks, re-split the document using the new chunk_size, and asynchronously generate new vectors in batch.\n\nParameters:\n- doc_name: Document name (required)\n- chunk_size: New chunk size in tokens (required)\n\nReturn value:\n- success: Boolean, indicating whether the operation was successful\n- message: String, describing the operation result\n- data: Dictionary containing update results\n - doc_id: Document ID\n - doc_name: Document name\n - chunk_count: New number of chunks\n - chunk_size: New chunk size"
+ },
+ "export_database": {
+ "zh": "导出整个kb.db数据库文件到指定路径。\n\n参数说明:\n- export_path:导出路径(绝对路径,必填)\n\n返回值:\n- success:布尔值,表示是否成功\n- message:字符串,描述操作结果\n- data:字典,包含导出结果\n - source_path:源数据库路径\n - export_path:导出路径",
+ "en": "Export the entire kb.db database file to the specified path.\n\nParameters:\n- export_path: Export path (absolute path, required)\n\nReturn value:\n- success: Boolean, indicating whether the operation was successful\n- message: String, describing the operation result\n- data: Dictionary containing export results\n - source_path: Source database path\n - export_path: Export path"
+ },
+ "import_database": {
+ "zh": "导入一个.db数据库文件,将其中的内容合并到kb.db中。导入时会自动处理重名冲突,为知识库和文档名称添加时间戳。\n\n参数说明:\n- source_db_path:源数据库文件路径(绝对路径,必填)\n\n返回值:\n- success:布尔值,表示是否成功\n- message:字符串,描述操作结果\n- data:字典,包含导入结果\n - source_path:源数据库路径\n - imported_kb_count:导入的知识库数量\n - imported_doc_count:导入的文档数量",
+ "en": "Import a .db database file and merge its contents into kb.db. Import will automatically handle name conflicts by adding timestamps to knowledge base and document names.\n\nParameters:\n- source_db_path: Source database file path (absolute path, required)\n\nReturn value:\n- success: Boolean, indicating whether the operation was successful\n- message: String, describing the operation result\n- data: Dictionary containing import results\n - source_path: Source database path\n - imported_kb_count: Number of imported knowledge bases\n - imported_doc_count: Number of imported documents"
+ }
+ }
+}
diff --git a/mcp_center/servers/rag/src/rag_config.json b/mcp_center/servers/rag/src/rag_config.json
new file mode 100644
index 00000000..4bed2cec
--- /dev/null
+++ b/mcp_center/servers/rag/src/rag_config.json
@@ -0,0 +1,20 @@
+{
+ "embedding": {
+ "type": "openai",
+ "api_key": "",
+ "endpoint": "https://dashscope.aliyuncs.com/compatible-mode/v1/embeddings",
+ "model_name": "text-embedding-v4",
+ "timeout": 30,
+ "vector_dimension": 1024
+ },
+ "token": {
+ "model": "gpt-4",
+ "max_tokens": 8192,
+ "default_chunk_size": 1024
+ },
+ "search": {
+ "default_top_k": 5,
+ "max_top_k": 100
+ }
+}
+
diff --git a/mcp_center/servers/rag/src/requirements.txt b/mcp_center/servers/rag/src/requirements.txt
new file mode 100644
index 00000000..8b429535
--- /dev/null
+++ b/mcp_center/servers/rag/src/requirements.txt
@@ -0,0 +1,6 @@
+python-docx==1.2.0
+chardet==5.2.0
+jieba==0.42.1
+aiohttp==3.11.10
+sqlite-vec==0.1.6
+PyMuPDF==1.26.6
\ No newline at end of file
diff --git a/mcp_center/servers/rag/src/server.py b/mcp_center/servers/rag/src/server.py
new file mode 100644
index 00000000..e85fcd55
--- /dev/null
+++ b/mcp_center/servers/rag/src/server.py
@@ -0,0 +1,176 @@
+"""
+MCP Server for Copilot-0 Knowledge Base Management
+将 copilot-0 项目启动为 MCP 服务
+"""
+import os
+import sys
+import json
+from typing import Optional, Dict, Any, List
+from mcp.server import FastMCP
+
# Make this script's own directory importable (appended, as in the original)
# and prepend the mcp_center root so config modules can be imported.
current_dir = os.path.dirname(os.path.abspath(__file__))
mcp_center_dir = os.path.abspath(os.path.join(current_dir, '../../..'))
if current_dir not in sys.path:
    sys.path.append(current_dir)
if mcp_center_dir not in sys.path:
    sys.path.insert(0, mcp_center_dir)
+
+# 导入配置加载器
+from config.public.base_config_loader import LanguageEnum
+from config.private.rag.config_loader import RemoteInfoConfig as RagConfig
+
+# 导入 tool.py 中的所有函数
+from tool import (
+ create_knowledge_base,
+ delete_knowledge_base,
+ list_knowledge_bases,
+ select_knowledge_base,
+ import_document,
+ search,
+ list_documents,
+ delete_document,
+ update_document,
+ export_database,
+ import_database
+)
+
# Load the per-tool description strings (zh/en) shipped next to this file.
config_path = os.path.join(current_dir, "config.json")
with open(config_path, 'r', encoding='utf-8') as f:
    tool_configs = json.load(f)["tools"]

# Read the configured display language from the shared RAG config.
_config = RagConfig().get_config()
_language = _config.public_config.language
+
def get_tool_description(tool_name: str) -> str:
    """Return the description for *tool_name* in the configured language.

    Falls back to the other language, and finally to "", when the
    preferred entry is missing from config.json.
    """
    entry = tool_configs.get(tool_name, {})
    preferred, fallback = ("zh", "en") if _language == LanguageEnum.ZH else ("en", "zh")
    return entry.get(preferred, entry.get(fallback, ""))
+
# Build the MCP server. Reuse the _config object already loaded above
# instead of instantiating RagConfig a second time (the original fetched
# the config twice for no benefit).
port = _config.private_config.port  # configured service port (12311)
host = "0.0.0.0"  # NOTE(review): binds on all interfaces — confirm this exposure is intended

mcp = FastMCP("Copilot-0 Knowledge Base MCP Server", host=host, port=port)
+
# Synchronous tool registrations: each wrapper only forwards to tool.py.
@mcp.tool(
    name="create_knowledge_base",
    description=get_tool_description("create_knowledge_base")
)
def mcp_create_knowledge_base(
    kb_name: str,
    chunk_size: int,
    embedding_model: Optional[str] = None,
    embedding_endpoint: Optional[str] = None,
    embedding_api_key: Optional[str] = None
) -> Dict[str, Any]:
    """Create a knowledge base (delegates to tool.create_knowledge_base)."""
    return create_knowledge_base(kb_name, chunk_size, embedding_model, embedding_endpoint, embedding_api_key)
+
+
@mcp.tool(
    name="delete_knowledge_base",
    description=get_tool_description("delete_knowledge_base")
)
def mcp_delete_knowledge_base(kb_name: str) -> Dict[str, Any]:
    """Delete a knowledge base by name (delegates to tool.delete_knowledge_base)."""
    return delete_knowledge_base(kb_name)
+
+
@mcp.tool(
    name="list_knowledge_bases",
    description=get_tool_description("list_knowledge_bases")
)
def mcp_list_knowledge_bases() -> Dict[str, Any]:
    """List all knowledge bases (delegates to tool.list_knowledge_bases)."""
    return list_knowledge_bases()
+
+
@mcp.tool(
    name="select_knowledge_base",
    description=get_tool_description("select_knowledge_base")
)
def mcp_select_knowledge_base(kb_name: str) -> Dict[str, Any]:
    """Select the active knowledge base (delegates to tool.select_knowledge_base)."""
    return select_knowledge_base(kb_name)
+
+
@mcp.tool(
    name="list_documents",
    description=get_tool_description("list_documents")
)
def mcp_list_documents() -> Dict[str, Any]:
    """List documents in the active knowledge base (delegates to tool.list_documents)."""
    return list_documents()
+
+
@mcp.tool(
    name="delete_document",
    description=get_tool_description("delete_document")
)
def mcp_delete_document(doc_name: str) -> Dict[str, Any]:
    """Delete a document by name (delegates to tool.delete_document)."""
    return delete_document(doc_name)
+
+
@mcp.tool(
    name="export_database",
    description=get_tool_description("export_database")
)
def mcp_export_database(export_path: str) -> Dict[str, Any]:
    """Export the kb.db file to export_path (delegates to tool.export_database)."""
    return export_database(export_path)
+
+
@mcp.tool(
    name="import_database",
    description=get_tool_description("import_database")
)
def mcp_import_database(source_db_path: str) -> Dict[str, Any]:
    """Merge an external .db file into kb.db (delegates to tool.import_database)."""
    return import_database(source_db_path)
+
+
# Asynchronous tool registrations.
@mcp.tool(
    name="import_document",
    description=get_tool_description("import_document")
)
async def mcp_import_document(file_paths: List[str], chunk_size: Optional[int] = None) -> Dict[str, Any]:
    """Import documents concurrently (async, delegates to tool.import_document)."""
    return await import_document(file_paths, chunk_size)
+
+
@mcp.tool(
    name="search",
    description=get_tool_description("search")
)
async def mcp_search(query: str, top_k: Optional[int] = None) -> Dict[str, Any]:
    """Hybrid keyword + vector search (async, delegates to tool.search)."""
    return await search(query, top_k)
+
+
@mcp.tool(
    name="update_document",
    description=get_tool_description("update_document")
)
async def mcp_update_document(doc_name: str, chunk_size: int) -> Dict[str, Any]:
    """Re-chunk a document with a new chunk_size (async, delegates to tool.update_document)."""
    return await update_document(doc_name, chunk_size)
+
+
if __name__ == "__main__":
    # Start the MCP server over SSE (HTTP) transport on the host:port
    # configured above. (The original comment claimed stdio; the code uses SSE.)
    mcp.run(transport='sse')
+
diff --git a/mcp_center/servers/rag/src/tool.py b/mcp_center/servers/rag/src/tool.py
new file mode 100644
index 00000000..87243315
--- /dev/null
+++ b/mcp_center/servers/rag/src/tool.py
@@ -0,0 +1,632 @@
+import os
+import sys
+import uuid
+import shutil
+import logging
+import asyncio
+import json
+from typing import Optional, Dict, Any, List
+
# Make this file's directory importable so the local `base` package resolves.
current_dir = os.path.dirname(os.path.abspath(__file__))
if current_dir not in sys.path:
    sys.path.append(current_dir)
+
+from base.manager.database_manager import Database
+from base.manager.document_manager import DocumentManager, import_document as _import_document, update_document as _update_document
+from base.config import get_default_top_k
+from base.models import KnowledgeBase
+from base.search.weighted_keyword_and_vector_search import weighted_keyword_and_vector_search
+
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(filename)s:%(lineno)d - %(message)s'
)
logger = logging.getLogger(__name__)

# Module-level state: one lazily-created shared Database handle, the id of
# the currently selected knowledge base, and a small JSON state file that
# persists that selection across processes.
_db_instance: Optional[Database] = None
_db_path = os.path.join(current_dir, "database", "kb.db")
_current_kb_id: Optional[str] = None
_state_file = os.path.join(current_dir, "database", "state.json")
+
+
def _load_state() -> None:
    """Restore the previously selected knowledge-base id from the state file.

    The selection is shared across processes via a small JSON file; any
    read or parse failure is logged and the in-memory state left untouched.
    """
    global _current_kb_id
    try:
        if os.path.exists(_state_file):
            with open(_state_file, "r", encoding="utf-8") as fh:
                _current_kb_id = json.load(fh).get("current_kb_id")
    except Exception as e:
        logger.warning(f"[state] 加载当前知识库状态失败: {e}")
+
+
def _save_state() -> None:
    """Persist the current knowledge-base id to the shared state file.

    Best effort: failures are logged, never raised.
    """
    try:
        # exist_ok makes the pre-existence check unnecessary.
        os.makedirs(os.path.dirname(_state_file), exist_ok=True)
        with open(_state_file, "w", encoding="utf-8") as fh:
            json.dump({"current_kb_id": _current_kb_id}, fh, ensure_ascii=False)
    except Exception as e:
        logger.warning(f"[state] 保存当前知识库状态失败: {e}")


# Restore any previously selected knowledge base as soon as the module loads.
_load_state()
+
+
def _get_db() -> Database:
    """Lazily create and cache the Database bound to the fixed kb.db path."""
    global _db_instance
    if _db_instance is None:
        # Make sure the database directory exists before opening the file.
        os.makedirs(os.path.dirname(_db_path), exist_ok=True)
        _db_instance = Database(_db_path)
    return _db_instance
+
+
def _ensure_active_kb(result: Dict[str, Any]) -> Optional[str]:
    """Return the selected knowledge-base id, or None with an error message set."""
    if _current_kb_id:
        return _current_kb_id
    result["message"] = "请先选择知识库"
    return None
+
+
def create_knowledge_base(
    kb_name: str,
    chunk_size: int,
    embedding_model: Optional[str] = None,
    embedding_endpoint: Optional[str] = None,
    embedding_api_key: Optional[str] = None
) -> Dict[str, Any]:
    """Create a new knowledge base.

    :param kb_name: unique knowledge-base name
    :param chunk_size: chunk size in tokens
    :param embedding_model: embedding model name (optional)
    :param embedding_endpoint: embedding service endpoint (optional)
    :param embedding_api_key: embedding service API key (optional)
    :return: dict with ``success`` / ``message`` / ``data``
    """
    result = {"success": False, "message": "", "data": {}}

    try:
        db = _get_db()
        # NOTE(review): this session is acquired and closed but never used
        # below — confirm whether it can be dropped.
        session = db.get_session()
        try:
            # Knowledge-base names must be unique.
            if db.get_knowledge_base(kb_name):
                result["message"] = f"知识库 '{kb_name}' 已存在"
                return result

            kb_id = str(uuid.uuid4())
            created = db.add_knowledge_base(kb_id, kb_name, chunk_size,
                                            embedding_model, embedding_endpoint, embedding_api_key)
            if created:
                result["success"] = True
                result["message"] = f"成功创建知识库: {kb_name}"
                result["data"] = {
                    "kb_id": kb_id,
                    "kb_name": kb_name,
                    "chunk_size": chunk_size,
                }
            else:
                result["message"] = "创建知识库失败"
        finally:
            session.close()
    except Exception as e:
        logger.exception(f"[create_knowledge_base] 创建知识库失败: {e}")
        result["message"] = "创建知识库失败"

    return result
+
+
def delete_knowledge_base(kb_name: str) -> Dict[str, Any]:
    """Delete a knowledge base by name.

    Refuses to delete the currently active knowledge base.

    :param kb_name: knowledge-base name
    :return: dict with ``success`` / ``message`` / ``data``
    """
    result = {"success": False, "message": "", "data": {}}

    try:
        db = _get_db()
        kb = db.get_knowledge_base(kb_name)
        if not kb:
            result["message"] = f"知识库 '{kb_name}' 不存在"
            return result

        # Deleting the active KB would leave the module selection dangling.
        global _current_kb_id
        if _current_kb_id == kb.id:
            result["message"] = "不能删除当前正在使用的知识库"
            return result

        if db.delete_knowledge_base(kb.id):
            result["success"] = True
            result["message"] = f"成功删除知识库: {kb_name}"
            result["data"] = {"kb_name": kb_name}
        else:
            result["message"] = "删除知识库失败"
    except Exception as e:
        logger.exception(f"[delete_knowledge_base] 删除知识库失败: {e}")
        result["message"] = "删除知识库失败"

    return result
+
+
def list_knowledge_bases() -> Dict[str, Any]:
    """List all knowledge bases, flagging the currently selected one.

    :return: dict with ``success`` / ``message`` / ``data`` where data
        holds the knowledge-base list, count, and current_kb_id.
    """
    result = {"success": False, "message": "", "data": {}}

    try:
        db = _get_db()
        global _current_kb_id
        knowledge_bases = [
            {
                "id": kb.id,
                "name": kb.name,
                "chunk_size": kb.chunk_size,
                "embedding_model": kb.embedding_model,
                "created_at": kb.created_at.isoformat() if kb.created_at else None,
                "is_current": _current_kb_id == kb.id,
            }
            for kb in db.list_knowledge_bases()
        ]

        result["success"] = True
        result["message"] = f"找到 {len(knowledge_bases)} 个知识库"
        result["data"] = {
            "knowledge_bases": knowledge_bases,
            "count": len(knowledge_bases),
            "current_kb_id": _current_kb_id,
        }
    except Exception as e:
        logger.exception(f"[list_knowledge_bases] 获取知识库列表失败: {e}")
        result["message"] = "获取知识库列表失败"

    return result
+
+
def select_knowledge_base(kb_name: str) -> Dict[str, Any]:
    """Make the named knowledge base the active one and persist the choice.

    :param kb_name: knowledge-base name
    :return: dict with ``success`` / ``message`` / ``data`` (kb_id,
        kb_name, document_count)
    """
    result = {"success": False, "message": "", "data": {}}

    try:
        db = _get_db()
        kb = db.get_knowledge_base(kb_name)
        if not kb:
            result["message"] = f"知识库 '{kb_name}' 不存在"
            return result

        global _current_kb_id
        _current_kb_id = kb.id
        _save_state()  # share the selection with other processes

        # Count the documents in the newly selected knowledge base.
        session = db.get_session()
        try:
            doc_count = len(DocumentManager(session).list_documents_by_kb(kb.id))
        finally:
            session.close()

        result["success"] = True
        result["message"] = f"成功选择知识库,共 {doc_count} 个文档"
        result["data"] = {
            "kb_id": kb.id,
            "kb_name": kb.name,
            "document_count": doc_count,
        }
    except Exception as e:
        logger.exception(f"[select_knowledge_base] 选择知识库失败: {e}")
        result["message"] = "选择知识库失败"

    return result
+
+
+async def import_document(file_paths: List[str], chunk_size: Optional[int] = None) -> Dict[str, Any]:
+    """
+    Import documents into the currently selected knowledge base
+    (async; multiple files are imported concurrently).
+
+    :param file_paths: list of absolute file paths (1..n files)
+    :param chunk_size: chunk size in tokens (optional; defaults to the
+        knowledge base's own ``chunk_size``)
+    :return: result dict with a per-file success/failure breakdown
+    """
+    result = {
+        "success": False,
+        "message": "",
+        "data": {}
+    }
+
+    try:
+        kb_id = _ensure_active_kb(result)
+        if not kb_id:
+            return result
+
+        if not file_paths:
+            result["message"] = "文件路径列表为空"
+            return result
+
+        # Reject the whole batch up front if any path does not exist.
+        invalid_paths = [path for path in file_paths if not os.path.exists(path)]
+        if invalid_paths:
+            result["message"] = f"以下文件路径不存在: {', '.join(invalid_paths)}"
+            return result
+
+        db = _get_db()
+        # Fetch the knowledge base first to resolve the default chunk_size.
+        session = db.get_session()
+        try:
+            kb = session.query(KnowledgeBase).filter_by(id=kb_id).first()
+            if not kb:
+                result["message"] = "知识库不存在"
+                return result
+
+            if chunk_size is None:
+                chunk_size = kb.chunk_size
+        finally:
+            session.close()
+
+        # Process the files concurrently; each file gets its own session.
+        async def import_single_file(file_path: str):
+            """Create a dedicated session for one file and import it."""
+            file_session = db.get_session()
+            try:
+                return await _import_document(file_session, kb_id, file_path, chunk_size)
+            finally:
+                file_session.close()
+
+        tasks = [
+            import_single_file(file_path)
+            for file_path in file_paths
+        ]
+        # return_exceptions=True: one failing file must not abort the batch.
+        results = await asyncio.gather(*tasks, return_exceptions=True)
+
+        # Aggregate per-file outcomes.
+        success_count = 0
+        failed_count = 0
+        success_files = []
+        failed_files = []
+
+        for i, res in enumerate(results):
+            file_path = file_paths[i]
+            if isinstance(res, Exception):
+                failed_count += 1
+                failed_files.append({
+                    "file_path": file_path,
+                    "error": str(res)
+                })
+                logger.exception(f"[import_document] 导入文件失败: {file_path}, 错误: {res}")
+            else:
+                success, message, data = res
+                if success:
+                    success_count += 1
+                    success_files.append({
+                        "file_path": file_path,
+                        "doc_name": data.get("doc_name") if data else os.path.basename(file_path),
+                        "chunk_count": data.get("chunk_count", 0) if data else 0
+                    })
+                else:
+                    failed_count += 1
+                    failed_files.append({
+                        "file_path": file_path,
+                        "error": message
+                    })
+
+        # Any partial success counts as overall success.
+        result["success"] = success_count > 0
+        result["message"] = f"成功导入 {success_count} 个文档,失败 {failed_count} 个"
+        result["data"] = {
+            "total": len(file_paths),
+            "success_count": success_count,
+            "failed_count": failed_count,
+            "success_files": success_files,
+            "failed_files": failed_files
+        }
+    except Exception as e:
+        logger.exception(f"[import_document] 导入文档失败: {e}")
+        result["message"] = f"导入文档失败: {str(e)}"
+
+    return result
+
+
+async def search(query: str, top_k: Optional[int] = None) -> Dict[str, Any]:
+ """
+ 在当前知识库中查询(异步)
+
+ :param query: 查询文本
+ :param top_k: 返回数量(可选,默认从配置读取)
+ :return: 检索结果
+ """
+ result = {
+ "success": False,
+ "message": "",
+ "data": {}
+ }
+
+ if top_k is None:
+ top_k = get_default_top_k()
+
+ kb_id = _ensure_active_kb(result)
+ if not kb_id:
+ return result
+
+ weight_keyword = 0.3
+ weight_vector = 0.7
+
+ try:
+ db = _get_db()
+ session = db.get_session()
+ try:
+ # 获取当前知识库的所有文档ID
+ manager = DocumentManager(session)
+ docs = manager.list_documents_by_kb(kb_id)
+ doc_ids = [doc.id for doc in docs]
+
+ if not doc_ids:
+ result["message"] = "当前知识库中没有文档"
+ result["data"] = {"chunks": []}
+ return result
+
+ conn = session.connection()
+ chunks = await weighted_keyword_and_vector_search(
+ conn, query, top_k, weight_keyword, weight_vector, doc_ids
+ )
+ finally:
+ session.close()
+
+ if not chunks:
+ result["message"] = "未找到相关结果"
+ result["data"] = {"chunks": []}
+ return result
+
+ result["success"] = True
+ result["message"] = f"找到 {len(chunks)} 个相关结果"
+ result["data"] = {
+ "chunks": chunks,
+ "count": len(chunks)
+ }
+ except Exception as e:
+ logger.exception(f"[search] 搜索失败: {e}")
+ result["message"] = "搜索失败"
+
+ return result
+
+
+def list_documents() -> Dict[str, Any]:
+ """
+ 查看当前知识库下的文档列表
+
+ :return: 文档列表
+ """
+ result = {
+ "success": False,
+ "message": "",
+ "data": {}
+ }
+
+ try:
+ kb_id = _ensure_active_kb(result)
+ if not kb_id:
+ return result
+
+ db = _get_db()
+ session = db.get_session()
+ try:
+ manager = DocumentManager(session)
+ docs = manager.list_documents_by_kb(kb_id)
+ finally:
+ session.close()
+
+ documents = []
+ for doc in docs:
+ documents.append({
+ "id": doc.id,
+ "name": doc.name,
+ "file_path": doc.file_path,
+ "file_type": doc.file_type,
+ "chunk_size": doc.chunk_size,
+ "created_at": doc.created_at.isoformat() if doc.created_at else None,
+ "updated_at": doc.updated_at.isoformat() if doc.updated_at else None
+ })
+
+ result["success"] = True
+ result["message"] = f"找到 {len(documents)} 个文档"
+ result["data"] = {
+ "documents": documents,
+ "count": len(documents)
+ }
+ except Exception as e:
+ logger.exception(f"[list_documents] 获取文档列表失败: {e}")
+ result["message"] = "获取文档列表失败"
+
+ return result
+
+
+def delete_document(doc_name: str) -> Dict[str, Any]:
+ """
+ 删除当前知识库下的文档
+
+ :param doc_name: 文档名称
+ :return: 删除结果
+ """
+ result = {
+ "success": False,
+ "message": "",
+ "data": {}
+ }
+
+ try:
+ kb_id = _ensure_active_kb(result)
+ if not kb_id:
+ return result
+
+ db = _get_db()
+ session = db.get_session()
+ try:
+ manager = DocumentManager(session)
+ if manager.delete_document(kb_id, doc_name):
+ result["success"] = True
+ result["message"] = f"成功删除文档: {doc_name}"
+ result["data"] = {"doc_name": doc_name}
+ else:
+ result["message"] = f"文档 '{doc_name}' 不存在或删除失败"
+ finally:
+ session.close()
+ except Exception as e:
+ logger.exception(f"[delete_document] 删除文档失败: {e}")
+ result["message"] = "删除文档失败"
+
+ return result
+
+
+async def update_document(doc_name: str, chunk_size: int) -> Dict[str, Any]:
+ """
+ 修改文档的chunk_size并重新解析(异步)
+
+ :param doc_name: 文档名称
+ :param chunk_size: 新的chunk大小
+ :return: 修改结果
+ """
+ result = {
+ "success": False,
+ "message": "",
+ "data": {}
+ }
+
+ try:
+ kb_id = _ensure_active_kb(result)
+ if not kb_id:
+ return result
+
+ db = _get_db()
+ session = db.get_session()
+ try:
+ success, message, data = await _update_document(session, kb_id, doc_name, chunk_size)
+ result["success"] = success
+ result["message"] = message
+ result["data"] = data or {}
+ finally:
+ session.close()
+ except Exception as e:
+ logger.exception(f"[update_document] 修改文档失败: {e}")
+ result["message"] = "修改文档失败"
+
+ return result
+
+
+def export_database(export_path: str) -> Dict[str, Any]:
+ """
+ 导出整个kb.db数据库文件
+
+ :param export_path: 导出路径(绝对路径)
+ :return: 导出结果
+ """
+ result = {
+ "success": False,
+ "message": "",
+ "data": {}
+ }
+
+ try:
+ if not os.path.exists(_db_path):
+ result["message"] = "数据库文件不存在"
+ return result
+
+ if not export_path:
+ result["message"] = "导出路径不能为空"
+ return result
+
+ # 确保导出路径以 .db 结尾
+ if not export_path.endswith(('.db', '.sqlite', '.sqlite3')):
+ export_path += '.db'
+
+ # 确保目标目录存在
+ export_dir = os.path.dirname(export_path)
+ if export_dir and not os.path.exists(export_dir):
+ os.makedirs(export_dir, exist_ok=True)
+
+ shutil.copy2(_db_path, export_path)
+
+ result["success"] = True
+ result["message"] = f"成功导出数据库到: {export_path}"
+ result["data"] = {
+ "source_path": _db_path,
+ "export_path": export_path
+ }
+ except Exception as e:
+ logger.exception(f"[export_database] 导出数据库失败: {e}")
+ result["message"] = f"导出数据库失败: {str(e)}"
+
+ return result
+
+
+def import_database(source_db_path: str) -> Dict[str, Any]:
+ """
+ 导入一个.db数据库文件,将其中的内容合并到kb.db中
+
+ :param source_db_path: 源数据库文件路径(绝对路径)
+ :return: 导入结果
+ """
+ result = {
+ "success": False,
+ "message": "",
+ "data": {}
+ }
+
+ try:
+ if not source_db_path:
+ result["message"] = "源数据库路径不能为空"
+ return result
+
+ if not os.path.exists(source_db_path):
+ result["message"] = f"源数据库文件不存在: {source_db_path}"
+ return result
+
+ db = _get_db()
+ imported_kb_count, imported_doc_count = db.import_database(source_db_path)
+
+ result["success"] = True
+ result["message"] = f"成功导入,共 {imported_kb_count} 个知识库,{imported_doc_count} 个文档"
+ result["data"] = {
+ "source_path": source_db_path,
+ "imported_kb_count": imported_kb_count,
+ "imported_doc_count": imported_doc_count
+ }
+ except Exception as e:
+ logger.exception(f"[import_database] 导入数据库失败: {e}")
+ result["message"] = f"导入数据库失败: {str(e)}"
+
+ return result
diff --git a/mcp_center/service/rag.service b/mcp_center/service/rag.service
new file mode 100644
index 00000000..f372512c
--- /dev/null
+++ b/mcp_center/service/rag.service
@@ -0,0 +1,20 @@
+[Unit]
+Description=Rag Server for MCP Center
+After=network.target
+
+[Service]
+User=root
+WorkingDirectory=/usr/lib/euler-copilot-framework/mcp_center
+# Important: set PYTHONPATH so the working directory is on the module search path
+Environment=PYTHONUNBUFFERED=1
+Environment=PYTHONPATH=/usr/lib/euler-copilot-framework/mcp_center
+ExecStart=/usr/bin/python3 servers/rag/src/server.py
+Restart=always
+RestartSec=5
+KillMode=control-group
+KillSignal=SIGTERM
+TimeoutStopSec=30
+SuccessExitStatus=143
+
+[Install]
+WantedBy=multi-user.target
--
Gitee