diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..e69de29 diff --git a/README.md b/README.md index d6d8a73..b78608d 100644 --- a/README.md +++ b/README.md @@ -83,15 +83,14 @@ uv --directory . run qiniu-mcp-server --transport sse --port 8000 ## 开发 扩展功能,首先在 core 目录下新增一个业务目录(eg: 存储 -> storage),在此业务目录下完成功能拓展。 -在业务目录下新建一个 `loader.py` 文件,在此文件中定义 load 函数用于注册业务工具或者资源,最后在 `core` 目录下的 `__init__.py` +在业务包目录下的 `__init__.py` 文件中定义 load 函数用于注册业务工具或者资源,最后在 `core` 目录下的 `__init__.py` 中调用此 load 函数完成工具和资源的注册。 ```shell core ├── __init__.py # 各个业务工具或者资源加载 └── storage # 存储业务目录 - ├── __init__.py - ├── loader.py # 加载存储工具或者资源 + ├── __init__.py # 加载存储工具或者资源 ├── resource.py # 存储资源扩展 ├── storage.py # 存储工具类 └── tools.py # 存储工具扩展 @@ -139,6 +138,8 @@ npx @modelcontextprotocol/inspector uv --directory . run qiniu-mcp-server - 列举 qiniu 中所有的 bucket - 列举 qiniu 中 xxx bucket 的文件 - 读取 qiniu xxx bucket 中 yyy 的文件内容 + - 对 qiniu xxx bucket 中 yyy 的图片切个宽200像素的圆角 + - 刷新下 qiniu 的这个 cdn 链接:https://developer.qiniu.com/test.txt diff --git a/mcp_server/core/__init__.py b/mcp_server/core/__init__.py index e294ce4..e7dc170 100644 --- a/mcp_server/core/__init__.py +++ b/mcp_server/core/__init__.py @@ -2,15 +2,19 @@ from .storage import load as load_storage from .media_processing import load as load_media_processing from .cdn import load as load_cdn +from .version import load as load_version def load(): # 加载配置 cfg = config.load_config() + # 版本 + load_version(cfg) # 存储业务 load_storage(cfg) # CDN load_cdn(cfg) # 智能多媒体 load_media_processing(cfg) + diff --git a/mcp_server/core/storage/storage.py b/mcp_server/core/storage/storage.py index 842e1c5..88d1697 100644 --- a/mcp_server/core/storage/storage.py +++ b/mcp_server/core/storage/storage.py @@ -35,8 +35,8 @@ def __init__(self, cfg: config.Config = None): self.bucket_manager = qiniu.BucketManager(self.auth, preferred_scheme="https") def get_object_url( - self, bucket: str, key: str, disable_ssl: bool = False, expires: int = 3600 - ) -> list[str]: 
+ self, bucket: str, key: str, disable_ssl: bool = False, expires: int = 3600 + ) -> list[dict[str, Any]]: """ 获取对象 :param disable_ssl: @@ -47,7 +47,8 @@ def get_object_url( 返回对象信息 """ # 获取下载域名 - domains_list, domain_response = self.bucket_manager.bucket_domain(bucket) + domains_getter = getattr(self.bucket_manager, "_BucketManager__uc_do_with_retrier") + domains_list, domain_response = domains_getter('/v3/domains?tbl={0}'.format(bucket)) if domain_response.status_code != 200: raise Exception( f"get bucket domain error:{domain_response.exception} reqId:{domain_response.req_id}" ) @@ -59,7 +60,21 @@ def get_object_url( http_schema = "https" if not disable_ssl else "http" - object_public_urls = {f"{http_schema}://{url}/{key}" for url in domains_list} + object_public_urls = [] + for domain in domains_list: + # 被冻结 + freeze_types = domain.get("freeze_types") + if freeze_types is not None: + continue + + domain_url = domain.get("domain") + if domain_url is None: + continue + + object_public_urls.append({ + "object_url": f"{http_schema}://{domain_url}/{key}", + "domain_type": "cdn" if domain.get("domaintype") is None or domain.get("domaintype") == 0 else "origin" + }) object_urls = [] bucket_info, bucket_info_response = self.bucket_manager.bucket_info(bucket) @@ -68,11 +83,15 @@ def get_object_url( f"get bucket domain error:{bucket_info_response.exception} reqId:{bucket_info_response.req_id}" ) if bucket_info["private"] != 0: - for url in object_public_urls: - object_urls.append(self.auth.private_download_url(url, expires=expires)) + for url_info in object_public_urls: + public_url = url_info.get("object_url") + if public_url is None: + continue + url_info["object_url"] = self.auth.private_download_url(public_url, expires=expires) + object_urls.append(url_info) else: - for url in object_public_urls: - object_urls.append(url) + for url_info in object_public_urls: + object_urls.append(url_info) return object_urls async def list_buckets(self, prefix: Optional[str] = None) ->
List[dict]: @@ -82,11 +101,11 @@ async def list_buckets(self, prefix: Optional[str] = None) -> List[dict]: max_buckets = 50 async with self.s3_session.client( - "s3", - aws_access_key_id=self.config.access_key, - aws_secret_access_key=self.config.secret_key, - endpoint_url=self.config.endpoint_url, - region_name=self.config.region_name, + "s3", + aws_access_key_id=self.config.access_key, + aws_secret_access_key=self.config.secret_key, + endpoint_url=self.config.endpoint_url, + region_name=self.config.region_name, ) as s3: if self.config.buckets: # If buckets are configured, only return those @@ -116,7 +135,7 @@ async def list_buckets(self, prefix: Optional[str] = None) -> List[dict]: return buckets[:max_buckets] async def list_objects( - self, bucket: str, prefix: str = "", max_keys: int = 20, start_after: str = "" + self, bucket: str, prefix: str = "", max_keys: int = 20, start_after: str = "" ) -> List[dict]: """ List objects in a specific bucket using async client with pagination @@ -138,11 +157,11 @@ async def list_objects( max_keys = 100 async with self.s3_session.client( - "s3", - aws_access_key_id=self.config.access_key, - aws_secret_access_key=self.config.secret_key, - endpoint_url=self.config.endpoint_url, - region_name=self.config.region_name, + "s3", + aws_access_key_id=self.config.access_key, + aws_secret_access_key=self.config.secret_key, + endpoint_url=self.config.endpoint_url, + region_name=self.config.region_name, ) as s3: response = await s3.list_objects_v2( Bucket=bucket, @@ -153,7 +172,7 @@ async def list_objects( return response.get("Contents", []) async def get_object( - self, bucket: str, key: str, max_retries: int = 3 + self, bucket: str, key: str, max_retries: int = 3 ) -> Dict[str, Any]: """ Get object from S3 using streaming to handle large files and PDFs reliably. 
@@ -169,12 +188,12 @@ async def get_object( while attempt < max_retries: try: async with self.s3_session.client( - "s3", - aws_access_key_id=self.config.access_key, - aws_secret_access_key=self.config.secret_key, - endpoint_url=self.config.endpoint_url, - region_name=self.config.region_name, - config=self.s3_config, + "s3", + aws_access_key_id=self.config.access_key, + aws_secret_access_key=self.config.secret_key, + endpoint_url=self.config.endpoint_url, + region_name=self.config.region_name, + config=self.s3_config, ) as s3: # Get the object and its stream response = await s3.get_object(Bucket=bucket, Key=key) @@ -196,7 +215,7 @@ async def get_object( attempt += 1 if attempt < max_retries: - wait_time = 2**attempt + wait_time = 2 ** attempt logger.warning( f"Attempt {attempt} failed, retrying in {wait_time} seconds: {str(e)}" ) diff --git a/mcp_server/core/storage/tools.py b/mcp_server/core/storage/tools.py index 4ce88b3..a441f8a 100644 --- a/mcp_server/core/storage/tools.py +++ b/mcp_server/core/storage/tools.py @@ -17,7 +17,7 @@ def __init__(self, storage: StorageService): @tools.tool_meta( types.Tool( - name="ListBuckets", # https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html + name="ListBuckets", description="Returns a list of all buckets owned by the authenticated sender of the request. To grant IAM permission to use this operation, you must add the s3:ListAllMyBuckets policy action.", inputSchema={ "type": "object", @@ -37,7 +37,7 @@ async def list_buckets(self, **kwargs) -> list[types.TextContent]: @tools.tool_meta( types.Tool( - name="ListObjects", # https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html + name="ListObjects", description="Each request will return some or all (up to 100) objects in the bucket. You can use request parameters as selection criteria to return some objects in the bucket. 
If you want to continue listing, set start_after to the key of the last file in the last listing result so that you can list new content. To get a list of buckets, see ListBuckets.", inputSchema={ "type": "object", @@ -69,7 +69,7 @@ async def list_objects(self, **kwargs) -> list[types.TextContent]: @tools.tool_meta( types.Tool( - name="GetObject", # https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html + name="GetObject", description="Retrieves an object from Amazon S3. In the GetObject request, specify the full key name for the object. General purpose buckets - Both the virtual-hosted-style requests and the path-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg, specify the object key name as /photos/2006/February/sample.jpg. For a path-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket, specify the object key name as /examplebucket/photos/2006/February/sample.jpg. Directory buckets - Only virtual-hosted-style requests are supported. For a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg in the bucket named examplebucket--use1-az5--x-s3, specify the object key name as /photos/2006/February/sample.jpg. Also, when you make requests to this API operation, your requests are sent to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . 
Path-style requests are not supported.", inputSchema={ "type": "object", @@ -109,8 +109,8 @@ async def get_object(self, **kwargs) -> list[ImageContent] | list[TextContent]: @tools.tool_meta( types.Tool( - name="GetObjectURL", # https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html - description="获取文件下载的 URL", + name="GetObjectURL", + description="Get the file download URL, and note that the Bucket where the file is located must be bound to a domain name. If using Qiniu's test domain, HTTPS access will not be available, and users need to make adjustments for this themselves.", inputSchema={ "type": "object", "properties": { @@ -124,11 +124,11 @@ async def get_object(self, **kwargs) -> list[ImageContent] | list[TextContent]: }, "disable_ssl": { "type": "boolean", - "description": "是否禁用 SSL,默认不禁用使用 HTTP 协议,禁用后使用 HTTP 协议", + "description": "Whether to disable SSL. By default, it is not disabled (HTTPS protocol is used). If disabled, the HTTP protocol will be used.", }, "expires": { "type": "integer", - "description": "下载链接中 Token 有效期,单位是秒;当空间是私有空间时,访问文件对象时需要对文件链接签名 Token,公有空间不签 Token。", + "description": "Token expiration time (in seconds) for download links. When the bucket is private, a signed Token is required to access file objects. Public buckets do not require Token signing.", }, }, "required": ["bucket", "key"], diff --git a/mcp_server/core/version/__init__.py b/mcp_server/core/version/__init__.py new file mode 100644 index 0000000..aaebce5 --- /dev/null +++ b/mcp_server/core/version/__init__.py @@ -0,0 +1,9 @@ +from .tools import register_tools +from ...config import config + + +def load(cfg: config.Config): + register_tools() + + +__all__ = ["load"] diff --git a/mcp_server/core/version/tools.py b/mcp_server/core/version/tools.py new file mode 100644 index 0000000..784070d --- /dev/null +++ b/mcp_server/core/version/tools.py @@ -0,0 +1,31 @@ + +from mcp import types + +from .
import version +from ...tools import tools + + +class _ToolImpl: + def __init__(self): + pass + + @tools.tool_meta( + types.Tool( + name="Version", + description="qiniu mcp server version info.", + inputSchema={ + "type": "object", + "required": [], + } + ) + ) + def version(self, **kwargs) -> list[types.TextContent]: + return [types.TextContent(type="text", text=version.VERSION)] + +def register_tools(): + tool_impl = _ToolImpl() + tools.auto_register_tools( + [ + tool_impl.version, + ] + ) \ No newline at end of file diff --git a/mcp_server/core/version/version.py b/mcp_server/core/version/version.py new file mode 100644 index 0000000..611155c --- /dev/null +++ b/mcp_server/core/version/version.py @@ -0,0 +1,2 @@ + +VERSION = '0.1.0' \ No newline at end of file