Python: experimental py3.13 support #10276

Merged
merged 20 commits on Feb 12, 2025
Changes from 17 commits
1 change: 1 addition & 0 deletions .github/workflows/python-build.yml
@@ -21,6 +21,7 @@ jobs:
version: "0.5.x"
enable-cache: true
cache-suffix: ${{ runner.os }}-${{ matrix.python-version }}
+cache-dependency-glob: "**/uv.lock"
- name: Check version
run: |
echo "Building and uploading Python package version: ${{ github.event.release.tag_name }}"
4 changes: 4 additions & 0 deletions .github/workflows/python-integration-tests.yml
@@ -120,6 +120,7 @@ jobs:
version: "0.5.x"
enable-cache: true
cache-suffix: ${{ runner.os }}-${{ matrix.python-version }}
+cache-dependency-glob: "**/uv.lock"
- name: Install dependencies
run: |
uv sync --all-extras --dev
@@ -257,6 +258,7 @@ jobs:
version: "0.5.x"
enable-cache: true
cache-suffix: ${{ runner.os }}-${{ matrix.python-version }}
+cache-dependency-glob: "**/uv.lock"
- name: Install dependencies
run: |
uv sync --all-extras --dev
@@ -321,6 +323,7 @@ jobs:
version: "0.5.x"
enable-cache: true
cache-suffix: ${{ runner.os }}-${{ matrix.python-version }}
+cache-dependency-glob: "**/uv.lock"
- name: Install dependencies
run: |
uv sync --all-extras --dev
@@ -379,6 +382,7 @@ jobs:
version: "0.5.x"
enable-cache: true
cache-suffix: ${{ runner.os }}-${{ matrix.python-version }}
+cache-dependency-glob: "**/uv.lock"
- name: Install dependencies
run: |
uv sync --all-extras --dev
1 change: 1 addition & 0 deletions .github/workflows/python-lint.yml
@@ -30,6 +30,7 @@ jobs:
version: "0.5.x"
enable-cache: true
cache-suffix: ${{ runner.os }}-${{ matrix.python-version }}
+cache-dependency-glob: "**/uv.lock"
- name: Install the project
run: uv sync --all-extras --dev
- uses: pre-commit/action@v3.0.1
1 change: 1 addition & 0 deletions .github/workflows/python-test-coverage.yml
@@ -32,6 +32,7 @@ jobs:
version: "0.5.x"
enable-cache: true
cache-suffix: ${{ runner.os }}-${{ env.UV_PYTHON }}
+cache-dependency-glob: "**/uv.lock"
- name: Install the project
run: uv sync --all-extras --dev
- name: Test with pytest
17 changes: 5 additions & 12 deletions .github/workflows/python-unit-tests.yml
@@ -20,18 +20,10 @@ jobs:
python-version: ["3.10", "3.11", "3.12"]
os: [ubuntu-latest, windows-latest, macos-latest]
experimental: [false]
-# include:
-#   - python-version: "3.13"
-#     os: "ubuntu-latest"
-#     experimental: true
-#   - python-version: "3.13t"
-#     os: "ubuntu-latest"
-#     experimental: true
-#     gil: 0
-#   - python-version: "3.13t"
-#     os: "ubuntu-latest"
-#     experimental: true
-#     gil: 1
+include:
+  - python-version: "3.13"
+    os: "ubuntu-latest"
+    experimental: true
env:
UV_PYTHON: ${{ matrix.python-version }}
permissions:
@@ -47,6 +39,7 @@
version: "0.5.x"
enable-cache: true
cache-suffix: ${{ runner.os }}-${{ matrix.python-version }}
+cache-dependency-glob: "**/uv.lock"
- name: Install the project
run: uv sync --all-extras --dev -U --prerelease=if-necessary-or-explicit
- name: Test with pytest
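Note: the matrix above now exercises CPython 3.13 as an experimental target, while the commented-out free-threaded 3.13t entries were dropped for now. As a hedged illustration of how test code can detect such a build at runtime, here is a small sketch; `sys._is_gil_enabled` is assumed to exist only on 3.13+ interpreters, so it is looked up defensively:

```python
import sys


def python_runtime_info() -> dict:
    """Report interpreter details useful when a CI matrix mixes stable and experimental versions."""
    info = {
        "version": sys.version_info[:3],
        "experimental": sys.version_info >= (3, 13),
    }
    # sys._is_gil_enabled() is only present on 3.13+ builds;
    # guard with getattr so the check degrades gracefully on 3.10-3.12.
    gil_check = getattr(sys, "_is_gil_enabled", None)
    info["gil_enabled"] = gil_check() if callable(gil_check) else True
    return info


if __name__ == "__main__":
    print(python_runtime_info())
```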
4 changes: 2 additions & 2 deletions python/.pre-commit-config.yaml
@@ -32,14 +32,14 @@ repos:
- id: pyupgrade
args: [--py310-plus]
- repo: https://github.com/astral-sh/ruff-pre-commit
-rev: v0.7.4
+rev: v0.9.6
hooks:
- id: ruff
args: [ --fix, --exit-non-zero-on-fix ]
- id: ruff-format
- repo: https://github.com/astral-sh/uv-pre-commit
# uv version.
-rev: 0.5.21
+rev: 0.5.30
hooks:
# Update the uv lockfile
- id: uv-lock
8 changes: 4 additions & 4 deletions python/Makefile
@@ -32,7 +32,7 @@ help:
echo ""
echo -e "\033[1mVARIABLES:\033[0m"
echo " PYTHON_VERSION - Python version to use. Default is 3.10"
echo " By default, 3.10, 3.11, and 3.12 are installed as well."
echo " By default, 3.10, 3.11, 3.12 and 3.13 are installed as well."

##############################
# INSTALL
@@ -68,8 +68,8 @@ endif
##############################
.ONESHELL:
install-python:
echo "Installing python 3.10, 3.11, 3.12"
uv python install 3.10 3.11 3.12
echo "Installing python versions"
uv python install 3.10 3.11 3.12 3.13

##############################
# INSTALL-PRE-COMMIT
@@ -87,7 +87,7 @@ install-sk:
echo "Creating and activating venv for python $(PYTHON_VERSION)"
uv venv --python $(PYTHON_VERSION)
echo "Installing Semantic Kernel and all dependencies"
-uv sync --all-extras --dev
+uv sync --all-extras --dev --prerelease=if-necessary-or-explicit

##############################
# CLEAN
12 changes: 7 additions & 5 deletions python/pyproject.toml
@@ -45,6 +45,7 @@ dependencies = [
"pybars4 ~= 0.9",
"jinja2 ~= 3.1",
"nest-asyncio ~= 1.6",
"scipy>=1.15.1",
]

### Optional dependencies
@@ -62,7 +63,7 @@ chroma = [
]
google = [
"google-cloud-aiplatform == 1.79.0",
"google-generativeai == 0.7"
"google-generativeai ~= 0.8"
]
hugging_face = [
"transformers[torch] ~= 4.28",
@@ -87,7 +88,7 @@ ollama = [
"ollama ~= 0.4"
]
onnx = [
"onnxruntime-genai ~= 0.5"
"onnxruntime-genai ~= 0.5; python_version < '3.13'"
]
anthropic = [
"anthropic ~= 0.32"
@@ -107,11 +108,11 @@ redis = [
"redisvl >= 0.3.6",
]
usearch = [
"usearch ~= 2.9",
"usearch ~= 2.16",
"pyarrow >= 12.0,< 20.0"
]
weaviate = [
"weaviate-client>=4.7,<5.0",
"weaviate-client>=4.10,<5.0",
]
pandas = [
"pandas ~= 2.2"
@@ -139,7 +140,8 @@ dev-dependencies = [
"snoop ~= 0.4",
"mypy >= 1.10",
"types-PyYAML ~= 6.0.12.20240311",
"ruff ~= 0.7",
"ruff ~= 0.9",
"pyright>=1.1.392.post0",
]
environments = [
"sys_platform == 'darwin'",
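The onnx extra above is now guarded by a `python_version < '3.13'` environment marker, presumably because onnxruntime-genai does not yet ship 3.13 wheels. A minimal sketch of how such a PEP 508 marker evaluates, assuming the third-party `packaging` library is available:

```python
from packaging.markers import Marker

# The marker attached to the onnx extra in pyproject.toml.
marker = Marker("python_version < '3.13'")

# evaluate() checks against the current interpreter by default;
# an explicit environment dict lets you test other versions.
print(marker.evaluate())                                # depends on the running interpreter
print(marker.evaluate({"python_version": "3.12"}))      # True: the extra is installed
print(marker.evaluate({"python_version": "3.13"}))      # False: the extra is skipped
```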
27 changes: 27 additions & 0 deletions python/pyrightconfig.json
@@ -0,0 +1,27 @@
{
"reportMissingImports": false,
"include": [
"semantic_kernel/**/*.py"
],
"pythonVersion": "3.10",
"exclude": [
"semantic_kernel/memory/*",
"semantic_kernel/planners/*",
"semantic_kernel/connectors/memory/astradb/*",
"semantic_kernel/connectors/memory/azure_ai_search/*",
"semantic_kernel/connectors/memory/azure_cognitive_search/*",
"semantic_kernel/connectors/memory/azure_cosmosdb/*",
"semantic_kernel/connectors/memory/azure_cosmosdb_no_sql/*",
"semantic_kernel/connectors/memory/chroma/*",
"semantic_kernel/connectors/memory/milvus/*",
"semantic_kernel/connectors/memory/mongodb_atlas/*",
"semantic_kernel/connectors/memory/pinecone/*",
"semantic_kernel/connectors/memory/postgres/*",
"semantic_kernel/connectors/memory/qdrant/*",
"semantic_kernel/connectors/memory/redis/*",
"semantic_kernel/connectors/memory/usearch/*",
"semantic_kernel/connectors/memory/weaviate/*"
],
"venvPath": ".",
"venv": ".venv"
}
@@ -17,6 +17,8 @@
from semantic_kernel.agents import Agent
from semantic_kernel.contents.chat_message_content import ChatMessageContent

+NEWLINE = "\n"


@experimental_class
class CustomSelectionStrategy(SelectionStrategy):
@@ -80,7 +82,7 @@ def get_system_message(self, agents: list["Agent"]) -> str:
Initially, the chat history may be empty.

Here are the agents with their indices, names, and descriptions:
{"\n".join(f"[{index}] {agent.name}:\n{agent.description}" for index, agent in enumerate(agents))}
{NEWLINE.join(f"[{index}] {agent.name}:{NEWLINE}{agent.description}" for index, agent in enumerate(agents))}

Your task is to select the next agent based on the conversation history.

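The NEWLINE constant works around a pre-3.12 limitation: before PEP 701 (Python 3.12), an f-string expression could not contain a backslash, so `"\n".join(...)` inside the prompt template would be a SyntaxError on 3.10 and 3.11. A small sketch of the workaround, with hypothetical agent data:

```python
# Before Python 3.12, a backslash inside an f-string expression is a
# SyntaxError, e.g.:  f"{'\n'.join(items)}"
# Hoisting the escape into a module-level constant is portable to 3.10+.
NEWLINE = "\n"

agents = [("Writer", "Drafts content"), ("Reviewer", "Checks drafts")]  # hypothetical
listing = NEWLINE.join(
    f"[{index}] {name}:{NEWLINE}{description}"
    for index, (name, description) in enumerate(agents)
)
print(listing)
```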
@@ -20,6 +20,8 @@
TERMINATE_TRUE_KEYWORD = "yes"
TERMINATE_FALSE_KEYWORD = "no"

+NEWLINE = "\n"


class CustomTerminationStrategy(TerminationStrategy):
NUM_OF_RETRIES: ClassVar[int] = 3
@@ -82,7 +84,7 @@ def get_system_message(self) -> str:
The chat history may start empty as no agents have spoken yet.

Here are the agents with their indices, names, and descriptions:
{"\n".join(f"[{index}] {agent.name}:\n{agent.description}" for index, agent in enumerate(self.agents))}
{NEWLINE.join(f"[{index}] {agent.name}:{NEWLINE}{agent.description}" for index, agent in enumerate(self.agents))}

Your task is NOT to continue the conversation. Determine if the latest content is approved by all agents.
If approved, say "{TERMINATE_TRUE_KEYWORD}". Otherwise, say "{TERMINATE_FALSE_KEYWORD}".
@@ -152,7 +152,7 @@ def get_agenda_for_prompt(self) -> str:
return "None"
agenda_str = "\n".join(
[
f"{i+1}. [{format_resource(item['resource'], ResourceConstraintUnit.TURNS)}] {item['title']}"
f"{i + 1}. [{format_resource(item['resource'], ResourceConstraintUnit.TURNS)}] {item['title']}"
for i, item in enumerate(agenda_items)
]
)
@@ -205,7 +205,7 @@ def get_resource_instructions(self) -> tuple[str, str]:
resource_instructions = ""

if self.resource_constraint.mode == ResourceConstraintMode.EXACT:
exact_mode_instructions = f"""There {'are' if is_plural_remaining else 'is'} {formatted_remaining_resource} remaining (including this one) - the conversation will automatically terminate when 0 turns are left. \
exact_mode_instructions = f"""There {"are" if is_plural_remaining else "is"} {formatted_remaining_resource} remaining (including this one) - the conversation will automatically terminate when 0 turns are left. \
You should continue the conversation until it is automatically terminated. This means you should NOT preemptively end the conversation, \
either explicitly (by selecting the "End conversation" action) or implicitly (e.g. by telling the user that you have all required information and they should wait for the next step). \
Your goal is not to maximize efficiency (i.e. complete the artifact as quickly as possible then end the conversation), but rather to make the best use of ALL remaining turns available to you"""
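The quote change above looks like a ruff 0.9 formatter update: the 2024 style prefers double quotes inside f-string replacement fields. Inside a triple-quoted f-string this is valid on all supported versions, since only the literal's own delimiter is off-limits before Python 3.12. A sketch:

```python
remaining = 2  # hypothetical turn counter

# Inside f"""...""", a lone double quote in the expression is legal on
# Python 3.10+; only the """ delimiter itself cannot appear before 3.12.
message = f"""There {"are" if remaining != 1 else "is"} {remaining} turn(s) remaining."""
print(message)
```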
@@ -127,16 +127,18 @@ async def _inner_get_chat_message_contents(
assert isinstance(settings, GoogleAIChatPromptExecutionSettings) # nosec

genai.configure(api_key=self.service_settings.api_key.get_secret_value())
+if not self.service_settings.gemini_model_id:
+    raise ServiceInitializationError("The Google AI Gemini model ID is required.")
model = GenerativeModel(
-self.service_settings.gemini_model_id,
+model_name=self.service_settings.gemini_model_id,
system_instruction=filter_system_message(chat_history),
)

response: AsyncGenerateContentResponse = await model.generate_content_async(
contents=self._prepare_chat_history_for_request(chat_history),
generation_config=GenerationConfig(**settings.prepare_settings_dict()),
tools=settings.tools,
-tool_config=settings.tool_config,
+tool_config=settings.tool_config,  # type: ignore
)

return [self._create_chat_message_content(response, candidate) for candidate in response.candidates]
@@ -154,16 +156,18 @@ async def _inner_get_streaming_chat_message_contents(
assert isinstance(settings, GoogleAIChatPromptExecutionSettings) # nosec

genai.configure(api_key=self.service_settings.api_key.get_secret_value())
+if not self.service_settings.gemini_model_id:
+    raise ServiceInitializationError("The Google AI Gemini model ID is required.")
model = GenerativeModel(
-self.service_settings.gemini_model_id,
+model_name=self.service_settings.gemini_model_id,
system_instruction=filter_system_message(chat_history),
)

response: AsyncGenerateContentResponse = await model.generate_content_async(
contents=self._prepare_chat_history_for_request(chat_history),
generation_config=GenerationConfig(**settings.prepare_settings_dict()),
tools=settings.tools,
-tool_config=settings.tool_config,
+tool_config=settings.tool_config,  # type: ignore
stream=True,
)

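The added model-ID checks double as type narrowing: `gemini_model_id` is optional in the settings model, so pyright would otherwise flag passing a possibly-None value to `GenerativeModel`. A reduced sketch of the pattern; the settings class below is a stand-in, not the real pydantic model:

```python
from typing import Optional


class ServiceInitializationError(Exception):
    """Raised when a service is configured incompletely (stand-in)."""


class GoogleAISettings:
    """Stand-in for the real settings object; the field is optional
    because it may be supplied via environment variables."""

    def __init__(self, gemini_model_id: Optional[str] = None) -> None:
        self.gemini_model_id = gemini_model_id


def resolve_model_id(settings: GoogleAISettings) -> str:
    # The guard narrows Optional[str] to str, which both fails fast on
    # misconfiguration and satisfies static checkers such as pyright.
    if not settings.gemini_model_id:
        raise ServiceInitializationError("The Google AI Gemini model ID is required.")
    return settings.gemini_model_id
```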
@@ -99,8 +99,10 @@ async def _inner_get_text_contents(
assert isinstance(settings, GoogleAITextPromptExecutionSettings) # nosec

genai.configure(api_key=self.service_settings.api_key.get_secret_value())
+if not self.service_settings.gemini_model_id:
+    raise ServiceInitializationError("The Google AI Gemini model ID is required.")
model = GenerativeModel(
-self.service_settings.gemini_model_id,
+model_name=self.service_settings.gemini_model_id,
)

response: AsyncGenerateContentResponse = await model.generate_content_async(
@@ -122,8 +124,10 @@ async def _inner_get_streaming_text_contents(
assert isinstance(settings, GoogleAITextPromptExecutionSettings) # nosec

genai.configure(api_key=self.service_settings.api_key.get_secret_value())
+if not self.service_settings.gemini_model_id:
+    raise ServiceInitializationError("The Google AI Gemini model ID is required.")
model = GenerativeModel(
-self.service_settings.gemini_model_id,
+model_name=self.service_settings.gemini_model_id,
)

response: AsyncGenerateContentResponse = await model.generate_content_async(
@@ -93,7 +93,9 @@ async def generate_raw_embeddings(
assert isinstance(settings, GoogleAIEmbeddingPromptExecutionSettings) # nosec

genai.configure(api_key=self.service_settings.api_key.get_secret_value())
-response: BatchEmbeddingDict = await genai.embed_content_async(
+if not self.service_settings.embedding_model_id:
+    raise ServiceInitializationError("The Google AI embedding model ID is required.")
+response: BatchEmbeddingDict = await genai.embed_content_async(  # type: ignore
model=self.service_settings.embedding_model_id,
content=texts,
**settings.prepare_settings_dict(),
@@ -2,10 +2,16 @@

from typing import Any

-from transformers import GenerationConfig

from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings

+try:
+    from transformers import GenerationConfig
+
+    ready = True
+except ImportError:
+    GenerationConfig = Any
+    ready = False


class HuggingFacePromptExecutionSettings(PromptExecutionSettings):
"""Hugging Face prompt execution settings."""
Expand All @@ -19,8 +25,10 @@ class HuggingFacePromptExecutionSettings(PromptExecutionSettings):
temperature: float = 1.0
top_p: float = 1.0

-def get_generation_config(self) -> GenerationConfig:
+def get_generation_config(self) -> "GenerationConfig":
"""Get the generation config."""
+if not ready:
+    raise ImportError("transformers is not installed.")
return GenerationConfig(
**self.model_dump(
include={"max_new_tokens", "pad_token_id", "eos_token_id", "temperature", "top_p"},
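With the guarded import, importing the settings module no longer requires the optional `transformers` dependency; only calling `get_generation_config` does. A usage sketch under that assumption (the import path is inferred from the repo layout and may differ):

```python
# Assumed module path; adjust to the actual location in the repo.
from semantic_kernel.connectors.ai.hugging_face.hf_prompt_execution_settings import (
    HuggingFacePromptExecutionSettings,
)

settings = HuggingFacePromptExecutionSettings(max_new_tokens=128, temperature=0.7)

try:
    # Requires the hugging_face extra (transformers) to be installed.
    config = settings.get_generation_config()
except ImportError:
    config = None  # degrade gracefully when the extra is absent
print(config)
```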
@@ -11,6 +11,7 @@
else:
from typing_extensions import override # pragma: no cover


import torch
from transformers import AutoTokenizer, TextIteratorStreamer, pipeline

@@ -22,6 +22,7 @@

from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings


logger: logging.Logger = logging.getLogger(__name__)

