From 2c9973d1190ee291e953ba0c73826d8822209505 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20Sirieix?=
Date: Mon, 9 Dec 2024 10:40:24 +0100
Subject: [PATCH 1/3] chore: bump version

---
 literalai/version.py | 2 +-
 setup.py             | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/literalai/version.py b/literalai/version.py
index 6b12fa0..472f4a6 100644
--- a/literalai/version.py
+++ b/literalai/version.py
@@ -1 +1 @@
-__version__ = "0.1.102"
+__version__ = "0.1.103"
diff --git a/setup.py b/setup.py
index 72ff767..b7071a7 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@
 
 setup(
     name="literalai",
-    version="0.1.102",  # update version in literalai/version.py
+    version="0.1.103",  # update version in literalai/version.py
     description="An SDK for observability in Python applications",
     long_description=open("README.md").read(),
     long_description_content_type="text/markdown",

From 1a49aaacd5ded1b35d7a6b51c346836094b3e640 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20Sirieix?=
Date: Mon, 9 Dec 2024 11:05:15 +0100
Subject: [PATCH 2/3] fix: fix linter issue

---
 .github/workflows/CI.yml                        |  3 +++
 examples/langchain_variable.py                  |  2 +-
 .../instrumentation/llamaindex/event_handler.py |  6 +++---
 literalai/observability/generation.py           | 14 ++++++++------
 literalai/prompt_engineering/prompt.py          |  6 +++---
 5 files changed, 18 insertions(+), 13 deletions(-)

diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 0be0499..b0b6b64 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -4,6 +4,9 @@ on:
   workflow_dispatch:
   pull_request:
     branches: ["main"]
+  push:
+    branches:
+      - main
 
 permissions:
   contents: read
diff --git a/examples/langchain_variable.py b/examples/langchain_variable.py
index bd35b40..c58af52 100644
--- a/examples/langchain_variable.py
+++ b/examples/langchain_variable.py
@@ -32,7 +32,7 @@
 cb = lai.langchain_callback()
 
 # Returns a langchain_openai.ChatOpenAI instance.
-gpt_4o = init_chat_model(
+gpt_4o = init_chat_model(  # type: ignore
     model_provider=prompt.provider,
     **prompt.settings,
 )
diff --git a/literalai/instrumentation/llamaindex/event_handler.py b/literalai/instrumentation/llamaindex/event_handler.py
index 96813b0..1afba41 100644
--- a/literalai/instrumentation/llamaindex/event_handler.py
+++ b/literalai/instrumentation/llamaindex/event_handler.py
@@ -144,8 +144,8 @@ def extract_query(x: Union[str, QueryBundle]):
 class LiteralEventHandler(BaseEventHandler):
     """This class handles events coming from LlamaIndex."""
 
-    _client: "LiteralClient" = PrivateAttr(...)
-    _span_handler: "LiteralSpanHandler" = PrivateAttr(...)
+    _client: "LiteralClient" = PrivateAttr()
+    _span_handler: "LiteralSpanHandler" = PrivateAttr()
     runs: Dict[str, List[Step]] = {}
     streaming_run_ids: List[str] = []
     _standalone_step_id: Optional[str] = None
@@ -238,7 +238,7 @@ def handle(self, event: BaseEvent, **kwargs) -> None:
                 thread_id=thread_id,
                 content=query,
             )
-            
+
         # Retrieval wraps the Embedding step in LlamaIndex
         if isinstance(event, RetrievalStartEvent):
             run = self._client.start_step(
diff --git a/literalai/observability/generation.py b/literalai/observability/generation.py
index 026fc89..ed8c1a1 100644
--- a/literalai/observability/generation.py
+++ b/literalai/observability/generation.py
@@ -67,15 +67,16 @@ class BaseGeneration(Utils):
         to_dict(self) -> Dict: Converts the generation object to a dictionary.
 
""" + id: Optional[str] = None prompt_id: Optional[str] = None provider: Optional[str] = None model: Optional[str] = None error: Optional[str] = None - settings: Optional[Dict] = Field(default_factory=dict) - variables: Optional[Dict] = Field(default_factory=dict) - tags: Optional[List[str]] = Field(default_factory=list) - metadata: Optional[Dict] = Field(default_factory=dict) + settings: Optional[Dict] = Field(default_factory=lambda: {}) + variables: Optional[Dict] = Field(default_factory=lambda: {}) + tags: Optional[List[str]] = Field(default_factory=lambda: []) + metadata: Optional[Dict] = Field(default_factory=lambda: {}) tools: Optional[List[Dict]] = None token_count: Optional[int] = None input_token_count: Optional[int] = None @@ -129,6 +130,7 @@ class CompletionGeneration(BaseGeneration, Utils): completion (Optional[str]): The generated completion text. type (GenerationType): The type of generation, which is set to GenerationType.COMPLETION. """ + prompt: Optional[str] = None completion: Optional[str] = None type = GenerationType.COMPLETION @@ -177,8 +179,9 @@ class ChatGeneration(BaseGeneration, Utils): message_completion (Optional[GenerationMessage]): The completion message of the chat generation. type (GenerationType): The type of generation, which is set to GenerationType.CHAT. """ + type = GenerationType.CHAT - messages: Optional[List[GenerationMessage]] = Field(default_factory=list) + messages: Optional[List[GenerationMessage]] = Field(default_factory=lambda: []) message_completion: Optional[GenerationMessage] = None def to_dict(self): @@ -213,4 +216,3 @@ def from_dict(self, generation_dict: Dict): messages=generation_dict.get("messages", []), message_completion=generation_dict.get("messageCompletion"), ) - diff --git a/literalai/prompt_engineering/prompt.py b/literalai/prompt_engineering/prompt.py index 00e5bea..1327301 100644 --- a/literalai/prompt_engineering/prompt.py +++ b/literalai/prompt_engineering/prompt.py @@ -67,7 +67,7 @@ class Prompt(Utils): Attributes ---------- - template_messages : List[GenerationMessage] + template_messages : List[GenerationMessage] The messages that make up the prompt. Messages can be of type `text` or `image`. Messages can reference variables. 
     variables : List[PromptVariable]
@@ -214,9 +214,9 @@ def to_langchain_chat_prompt_template(self, additional_messages=[]):
 
         class CustomChatPromptTemplate(ChatPromptTemplate):
             orig_messages: Optional[List[GenerationMessage]] = Field(
-                default_factory=list
+                default_factory=lambda: []
             )
-            default_vars: Optional[Dict] = Field(default_factory=dict)
+            default_vars: Optional[Dict] = Field(default_factory=lambda: {})
             prompt_id: Optional[str] = None
 
             def format_messages(self, **kwargs: Any) -> List[BaseMessage]:

From 2141421774c5ddfba6e4b0e13c53ad877133c99b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20Sirieix?=
Date: Mon, 9 Dec 2024 11:14:23 +0100
Subject: [PATCH 3/3] fix: fix linter issue

---
 .../llamaindex/event_handler.py | 27 +++++++++----------
 1 file changed, 12 insertions(+), 15 deletions(-)

diff --git a/literalai/instrumentation/llamaindex/event_handler.py b/literalai/instrumentation/llamaindex/event_handler.py
index 1afba41..29f09b0 100644
--- a/literalai/instrumentation/llamaindex/event_handler.py
+++ b/literalai/instrumentation/llamaindex/event_handler.py
@@ -100,7 +100,7 @@ def extract_document_info(nodes: List[NodeWithScore]):
 
 
 def build_message_dict(message: ChatMessage):
-    message_dict = {
+    message_dict: GenerationMessage = {
         "role": convert_message_role(message.role),
         "content": message.content,
     }
@@ -163,21 +163,18 @@ def __init__(
         object.__setattr__(self, "_client", literal_client)
         object.__setattr__(self, "_span_handler", llama_index_span_handler)
 
-    def _convert_message(
-        self,
-        message: ChatMessage,
-    ):
+    def _convert_message(self, message: ChatMessage):
         tool_calls = message.additional_kwargs.get("tool_calls")
-        msg = GenerationMessage(
-            name=getattr(message, "name", None),
-            role=convert_message_role(message.role),
-            content="",
-        )
-
-        msg["content"] = message.content
-
-        if tool_calls:
-            msg["tool_calls"] = [tool_call.to_dict() for tool_call in tool_calls]
+        msg: GenerationMessage = {
+            "name": getattr(message, "name", None),
+            "role": convert_message_role(message.role),
+            "content": message.content,
+            "tool_calls": (
+                [tool_call.to_dict() for tool_call in tool_calls]
+                if tool_calls
+                else None
+            ),
+        }
 
         return msg