
Commit 67cb1db

Try to remove type-ignore comments
1 parent d49b9d1 commit 67cb1db
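
Context for readers skimming the diff: every deletion below strips a `# type: ignore` comment from one of two recurring patterns. The first is the optional-dependency import guard used by the embedding and LLM wrappers. Here is a minimal sketch of that pattern; the `cohere` fallback mirrors src/neo4j_graphrag/embeddings/cohere.py, while `require_cohere` is a hypothetical helper added only for illustration. mypy normally rejects rebinding an imported module to None, which is what the deleted comments suppressed, so dropping them is a "try" that only sticks if the annotations or the mypy configuration absorb the error.

try:
    import cohere  # optional dependency; may be absent at runtime
except ImportError:
    # mypy ordinarily flags this rebinding ("incompatible types in
    # assignment"); this is the error the old "# type: ignore" silenced.
    cohere = None


def require_cohere() -> None:
    """Hypothetical guard: fail loudly if the optional extra is missing."""
    if cohere is None:
        raise ImportError("cohere is not installed")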

11 files changed: +27 -27 lines changed

src/neo4j_graphrag/embeddings/cohere.py

Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@
 try:
     import cohere
 except ImportError:
-    cohere = None  # type: ignore
+    cohere = None


 class CohereEmbeddings(Embedder):

src/neo4j_graphrag/embeddings/mistral.py

Lines changed: 1 addition & 1 deletion

@@ -24,7 +24,7 @@
 try:
     from mistralai import Mistral
 except ImportError:
-    Mistral = None  # type: ignore
+    Mistral = None


 class MistralAIEmbeddings(Embedder):

src/neo4j_graphrag/llm/anthropic_llm.py

Lines changed: 1 addition & 1 deletion

@@ -91,7 +91,7 @@ def get_messages(
             raise LLMGenerationError(e.errors()) from e
         messages.extend(cast(Iterable[dict[str, Any]], message_history))
         messages.append(UserMessage(content=input).model_dump())
-        return messages  # type: ignore
+        return messages

     def invoke(
         self,
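
The same one-line change recurs in cohere_llm.py, ollama_llm.py, and openai_llm.py below: `get_messages` builds a `list[dict[str, Any]]`, yet the deleted comment sat on its `return`. A plausible reconstruction of the mismatch that the suppression covered, assuming a declared return type narrower than plain dicts; `MessageParamLike` and the literal message are hypothetical stand-ins, not the library's actual types:

from typing import Any, Iterable, TypedDict, cast


class MessageParamLike(TypedDict):
    """Hypothetical stand-in for the SDK's narrower message type."""

    role: str
    content: str


def get_messages(message_history: Iterable[dict[str, Any]]) -> list[MessageParamLike]:
    messages: list[dict[str, Any]] = []
    messages.extend(cast(Iterable[dict[str, Any]], message_history))
    messages.append({"role": "user", "content": "hello"})
    # mypy rejects this return (list[dict[str, Any]] is not
    # list[MessageParamLike]); a "# type: ignore" on this line is one way
    # to silence it, and that suppression is what the commit removes.
    # Building `messages` with the narrower type from the start is the
    # usual way to avoid needing one at all.
    return messages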

src/neo4j_graphrag/llm/cohere_llm.py

Lines changed: 1 addition & 1 deletion

@@ -94,7 +94,7 @@ def get_messages(
             raise LLMGenerationError(e.errors()) from e
         messages.extend(cast(Iterable[dict[str, Any]], message_history))
         messages.append(UserMessage(content=input).model_dump())
-        return messages  # type: ignore
+        return messages

     def invoke(
         self,

src/neo4j_graphrag/llm/mistralai_llm.py

Lines changed: 2 additions & 2 deletions

@@ -35,8 +35,8 @@
     from mistralai import Messages, Mistral
     from mistralai.models.sdkerror import SDKError
 except ImportError:
-    Mistral = None  # type: ignore
-    SDKError = None  # type: ignore
+    Mistral = None
+    SDKError = None


 class MistralAILLM(LLMInterface):

src/neo4j_graphrag/llm/ollama_llm.py

Lines changed: 1 addition & 1 deletion

@@ -76,7 +76,7 @@ def get_messages(
             raise LLMGenerationError(e.errors()) from e
         messages.extend(cast(Iterable[dict[str, Any]], message_history))
         messages.append(UserMessage(content=input).model_dump())
-        return messages  # type: ignore
+        return messages

     def invoke(
         self,

src/neo4j_graphrag/llm/openai_llm.py

Lines changed: 1 addition & 1 deletion

@@ -85,7 +85,7 @@ def get_messages(
             raise LLMGenerationError(e.errors()) from e
         messages.extend(cast(Iterable[dict[str, Any]], message_history))
         messages.append(UserMessage(content=input).model_dump())
-        return messages  # type: ignore
+        return messages

     def invoke(
         self,

tests/unit/llm/test_anthropic_llm.py

Lines changed: 7 additions & 7 deletions

@@ -49,7 +49,7 @@ def test_anthropic_invoke_happy_path(mock_anthropic: Mock) -> None:
     input_text = "may thy knife chip and shatter"
     response = llm.invoke(input_text)
     assert response.content == "generated text"
-    llm.client.messages.create.assert_called_once_with(  # type: ignore
+    llm.client.messages.create.assert_called_once_with(
         messages=[{"role": "user", "content": input_text}],
         model="claude-3-opus-20240229",
         system=anthropic.NOT_GIVEN,

@@ -75,7 +75,7 @@ def test_anthropic_invoke_with_message_history_happy_path(mock_anthropic: Mock)
     response = llm.invoke(question, message_history)  # type: ignore
     assert response.content == "generated text"
     message_history.append({"role": "user", "content": question})
-    llm.client.messages.create.assert_called_once_with(  # type: ignore[attr-defined]
+    llm.client.messages.create.assert_called_once_with(
         messages=message_history,
         model="claude-3-opus-20240229",
         system=anthropic.NOT_GIVEN,

@@ -101,14 +101,14 @@ def test_anthropic_invoke_with_system_instruction(
     assert isinstance(response, LLMResponse)
     assert response.content == "generated text"
     messages = [{"role": "user", "content": question}]
-    llm.client.messages.create.assert_called_with(  # type: ignore[attr-defined]
+    llm.client.messages.create.assert_called_with(
         model="claude-3-opus-20240229",
         system=system_instruction,
         messages=messages,
         **model_params,
     )

-    assert llm.client.messages.create.call_count == 1  # type: ignore
+    assert llm.client.messages.create.call_count == 1


 def test_anthropic_invoke_with_message_history_and_system_instruction(

@@ -133,14 +133,14 @@ def test_anthropic_invoke_with_message_history_and_system_instruction(
     assert isinstance(response, LLMResponse)
     assert response.content == "generated text"
     message_history.append({"role": "user", "content": question})
-    llm.client.messages.create.assert_called_with(  # type: ignore[attr-defined]
+    llm.client.messages.create.assert_called_with(
         model="claude-3-opus-20240229",
         system=system_instruction,
         messages=message_history,
         **model_params,
     )

-    assert llm.client.messages.create.call_count == 1  # type: ignore
+    assert llm.client.messages.create.call_count == 1


 def test_anthropic_invoke_with_message_history_validation_error(

@@ -178,7 +178,7 @@ async def test_anthropic_ainvoke_happy_path(mock_anthropic: Mock) -> None:
     input_text = "may thy knife chip and shatter"
     response = await llm.ainvoke(input_text)
     assert response.content == "Return text"
-    llm.async_client.messages.create.assert_awaited_once_with(  # type: ignore
+    llm.async_client.messages.create.assert_awaited_once_with(
         model="claude-3-opus-20240229",
         system=anthropic.NOT_GIVEN,
         messages=[{"role": "user", "content": input_text}],
tests/unit/llm/test_mistralai_llm.py

Lines changed: 3 additions & 3 deletions

@@ -71,7 +71,7 @@ def test_mistralai_llm_invoke_with_message_history(mock_mistral: Mock) -> None:
     messages = [{"role": "system", "content": system_instruction}]
     messages.extend(message_history)
     messages.append({"role": "user", "content": question})
-    llm.client.chat.complete.assert_called_once_with(  # type: ignore[attr-defined]
+    llm.client.chat.complete.assert_called_once_with(
         messages=messages,
         model=model,
     )

@@ -103,12 +103,12 @@ def test_mistralai_llm_invoke_with_message_history_and_system_instruction(
     messages = [{"role": "system", "content": system_instruction}]
     messages.extend(message_history)
     messages.append({"role": "user", "content": question})
-    llm.client.chat.complete.assert_called_once_with(  # type: ignore[attr-defined]
+    llm.client.chat.complete.assert_called_once_with(
         messages=messages,
         model=model,
     )

-    assert llm.client.chat.complete.call_count == 1  # type: ignore
+    assert llm.client.chat.complete.call_count == 1


 @patch("neo4j_graphrag.llm.mistralai_llm.Mistral")

tests/unit/llm/test_ollama_llm.py

Lines changed: 5 additions & 5 deletions

@@ -55,7 +55,7 @@ def test_ollama_llm_happy_path(mock_import: Mock) -> None:
     messages = [
         {"role": "user", "content": question},
     ]
-    llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
+    llm.client.chat.assert_called_once_with(
         model=model, messages=messages, options=model_params
     )

@@ -80,7 +80,7 @@ def test_ollama_invoke_with_system_instruction_happy_path(mock_import: Mock) ->
     assert response.content == "ollama chat response"
     messages = [{"role": "system", "content": system_instruction}]
     messages.append({"role": "user", "content": question})
-    llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
+    llm.client.chat.assert_called_once_with(
         model=model, messages=messages, options=model_params
     )

@@ -108,7 +108,7 @@ def test_ollama_invoke_with_message_history_happy_path(mock_import: Mock) -> Non
     assert response.content == "ollama chat response"
     messages = [m for m in message_history]
     messages.append({"role": "user", "content": question})
-    llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
+    llm.client.chat.assert_called_once_with(
         model=model, messages=messages, options=model_params
     )

@@ -144,10 +144,10 @@ def test_ollama_invoke_with_message_history_and_system_instruction(
     messages = [{"role": "system", "content": system_instruction}]
     messages.extend(message_history)
     messages.append({"role": "user", "content": question})
-    llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
+    llm.client.chat.assert_called_once_with(
         model=model, messages=messages, options=model_params
     )
-    assert llm.client.chat.call_count == 1  # type: ignore
+    assert llm.client.chat.call_count == 1


 @patch("builtins.__import__")
