Skip to content

Commit 0d0824e

Browse files
Consistently use double quotes in DSPy landing page (#8447)
1 parent 77331de commit 0d0824e

File tree

1 file changed

+23
-23
lines changed

1 file changed

+23
-23
lines changed

docs/docs/index.md

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -31,16 +31,16 @@ Instead of wrangling prompts or training jobs, DSPy (Declarative Self-improving
3131

3232
```python linenums="1"
3333
import dspy
34-
lm = dspy.LM('openai/gpt-4o-mini', api_key='YOUR_OPENAI_API_KEY')
34+
lm = dspy.LM("openai/gpt-4o-mini", api_key="YOUR_OPENAI_API_KEY")
3535
dspy.configure(lm=lm)
3636
```
3737

3838
=== "Anthropic"
39-
You can authenticate by setting the ANTHROPIC_API_KEY env variable or passing `api_key` below.
39+
You can authenticate by setting the `ANTHROPIC_API_KEY` env variable or passing `api_key` below.
4040

4141
```python linenums="1"
4242
import dspy
43-
lm = dspy.LM('anthropic/claude-3-opus-20240229', api_key='YOUR_ANTHROPIC_API_KEY')
43+
lm = dspy.LM("anthropic/claude-3-opus-20240229", api_key="YOUR_ANTHROPIC_API_KEY")
4444
dspy.configure(lm=lm)
4545
```
4646

@@ -50,19 +50,19 @@ Instead of wrangling prompts or training jobs, DSPy (Declarative Self-improving
5050
```python linenums="1"
5151
import dspy
5252
lm = dspy.LM(
53-
'databricks/databricks-llama-4-maverick',
53+
"databricks/databricks-llama-4-maverick",
5454
api_key="YOUR_DATABRICKS_ACCESS_TOKEN",
5555
api_base="YOUR_DATABRICKS_WORKSPACE_URL", # e.g.: https://dbc-64bf4923-e39e.cloud.databricks.com/serving-endpoints
5656
)
5757
dspy.configure(lm=lm)
5858
```
5959

6060
=== "Gemini"
61-
You can authenticate by setting the GEMINI_API_KEY env variable or passing `api_key` below.
61+
You can authenticate by setting the `GEMINI_API_KEY` env variable or passing `api_key` below.
6262

6363
```python linenums="1"
6464
import dspy
65-
lm = dspy.LM('gemini/gemini-2.5-flash', api_key='YOUR_GEMINI_API_KEY')
65+
lm = dspy.LM("gemini/gemini-2.5-flash", api_key="YOUR_GEMINI_API_KEY")
6666
dspy.configure(lm=lm)
6767
```
6868

@@ -78,7 +78,7 @@ Instead of wrangling prompts or training jobs, DSPy (Declarative Self-improving
7878

7979
```python linenums="1"
8080
import dspy
81-
lm = dspy.LM('ollama_chat/llama3.2', api_base='http://localhost:11434', api_key='')
81+
lm = dspy.LM("ollama_chat/llama3.2", api_base="http://localhost:11434", api_key="")
8282
dspy.configure(lm=lm)
8383
```
8484

@@ -99,7 +99,7 @@ Instead of wrangling prompts or training jobs, DSPy (Declarative Self-improving
9999
```python linenums="1"
100100
lm = dspy.LM("openai/meta-llama/Llama-3.1-8B-Instruct",
101101
api_base="http://localhost:7501/v1", # ensure this points to your port
102-
api_key="local", model_type='chat')
102+
api_key="local", model_type="chat")
103103
dspy.configure(lm=lm)
104104
```
105105

@@ -118,7 +118,7 @@ Instead of wrangling prompts or training jobs, DSPy (Declarative Self-improving
118118

119119
```python linenums="1"
120120
import dspy
121-
lm = dspy.LM('openai/your-model-name', api_key='PROVIDER_API_KEY', api_base='YOUR_PROVIDER_URL')
121+
lm = dspy.LM("openai/your-model-name", api_key="PROVIDER_API_KEY", api_base="YOUR_PROVIDER_URL")
122122
dspy.configure(lm=lm)
123123
```
124124

@@ -161,10 +161,10 @@ DSPy shifts your focus from tinkering with prompt strings to **programming with
161161

162162
```python linenums="1"
163163
def search_wikipedia(query: str) -> list[str]:
164-
results = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')(query, k=3)
165-
return [x['text'] for x in results]
164+
results = dspy.ColBERTv2(url="http://20.102.90.50:2017/wiki17_abstracts")(query, k=3)
165+
return [x["text"] for x in results]
166166
167-
rag = dspy.ChainOfThought('context, question -> response')
167+
rag = dspy.ChainOfThought("context, question -> response")
168168

169169
question = "What's the name of the castle that David Gregory inherited?"
170170
rag(context=search_wikipedia(question), question=question)
@@ -187,7 +187,7 @@ DSPy shifts your focus from tinkering with prompt strings to **programming with
187187
"""Classify sentiment of a given sentence."""
188188
189189
sentence: str = dspy.InputField()
190-
sentiment: Literal['positive', 'negative', 'neutral'] = dspy.OutputField()
190+
sentiment: Literal["positive", "negative", "neutral"] = dspy.OutputField()
191191
confidence: float = dspy.OutputField()
192192

193193
classify = dspy.Predict(Classify)
@@ -239,8 +239,8 @@ DSPy shifts your focus from tinkering with prompt strings to **programming with
239239
return dspy.PythonInterpreter({}).execute(expression)
240240

241241
def search_wikipedia(query: str):
242-
results = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')(query, k=3)
243-
return [x['text'] for x in results]
242+
results = dspy.ColBERTv2(url="http://20.102.90.50:2017/wiki17_abstracts")(query, k=3)
243+
return [x["text"] for x in results]
244244

245245
react = dspy.ReAct("question -> answer: float", tools=[evaluate_math, search_wikipedia])
246246

@@ -311,14 +311,14 @@ DSPy shifts your focus from tinkering with prompt strings to **programming with
311311

312312
??? "Using DSPy in practice: from quick scripting to building sophisticated systems."
313313

314-
Standard prompts conflate interface (what should the LM do?) with implementation (how do we tell it to do that?). DSPy isolates the former as _signatures_ so we can infer the latter or learn it from data — in the context of a bigger program.
314+
Standard prompts conflate interface ("what should the LM do?") with implementation ("how do we tell it to do that?"). DSPy isolates the former as _signatures_ so we can infer the latter or learn it from data — in the context of a bigger program.
315315

316316
Even before you start using optimizers, DSPy's modules allow you to script effective LM systems as ergonomic, portable _code_. Across many tasks and LMs, we maintain _signature test suites_ that assess the reliability of the built-in DSPy adapters. Adapters are the components that map signatures to prompts prior to optimization. If you find a task where a simple prompt consistently outperforms idiomatic DSPy for your LM, consider that a bug and [file an issue](https://github.com/stanfordnlp/dspy/issues). We'll use this to improve the built-in adapters.
317317

318318

319319
## 2) **Optimizers** tune the prompts and weights of your AI modules.
320320

321-
DSPy provides you with the tools to compile high-level code with natural language annotations into the low-level computations, prompts, or weight updates that align your LM with your programs structure and metrics. If you change your code or your metrics, you can simply re-compile accordingly.
321+
DSPy provides you with the tools to compile high-level code with natural language annotations into the low-level computations, prompts, or weight updates that align your LM with your program's structure and metrics. If you change your code or your metrics, you can simply re-compile accordingly.
322322

323323
Given a few tens or hundreds of representative _inputs_ of your task and a _metric_ that can measure the quality of your system's outputs, you can use a DSPy optimizer. Different optimizers in DSPy work by **synthesizing good few-shot examples** for every module, like `dspy.BootstrapRS`,<sup>[1](https://arxiv.org/abs/2310.03714)</sup> **proposing and intelligently exploring better natural-language instructions** for every prompt, like `dspy.MIPROv2`,<sup>[2](https://arxiv.org/abs/2406.11695)</sup> and **building datasets for your modules and using them to finetune the LM weights** in your system, like `dspy.BootstrapFinetune`.<sup>[3](https://arxiv.org/abs/2407.10930)</sup>
324324

@@ -342,11 +342,11 @@ Given a few tens or hundreds of representative _inputs_ of your task and a _metr
342342
import dspy
343343
from dspy.datasets import HotPotQA
344344

345-
dspy.configure(lm=dspy.LM('openai/gpt-4o-mini'))
345+
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini"))
346346

347347
def search_wikipedia(query: str) -> list[str]:
348-
results = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')(query, k=3)
349-
return [x['text'] for x in results]
348+
results = dspy.ColBERTv2(url="http://20.102.90.50:2017/wiki17_abstracts")(query, k=3)
349+
return [x["text"] for x in results]
350350

351351
trainset = [x.with_inputs('question') for x in HotPotQA(train_seed=2024, train_size=500).train]
352352
react = dspy.ReAct("question -> answer", tools=[search_wikipedia])
@@ -364,7 +364,7 @@ Given a few tens or hundreds of representative _inputs_ of your task and a _metr
364364
class RAG(dspy.Module):
365365
def __init__(self, num_docs=5):
366366
self.num_docs = num_docs
367-
self.respond = dspy.ChainOfThought('context, question -> response')
367+
self.respond = dspy.ChainOfThought("context, question -> response")
368368

369369
def forward(self, question):
370370
context = search(question, k=self.num_docs) # defined in tutorial linked below
@@ -403,10 +403,10 @@ Given a few tens or hundreds of representative _inputs_ of your task and a _metr
403403

404404
```python linenums="1"
405405
import dspy
406-
dspy.configure(lm=dspy.LM('openai/gpt-4o-mini-2024-07-18'))
406+
dspy.configure(lm=dspy.LM("openai/gpt-4o-mini-2024-07-18"))
407407

408408
# Define the DSPy module for classification. It will use the hint at training time, if available.
409-
signature = dspy.Signature("text, hint -> label").with_updated_fields('label', type_=Literal[tuple(CLASSES)])
409+
signature = dspy.Signature("text, hint -> label").with_updated_fields("label", type_=Literal[tuple(CLASSES)])
410410
classify = dspy.ChainOfThought(signature)
411411

412412
# Optimize via BootstrapFinetune.

0 commit comments

Comments
 (0)