Skip to content

Commit 7ebc54a

Browse files
authored
System prompt configurable (#96)
* Add an abstract base completer class
* Add a plugin to set the system prompts
* lint
* Add settings icon
* Remove the system prompt settings of chrome AI
1 parent 1692670 commit 7ebc54a

File tree

19 files changed

+272
-119
lines changed

19 files changed

+272
-119
lines changed

schema/chat.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
{
2-
"title": "Chat configuration",
2+
"title": "Chat Configuration",
33
"description": "Configuration for the chat panel",
44
"jupyter.lab.setting-icon": "jupyter-chat::chat",
55
"jupyter.lab.setting-icon-label": "Jupyter Chat",

schema/system-prompts.json

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
{
2+
"title": "AI system prompts",
3+
"description": "System prompts",
4+
"jupyter.lab.setting-icon": "@jupyterlite/ai:jupyternaut-lite",
5+
"jupyter.lab.setting-icon-label": "JupyterLite AI Chat",
6+
"type": "object",
7+
"properties": {
8+
"chatSystemPrompt": {
9+
"type": "string",
10+
"title": "Chat message system prompt",
11+
"description": "The system prompt for the chat messages",
12+
"default": "You are Jupyternaut, a conversational assistant living in JupyterLab to help users.\nYou are not a language model, but rather an application built on a foundation model from $provider_name$.\nYou are talkative and you provide lots of specific details from the foundation model's context.\nYou may use Markdown to format your response.\nIf your response includes code, they must be enclosed in Markdown fenced code blocks (with triple backticks before and after).\nIf your response includes mathematical notation, they must be expressed in LaTeX markup and enclosed in LaTeX delimiters.\nAll dollar quantities (of USD) must be formatted in LaTeX, with the `$` symbol escaped by a single backslash `\\`.\n- Example prompt: `If I have \\\\$100 and spend \\\\$20, how much money do I have left?`\n- **Correct** response: `You have \\(\\$80\\) remaining.`\n- **Incorrect** response: `You have $80 remaining.`\nIf you do not know the answer to a question, answer truthfully by responding that you do not know.\nThe following is a friendly conversation between you and a human."
13+
},
14+
"completionSystemPrompt": {
15+
"type": "string",
16+
"title": "Completion system prompt",
17+
"description": "The system prompt for the completion",
18+
"default": "You are an application built to provide helpful code completion suggestions.\nYou should only produce code. Keep comments to a minimum, use the programming language comment syntax. Produce clean code.\nThe code is written in JupyterLab, a data analysis and code development environment which can execute code extended with additional syntax for interactive features, such as magics.\nOnly give raw strings back, do not format the response using backticks.\nThe output should be a single string, and should only contain the code that will complete the given code passed as input, no explanation whatsoever. Do not include the prompt in the output, only the string that should be appended to the current input. Here is the code to complete:"
19+
}
20+
},
21+
"additionalProperties": false
22+
}

scripts/settings-checker.js

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,8 @@ const providers = {
4949
},
5050
ChromeAI: {
5151
path: 'node_modules/@langchain/community/experimental/llms/chrome_ai.d.ts',
52-
type: 'ChromeAIInputs'
52+
type: 'ChromeAIInputs',
53+
excludedProps: ['systemPrompt']
5354
},
5455
MistralAI: {
5556
path: 'node_modules/@langchain/mistralai/dist/chat_models.d.ts',

src/base-completer.ts

Lines changed: 39 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,13 +2,17 @@ import {
22
CompletionHandler,
33
IInlineCompletionContext
44
} from '@jupyterlab/completer';
5+
import { BaseLanguageModel } from '@langchain/core/language_models/base';
56
import { ReadonlyPartialJSONObject } from '@lumino/coreutils';
67

8+
import { DEFAULT_COMPLETION_SYSTEM_PROMPT } from './default-prompts';
9+
import { IAIProviderRegistry } from './tokens';
10+
711
export interface IBaseCompleter {
812
/**
913
* The completion prompt.
1014
*/
11-
prompt: string;
15+
readonly systemPrompt: string;
1216

1317
/**
1418
* The function to fetch a new completion.
@@ -24,6 +28,33 @@ export interface IBaseCompleter {
2428
): Promise<any>;
2529
}
2630

31+
export abstract class BaseCompleter implements IBaseCompleter {
32+
constructor(options: BaseCompleter.IOptions) {
33+
this._providerRegistry = options.providerRegistry;
34+
}
35+
36+
/**
37+
* Get the system prompt for the completion.
38+
*/
39+
get systemPrompt(): string {
40+
return (
41+
this._providerRegistry.completerSystemPrompt ??
42+
DEFAULT_COMPLETION_SYSTEM_PROMPT
43+
);
44+
}
45+
46+
/**
47+
* The fetch request for the LLM completer.
48+
*/
49+
abstract fetch(
50+
request: CompletionHandler.IRequest,
51+
context: IInlineCompletionContext
52+
): Promise<any>;
53+
54+
protected _providerRegistry: IAIProviderRegistry;
55+
protected abstract _completer: BaseLanguageModel<any, any>;
56+
}
57+
2758
/**
2859
* The namespace for the base completer.
2960
*/
@@ -32,6 +63,13 @@ export namespace BaseCompleter {
3263
* The options for the constructor of a completer.
3364
*/
3465
export interface IOptions {
66+
/**
67+
* The provider registry.
68+
*/
69+
providerRegistry: IAIProviderRegistry;
70+
/**
71+
* The settings of the provider.
72+
*/
3573
settings: ReadonlyPartialJSONObject;
3674
}
3775
}

src/chat-handler.ts

Lines changed: 7 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@ import {
2323
} from '@langchain/core/messages';
2424
import { UUID } from '@lumino/coreutils';
2525

26+
import { DEFAULT_CHAT_SYSTEM_PROMPT } from './default-prompts';
2627
import { jupyternautLiteIcon } from './icons';
27-
import { chatSystemPrompt } from './provider';
2828
import { IAIProviderRegistry } from './tokens';
2929
import { AIChatModel } from './types/ai-model';
3030

@@ -56,15 +56,9 @@ export class ChatHandler extends AbstractChatModel {
5656
constructor(options: ChatHandler.IOptions) {
5757
super(options);
5858
this._providerRegistry = options.providerRegistry;
59-
this._prompt = chatSystemPrompt({
60-
provider_name: this._providerRegistry.currentName
61-
});
6259

6360
this._providerRegistry.providerChanged.connect(() => {
6461
this._errorMessage = this._providerRegistry.chatError;
65-
this._prompt = chatSystemPrompt({
66-
provider_name: this._providerRegistry.currentName
67-
});
6862
});
6963
}
7064

@@ -90,13 +84,12 @@ export class ChatHandler extends AbstractChatModel {
9084
}
9185

9286
/**
93-
* Getter and setter for the initial prompt.
87+
* Get/set the system prompt for the chat.
9488
*/
95-
get prompt(): string {
96-
return this._prompt;
97-
}
98-
set prompt(value: string) {
99-
this._prompt = value;
89+
get systemPrompt(): string {
90+
return (
91+
this._providerRegistry.chatSystemPrompt ?? DEFAULT_CHAT_SYSTEM_PROMPT
92+
);
10093
}
10194

10295
async sendMessage(message: INewMessage): Promise<boolean> {
@@ -131,7 +124,7 @@ export class ChatHandler extends AbstractChatModel {
131124

132125
this._history.messages.push(msg);
133126

134-
const messages = mergeMessageRuns([new SystemMessage(this._prompt)]);
127+
const messages = mergeMessageRuns([new SystemMessage(this.systemPrompt)]);
135128
messages.push(
136129
...this._history.messages.map(msg => {
137130
if (msg.sender.username === 'User') {
@@ -206,7 +199,6 @@ export class ChatHandler extends AbstractChatModel {
206199

207200
private _providerRegistry: IAIProviderRegistry;
208201
private _personaName = 'AI';
209-
private _prompt: string;
210202
private _errorMessage: string = '';
211203
private _history: IChatHistory = { messages: [] };
212204
private _defaultErrorMessage = 'AI provider not configured';

src/default-prompts.ts

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
/*
2+
* Copyright (c) Jupyter Development Team.
3+
* Distributed under the terms of the Modified BSD License.
4+
*/
5+
6+
export const DEFAULT_CHAT_SYSTEM_PROMPT = `
7+
You are Jupyternaut, a conversational assistant living in JupyterLab to help users.
8+
You are not a language model, but rather an application built on a foundation model from $provider_name$.
9+
You are talkative and you provide lots of specific details from the foundation model's context.
10+
You may use Markdown to format your response.
11+
If your response includes code, they must be enclosed in Markdown fenced code blocks (with triple backticks before and after).
12+
If your response includes mathematical notation, they must be expressed in LaTeX markup and enclosed in LaTeX delimiters.
13+
All dollar quantities (of USD) must be formatted in LaTeX, with the \`$\` symbol escaped by a single backslash \`\\\`.
14+
- Example prompt: \`If I have \\\\$100 and spend \\\\$20, how much money do I have left?\`
15+
- **Correct** response: \`You have \\(\\$80\\) remaining.\`
16+
- **Incorrect** response: \`You have $80 remaining.\`
17+
If you do not know the answer to a question, answer truthfully by responding that you do not know.
18+
The following is a friendly conversation between you and a human.
19+
`;
20+
21+
export const DEFAULT_COMPLETION_SYSTEM_PROMPT = `
22+
You are an application built to provide helpful code completion suggestions.
23+
You should only produce code. Keep comments to a minimum, use the
24+
programming language comment syntax. Produce clean code.
25+
The code is written in JupyterLab, a data analysis and code development
26+
environment which can execute code extended with additional syntax for
27+
interactive features, such as magics.
28+
Only give raw strings back, do not format the response using backticks.
29+
The output should be a single string, and should only contain the code that will complete the
30+
given code passed as input, no explanation whatsoever.
31+
Do not include the prompt in the output, only the string that should be appended to the current input.
32+
Here is the code to complete:
33+
`;

src/default-providers/Anthropic/completer.ts

Lines changed: 5 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -5,24 +5,14 @@ import {
55
import { ChatAnthropic } from '@langchain/anthropic';
66
import { AIMessage, SystemMessage } from '@langchain/core/messages';
77

8-
import { BaseCompleter, IBaseCompleter } from '../../base-completer';
9-
import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
8+
import { BaseCompleter } from '../../base-completer';
109

11-
export class AnthropicCompleter implements IBaseCompleter {
10+
export class AnthropicCompleter extends BaseCompleter {
1211
constructor(options: BaseCompleter.IOptions) {
12+
super(options);
1313
this._completer = new ChatAnthropic({ ...options.settings });
1414
}
1515

16-
/**
17-
* Getter and setter for the initial prompt.
18-
*/
19-
get prompt(): string {
20-
return this._prompt;
21-
}
22-
set prompt(value: string) {
23-
this._prompt = value;
24-
}
25-
2616
async fetch(
2717
request: CompletionHandler.IRequest,
2818
context: IInlineCompletionContext
@@ -34,7 +24,7 @@ export class AnthropicCompleter implements IBaseCompleter {
3424
const trimmedPrompt = prompt.trim();
3525

3626
const messages = [
37-
new SystemMessage(this._prompt),
27+
new SystemMessage(this.systemPrompt),
3828
new AIMessage(trimmedPrompt)
3929
];
4030

@@ -65,6 +55,5 @@ export class AnthropicCompleter implements IBaseCompleter {
6555
}
6656
}
6757

68-
private _completer: ChatAnthropic;
69-
private _prompt: string = COMPLETION_SYSTEM_PROMPT;
58+
protected _completer: ChatAnthropic;
7059
}

src/default-providers/ChromeAI/completer.ts

Lines changed: 5 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@ import {
55
import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
66
import { HumanMessage, SystemMessage } from '@langchain/core/messages';
77

8-
import { BaseCompleter, IBaseCompleter } from '../../base-completer';
9-
import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
8+
import { BaseCompleter } from '../../base-completer';
109

1110
/**
1211
* Regular expression to match the '```' string at the start of a string.
@@ -29,21 +28,12 @@ const CODE_BLOCK_START_REGEX = /^```(?:[a-zA-Z]+)?\n?/;
2928
*/
3029
const CODE_BLOCK_END_REGEX = /```$/;
3130

32-
export class ChromeCompleter implements IBaseCompleter {
31+
export class ChromeCompleter extends BaseCompleter {
3332
constructor(options: BaseCompleter.IOptions) {
33+
super(options);
3434
this._completer = new ChromeAI({ ...options.settings });
3535
}
3636

37-
/**
38-
* Getter and setter for the initial prompt.
39-
*/
40-
get prompt(): string {
41-
return this._prompt;
42-
}
43-
set prompt(value: string) {
44-
this._prompt = value;
45-
}
46-
4737
async fetch(
4838
request: CompletionHandler.IRequest,
4939
context: IInlineCompletionContext
@@ -54,7 +44,7 @@ export class ChromeCompleter implements IBaseCompleter {
5444
const trimmedPrompt = prompt.trim();
5545

5646
const messages = [
57-
new SystemMessage(this._prompt),
47+
new SystemMessage(this.systemPrompt),
5848
new HumanMessage(trimmedPrompt)
5949
];
6050

@@ -79,6 +69,5 @@ export class ChromeCompleter implements IBaseCompleter {
7969
}
8070
}
8171

82-
private _completer: ChromeAI;
83-
private _prompt: string = COMPLETION_SYSTEM_PROMPT;
72+
protected _completer: ChromeAI;
8473
}

src/default-providers/ChromeAI/settings-schema.json

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,9 +11,6 @@
1111
},
1212
"temperature": {
1313
"type": "number"
14-
},
15-
"systemPrompt": {
16-
"type": "string"
1714
}
1815
},
1916
"additionalProperties": false,

src/default-providers/MistralAI/completer.ts

Lines changed: 5 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -10,16 +10,16 @@ import {
1010
import { ChatMistralAI } from '@langchain/mistralai';
1111
import { Throttler } from '@lumino/polling';
1212

13-
import { BaseCompleter, IBaseCompleter } from '../../base-completer';
14-
import { COMPLETION_SYSTEM_PROMPT } from '../../provider';
13+
import { BaseCompleter } from '../../base-completer';
1514

1615
/**
1716
* The Mistral API has a rate limit of 1 request per second
1817
*/
1918
const INTERVAL = 1000;
2019

21-
export class CodestralCompleter implements IBaseCompleter {
20+
export class CodestralCompleter extends BaseCompleter {
2221
constructor(options: BaseCompleter.IOptions) {
22+
super(options);
2323
this._completer = new ChatMistralAI({ ...options.settings });
2424
this._throttler = new Throttler(
2525
async (messages: BaseMessage[]) => {
@@ -46,16 +46,6 @@ export class CodestralCompleter implements IBaseCompleter {
4646
);
4747
}
4848

49-
/**
50-
* Getter and setter for the initial prompt.
51-
*/
52-
get prompt(): string {
53-
return this._prompt;
54-
}
55-
set prompt(value: string) {
56-
this._prompt = value;
57-
}
58-
5949
async fetch(
6050
request: CompletionHandler.IRequest,
6151
context: IInlineCompletionContext
@@ -64,7 +54,7 @@ export class CodestralCompleter implements IBaseCompleter {
6454
const prompt = text.slice(0, cursorOffset);
6555

6656
const messages: BaseMessage[] = [
67-
new SystemMessage(this._prompt),
57+
new SystemMessage(this.systemPrompt),
6858
new HumanMessage(prompt)
6959
];
7060

@@ -77,6 +67,5 @@ export class CodestralCompleter implements IBaseCompleter {
7767
}
7868

7969
private _throttler: Throttler;
80-
private _completer: ChatMistralAI;
81-
private _prompt: string = COMPLETION_SYSTEM_PROMPT;
70+
protected _completer: ChatMistralAI;
8271
}

0 commit comments

Comments (0)