-const API_KEY = ''
+const configIpts = document.querySelectorAll('.config-ipt')
 
 const searchInput = document.querySelector('.search-ipt')
 const searchBtn = document.querySelector('.search-btn')
-
 const messageList = document.querySelector('.message-list')
 
-searchBtn.addEventListener('click', async () => {
-  const searchContent = searchInput.value
-  console.log('input', searchContent)
+let currentTab = null
+const config = {
+  BASE_URL: '',
+  API_KEY: ''
+}
+let searchContent = ''
+
+init()
+function init() {
+  chrome.tabs
+    .query({
+      active: true,
+      lastFocusedWindow: true
+    })
+    .then(([tab]) => (currentTab = tab))
+
+  chrome.storage.local.get(['BASE_URL', 'API_KEY']).then((res) => {
+    config.BASE_URL = res.BASE_URL ?? ''
+    config.API_KEY = res.API_KEY ?? ''
+
+    configIpts.forEach((item) => {
+      const name = item.dataset.name
+      if (name === 'BASE_URL') {
+        item.value = config.BASE_URL
+      } else {
+        item.value = config.API_KEY
+      }
 
-  const div = document.createElement('div')
-  div.setAttribute('class', 'item')
+      item.addEventListener('change', (event) => {
+        const name = event.target.dataset.name
+        const value = event.target.value
+
+        chrome.storage.local.set({ [name]: value })
+      })
+    })
+  })
 
-  const aiResponseMessage = await fetchOpenAI(searchContent)
+  searchBtn.addEventListener('click', async () => {
+    searchContent = searchInput.value
 
+    chrome.tabs.sendMessage(currentTab.id, 'get body content text')
+  })
+
+  chrome.runtime.onMessage.addListener(async (bodyContentText) => {
+    const el = document.createElement('div')
+    el.setAttribute('class', 'item')
+    messageList.insertBefore(el, messageList.firstElementChild)
+
+    const reader = await fetchOpenAIStreamReader(searchContent, bodyContentText)
+    await handleStreamReaderAnswer(el, reader)
+  })
+}
+
+async function fetchOpenAIStreamReader(searchContent, bodyContentText) {
   console.log('searchContent', searchContent)
-  console.log('aiResponseMessage', aiResponseMessage)
+  console.log('bodyContentText', bodyContentText)
 
-  div.innerText = aiResponseMessage
-  messageList.insertBefore(div, messageList.firstElementChild)
-})
+  const rule = `
+  You must answer the question asked under the name "client" based on the content provided under the name "page text".
 
-async function fetchOpenAI(searchContent) {
-  const bodyOptions = {
+  1. Receive input: take the content named "page text" (body.innerText) and the question named "client".
+  2. Preprocess: clean the text of body.innerText, tokenize it and tag parts of speech.
+  3. Classify the question: determine the type of the "client" question (factual, opinion-based, etc.).
+  4. Analyze context: find the paragraphs or sentences in body.innerText related to the question and analyze their surrounding context.
+  5. Extract or generate an answer: based on the question type and context, extract the relevant fragments or generate an appropriate answer.
+  6. Return the answer: send the generated answer back to the user.
+  `
+
+  const Options = {
     model: 'gpt-3.5-turbo',
     messages: [
-      { role: 'system', content: 'Answer the user question based on the content' },
-      { role: 'user', content: searchContent }
+      { role: 'system', content: rule },
+      { role: 'user', name: 'page-text', content: bodyContentText },
+      { role: 'user', name: 'client', content: searchContent }
     ],
-    temperature: 0.1
+    stream: true
   }
 
   try {
-    const response = await fetch('', {
+    const response = await fetch(`${config.BASE_URL}/chat/completions`, {
       headers: {
         'Content-Type': 'application/json',
-        Authorization: `Bearer ${API_KEY}`
+        Authorization: `Bearer ${config.API_KEY}`
       },
       method: 'post',
-      body: JSON.stringify(bodyOptions)
+      body: JSON.stringify(Options)
     })
 
-    const responseData = await response.json()
-
-    const result = responseData.choices[0].message.content
-
-    return result
+    return response.body.getReader()
   } catch (error) {
-    console.log(`error: ${error.message}`)
+    console.log(`fetchOpenAIStreamReader error: ${error.message}`)
   }
 }
+
+async function handleStreamReaderAnswer(el, reader) {
+  const decoder = new TextDecoder()
+
+  return reader.read().then(function pump({ done, value }) {
+    if (done) return
+
+    // Decode the data for the current chunk
+    const text = decoder.decode(value)
+
+    const values = handleOpenAIChunkData(text)
+
+    values.forEach((itemText) => {
+      const item = JSON.parse(itemText)
+
+      if (item.choices[0].finish_reason === 'stop') return
+
+      console.log(item)
+      const content = item.choices[0].delta.content ?? '' // delta.content is absent in the first, role-only chunk
+      el.innerText += content
+    })
+
+    return reader.read().then(pump)
+  })
+}
+
+function handleOpenAIChunkData(chunk) {
+  const values = chunk
+    .split('data: ')
+    .filter((text) => text && !text.includes('DONE'))
+
+  console.log(values)
+
+  return values
+}
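
Note: the popup code above assumes a companion content script in the active tab that answers the 'get body content text' message with the page's body.innerText; that script is not part of this diff. A minimal sketch of what it presumably looks like (file name and manifest wiring are assumptions):

// content-script.js (hypothetical companion file, not part of this commit)
chrome.runtime.onMessage.addListener((message) => {
  if (message === 'get body content text') {
    // Reply to the popup, which listens via chrome.runtime.onMessage in init()
    chrome.runtime.sendMessage(document.body.innerText)
  }
})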
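handleOpenAIChunkData relies on the shape of OpenAI's streamed responses: each decoded read() chunk is server-sent-event text where every event is prefixed with 'data: ' and the stream ends with 'data: [DONE]'. Roughly (payloads abridged):

// data: {"choices":[{"index":0,"delta":{"content":"Hel"},"finish_reason":null}]}
//
// data: {"choices":[{"index":0,"delta":{"content":"lo"},"finish_reason":null}]}
//
// data: [DONE]

Splitting on 'data: ' and filtering out empty and [DONE] entries leaves the JSON event bodies that JSON.parse consumes in handleStreamReaderAnswer. One caveat: a single read() can end mid-event, so this parser assumes each chunk arrives as whole events.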