@@ -142,11 +142,11 @@ local config = {
 		},
 		{
 			provider = "pplx",
-			name = "ChatPerplexityMixtral",
+			name = "ChatPerplexityLlama3.1-8B",
 			chat = true,
 			command = false,
 			-- string with model name or table with model name and parameters
-			model = { model = "mixtral-8x7b-instruct", temperature = 1.1, top_p = 1 },
+			model = { model = "llama-3.1-sonar-small-128k-chat", temperature = 1.1, top_p = 1 },
 			-- system prompt (use this to specify the persona/role of the AI)
 			system_prompt = require("gp.defaults").chat_system_prompt,
 		},
@@ -172,13 +172,15 @@ local config = {
 		},
 		{
 			provider = "ollama",
-			name = "ChatOllamaLlama3",
+			name = "ChatOllamaLlama3.1-8B",
 			chat = true,
 			command = false,
 			-- string with model name or table with model name and parameters
 			model = {
-				model = "llama3",
-				num_ctx = 8192,
+				model = "llama3.1",
+				temperature = 0.6,
+				top_p = 1,
+				min_p = 0.05,
 			},
 			-- system prompt (use this to specify the persona/role of the AI)
 			system_prompt = "You are a general AI assistant.",
@@ -223,7 +225,7 @@ local config = {
 			name = "CodeCopilot",
 			chat = false,
 			command = true,
-			-- string with the Copilot engine name or table with engine name and parameters if applicable
+			-- string with model name or table with model name and parameters
 			model = { model = "gpt-4o", temperature = 0.8, top_p = 1, n = 1 },
 			-- system prompt (use this to specify the persona/role of the AI)
 			system_prompt = require("gp.defaults").code_system_prompt,
@@ -239,11 +241,11 @@ local config = {
 		},
 		{
 			provider = "pplx",
-			name = "CodePerplexityMixtral",
+			name = "CodePerplexityLlama3.1-8B",
 			chat = false,
 			command = true,
 			-- string with model name or table with model name and parameters
-			model = { model = "mixtral-8x7b-instruct", temperature = 0.8, top_p = 1 },
+			model = { model = "llama-3.1-sonar-small-128k-chat", temperature = 0.8, top_p = 1 },
 			system_prompt = require("gp.defaults").code_system_prompt,
 		},
 		{
@@ -266,21 +268,18 @@ local config = {
 		},
 		{
 			provider = "ollama",
-			name = "CodeOllamaLlama3",
+			name = "CodeOllamaLlama3.1-8B",
 			chat = false,
 			command = true,
-			-- string with the Copilot engine name or table with engine name and parameters if applicable
+			-- string with model name or table with model name and parameters
 			model = {
-				model = "llama3",
-				temperature = 1.9,
+				model = "llama3.1",
+				temperature = 0.4,
 				top_p = 1,
-				num_ctx = 8192,
+				min_p = 0.05,
 			},
 			-- system prompt (use this to specify the persona/role of the AI)
-			system_prompt = "You are an AI working as a code editor providing answers.\n\n"
-				.. "Use 4 SPACES FOR INDENTATION.\n"
-				.. "Please AVOID COMMENTARY OUTSIDE OF THE SNIPPET RESPONSE.\n"
-				.. "START AND END YOUR ANSWER WITH:\n\n```",
+			system_prompt = require("gp.defaults").code_system_prompt,
 		},
 	},
 
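Taken together, these hunks rename the Perplexity and Ollama agents for the Llama 3.1 models, replace `num_ctx` with explicit sampler settings (`temperature`, `top_p`, `min_p`), and point the Ollama code agent at the shared `gp.defaults` code prompt instead of a bespoke one. Below is a minimal sketch of how the updated Ollama code agent could be carried into a user config; it assumes gp.nvim's standard `require("gp").setup()` entry point and an Ollama server on its default port, and simply restates the agent fields from the diff:

```lua
require("gp").setup({
	agents = {
		{
			provider = "ollama",
			name = "CodeOllamaLlama3.1-8B",
			chat = false,
			command = true,
			model = {
				model = "llama3.1",
				temperature = 0.4, -- low temperature keeps code edits fairly deterministic
				top_p = 1,
				min_p = 0.05, -- prune tokens below 5% of the most likely token's probability
			},
			system_prompt = require("gp.defaults").code_system_prompt,
		},
	},
})
```

With this in place, something like `:GpAgent CodeOllamaLlama3.1-8B` should select the agent, and `ollama pull llama3.1` fetches the model if it is not already available locally.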