fix(context-16k): switch to gpt-3.5-turbo-16k for a 16k-token context window

Christopher 2023-06-21 20:33:13 -04:00
parent 7e8e3ba13e
commit 7f1af8007c


@@ -114,7 +114,7 @@ const openGPTContextPanelCommand = vscode.commands.registerCommand('extension.op
     // Call OpenAI API with the question and file contents
     try {
       const chatCompletion = await openai.createChatCompletion({
-        model: "gpt-3.5-turbo",
+        model: "gpt-3.5-turbo-16k",
         messages: [
          { role: "system", content: "Answer the coding questions, only provide the code and documentation, explaining the solution after providing the code." },
          { role: "user", content: question },