From afd26b271c795f734bcff5e43d3a01df9fc57122 Mon Sep 17 00:00:00 2001
From: Leyla Becker
Date: Fri, 18 Jul 2025 23:33:42 -0500
Subject: [PATCH] organized code for further use

---
 src/extension.ts | 81 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 64 insertions(+), 17 deletions(-)

diff --git a/src/extension.ts b/src/extension.ts
index ee470cc..884ee20 100644
--- a/src/extension.ts
+++ b/src/extension.ts
@@ -3,10 +3,68 @@
 import * as vscode from 'vscode';
 import { Ollama } from 'ollama/browser';
 
+const MODEL = 'deepseek-coder:1.3b';
+
+const PREFIX_START = ''
+const PREFIX_END = ''
+
+const SUFFIX_START = ''
+const SUFFIX_END = ''
+
+const HOST = undefined;
+
+// TODO: make these configurable by extension setting
 const ollama = new Ollama({
-    // host: 'http://defiant:11434'
+    host: HOST,
 });
 
+const getModelSupportsSuffix = async (model: string) => {
+    // TODO: get if model supports suffixes and use that if available
+
+    // const response = await ollama.show({
+    //     model: model
+    // })
+
+    // model.capabilities.includes('suffix')
+    return false
+}
+
+const getPrompt = (document: vscode.TextDocument, position: vscode.Position) => {
+    const prefix = document.getText(new vscode.Range(0, 0, position.line, position.character))
+
+    const messageHeader = `In an english code base with the file.\nfile:\nproject {PROJECT_NAME}\nfile {FILE_NAME}\nlanguage {LANG}\nFile:\n${PREFIX_START}\n`;
+
+    const prompt = messageHeader
+        .replace("{PROJECT_NAME}", vscode.workspace.name || "Untitled")
+        .replace("{FILE_NAME}", document.fileName)
+        .replace("{LANG}", document.languageId) + prefix;
+
+    return prompt
+}
+
+const getPromptWithSuffix = (document: vscode.TextDocument, position: vscode.Position) => {
+    const prefix = document.getText(new vscode.Range(0, 0, position.line, position.character))
+    const suffix = getSuffix(document, position)
+
+    const messageSuffix = `End of the file:\n${SUFFIX_START}\n${suffix}\n${SUFFIX_END}\n`
+    const messagePrefix = `Start of the file:\n${PREFIX_START}`
+
+    const messageHeader = `In an english code base with the file.\nfile:\nproject {PROJECT_NAME}\nfile {FILE_NAME}\nlanguage {LANG}\nThis is the end of and then the start of the file.`
+        .replace("{PROJECT_NAME}", vscode.workspace.name || "Untitled")
+        .replace("{FILE_NAME}", document.fileName)
+        .replace("{LANG}", document.languageId);
+
+    const prompt = `${messageHeader}\n${messageSuffix}\n${messagePrefix}\n${prefix}`;
+
+    return prompt
+}
+
+const getSuffix = (document: vscode.TextDocument, position: vscode.Position) => {
+    const suffix = document.getText(new vscode.Range(position.line, position.character, document.lineCount - 1, document.lineAt(document.lineCount - 1).text.length))
+
+    return suffix
+}
+
 // This method is called when your extension is activated
 // Your extension is activated the very first time the command is executed
 export function activate(context: vscode.ExtensionContext) {
@@ -44,30 +102,19 @@
         console.log('provideInlineCompletionItems triggered');
 
         try {
-
-            const DOCUMENT_START = ''
-            const DOCUMENT_END = ''
-
-            const MESSAGE_HEADER = `\n{PROJECT_NAME}\nfile {FILE_NAME}\nlanguage {LANG}\n${DOCUMENT_START}\n`;
-
-            const prefix = document.getText(new vscode.Range(0, 0, position.line, position.character))
-            const suffix = document.getText(new vscode.Range(position.line, position.character, document.lineCount - 1, document.lineAt(document.lineCount - 1).text.length))
-
-            const prompt = MESSAGE_HEADER
-                .replace("{PROJECT_NAME}", vscode.workspace.name || "Untitled")
-                .replace("{FILE_NAME}", document.fileName)
-                .replace("{LANG}", document.languageId) + prefix;
-
+            const modelSupportsSuffix = await getModelSupportsSuffix(MODEL)
+            const prompt = modelSupportsSuffix ? getPrompt(document, position) : getPromptWithSuffix(document, position)
+            const suffix = modelSupportsSuffix ? getSuffix(document, position) : undefined
             const response = await ollama.generate({
-                model: 'deepseek-r1:8b',
+                model: MODEL,
                 prompt,
                 suffix,
                 raw: true,
                 stream: true,
                 options: {
                     num_predict: 10,
-                    stop: [DOCUMENT_END]
+                    stop: [PREFIX_END]
                 },
            })