Organized code for further use

This commit is contained in:
Leyla Becker 2025-07-18 23:33:42 -05:00
parent b57ed5108d
commit afd26b271c

View file

@ -3,10 +3,68 @@
import * as vscode from 'vscode'; import * as vscode from 'vscode';
import { Ollama } from 'ollama/browser'; import { Ollama } from 'ollama/browser';
// Model and prompt-delimiter configuration.
// TODO: make these configurable by extension setting
const MODEL = 'deepseek-coder:1.3b';
const PREFIX_START = '<fileStart>';
const PREFIX_END = '</fileStart>';
const SUFFIX_START = '<fileEnd>';
const SUFFIX_END = '</fileEnd>';
// Ollama server host; undefined falls back to the client library's default.
const HOST = undefined;
const ollama = new Ollama({ const ollama = new Ollama({
// host: 'http://defiant:11434' host: HOST,
}); });
// Whether the given model natively supports fill-in-the-middle via a
// `suffix` field. Currently hard-coded to false until capability
// detection against the Ollama API is wired up.
const getModelSupportsSuffix = async (model: string): Promise<boolean> => {
	// TODO: query the model's capabilities, e.g.
	//   const info = await ollama.show({ model });
	//   return info.capabilities.includes('suffix');
	return false;
};
// Build a prefix-only completion prompt: a short file header (project,
// file name, language) followed by everything in the document before the
// cursor, opened with PREFIX_START so the model continues from there.
const getPrompt = (document: vscode.TextDocument, position: vscode.Position): string => {
	const textBeforeCursor = document.getText(
		new vscode.Range(0, 0, position.line, position.character)
	);
	const headerTemplate = `In an english code base with the file.\nfile:\nproject {PROJECT_NAME}\nfile {FILE_NAME}\nlanguage {LANG}\nFile:\n${PREFIX_START}\n`;
	const header = headerTemplate
		.replace("{PROJECT_NAME}", vscode.workspace.name || "Untitled")
		.replace("{FILE_NAME}", document.fileName)
		.replace("{LANG}", document.languageId);
	return header + textBeforeCursor;
};
// Build a fill-in-the-middle prompt for models WITHOUT native suffix
// support: file header, then the text after the cursor wrapped in
// SUFFIX_START/SUFFIX_END, then PREFIX_START followed by the text before
// the cursor, so the model generates exactly at the cursor position.
const getPromptWithSuffix = (document: vscode.TextDocument, position: vscode.Position): string => {
	// Text before the cursor (document start -> cursor).
	const prefix = document.getText(new vscode.Range(0, 0, position.line, position.character));
	// Text after the cursor (cursor -> end of last line).
	const suffix = document.getText(
		new vscode.Range(
			position.line,
			position.character,
			document.lineCount - 1,
			document.lineAt(document.lineCount - 1).text.length
		)
	);
	const messageSuffix = `End of the file:\n${SUFFIX_START}\n${suffix}\n${SUFFIX_END}\n`;
	const messagePrefix = `Start of the file:\n${PREFIX_START}`;
	const messageHeader = `In an english code base with the file.\nfile:\nproject {PROJECT_NAME}\nfile {FILE_NAME}\nlanguage {LANG}\nThis is the end of and then the start of the file.`
		.replace("{PROJECT_NAME}", vscode.workspace.name || "Untitled")
		.replace("{FILE_NAME}", document.fileName)
		.replace("{LANG}", document.languageId);
	// BUG FIX: `prefix` was previously concatenated onto messageHeader,
	// which placed the pre-cursor text BEFORE the suffix section and left
	// PREFIX_START dangling at the end of the prompt with nothing after it.
	// It must follow PREFIX_START so generation continues from the cursor.
	return `${messageHeader}\n${messageSuffix}\n${messagePrefix}\n${prefix}`;
};
// Return every character from the cursor to the end of the document.
const getSuffix = (document: vscode.TextDocument, position: vscode.Position): string => {
	const lastLine = document.lineCount - 1;
	const endOfLastLine = document.lineAt(lastLine).text.length;
	return document.getText(
		new vscode.Range(position.line, position.character, lastLine, endOfLastLine)
	);
};
// This method is called when your extension is activated // This method is called when your extension is activated
// Your extension is activated the very first time the command is executed // Your extension is activated the very first time the command is executed
export function activate(context: vscode.ExtensionContext) { export function activate(context: vscode.ExtensionContext) {
@ -44,30 +102,19 @@ export function activate(context: vscode.ExtensionContext) {
console.log('provideInlineCompletionItems triggered'); console.log('provideInlineCompletionItems triggered');
try { try {
const modelSupportsSuffix = await getModelSupportsSuffix(MODEL)
const DOCUMENT_START = '<file>' const prompt = modelSupportsSuffix ? getPrompt(document, position) : getPromptWithSuffix(document, position)
const DOCUMENT_END = '</file>' const suffix = modelSupportsSuffix ? undefined : getSuffix(document, position)
const MESSAGE_HEADER = `\n{PROJECT_NAME}\nfile {FILE_NAME}\nlanguage {LANG}\n${DOCUMENT_START}\n`;
const prefix = document.getText(new vscode.Range(0, 0, position.line, position.character))
const suffix = document.getText(new vscode.Range(position.line, position.character, document.lineCount - 1, document.lineAt(document.lineCount - 1).text.length))
const prompt = MESSAGE_HEADER
.replace("{PROJECT_NAME}", vscode.workspace.name || "Untitled")
.replace("{FILE_NAME}", document.fileName)
.replace("{LANG}", document.languageId) + prefix;
const response = await ollama.generate({ const response = await ollama.generate({
model: 'deepseek-r1:8b', model: MODEL,
prompt, prompt,
suffix, suffix,
raw: true, raw: true,
stream: true, stream: true,
options: { options: {
num_predict: 10, num_predict: 10,
stop: [DOCUMENT_END] stop: [PREFIX_END]
}, },
}) })