From 04f8db150deae3a9cdd2c8940f9d59f96b58a359 Mon Sep 17 00:00:00 2001 From: Alessandro Mauri Date: Thu, 30 Nov 2023 00:00:11 +0100 Subject: [PATCH] more config --- TODO.md | 8 +++++++- package.json | 11 ++++++++++- src/extension.ts | 12 ++++++------ 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/TODO.md b/TODO.md index 74f49dd..73164f5 100644 --- a/TODO.md +++ b/TODO.md @@ -3,4 +3,10 @@ [] - add fill in middle [x] - add config option to disable the extension [] - add command to test and query connection to server -[x] - add feedback when waiting response \ No newline at end of file +[x] - add feedback when waiting response +[] - add a chat window +[] - if the model is an instruct-type add the system prompt to the chat +[] - add an icon +[] - option to backup and restore model settings +[] - add a window to quickly modify model configs +[] - decorate ai generated text https://github.com/microsoft/vscode-extension-samples/tree/main/decorator-sample \ No newline at end of file diff --git a/package.json b/package.json index eb8bd8c..e400124 100644 --- a/package.json +++ b/package.json @@ -155,9 +155,18 @@ "dumbpilot.llamaTailfree_z": {"type": "number", "default": 0.5}, "dumbpilot.llamaSeed": {"type": "number", "default": -1}, "dumbpilot.llamaCachePrompt": { - "type": "bool", + "type": "boolean", "default": true, "description": "Enable prompt caching for faster results" + }, + "dumbpilot.llamaInstructModel": { + "type": "boolean", + "default": false, + "description": "For use with instruct models" + }, + "dumbpilot.llamaSystemPrompt": { + "type": "string", + "description": "The system prompt that the model considers at the beginning of every request, used by instruct models" } } } diff --git a/src/extension.ts b/src/extension.ts index 0a8384b..3cf65e5 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -111,17 +111,13 @@ export function activate(context: vscode.ExtensionContext) { config = vscode.workspace.getConfiguration("dumbpilot"); })); - let 
completion_enabled: boolean = config.get("completionEnabled") as boolean; - // TODO: work with local configurations let disposable = vscode.commands.registerCommand("dumbpilot.enableCompletion", () => { - completion_enabled = true; config.update("completionEnabled", true); }); context.subscriptions.push(disposable); disposable = vscode.commands.registerCommand("dumbpilot.disableCompletion", () => { - completion_enabled = false; config.update("completionEnabled", false); }); @@ -131,11 +127,12 @@ export function activate(context: vscode.ExtensionContext) { const provider: vscode.InlineCompletionItemProvider = { async provideInlineCompletionItems(document, position, context, token) { - if (completion_enabled === false) { + // disable if predictive completion is disabled + if (config.get("completionEnabled") as boolean === false) { return null; } - // Since for every completion we want to query the server, we want to filter out + // Since for every completion we will query the server, we want to filter out // automatic completion invokes if (context.triggerKind === vscode.InlineCompletionTriggerKind.Automatic) { return null; @@ -224,6 +221,9 @@ export function activate(context: vscode.ExtensionContext) { } data = await response.json() as llamaData; + const gen_tokens = data.timings.predicted_n; + const gen_time = (data.timings.predicted_ms / 1000).toFixed(2); + showMessageWithTimeout(`predicted ${gen_tokens} tokens in ${gen_time} seconds`, 1500); } catch (e: any) { const err = e as TypeError;