last chance before changing to OpenAI API

master
Alessandro Mauri 11 months ago
parent 04f8db150d
commit efb85c2cb4
Changed files:
1. TODO.md (1 line changed)
2. package.json (84 lines changed)
3. src/extension.ts (18 lines changed)

@@ -10,3 +10,4 @@
 [] - option to backup and restore model settings
 [] - add a window to quickly modify model configs
 [] - decorate ai generated text https://github.com/microsoft/vscode-extension-samples/tree/main/decorator-sample
+[] - when completing while there is an active selection, either replace the selection with the completion or use only the selection as context instead of the whole file (see the sketch below)
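A minimal sketch of what that selection-aware behaviour could look like (hypothetical, not part of this commit; `getCompletionContext` is an illustrative name):

```ts
import * as vscode from 'vscode';

// Hypothetical helper: prefer the active selection over the whole file.
function getCompletionContext(editor: vscode.TextEditor): string {
    const sel = editor.selection;
    if (!sel.isEmpty) {
        // There is an active selection: use just the selected text as
        // context instead of sending the entire document.
        return editor.document.getText(sel);
    }
    return editor.document.getText();
}
```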

@@ -141,19 +141,75 @@
"default": "http://0.0.0.0:8080",
"description": "llama.cpp server address"
},
"dumbpilot.llamaCtxsize": {"type": "number", "default": 2048},
"dumbpilot.llamaMaxtokens": {"type": "number", "default": -1},
"dumbpilot.llamaMirostat": {"type": "number", "default": 0},
"dumbpilot.llamaRepeatPenalty": {"type": "number", "default": 1.11},
"dumbpilot.llamaFrequencyPenalty": {"type": "number", "default": 0.0},
"dumbpilot.llamaPresencePenalty": {"type": "number", "default": 0.0},
"dumbpilot.llamaRepeatCtx": {"type": "number", "default": 256},
"dumbpilot.llamaTemperature": {"type": "number", "default": 0.25},
"dumbpilot.llamaTop_p": {"type": "number", "default": 0.95},
"dumbpilot.llamaTop_k": {"type": "number", "default": 40},
"dumbpilot.llamaTypical_p": {"type": "number", "default": 0.95},
"dumbpilot.llamaTailfree_z": {"type": "number", "default": 0.5},
"dumbpilot.llamaSeed": {"type": "number", "default": -1},
"dumbpilot.llamaCtxsize": {
"type": "number",
"default": 2048
},
"dumbpilot.llamaMaxtokens": {
"type": "number",
"default": -1
},
"dumbpilot.llamaMirostat": {
"type": "number",
"default": 0
},
"dumbpilot.llamaRepeatPenalty": {
"type": "number",
"default": 1.11
},
"dumbpilot.llamaFrequencyPenalty": {
"type": "number",
"default": 0.0
},
"dumbpilot.llamaPresencePenalty": {
"type": "number",
"default": 0.0
},
"dumbpilot.llamaRepeatCtx": {
"type": "number",
"default": 256
},
"dumbpilot.llamaTemperature": {
"type": "number",
"default": 0.25
},
"dumbpilot.llamaTop_p": {
"type": "number",
"default": 0.95
},
"dumbpilot.llamaTop_k": {
"type": "number",
"default": 40
},
"dumbpilot.llamaTypical_p": {
"type": "number",
"default": 0.95
},
"dumbpilot.llamaTailfree_z": {
"type": "number",
"default": 0.5
},
"dumbpilot.llamaSeed": {
"type": "number",
"default": -1
},
"dumbpilot.fimBeginString": {
"type": "string",
"default": "<|fim▁begin|>"
},
"dumbpilot.fimHoleString": {
"type": "string",
"default": "<|fim▁hole|>"
},
"dumbpilot.fimEndString": {
"type": "string",
"default": "<|fim▁end|>"
},
"dumbpilot.useFillInMiddleRequest": {
"type": "boolean",
"default": false,
"description": "Use the fill in middle request type provided by llama.cpp server, otherwise use the FIM token strings to delimit the text"
},
"dumbpilot.llamaCachePrompt": {
"type": "boolean",
"default": true,
@@ -161,7 +217,7 @@
 },
 "dumbpilot.llamaInstructModel": {
     "type": "boolean",
-    "default": "false",
+    "default": false,
     "description": "For use with instruct models"
 },
 "dumbpilot.llamaSystemPrompt": {

@@ -173,6 +173,7 @@ export function activate(context: vscode.ExtensionContext) {
 doc_before = pfx + ' ' + fname + sfx + '\n' + doc_before;
 const fim = config.get("fimEnabled") as boolean;
+const fimRequest = config.get("useFillInMiddleRequest") as boolean;
 let req_str: string;
 let request: llamaRequest = {
     n_predict: config.get("llamaMaxtokens") as number,
@@ -191,15 +192,26 @@ export function activate(context: vscode.ExtensionContext) {
     cache_prompt: config.get("llamaCachePrompt") as boolean
 };
 // check if fill in middle is enabled and fill the request prompt accordingly
 if (fim === true) {
     req_str = '/infill';
     request.input_prefix = doc_before;
     request.input_suffix = doc_after;
+    if (fimRequest === true) {
+        req_str = '/infill';
+        request.input_prefix = doc_before;
+        request.input_suffix = doc_after;
+    } else {
+        const fim_beg = config.get("fimBeginString") as string;
+        const fim_hole = config.get("fimHoleString") as string;
+        const fim_end = config.get("fimEndString") as string;
+        req_str = '/completion';
+        request.prompt = fim_beg + doc_before + fim_hole + doc_after + fim_end;
+    }
 } else {
     req_str = '/completion';
     request.prompt = doc_before;
 }
 console.log(JSON.stringify(request));
 let data: llamaData;
 // try to send the request to the running server
 try {
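The hunk is cut off at the `try`. A minimal sketch of how the request could be sent and the completion read back, reusing `config`, `req_str`, `request`, and `data` from the code above (assumptions: the server-address setting is called `llamaHost`, a hypothetical name since its key is not visible in this diff, and the llama.cpp server returns the generated text in the `content` field of its JSON response, which is the documented behaviour of /completion and /infill):

```ts
// Sketch, not the commit's code: POST the request object to the
// llama.cpp server and pull the generated text out of the reply.
const endpoint = (config.get('llamaHost') as string) + req_str; // hypothetical key name
try {
    const response = await fetch(endpoint, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(request),
    });
    // Both /completion and /infill return the generated text in the
    // `content` field of the response body.
    data = (await response.json()) as llamaData;
    console.log(data.content);
} catch (e) {
    vscode.window.showErrorMessage('dumbpilot: llama.cpp request failed: ' + String(e));
}
```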
