You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
dumbpilot/src/extension.ts

125 lines
4.0 KiB

import { ok } from 'assert';
import * as vscode from 'vscode';
import commentPrefix from './comments.json';
import {
LlamaRequest,
createLlamacppRequest,
llamacppRequestEndpoint,
llamacppMakeRequest,
} from './llamacpp-api';
import {
OpenAICompletionRequest,
createOpenAIAPIRequest,
openAIAPIRequestEndpoint,
openAIMakeRequest,
} from './openai-api';
import {
ResponseData,
showMessageWithTimeout,
showPendingStatusBar,
} from './common';
/**
 * Extension entry point. Registers the enable/disable commands and the
 * inline completion provider that queries a llama.cpp or OpenAI-compatible
 * server for predictions.
 */
export function activate(context: vscode.ExtensionContext) {
	console.log('dumbpilot is now active');

	let config = vscode.workspace.getConfiguration('dumbpilot');

	// Re-read the configuration whenever any setting changes so the
	// provider below always sees current values.
	context.subscriptions.push(
		vscode.workspace.onDidChangeConfiguration(() => {
			config = vscode.workspace.getConfiguration('dumbpilot');
		})
	);

	// TODO: work with local configurations
	context.subscriptions.push(
		vscode.commands.registerCommand('dumbpilot.enableCompletion', () => {
			// update() returns a Thenable; fire-and-forget is intentional here.
			void config.update('completionEnabled', true);
		}),
		// FIX: this disposable was previously reassigned but never pushed,
		// so the disable command leaked on deactivation.
		vscode.commands.registerCommand('dumbpilot.disableCompletion', () => {
			void config.update('completionEnabled', false);
		})
	);

	// Register a new provider of inline completions, this does not decide how it is invoked
	// only what the completion should be
	// https://github.com/microsoft/vscode-extension-samples/blob/main/inline-completions/src/extension.ts
	const provider: vscode.InlineCompletionItemProvider = {
		async provideInlineCompletionItems(document, position, context, token) {
			// disable if predictive completion is disabled
			if ((config.get('completionEnabled') as boolean) === false) {
				return null;
			}

			// Since for every completion we will query the server, we want to filter out
			// automatic completion invokes
			if (context.triggerKind === vscode.InlineCompletionTriggerKind.Automatic) {
				return null;
			}

			// FIXME: I don't know if this works — the callback's return value is
			// ignored; it only logs. The server-side request is not aborted.
			token.onCancellationRequested(() => {
				console.log('dumbpilot: operation cancelled, may still be running on the server');
			});

			const result: vscode.InlineCompletionList = {
				items: [],
			};

			// Get the document's text and split it at the cursor into the
			// prefix (before) and suffix (after) sent to the model.
			const doc_text = document.getText();
			const doc_off = document.offsetAt(position);
			let doc_before = doc_text.substring(0, doc_off);
			const doc_after = doc_text.substring(doc_off);

			// TODO: prune text up to a maximum context length

			// Prefix the filename in a comment, using the comment delimiters
			// for the current language from comments.json.
			const fname = document.fileName.split('/').at(-1);
			const lang = document.languageId;
			// FIX: previously an unguarded (prefixes as any)[lang][0] threw a
			// TypeError for any language missing from comments.json, breaking
			// completions entirely for that language. Now we simply skip the
			// filename comment when no delimiters are known.
			const delims = (commentPrefix as Record<string, [string, string] | undefined>)[lang];
			if (delims !== undefined) {
				const [pfx, sfx] = delims;
				// FIXME: is there a more efficient way?
				doc_before = pfx + ' ' + fname + sfx + '\n' + doc_before;
			}

			// actually make the request
			let data: ResponseData = { content: '', tokens: 0, time: 0 };
			let promise: Promise<ResponseData>;
			if (config.get('API') === 'OpenAI') {
				const request: OpenAICompletionRequest = createOpenAIAPIRequest(
					config,
					doc_before,
					doc_after
				);
				const endpoint: string = openAIAPIRequestEndpoint(config);
				promise = openAIMakeRequest(request, endpoint);
			} else {
				const request: LlamaRequest = createLlamacppRequest(config, doc_before, doc_after);
				const endpoint: string = llamacppRequestEndpoint(config);
				promise = llamacppMakeRequest(request, endpoint);
			}

			showPendingStatusBar('dumbpilot waiting', promise);
			data = await promise;
			showMessageWithTimeout(
				`predicted ${data.tokens} tokens in ${data.time.toFixed(2)} seconds`,
				1500
			);

			result.items.push({
				insertText: data.content,
				range: new vscode.Range(position, position),
			});
			return result;
		},
	};

	// FIX: the provider registration returns a Disposable that was previously
	// dropped; push it so it is cleaned up on deactivation.
	context.subscriptions.push(
		vscode.languages.registerInlineCompletionItemProvider({ pattern: '**' }, provider)
	);
}
/**
 * Called by VS Code when the extension is deactivated.
 * All resources are released via `context.subscriptions`, so there is
 * nothing further to clean up here.
 */
export function deactivate(): void {}