@@ -5,6 +5,7 @@ import {
 	showMessageWithTimeout,
 	showPendingStatusBar,
 } from './common';
+import { config } from 'process';
 
 // oogabooga/text-generation-webui OpenAI compatible API
 // https://github.com/oobabooga/text-generation-webui/wiki/12-%E2%80%90-OpenAI-API
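Note: the added `import { config } from 'process';` looks like an accidental editor auto-import; the `config` these functions actually read settings from (e.g. `config.get('llamaSeed')` below) appears to be the `vscode.WorkspaceConfiguration` value already in scope, not Node's `process.config`.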
@@ -73,7 +74,7 @@ type OpenAICompletionSuccessResponse = {
 	model: string;
 	object?: string;
 	usage: {
-		completion_tokens: number;
+		completion_tokens?: number;
 		prompt_tokens: number;
 		total_tokens: number;
 	};
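Making `completion_tokens` optional lines up with the streaming path added further down, where each chunk is parsed on its own and may not carry usage data. A minimal sketch of the guarded accumulation this enables (names are illustrative, not from the diff):

```ts
// Sketch: with `completion_tokens?` callers must guard the access, mirroring
// `ret.tokens += data.usage?.completion_tokens || 0` in the request hunk below.
type Usage = {
	completion_tokens?: number;
	prompt_tokens: number;
	total_tokens: number;
};

function accumulateTokens(total: number, usage: Usage | undefined): number {
	// `?? 0` keeps the running total stable when a chunk omits the field
	return total + (usage?.completion_tokens ?? 0);
}
```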
@@ -108,7 +109,7 @@ export function createOpenAIAPIRequest(
 		typical_p: config.get('llamaTypical_p') as number,
 		tfs: config.get('llamaTailfree_z,') as number,
 		seed: config.get('llamaSeed') as number,
-		stream: false,
+		stream: config.get('llamaAPIStream'),
 	};
 
 	const fim = config.get('fimEnabled') as boolean;
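`stream` goes from hard-coded `false` to a user setting. For context, a sketch of how such a flag is read from VS Code configuration; the `dumbpilot` section name and the `false` default are assumptions, only the `llamaAPIStream` key appears in the diff:

```ts
import * as vscode from 'vscode';

// Assumed section name: only the key 'llamaAPIStream' is shown in the diff.
const config = vscode.workspace.getConfiguration('dumbpilot');
// A typed getter with an explicit default would also avoid the `as boolean`
// casts used for the other settings above.
const stream: boolean = config.get<boolean>('llamaAPIStream', false);
```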
@@ -131,7 +132,7 @@ export function openAIAPIRequestEndpoint(config: vscode.WorkspaceConfiguration):
 }
 
 export async function openAIMakeRequest(
-	request: OpenAICompletionRequest,
+	request_body: OpenAICompletionRequest,
 	endpoint: string
 ): Promise<ResponseData> {
 	let ret: ResponseData = {
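The rename from `request` to `request_body` frees the name `request` for the `RequestInit` options object introduced in the next hunk, which is built once and passed straight to `fetch(endpoint, request)`.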
@@ -140,45 +141,67 @@ export async function openAIMakeRequest(
 		time: 0,
 	};
 
 	let data: OpenAICompletionResponse;
 
+	const is_stream: boolean = request_body.stream === true ? true : false;
+
+	// format the request
+	const request: RequestInit = {
+		method: 'POST',
+		headers: {
+			'content-type': 'application/json; charset=UTF-8',
+		},
+		body: JSON.stringify(request_body),
+	};
+
 	// try to send the request to the running server
 	try {
-		const response_promise = fetch(endpoint, {
-			method: 'POST',
-			headers: {
-				'content-type': 'application/json; charset=UTF-8',
-			},
-			body: JSON.stringify(request),
-		});
+		const response_promise = fetch(endpoint, request);
 		showPendingStatusBar('dumbpilot waiting', response_promise);
-		// TODO: measure the time it takes the server to respond
-		let resp_time: number = 0;
+
 		const response = await response_promise;
-		if (response.ok === false) {
-			throw new Error('llama server request is not ok??');
-		}
-		data = (await response.json()) as OpenAICompletionResponse;
-		// check wether the remote gave back an error
-		if (Object.hasOwn(data, 'detail') === true) {
-			data = data as OpenAICompletionFailureResponse;
-			// TODO: why did it error?
-			throw new Error('OpenAI Endpoint Error');
-		}
-
-		// unpack the data
-		data = data as OpenAICompletionSuccessResponse;
-		// FIXME: why the choices may be multiple?
-		// TODO: display the multiple choices
-		ret.content = data.choices[0].text;
-		ret.tokens = data.usage.completion_tokens;
-		ret.time = resp_time;
+
+		// if doing a stream request we have to attach a reader and join
+		// the individual responses
+		// read the data chunk by chunk using asynchronous iteration
+		if (response.body === null) {
+			throw new Error('null response body');
+		}
+
+		// start a timer
+		const timer_start = performance.now();
+
+		for await (const chunk of response.body) {
+			// FIXME: why the fuck do I have to do this shite
+			let data_text = new TextDecoder().decode(chunk);
+			data_text = data_text.substring(data_text.indexOf('{'));
+			let data: OpenAICompletionResponse;
+			try {
+				data = JSON.parse(data_text);
+			} catch (e: any) {
+				console.error(e);
+				return ret;
+			}
+			//console.log(JSON.stringify(data));
+			if (Object.hasOwn(data, 'detail') === true) {
+				data = data as OpenAICompletionFailureResponse;
+				// TODO: why did it error?
+				throw new Error('OpenAI Endpoint Error');
+			}
+			// unpack the data
+			data = data as OpenAICompletionSuccessResponse;
+			// FIXME: why the choices may be multiple?
+			// TODO: display the multiple choices
+			//console.log(data.choices[0].text);
+			ret.content += data.choices[0].text;
+			ret.tokens += data.usage?.completion_tokens || 0;
+		}
+
+		// stop the timer
+		const timer_end = performance.now();
+		ret.time = (timer_end - timer_start) / 1000.0;
 
 		showMessageWithTimeout(`predicted ${ret.tokens} tokens in ${ret.time} seconds`, 1500);
 	} catch (e: any) {
+		console.error(e);
 		const err = e as TypeError;
 		const cause: FetchErrorCause = err.cause as FetchErrorCause;
 		const estr: string =