mirror of https://github.com/SilasMarvin/lsp-ai.git
Added tests and vscode extension is working
@@ -22,12 +22,62 @@
       }
     ],
     "configuration": {
-      "title": "Configuration",
+      "title": "LSP-AI",
       "properties": {
-        "configuration.json": {
-          "type": "json",
-          "default": "{}",
-          "description": "JSON configuration for LSP AI"
+        "lsp-ai.serverConfiguration": {
+          "type": "object",
+          "default": {
+            "memory": {
+              "file_store": {}
+            },
+            "models": {
+              "model1": {
+                "type": "openai",
+                "chat_endpoint": "https://api.openai.com/v1/chat/completions",
+                "model": "gpt-4o",
+                "auth_token_env_var_name": "OPENAI_API_KEY"
+              }
+            }
+          },
+          "description": "JSON configuration for LSP-AI language server"
+        },
+        "lsp-ai.generationConfiguration": {
+          "type": "object",
+          "default": {
+            "model": "model1",
+            "parameters": {
+              "max_tokens": 128,
+              "max_context": 1024,
+              "messages": [
+                {
+                  "role": "system",
+                  "content": "You are a code completion tool. Use the [CONTEXT] and [CURRENT_CODE] provided to replace the <CURSOR> with the correct code. Do not reply with anything but valid code"
+                },
+                {
+                  "role": "user",
+                  "content": "[CONTEXT]\nprint(\"hello\")\n\n[CURRENT_CODE]\ndef print_to_screen(a): pri<CURSOR>\n\nprint_to_screen(\"test\")"
+                },
+                {
+                  "role": "system",
+                  "content": "nt_to_screen(a)"
+                },
+                {
+                  "role": "user",
+                  "content": "[CONTEXT]\ndef mul_two_nums(a, b):\n return a * b\n\n[CURRENT_CODE]\n# Test 5 * 25\nass<CURSOR>"
+                },
+                {
+                  "role": "system",
+                  "content": "ert mul_two_nums(5, 25) == 125"
+                },
+                {
+                  "role": "user",
+                  "content": "[CONTENT]\n{CONTENT}\n\n[CURRENT_CODE]\n{CODE}"
+                }
+              ],
+              "max_new_tokens": 32
+            }
+          },
+          "description": "JSON configuration for LSP-AI generation"
+        }
       }
     }
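
The new `lsp-ai.serverConfiguration` default above is what the extension forwards to the server as LSP `initializationOptions` (see the extension hunks below). As a rough, self-contained sketch of how JSON of this shape deserializes with serde — the type names here are illustrative stand-ins, not the server's exact structs in src/config.rs:

use std::collections::HashMap;

use serde::Deserialize;
use serde_json::json;

// Externally tagged: matches the {"file_store": {}} nesting.
#[derive(Debug, Deserialize)]
enum Memory {
    #[serde(rename = "file_store")]
    FileStore(serde_json::Value),
}

// Internally tagged: matches the "type": "openai" discriminator.
#[derive(Debug, Deserialize)]
#[serde(tag = "type", rename_all = "lowercase")]
enum Model {
    OpenAI {
        chat_endpoint: Option<String>,
        model: String,
        auth_token_env_var_name: Option<String>,
    },
}

#[derive(Debug, Deserialize)]
struct ServerConfiguration {
    memory: Memory,
    models: HashMap<String, Model>,
}

fn main() {
    // The same JSON as the `lsp-ai.serverConfiguration` default.
    let options = json!({
        "memory": { "file_store": {} },
        "models": {
            "model1": {
                "type": "openai",
                "chat_endpoint": "https://api.openai.com/v1/chat/completions",
                "model": "gpt-4o",
                "auth_token_env_var_name": "OPENAI_API_KEY"
            }
        }
    });
    let config: ServerConfiguration = serde_json::from_value(options).unwrap();
    println!("{config:?}");
}
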
@@ -11,14 +11,16 @@ let client: LanguageClient;

 export function activate(context: vscode.ExtensionContext) {
   // Configure the server options
-  let serverOptions: ServerOptions = {
+  const serverOptions: ServerOptions = {
     command: "lsp-ai",
     transport: TransportKind.stdio,
   };

   // Options to control the language client
-  let clientOptions: LanguageClientOptions = {
-    documentSelector: [{ pattern: "**" }]
+  const config = vscode.workspace.getConfiguration("lsp-ai");
+  const clientOptions: LanguageClientOptions = {
+    documentSelector: [{ pattern: "**" }],
+    initializationOptions: config.serverConfiguration
   };

   // Create the language client and start the client
@@ -35,11 +37,15 @@ export function activate(context: vscode.ExtensionContext) {
   // Register generate function
   const generateCommand = 'lsp-ai.generation';
   const generateCommandHandler = (editor: vscode.TextEditor) => {
+    console.log("THE GENERATION CONFIGURATION");
+    console.log(config.generationConfiguration);
     let params = {
       textDocument: {
         uri: editor.document.uri.toString(),
       },
-      position: editor.selection.active
+      position: editor.selection.active,
+      model: config.generationConfiguration.model,
+      parameters: config.generationConfiguration.parameters
     };
     client.sendRequest("textDocument/generation", params).then(result => {
       editor.edit((edit) => {
@@ -51,6 +57,7 @@ export function activate(context: vscode.ExtensionContext) {
   };
   context.subscriptions.push(vscode.commands.registerTextEditorCommand(generateCommand, generateCommandHandler));
+
   // Register as an inline completion provider
   vscode.languages.registerInlineCompletionItemProvider({ pattern: '**' },
     {
       provideInlineCompletionItems: async (document: vscode.TextDocument, position: vscode.Position) => {
@@ -58,7 +65,9 @@ export function activate(context: vscode.ExtensionContext) {
         textDocument: {
           uri: document.uri.toString(),
         },
-        position: position
+        position: position,
+        model: config.generationConfiguration.model,
+        parameters: config.generationConfiguration.parameters
       };
       const result = await client.sendRequest("textDocument/generation", params);
       return [new vscode.InlineCompletionItem(result["generatedText"])];
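
Both request sites now pass `model` and `parameters` from `lsp-ai.generationConfiguration` alongside the document position. A hedged sketch of the payload shape the custom `textDocument/generation` request carries — `GenerationParams` is a hypothetical name for illustration, not the server's actual type:

use lsp_types::{Position, TextDocumentIdentifier};
use serde::Deserialize;

// Hypothetical mirror of the request payload built in the extension above.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GenerationParams {
    text_document: TextDocumentIdentifier,
    position: Position,
    model: String,
    parameters: serde_json::Value, // passed through to the chosen backend
}

fn main() {
    let raw = serde_json::json!({
        "textDocument": { "uri": "file:///tmp/example.py" },
        "position": { "line": 0, "character": 10 },
        "model": "model1",
        "parameters": { "max_tokens": 128 }
    });
    let params: GenerationParams = serde_json::from_value(raw).unwrap();
    println!("{params:?}");
}
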
src/config.rs
@@ -26,12 +26,14 @@ pub enum ValidModel {
 }

 #[derive(Debug, Clone, Deserialize, Serialize)]
+#[serde(deny_unknown_fields)]
 pub struct ChatMessage {
     pub role: String,
     pub content: String,
 }

 #[derive(Debug, Clone, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct Chat {
     pub completion: Option<Vec<ChatMessage>>,
     pub generation: Option<Vec<ChatMessage>>,
@@ -41,6 +43,7 @@ pub struct Chat

 #[derive(Clone, Debug, Deserialize)]
 #[allow(clippy::upper_case_acronyms)]
+#[serde(deny_unknown_fields)]
 pub struct FIM {
     pub start: String,
     pub middle: String,
@@ -48,6 +51,7 @@ pub struct FIM {
 }

 #[derive(Clone, Debug, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct PostgresML {
     pub database_url: Option<String>,
     #[serde(default)]
@@ -55,12 +59,14 @@ pub struct PostgresML {
 }

 #[derive(Clone, Debug, Deserialize, Default)]
+#[serde(deny_unknown_fields)]
 pub struct FileStore {
     #[serde(default)]
     pub crawl: bool,
 }

 #[derive(Clone, Debug, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct Model {
     pub repository: String,
     pub name: Option<String>,
@@ -75,6 +81,7 @@ const fn n_ctx_default() -> u32 {
 }

 #[derive(Clone, Debug, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct LLaMACPP {
     // The model to use
     #[serde(flatten)]
@@ -90,6 +97,7 @@ const fn api_max_requests_per_second_default() -> f32 {
 }

 #[derive(Clone, Debug, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct OpenAI {
     // The auth token env var name
     pub auth_token_env_var_name: Option<String>,
@@ -106,6 +114,7 @@ pub struct OpenAI {
 }

 #[derive(Clone, Debug, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct Anthropic {
     // The auth token env var name
     pub auth_token_env_var_name: Option<String>,
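
The recurring `+#[serde(deny_unknown_fields)]` lines make config deserialization strict: a misspelled key in the user's JSON now fails with an error instead of being silently dropped. A self-contained illustration using the `FileStore` shape from this hunk:

use serde::Deserialize;

#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)]
struct FileStore {
    #[serde(default)]
    crawl: bool,
}

fn main() {
    // A correct key parses.
    assert!(serde_json::from_str::<FileStore>(r#"{"crawl": true}"#).is_ok());
    // A misspelled key ("crawl_files") is now rejected instead of ignored.
    assert!(serde_json::from_str::<FileStore>(r#"{"crawl_files": true}"#).is_err());
    println!("ok");
}
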
@@ -127,12 +136,12 @@ pub struct Completion {
     pub model: String,

     // Args are deserialized by the backend using them
-    #[serde(flatten)]
     #[serde(default)]
-    pub kwargs: Kwargs,
+    pub parameters: Kwargs,
 }

 #[derive(Clone, Debug, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct ValidConfig {
     pub memory: ValidMemoryBackend,
     pub models: HashMap<String, ValidModel>,
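
The `kwargs` field becomes `parameters`, and backend arguments now appear to live under an explicit `parameters` key rather than being flattened into `Completion` (the updated tests below use exactly that shape). A sketch, with `Kwargs` assumed here to be a JSON map:

use std::collections::HashMap;

use serde::Deserialize;

type Kwargs = HashMap<String, serde_json::Value>; // assumption for this sketch

#[derive(Debug, Deserialize)]
struct Completion {
    model: String,
    #[serde(default)]
    parameters: Kwargs,
}

fn main() {
    let completion: Completion = serde_json::from_str(
        r#"{"model": "model1", "parameters": {"max_context": 1024, "max_new_tokens": 32}}"#,
    )
    .unwrap();
    assert_eq!(completion.model, "model1");
    assert_eq!(completion.parameters["max_new_tokens"], 32);
}
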
@@ -204,12 +213,31 @@ impl Config {
         }
     }
 }
+
+// This makes testing much easier.
+#[cfg(test)]
+impl Config {
+    pub fn default_with_file_store_without_models() -> Self {
+        Self {
+            config: ValidConfig {
+                memory: ValidMemoryBackend::FileStore(FileStore { crawl: false }),
+                models: HashMap::new(),
+                completion: None,
+            },
+            _client_params: ValidClientParams {
+                _root_uri: None,
+                _workspace_folders: None,
+            },
+        }
+    }
+}

 #[cfg(test)]
 mod test {
     use super::*;
     use serde_json::json;

     #[test]
+    #[cfg(feature = "llamacpp")]
     fn llama_cpp_config() {
         let args = json!({
             "initializationOptions": {
@@ -227,13 +255,15 @@ mod test {
                 },
                 "completion": {
                     "model": "model1",
-                    "fim": {
-                        "start": "<fim_prefix>",
-                        "middle": "<fim_suffix>",
-                        "end": "<fim_middle>"
-                    },
-                    "max_context": 1024,
-                    "max_new_tokens": 32,
+                    "parameters": {
+                        "fim": {
+                            "start": "<fim_prefix>",
+                            "middle": "<fim_suffix>",
+                            "end": "<fim_middle>"
+                        },
+                        "max_context": 1024,
+                        "max_new_tokens": 32,
+                    }
                 }
             }
         });
@@ -257,17 +287,52 @@
                 },
                 "completion": {
                     "model": "model1",
-                    "messages": [
-                        {
-                            "role": "system",
-                            "content": "You are a code completion chatbot. Use the following context to complete the next segement of code. \n\n{CONTEXT}",
-                        },
-                        {
-                            "role": "user",
-                            "content": "Complete the following code: \n\n{CODE}"
-                        }
-                    ],
-                    "max_new_tokens": 32,
+                    "parameters": {
+                        "messages": [
+                            {
+                                "role": "system",
+                                "content": "Test",
+                            },
+                            {
+                                "role": "user",
+                                "content": "Test {CONTEXT} - {CODE}"
+                            }
+                        ],
+                        "max_new_tokens": 32,
+                    }
                 }
             }
         });
         Config::new(args).unwrap();
     }

+    #[test]
+    fn anthropic_config() {
+        let args = json!({
+            "initializationOptions": {
+                "memory": {
+                    "file_store": {}
+                },
+                "models": {
+                    "model1": {
+                        "type": "anthropic",
+                        "completions_endpoint": "https://api.anthropic.com/v1/messages",
+                        "model": "claude-3-haiku-20240307",
+                        "auth_token_env_var_name": "ANTHROPIC_API_KEY",
+                    },
+                },
+                "completion": {
+                    "model": "model1",
+                    "parameters": {
+                        "system": "Test",
+                        "messages": [
+                            {
+                                "role": "user",
+                                "content": "Test {CONTEXT} - {CODE}"
+                            }
+                        ],
+                        "max_new_tokens": 32,
+                    }
+                }
+            }
+        });
src/memory_backends/file_store.rs

@@ -62,17 +62,18 @@ impl FileStore {
             .iter()
             .filter(|f| **f != current_document_uri)
         {
-            let needed = characters.saturating_sub(rope.len_chars());
+            let needed = characters.saturating_sub(rope.len_chars() + 1);
             if needed == 0 {
                 break;
             }
             let file_map = self.file_map.lock();
             let r = file_map.get(file).context("Error file not found")?;
-            let slice_max = needed.min(r.len_chars());
+            let slice_max = needed.min(r.len_chars() + 1);
             let rope_str_slice = r
-                .get_slice(0..slice_max)
+                .get_slice(0..slice_max - 1)
                 .context("Error getting slice")?
                 .to_string();
+            rope.insert(0, "\n");
             rope.insert(0, &rope_str_slice);
             cursor_index += slice_max;
         }
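
The `+ 1` adjustments reserve one character for the `\n` separator now inserted between documents, and `saturating_sub` keeps the running budget from underflowing. The arithmetic in isolation (illustrative helper, not the real API):

// Character budget left for the next file, reserving one char for the "\n"
// separator inserted between stitched documents.
fn remaining_budget(budget_chars: usize, current_len: usize) -> usize {
    budget_chars.saturating_sub(current_len + 1)
}

fn main() {
    assert_eq!(remaining_budget(10, 4), 5); // 10 - (4 + 1)
    assert_eq!(remaining_budget(3, 7), 0); // saturates at zero, no underflow panic
}
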
@@ -225,7 +226,7 @@ impl MemoryBackend for FileStore {
     }

     #[instrument(skip(self))]
-    async fn renamed_file(&self, params: lsp_types::RenameFilesParams) -> anyhow::Result<()> {
+    async fn renamed_files(&self, params: lsp_types::RenameFilesParams) -> anyhow::Result<()> {
         for file_rename in params.files {
             let mut file_map = self.file_map.lock();
             if let Some(rope) = file_map.remove(&file_rename.old_uri) {
@@ -235,3 +236,283 @@ impl MemoryBackend for FileStore {
         Ok(())
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use lsp_types::{
+        DidOpenTextDocumentParams, FileRename, Position, Range, RenameFilesParams,
+        TextDocumentContentChangeEvent, TextDocumentIdentifier, TextDocumentItem,
+        VersionedTextDocumentIdentifier,
+    };
+    use serde_json::json;
+
+    fn generate_base_file_store() -> anyhow::Result<FileStore> {
+        let config = Config::default_with_file_store_without_models();
+        let file_store_config = if let config::ValidMemoryBackend::FileStore(file_store_config) =
+            config.config.memory.clone()
+        {
+            file_store_config
+        } else {
+            anyhow::bail!("requires a file_store_config")
+        };
+        Ok(FileStore::new(file_store_config, config))
+    }
+
+    fn generate_filler_text_document(uri: Option<&str>, text: Option<&str>) -> TextDocumentItem {
+        let uri = uri.unwrap_or("file://filler/");
+        let text = text.unwrap_or("Here is the document body");
+        TextDocumentItem {
+            uri: reqwest::Url::parse(uri).unwrap(),
+            language_id: "filler".to_string(),
+            version: 0,
+            text: text.to_string(),
+        }
+    }
+
+    #[tokio::test]
+    async fn can_open_document() -> anyhow::Result<()> {
+        let params = lsp_types::DidOpenTextDocumentParams {
+            text_document: generate_filler_text_document(None, None),
+        };
+        let file_store = generate_base_file_store()?;
+        file_store.opened_text_document(params).await?;
+        let file = file_store
+            .file_map
+            .lock()
+            .get("file://filler/")
+            .unwrap()
+            .clone();
+        assert_eq!(file.to_string(), "Here is the document body");
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn can_rename_document() -> anyhow::Result<()> {
+        let params = lsp_types::DidOpenTextDocumentParams {
+            text_document: generate_filler_text_document(None, None),
+        };
+        let file_store = generate_base_file_store()?;
+        file_store.opened_text_document(params).await?;
+
+        let params = RenameFilesParams {
+            files: vec![FileRename {
+                old_uri: "file://filler/".to_string(),
+                new_uri: "file://filler2/".to_string(),
+            }],
+        };
+        file_store.renamed_files(params).await?;
+
+        let file = file_store
+            .file_map
+            .lock()
+            .get("file://filler2/")
+            .unwrap()
+            .clone();
+        assert_eq!(file.to_string(), "Here is the document body");
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn can_change_document() -> anyhow::Result<()> {
+        let text_document = generate_filler_text_document(None, None);
+
+        let params = DidOpenTextDocumentParams {
+            text_document: text_document.clone(),
+        };
+        let file_store = generate_base_file_store()?;
+        file_store.opened_text_document(params).await?;
+
+        let params = lsp_types::DidChangeTextDocumentParams {
+            text_document: VersionedTextDocumentIdentifier {
+                uri: text_document.uri.clone(),
+                version: 1,
+            },
+            content_changes: vec![TextDocumentContentChangeEvent {
+                range: Some(Range {
+                    start: Position {
+                        line: 0,
+                        character: 1,
+                    },
+                    end: Position {
+                        line: 0,
+                        character: 3,
+                    },
+                }),
+                range_length: None,
+                text: "a".to_string(),
+            }],
+        };
+        file_store.changed_text_document(params).await?;
+        let file = file_store
+            .file_map
+            .lock()
+            .get("file://filler/")
+            .unwrap()
+            .clone();
+        assert_eq!(file.to_string(), "Hae is the document body");
+
+        let params = lsp_types::DidChangeTextDocumentParams {
+            text_document: VersionedTextDocumentIdentifier {
+                uri: text_document.uri,
+                version: 1,
+            },
+            content_changes: vec![TextDocumentContentChangeEvent {
+                range: None,
+                range_length: None,
+                text: "abc".to_string(),
+            }],
+        };
+        file_store.changed_text_document(params).await?;
+        let file = file_store
+            .file_map
+            .lock()
+            .get("file://filler/")
+            .unwrap()
+            .clone();
+        assert_eq!(file.to_string(), "abc");
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    async fn can_build_prompt() -> anyhow::Result<()> {
+        let text_document = generate_filler_text_document(
+            None,
+            Some(
+                r#"Document Top
+Here is a more complicated document
+
+Some text
+
+The end with a trailing new line
+"#,
+            ),
+        );
+
+        // Test basic completion
+        let params = lsp_types::DidOpenTextDocumentParams {
+            text_document: text_document.clone(),
+        };
+        let file_store = generate_base_file_store()?;
+        file_store.opened_text_document(params).await?;
+
+        let params = json!({});
+        let prompt = file_store
+            .build_prompt(
+                &TextDocumentPositionParams {
+                    text_document: TextDocumentIdentifier {
+                        uri: text_document.uri.clone(),
+                    },
+                    position: Position {
+                        line: 0,
+                        character: 10,
+                    },
+                },
+                params,
+            )
+            .await?;
+        assert_eq!(prompt.context, "");
+        assert_eq!("Document T", prompt.code);
+
+        // Test FIM
+        let params = json!({
+            "fim": {
+                "start": "SS",
+                "middle": "MM",
+                "end": "EE"
+            }
+        });
+        let prompt = file_store
+            .build_prompt(
+                &TextDocumentPositionParams {
+                    text_document: TextDocumentIdentifier {
+                        uri: text_document.uri.clone(),
+                    },
+                    position: Position {
+                        line: 0,
+                        character: 10,
+                    },
+                },
+                params,
+            )
+            .await?;
+        assert_eq!(prompt.context, "");
+        let text = r#"SSDocument TMMop
+Here is a more complicated document
+
+Some text
+
+The end with a trailing new line
+EE"#
+            .to_string();
+        assert_eq!(text, prompt.code);
+
+        // Test chat
+        let params = json!({
+            "messages": []
+        });
+        let prompt = file_store
+            .build_prompt(
+                &TextDocumentPositionParams {
+                    text_document: TextDocumentIdentifier {
+                        uri: text_document.uri.clone(),
+                    },
+                    position: Position {
+                        line: 0,
+                        character: 10,
+                    },
+                },
+                params,
+            )
+            .await?;
+        assert_eq!(prompt.context, "");
+        let text = r#"Document T<CURSOR>op
+Here is a more complicated document
+
+Some text
+
+The end with a trailing new line
+"#
+            .to_string();
+        assert_eq!(text, prompt.code);
+
+        // Test multi-file
+        let text_document2 = generate_filler_text_document(
+            Some("file://filler2"),
+            Some(
+                r#"Document Top2
+Here is a more complicated document
+
+Some text
+
+The end with a trailing new line
+"#,
+            ),
+        );
+        let params = lsp_types::DidOpenTextDocumentParams {
+            text_document: text_document2.clone(),
+        };
+        file_store.opened_text_document(params).await?;
+
+        let params = json!({});
+        let prompt = file_store
+            .build_prompt(
+                &TextDocumentPositionParams {
+                    text_document: TextDocumentIdentifier {
+                        uri: text_document.uri.clone(),
+                    },
+                    position: Position {
+                        line: 0,
+                        character: 10,
+                    },
+                },
+                params,
+            )
+            .await?;
+        assert_eq!(prompt.context, "");
+        assert_eq!(format!("{}\nDocument T", text_document2.text), prompt.code);
+
+        Ok(())
+    }
+}
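
The new tests drive `FileStore` through real LSP params; the buffer type underneath is a rope (the `len_chars`/`get_slice`/`insert` calls earlier suggest the ropey crate). A minimal sketch of the edit that `can_change_document` asserts — replace characters 1..3 with "a" — assuming ropey as a stand-in for the store's internals:

use ropey::Rope;

fn main() {
    let mut rope = Rope::from_str("Here is the document body");
    // Mirror of the first change event: range (0,1)..(0,3) replaced by "a".
    rope.remove(1..3);
    rope.insert(1, "a");
    assert_eq!(rope.to_string(), "Hae is the document body");
}
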
src/memory_backends/mod.rs

@@ -44,7 +44,7 @@ pub trait MemoryBackend {
         &self,
         params: DidChangeTextDocumentParams,
     ) -> anyhow::Result<()>;
-    async fn renamed_file(&self, params: RenameFilesParams) -> anyhow::Result<()>;
+    async fn renamed_files(&self, params: RenameFilesParams) -> anyhow::Result<()>;
     async fn build_prompt(
         &self,
         position: &TextDocumentPositionParams,
src/memory_backends/postgresml/mod.rs

@@ -214,7 +214,7 @@ impl MemoryBackend for PostgresML {
     }

     #[instrument(skip(self))]
-    async fn renamed_file(&self, params: lsp_types::RenameFilesParams) -> anyhow::Result<()> {
+    async fn renamed_files(&self, params: lsp_types::RenameFilesParams) -> anyhow::Result<()> {
         let mut task_collection = self.collection.clone();
         let task_params = params.clone();
         for file in task_params.files {
@@ -240,6 +240,6 @@ impl MemoryBackend for PostgresML {
             .await
             .expect("PGML - Error adding pipeline to collection");
         }
-        self.file_store.renamed_file(params).await
+        self.file_store.renamed_files(params).await
     }
 }
src/memory_worker.rs

@@ -80,7 +80,7 @@ async fn do_task(
         WorkerRequest::DidChangeTextDocument(params) => {
             memory_backend.changed_text_document(params).await?;
         }
-        WorkerRequest::DidRenameFiles(params) => memory_backend.renamed_file(params).await?,
+        WorkerRequest::DidRenameFiles(params) => memory_backend.renamed_files(params).await?,
     }
     anyhow::Ok(())
 }
src/transformer_backends/anthropic.rs

@@ -27,9 +27,11 @@ const fn temperature_default() -> f32 {
     0.1
 }

+// NOTE: We cannot deny unknown fields as the provided parameters may contain other fields relevant to other processes
 #[derive(Debug, Deserialize)]
 pub struct AnthropicRunParams {
-    chat: Vec<ChatMessage>,
+    system: String,
+    messages: Vec<ChatMessage>,
     #[serde(default = "max_tokens_default")]
     pub max_tokens: usize,
     #[serde(default = "top_p_default")]
@@ -71,7 +73,9 @@ impl Anthropic {
         } else if let Some(token) = &self.configuration.auth_token {
             token.to_string()
         } else {
-            anyhow::bail!("Please set `auth_token_env_var_name` or `auth_token` in `transformer->anthropic` to use an Anthropic");
+            anyhow::bail!(
+                "Please set `auth_token_env_var_name` or `auth_token` to use an Anthropic"
+            );
         };
         let res: AnthropicChatResponse = client
             .post(
@@ -110,12 +114,12 @@ impl Anthropic {
         prompt: &Prompt,
         params: AnthropicRunParams,
     ) -> anyhow::Result<String> {
-        let mut messages = format_chat_messages(&params.chat, prompt);
-        if messages[0].role != "system" {
-            anyhow::bail!(
-                "When using Anthropic, the first message in chat must have role = `system`"
-            )
-        }
+        let mut messages = vec![ChatMessage {
+            role: "system".to_string(),
+            content: params.system.clone(),
+        }];
+        messages.extend_from_slice(&params.messages);
+        let mut messages = format_chat_messages(&messages, prompt);
         let system_prompt = messages.remove(0).content;
         self.get_chat(system_prompt, messages, params).await
     }
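
Rather than requiring callers to put a system message first in `chat`, the run params now take `system` and `messages` separately, matching Anthropic's API where the system prompt is a top-level field. The rework above builds one list so `format_chat_messages` can substitute `{CONTEXT}`/`{CODE}` everywhere, then peels the system message back off. A reduced sketch with an illustrative `ChatMessage` and substitution step:

#[derive(Clone, Debug)]
struct ChatMessage {
    role: String,
    content: String,
}

// Stand-in for format_chat_messages: substitute prompt pieces into contents.
fn substitute(messages: &[ChatMessage], context: &str, code: &str) -> Vec<ChatMessage> {
    messages
        .iter()
        .map(|m| ChatMessage {
            role: m.role.clone(),
            content: m.content.replace("{CONTEXT}", context).replace("{CODE}", code),
        })
        .collect()
}

fn main() {
    let system = "Test".to_string();
    let user = vec![ChatMessage {
        role: "user".to_string(),
        content: "Test {CONTEXT} - {CODE}".to_string(),
    }];

    // Same shape as the reworked do_generate: system first, then the messages.
    let mut messages = vec![ChatMessage { role: "system".to_string(), content: system }];
    messages.extend_from_slice(&user);
    let mut messages = substitute(&messages, "ctx", "code");

    // Anthropic takes the system prompt as a separate field, so remove it again.
    let system_prompt = messages.remove(0).content;
    assert_eq!(system_prompt, "Test");
    assert_eq!(messages[0].content, "Test ctx - code");
}
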
@@ -170,17 +174,14 @@ mod test {
         let anthropic = Anthropic::new(configuration);
         let prompt = Prompt::default_with_cursor();
         let run_params = json!({
-            "chat": [
-                {
-                    "role": "system",
-                    "content": "You are a coding assistant. You job is to generate a code snippet to replace <CURSOR>.\n\nYour instructions are to:\n- Analyze the provided [Context Code] and [Current Code].\n- Generate a concise code snippet that can replace the <cursor> marker in the [Current Code].\n- Do not provide any explanations or modify any code above or below the <CURSOR> position.\n- The generated code should seamlessly fit into the existing code structure and context.\n- Ensure your answer is properly indented and formatted based on the <CURSOR> location.\n- Only respond with code. Do not respond with anything that is not valid code."
-                },
+            "system": "Test",
+            "messages": [
                 {
                     "role": "user",
-                    "content": "[Context code]:\n{CONTEXT}\n\n[Current code]:{CODE}"
+                    "content": "Test {CONTEXT} - {CODE}"
                 }
             ],
-            "max_tokens": 64
+            "max_tokens": 2
         });
         let response = anthropic.do_completion(&prompt, run_params).await?;
         assert!(!response.insert_text.is_empty());
@@ -197,17 +198,14 @@ mod test {
         let anthropic = Anthropic::new(configuration);
         let prompt = Prompt::default_with_cursor();
         let run_params = json!({
-            "chat": [
-                {
-                    "role": "system",
-                    "content": "You are a coding assistant. You job is to generate a code snippet to replace <CURSOR>.\n\nYour instructions are to:\n- Analyze the provided [Context Code] and [Current Code].\n- Generate a concise code snippet that can replace the <cursor> marker in the [Current Code].\n- Do not provide any explanations or modify any code above or below the <CURSOR> position.\n- The generated code should seamlessly fit into the existing code structure and context.\n- Ensure your answer is properly indented and formatted based on the <CURSOR> location.\n- Only respond with code. Do not respond with anything that is not valid code."
-                },
+            "system": "Test",
+            "messages": [
                 {
                     "role": "user",
-                    "content": "[Context code]:\n{CONTEXT}\n\n[Current code]:{CODE}"
+                    "content": "Test {CONTEXT} - {CODE}"
                 }
             ],
-            "max_tokens": 64
+            "max_tokens": 2
        });
         let response = anthropic.do_generate(&prompt, run_params).await?;
         assert!(!response.generated_text.is_empty());
src/transformer_backends/llama_cpp/mod.rs

@@ -24,6 +24,7 @@ const fn max_new_tokens_default() -> usize {
     32
 }

+// NOTE: We cannot deny unknown fields as the provided parameters may contain other fields relevant to other processes
 #[derive(Debug, Deserialize)]
 pub struct LLaMACPPRunParams {
     pub fim: Option<FIM>,
@@ -120,28 +121,6 @@ mod test {
     use super::*;
     use serde_json::json;

-    // // "completion": [
-    // //     {
-    // //         "role": "system",
-    // //         "content": "You are a code completion chatbot. Use the following context to complete the next segement of code. Keep your response brief. Do not produce any text besides code. \n\n{context}",
-    // //     },
-    // //     {
-    // //         "role": "user",
-    // //         "content": "Complete the following code: \n\n{code}"
-    // //     }
-    // // ],
-    // // "generation": [
-    // //     {
-    // //         "role": "system",
-    // //         "content": "You are a code completion chatbot. Use the following context to complete the next segement of code. \n\n{context}",
-    // //     },
-    // //     {
-    // //         "role": "user",
-    // //         "content": "Complete the following code: \n\n{code}"
-    // //     }
-    // // ],
-    // "chat_template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}"
-
     #[tokio::test]
     async fn llama_cpp_do_completion() -> anyhow::Result<()> {
         let configuration: config::LLaMACPP = serde_json::from_value(json!({
src/transformer_backends/openai.rs

@@ -35,6 +35,7 @@ const fn temperature_default() -> f32 {
     0.1
 }

+// NOTE: We cannot deny unknown fields as the provided parameters may contain other fields relevant to other processes
 #[derive(Debug, Deserialize)]
 pub struct OpenAIRunParams {
     pub fim: Option<FIM>,
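
The NOTE added to each RunParams struct is the inverse of the config change: run-time parameters are shared across code paths, so unknown keys must be tolerated rather than rejected. An illustrative sketch (field names trimmed):

use serde::Deserialize;

// No deny_unknown_fields here: extra keys are simply ignored.
#[derive(Debug, Deserialize)]
struct OpenAIRunParams {
    max_tokens: Option<usize>,
}

fn main() {
    // "fim" belongs to another backend's path, but must not make this fail.
    let params: OpenAIRunParams = serde_json::from_str(
        r#"{"max_tokens": 64, "fim": {"start": "<fim_prefix>"}}"#,
    )
    .unwrap();
    assert_eq!(params.max_tokens, Some(64));
}
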
@@ -262,11 +263,11 @@ mod test {
             "messages": [
                 {
                     "role": "system",
-                    "content": "You are a coding assistant. Your job is to generate a code snippet to replace <CURSOR>.\n\nYour instructions are to:\n- Analyze the provided [Context Code] and [Current Code].\n- Generate a concise code snippet that can replace the <cursor> marker in the [Current Code].\n- Do not provide any explanations or modify any code above or below the <CURSOR> position.\n- The generated code should seamlessly fit into the existing code structure and context.\n- Ensure your answer is properly indented and formatted based on the <CURSOR> location.\n- Only respond with code. Do not respond with anything that is not valid code."
+                    "content": "Test"
                 },
                 {
                     "role": "user",
-                    "content": "[Context code]:\n{CONTEXT}\n\n[Current code]:{CODE}"
+                    "content": "Test {CONTEXT} - {CODE}"
                 }
             ],
             "max_tokens": 64
@@ -306,11 +307,11 @@ mod test {
             "messages": [
                 {
                     "role": "system",
-                    "content": "You are a coding assistant. Your job is to generate a code snippet to replace <CURSOR>.\n\nYour instructions are to:\n- Analyze the provided [Context Code] and [Current Code].\n- Generate a concise code snippet that can replace the <cursor> marker in the [Current Code].\n- Do not provide any explanations or modify any code above or below the <CURSOR> position.\n- The generated code should seamlessly fit into the existing code structure and context.\n- Ensure your answer is properly indented and formatted based on the <CURSOR> location.\n- Only respond with code. Do not respond with anything that is not valid code."
+                    "content": "Test"
                 },
                 {
                     "role": "user",
-                    "content": "[Context code]:\n{CONTEXT}\n\n[Current code]:{CODE}"
+                    "content": "Test {CONTEXT} - {CODE}"
                 }
             ],
             "max_tokens": 64
src/transformer_worker.rs

@@ -244,7 +244,7 @@ async fn do_completion(
         .completion
         .as_ref()
         .context("Completions is None")?
-        .kwargs
+        .parameters
         .clone(),
     )
     .unwrap();
@@ -304,6 +304,8 @@ async fn do_generate(
 ) -> anyhow::Result<Response> {
     let params = serde_json::to_value(request.params.parameters.clone()).unwrap();

+    eprintln!("{}", serde_json::to_string_pretty(&params).unwrap());
+
     let (tx, rx) = oneshot::channel();
     memory_backend_tx.send(memory_worker::WorkerRequest::Prompt(PromptRequest::new(
         request.params.text_document_position.clone(),