-
Notifications
You must be signed in to change notification settings - Fork 36
Expand file tree
/
Copy pathbasic_template.lua
More file actions
84 lines (78 loc) · 2.78 KB
/
basic_template.lua
File metadata and controls
84 lines (78 loc) · 2.78 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
-- lazy.nvim plugin spec for Kurama622/llm.nvim (LLM chat + code tools).
-- Loaded lazily: on any of the listed user commands or the `keys` mappings.
return {
  {
    "Kurama622/llm.nvim",
    dependencies = { "nvim-lua/plenary.nvim", "MunifTanjim/nui.nvim" },
    cmd = { "LLMSessionToggle", "LLMSelectedTextHandler", "LLMAppHandler" },
    config = function()
      local tools = require("llm.tools") -- built-in handlers for app_handler entries
      require("llm").setup({
        -- System prompt sent with every request.
        prompt = "You are a professional programmer.",
        ------------------- set your model parameters -------------------
        -- You can choose to configure multiple models as needed.
        -----------------------------------------------------------------
        --- style1: set single model parameters
        url = "https://models.inference.ai.azure.com/chat/completions",
        model = "gpt-4o-mini",
        api_type = "openai",
        -- style2: set parameters of multiple models
        -- (If you need to use multiple models and frequently switch between them.)
        models = {
          {
            name = "ChatGPT",
            url = "https://models.inference.ai.azure.com/chat/completions",
            model = "gpt-4o-mini",
            api_type = "openai",
          },
          {
            name = "ChatGLM",
            url = "https://open.bigmodel.cn/api/paas/v4/chat/completions",
            model = "glm-4-flash",
            api_type = "zhipu",
            max_tokens = 8000,
            -- API key is read lazily from the environment at request time,
            -- so it is never stored in this file.
            fetch_key = function()
              return vim.env.GLM_KEY
            end,
            temperature = 0.3,
            top_p = 0.7,
          },
        },
        ---------------- set your keymaps for interaction ---------------
        keys = {
          ["Input:Submit"] = { mode = "n", key = "<cr>" },
          ["Input:Cancel"] = { mode = { "n", "i" }, key = "<C-c>" },
          ["Input:Resend"] = { mode = { "n", "i" }, key = "<C-r>" },
          -- ...
        },
        ---------------------- set your app tools ----------------------
        -- Each key of app_handler is a tool name invokable via
        -- :LLMAppHandler <name>. NOTE: the template previously had
        -- ["Your Tool Name"] nested inside OptimCompare by mistake;
        -- tools must be direct siblings under app_handler.
        app_handler = {
          OptimCompare = {
            handler = tools.action_handler,
            opts = {
              fetch_key = function()
                return vim.env.GITHUB_TOKEN
              end,
              url = "https://models.inference.ai.azure.com/chat/completions",
              model = "gpt-4o-mini",
              api_type = "openai",
              language = "Chinese",
            },
          },
          ["Your Tool Name"] = {
            -- handler =
            -- opts = {
            --   fetch_key = function() return <your api key> end
            -- }
            -- url = "https://xxx",
            -- model = "xxx"
            -- api_type = ""
          },
          -- ...
        },
      })
    end,
    -- Lazy-load trigger mappings (also the user-facing entry points).
    keys = {
      { "<leader>ac", mode = "n", "<cmd>LLMSessionToggle<cr>" },
      { "<leader>ao", mode = "x", "<cmd>LLMAppHandler OptimCompare<cr>", desc = " Optimize the Code" },
    },
  },
}