2025-10-19 10:01:51 +01:00
parent 6a0c90bf26
commit 406e5cfe8e
7 changed files with 260 additions and 313 deletions

View File

@@ -1 +0,0 @@
-AIHUBMIX_API_KEY=

View File

@@ -21,10 +21,10 @@ return {
     -- Example nvim-tree.lua integration if needed
     { '<leader>a+', '<cmd>AiderTreeAddFile<cr>', desc = 'Add File from Tree to Aider', ft = 'NvimTree' },
     { '<leader>a-', '<cmd>AiderTreeDropFile<cr>', desc = 'Drop File from Tree from Aider', ft = 'NvimTree' },
-    { '<leader>am4', change_model_function('gpt-4.1'), desc = 'Switch aider model to GPT-4.1' },
-    { '<leader>amo', change_model_function('openai/o4-mini'), desc = 'Switch aider model to o4-mini' },
-    { '<leader>amg', change_model_function('openai/gemini-2.5-pro'), desc = 'Switch aider model to Gemini 2.5 Pro' },
-    { '<leader>ams', change_model_function('openai/claude-sonnet-4'), desc = 'Switch aider model to Claude Sonnet 4' },
+    { '<leader>am4', change_model_function 'gpt-4.1', desc = 'Switch aider model to GPT-4.1' },
+    { '<leader>amo', change_model_function 'openai/o4-mini', desc = 'Switch aider model to o4-mini' },
+    { '<leader>amg', change_model_function 'openai/gemini-2.5-pro', desc = 'Switch aider model to Gemini 2.5 Pro' },
+    { '<leader>ams', change_model_function 'openai/claude-sonnet-4', desc = 'Switch aider model to Claude Sonnet 4' },
   },
   dependencies = {
     'folke/snacks.nvim',
@@ -52,7 +52,7 @@ return {
     aider_cmd = 'aider',
     args = {
       '--config=$HOME/.config/aider/aider.yaml',
-      '--env-file=$(pwd)/.aider.env',
+      '--env-file=$(pwd)/aider.env',
       '--watch',
       '--architect',
     },
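The only functional change in this file is the env-file path: .aider.env becomes aider.env. The keymap edits are purely stylistic, since Lua lets you omit the parentheses when a function is called with a single string literal, so both spellings below make the same call. A minimal sketch (greet is a hypothetical function, for illustration only):

local function greet(name)
  return 'hello ' .. name
end

print(greet('world')) -- conventional call syntax
print(greet 'world') -- identical: sugar for a call with one string-literal argument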

View File

@@ -1,75 +0,0 @@
-return {}
--- require('helpers').edit_cf('pa', '/lua/plugins/avante.lua')
---
--- return {
---   'yetone/avante.nvim',
---   event = 'VeryLazy',
---   version = false, -- Never set this value to "*"! Never!
---   opts = {
---     -- add any opts here
---     -- for example
---     provider = 'aihubmix',
---     -- cursor_applying_provider = 'aihubmix_llama_versatile',
---     aihubmix = {
---       -- model = 'claude-3-7-sonnet-20250219',
---       model = 'DeepSeek-V3',
---     },
---     openai = {
---       endpoint = 'https://api.openai.com/v1',
---       model = 'gpt-4o', -- your desired model (or use gpt-4o, etc.)
---       timeout = 30000, -- Timeout in milliseconds, increase this for reasoning models
---       temperature = 0,
---       max_completion_tokens = 8192, -- Increase this to include reasoning tokens (for reasoning models)
---       --reasoning_effort = "medium", -- low|medium|high, only used for reasoning models
---     },
---     vendors = {
---       aihubmix_llama_versatile = {
---         __inherited_from = 'openai',
---         api_key_name = 'AIHUBMIX_API_KEY',
---         endpoint = 'https://aihubmix.com/v1',
---         model = 'llama-3.3-70b-versatile',
---       },
---     },
---   },
---   -- if you want to build from source then do `make BUILD_FROM_SOURCE=true`
---   build = 'make',
---   -- build = "powershell -ExecutionPolicy Bypass -File Build.ps1 -BuildFromSource false" -- for windows
---   dependencies = {
---     'nvim-treesitter/nvim-treesitter',
---     'stevearc/dressing.nvim',
---     'nvim-lua/plenary.nvim',
---     'MunifTanjim/nui.nvim',
---     --- The below dependencies are optional,
---     'echasnovski/mini.pick', -- for file_selector provider mini.pick
---     'nvim-telescope/telescope.nvim', -- for file_selector provider telescope
---     'hrsh7th/nvim-cmp', -- autocompletion for avante commands and mentions
---     'ibhagwan/fzf-lua', -- for file_selector provider fzf
---     'nvim-tree/nvim-web-devicons', -- or echasnovski/mini.icons
---     'zbirenbaum/copilot.lua', -- for providers='copilot'
---     {
---       -- support for image pasting
---       'HakonHarnes/img-clip.nvim',
---       event = 'VeryLazy',
---       opts = {
---         -- recommended settings
---         default = {
---           embed_image_as_base64 = false,
---           prompt_for_file_name = false,
---           drag_and_drop = {
---             insert_mode = true,
---           },
---           -- required for Windows users
---           use_absolute_path = true,
---         },
---       },
---     },
---     {
---       -- Make sure to set this up properly if you have lazy=true
---       'MeanderingProgrammer/render-markdown.nvim',
---       opts = {
---         file_types = { 'markdown', 'Avante' },
---       },
---       ft = { 'markdown', 'Avante' },
---     },
---   },
--- }

View File

@@ -51,7 +51,7 @@ return {
     },
     sources = {
-      default = { 'codecompanion', 'lsp', 'path', 'snippets', 'lazydev' },
+      default = { 'lsp', 'path', 'snippets', 'lazydev' },
       providers = {
         lazydev = { module = 'lazydev.integrations.blink', score_offset = 100 },
         -- avante = { module = 'blink-cmp-avante', name = 'Avante', opts = {} },

View File

@@ -2,117 +2,94 @@ vim.cmd [[cab cc CodeCompanion]]
 return {
   'olimorris/codecompanion.nvim',
-  opts = function(_, opts)
-    opts.adapters = {
-      gpt = function()
-        return require('codecompanion.adapters').extend('copilot', {
-          schema = {
-            model = {
-              default = 'gpt-4.1',
-            },
-            max_tokens = {
-              default = 1000000,
-            },
-          },
-        })
-      end,
-      flash = function()
-        return require('codecompanion.adapters').extend('copilot', {
-          schema = {
-            model = {
-              default = 'gemini-2.0-flash-001',
-            },
-            max_tokens = {
-              default = 1000000,
-            },
-          },
-        })
-      end,
-      gemini = function()
-        return require('codecompanion.adapters').extend('copilot', {
-          schema = {
-            model = {
-              default = 'gemini-2.5-pro',
-            },
-            max_tokens = {
-              default = 1000000,
-            },
-          },
-        })
-      end,
-      sonnet = function()
-        return require('codecompanion.adapters').extend('copilot', {
-          schema = {
-            model = {
-              default = 'claude-3.7-sonnet',
-            },
-            max_tokens = {
-              default = 1000000,
-            },
-          },
-        })
-      end,
-    }
-    opts.display = {
-      chat = {
-        show_settings = true,
-        start_in_insert_mode = false,
-      },
-    }
-    opts.strategies = {
-      chat = {
-        adapter = 'gpt',
-        slash_commands = {
-          -- codebase = require('vectorcode.integrations').codecompanion.chat.make_slash_command(),
-        },
-        tools = {
-          -- vectorcode = {
-          --   description = 'Run VectorCode to retrieve the project context.',
-          --   callback = require('vectorcode.integrations').codecompanion.chat.make_tool(),
-          -- },
-          ['cmd_runner'] = {
-            opts = {
-              requires_approval = false,
-            },
-          },
-        },
-        roles = {
-          ---@type string|fun(adapter: CodeCompanion.Adapter): string
-          llm = function(adapter)
-            return 'CodeCompanion (' .. adapter.formatted_name .. ': ' .. adapter.parameters.model .. ')'
-          end,
-        },
-        keymaps = {
-          send = {
-            modes = { n = '<C-s>', i = '<C-s>' },
-          },
-          close = {
-            modes = { n = '<C-c>', i = '<C-c>' },
-          },
-        },
-      },
-      inline = {
-        adapter = 'flash',
-      },
-    }
-    opts.extensions = {
-      mcphub = {
-        callback = 'mcphub.extensions.codecompanion',
-        opts = {
-          show_result_in_chat = true,
-          make_vars = true,
-          make_slash_commands = true,
-        },
-      },
-    }
-    opts.system_prompt = function(opts)
-      local language = opts.language or 'English'
-      return string.format(
-        [[You are an AI programming assistant named "CodeCompanion". You are currently plugged into the Neovim text editor on a user's machine.
+  config = function()
+    require('codecompanion').setup {
+      adapters = {
+        http = {
+          openrouter = function()
+            return require('codecompanion.adapters').extend('openai_compatible', {
+              env = {
+                url = vim.env.DEFAULT_OPENAI_API_BASE,
+                api_key = vim.env.DEFAULT_OPENAI_API_KEY,
+                chat_url = '/v1/chat/completions',
+              },
+              schema = {
+                model = {
+                  default = vim.env.DEFAULT_AI_MODEL,
+                },
+                max_tokens = {
+                  default = 1000000,
+                },
+              },
+            })
+          end,
+        },
+      },
+      display = {
+        chat = {
+          show_settings = true,
+          start_in_insert_mode = false,
+        },
+      },
+      strategies = {
+        chat = {
+          adapter = 'openrouter',
+          slash_commands = {
+            -- codebase = require('vectorcode.integrations').codecompanion.chat.make_slash_command(),
+          },
+          tools = {
+            -- vectorcode = {
+            --   description = 'Run VectorCode to retrieve the project context.',
+            --   callback = require('vectorcode.integrations').codecompanion.chat.make_tool(),
+            -- },
+            ['cmd_runner'] = {
+              opts = {
+                requires_approval = false,
+              },
+            },
+          },
+          roles = {
+            ---@type string|fun(adapter: CodeCompanion.Adapter): string
+            llm = function(adapter)
+              return 'CodeCompanion (' .. adapter.formatted_name .. ': ' .. adapter.parameters.model .. ')'
+            end,
+          },
+          keymaps = {
+            send = {
+              modes = { n = '<C-s>', i = '<C-s>' },
+            },
+            close = {
+              modes = { n = '<C-c>', i = '<C-c>' },
+            },
+          },
+        },
+        inline = {
+          adapter = {
+            name = 'openrouter',
+            model = vim.env.FAST_MODEL,
+          },
+        },
+        cmd = {
+          adapter = {
+            name = 'openrouter',
+            model = vim.env.FAST_MODEL,
+          },
+        },
+      },
+      extensions = {
+        mcphub = {
+          callback = 'mcphub.extensions.codecompanion',
+          opts = {
+            show_result_in_chat = true,
+            make_vars = true,
+            make_slash_commands = true,
+          },
+        },
+      },
+      system_prompt = function(opts)
+        local language = opts.language or 'English'
+        return string.format(
+          [[You are an AI programming assistant named "CodeCompanion". You are currently plugged into the Neovim text editor on a user's machine.
 Your core tasks include:
 - Answering general programming questions.
@@ -147,54 +124,53 @@ When given a task:
 3. End your response with a short suggestion for the next user turn that directly supports continuing the conversation.
 4. Provide exactly one complete reply per conversation turn.
 5. If necessary, execute multiple tools in a single turn.]],
-        language
-      )
-    end
-    opts.prompt_library = {
-      ['Code Expert'] = {
-        strategy = 'chat',
-        description = 'Get some special advice from an LLM.',
-        opts = {
-          mapping = '<Leader>ce',
-          modes = { 'v' },
-          short_name = 'expert',
-          auto_submit = true,
-          stop_context_insertion = true,
-          user_prompt = true,
-        },
-        prompts = {
-          {
-            role = 'system',
-            content = function(context)
-              return 'I want you to act as a senior'
-                .. context.filetype
-                .. 'developer I will ask you specific questions and I want you to return concise explanations and codeblock examples.'
-            end,
-          },
-          {
-            role = 'user',
-            content = function(context)
-              local text = require('codecompanion.helpers.actions').get_code(context.start_line, context.end_line)
-              return 'I have the following code:\n\n```' .. context.filetype .. '\n' .. text .. '\n```\n\n'
-            end,
-            opts = {
-              contains_code = true,
-            },
-          },
-        },
-      },
-      ['Games Master'] = {
-        strategy = 'chat',
-        description = 'A personal Games Master Assistant.',
-        opts = {
-          user_prompt = false,
-        },
-        prompts = {
-          {
-            role = 'system',
-            content = [[
+          language
+        )
+      end,
+      prompt_library = {
+        ['Code Expert'] = {
+          strategy = 'chat',
+          description = 'Get some special advice from an LLM.',
+          opts = {
+            mapping = '<Leader>ce',
+            modes = { 'v' },
+            short_name = 'expert',
+            auto_submit = true,
+            stop_context_insertion = true,
+            user_prompt = true,
+          },
+          prompts = {
+            {
+              role = 'system',
+              content = function(context)
+                return 'I want you to act as a senior'
+                  .. context.filetype
+                  .. 'developer I will ask you specific questions and I want you to return concise explanations and codeblock examples.'
+              end,
+            },
+            {
+              role = 'user',
+              content = function(context)
+                local text = require('codecompanion.helpers.actions').get_code(context.start_line, context.end_line)
+                return 'I have the following code:\n\n```' .. context.filetype .. '\n' .. text .. '\n```\n\n'
+              end,
+              opts = {
+                contains_code = true,
+              },
+            },
+          },
+        },
+        ['Games Master'] = {
+          strategy = 'chat',
+          description = 'A personal Games Master Assistant.',
+          opts = {
+            user_prompt = false,
+          },
+          prompts = {
+            {
+              role = 'system',
+              content = [[
 You are a personal Games Master Assistant. You are currently plugged in to the Neovim text editor on a user's machine.
 Your core tasks include:
@@ -228,58 +204,58 @@ When given a task:
 2. Provide exactly one complete reply per conversation turn.
 3. If necessary, execute multiple tools in a single turn.
 ]],
-          },
-          {
-            role = 'user',
-            content = '',
-          },
-        },
-      },
-      ['PHPStan Fixer'] = {
-        strategy = 'workflow',
-        description = 'Use a workflow to fix PHPStan errors until there are none left.',
-        opts = {
-          short_name = 'phpstan',
-        },
-        prompts = {
-          {
-            {
-              name = 'Run PHPStan',
-              role = 'user',
-              opts = { auto_submit = false },
-              content = function()
-                -- Enable turbo mode!!!
-                vim.g.codecompanion_auto_tool_mode = true
-                return [[PHPStan is a static analysis tool for PHP. It is currently reporting errors in the code. Your task is to fix these errors and run PHPStan again until there are no errors left.
+            },
+            {
+              role = 'user',
+              content = '',
+            },
+          },
+        },
+        ['PHPStan Fixer'] = {
+          strategy = 'workflow',
+          description = 'Use a workflow to fix PHPStan errors until there are none left.',
+          opts = {
+            short_name = 'phpstan',
+          },
+          prompts = {
+            {
+              {
+                name = 'Run PHPStan',
+                role = 'user',
+                opts = { auto_submit = false },
+                content = function()
+                  -- Enable turbo mode!!!
+                  vim.g.codecompanion_auto_tool_mode = true
+                  return [[PHPStan is a static analysis tool for PHP. It is currently reporting errors in the code. Your task is to fix these errors and run PHPStan again until there are no errors left.
 First of all use the @cmd_runner tool to run the `composer type-check` command. This will run PHPStan and output type errors in the code.]]
-              end,
-            },
-          },
-          {
-            {
-              name = 'Fetch files',
-              role = 'user',
-              opts = { auto_submit = false },
-              content = function()
-                -- Enable turbo mode!!!
-                vim.g.codecompanion_auto_tool_mode = true
-                return 'PHPStan has reported errors. Look at the output and see where the files are reported. Use the @mcp tool to read all the offending files so you can implement the fixes.'
-              end,
-            },
-          },
-          {
-            {
-              name = 'Fix errors and run',
-              role = 'user',
-              opts = { auto_submit = false },
-              content = function()
-                -- Enable turbo mode!!!
-                vim.g.codecompanion_auto_tool_mode = true
-                return [[### Instructions
+                end,
+              },
+            },
+            {
+              {
+                name = 'Fetch files',
+                role = 'user',
+                opts = { auto_submit = false },
+                content = function()
+                  -- Enable turbo mode!!!
+                  vim.g.codecompanion_auto_tool_mode = true
+                  return 'PHPStan has reported errors. Look at the output and see where the files are reported. Use the @mcp tool to read all the offending files so you can implement the fixes.'
+                end,
+              },
+            },
+            {
+              {
+                name = 'Fix errors and run',
+                role = 'user',
+                opts = { auto_submit = false },
+                content = function()
+                  -- Enable turbo mode!!!
+                  vim.g.codecompanion_auto_tool_mode = true
+                  return [[### Instructions
 Now you have the errors and the appropriate context you can fix the errors.
 ### Steps to Follow
@@ -311,31 +287,32 @@ We'll repeat this cycle until there are no errors. Ensure no deviations from the
 - pick<T, TKeys> will create an array from T with only the keys in TKeys (e.g. pick<array{a: int, b: int, c: int}, 'a'|'b'> will create array{a: int, b: int})
 - except<T, TKeys> works like pick only it excludes the keys in TKeys (e.g. except<array{a: int, b: int, c: int}, 'a'|'b'> will create array{c: int})
 - union<T, U> creates a union array of T and U (e.g. union<array{a: int}, array{b: int}> will create array{a: int, b: int})]]
-              end,
-            },
-          },
-          {
-            {
-              name = 'Repeat On Failure',
-              role = 'user',
-              opts = { auto_submit = false },
-              -- Scope this prompt to the cmd_runner tool
-              condition = function()
-                return _G.codecompanion_current_tool == 'cmd_runner'
-              end,
-              -- Repeat until the tests pass, as indicated by the testing flag
-              -- which the cmd_runner tool sets on the chat buffer
-              repeat_until = function(chat)
-                -- Check if the last message in the chat buffer contains "[ERROR] found"
-                local messages = chat.messages
-                local last_message = messages[#messages]
-                if last_message and last_message.role == 'assistant' then
-                  local content = last_message.content
-                  return not content:find '[ERROR] found'
-                end
-                return true
-              end,
-              content = 'PHPStan is still reporting errors. Edit the code to fix the errors and run PHPStan again.',
-            },
-          },
-        },
+                end,
+              },
+            },
+            {
+              {
+                name = 'Repeat On Failure',
+                role = 'user',
+                opts = { auto_submit = false },
+                -- Scope this prompt to the cmd_runner tool
+                condition = function()
+                  return _G.codecompanion_current_tool == 'cmd_runner'
+                end,
+                -- Repeat until the tests pass, as indicated by the testing flag
+                -- which the cmd_runner tool sets on the chat buffer
+                repeat_until = function(chat)
+                  -- Check if the last message in the chat buffer contains "[ERROR] found"
+                  local messages = chat.messages
+                  local last_message = messages[#messages]
+                  if last_message and last_message.role == 'assistant' then
+                    local content = last_message.content
+                    return not content:find '[ERROR] found'
+                  end
+                  return true
+                end,
+                content = 'PHPStan is still reporting errors. Edit the code to fix the errors and run PHPStan again.',
+              },
+            },
+          },
+        },
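The rewrite above replaces four hard-coded copilot adapters with a single openai_compatible adapter whose endpoint, key, and models all come from the environment, so the same spec can point at any OpenAI-style API. A minimal sketch of the variables it reads; the OpenRouter-style values are assumptions for illustration, not part of the commit:

-- Illustrative values only; the commit reads these variables but does not set them.
vim.env.DEFAULT_OPENAI_API_BASE = 'https://openrouter.ai/api' -- chat_url is appended to this
vim.env.DEFAULT_OPENAI_API_KEY = 'sk-or-...' -- keep the real key out of version control
vim.env.DEFAULT_AI_MODEL = 'anthropic/claude-sonnet-4' -- default chat model (assumed value)
vim.env.FAST_MODEL = 'google/gemini-2.5-flash' -- used by the inline and cmd strategies (assumed value)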

View File

@@ -1,21 +1,22 @@
-return {
-  'zbirenbaum/copilot.lua',
-  event = 'InsertEnter',
-  config = function()
-    require('copilot').setup {
-      suggestion = {
-        enabled = true,
-        auto_trigger = true,
-        keymap = {
-          accept = '<Tab>',
-        },
-      },
-      filetypes = {
-        yaml = true,
-        markdown = true,
-        gitcommit = true,
-        gitrebase = true,
-      },
-    }
-  end,
-}
+return {}
+-- return {
+--   'zbirenbaum/copilot.lua',
+--   event = 'InsertEnter',
+--   config = function()
+--     require('copilot').setup {
+--       suggestion = {
+--         enabled = true,
+--         auto_trigger = true,
+--         keymap = {
+--           accept = '<Tab>',
+--         },
+--       },
+--       filetypes = {
+--         yaml = true,
+--         markdown = true,
+--         gitcommit = true,
+--         gitrebase = true,
+--       },
+--     }
+--   end,
+-- }

lua/plugins/minuet.lua (new file, 45 additions)
View File

@@ -0,0 +1,45 @@
+-- AI code completion
+-- A
+return {
+  'milanglacier/minuet-ai.nvim',
+  dependencies = { 'nvim-lua/plenary.nvim' },
+  config = function()
+    require('minuet').setup {
+      virtualtext = {
+        auto_trigger_ft = { 'lua' },
+        keymap = {
+          accept = '<A-A>',
+          accept_line = '<A-a>',
+          -- accept n lines (prompts for number)
+          -- e.g. "A-z 2 CR" will accept 2 lines
+          accept_n_lines = '<A-z>',
+          -- Cycle to prev completion item, or manually invoke completion
+          prev = '<A-[>',
+          -- Cycle to next completion item, or manually invoke completion
+          next = '<A-]>',
+          dismiss = '<A-e>',
+        },
+      },
+      provider = 'openai_compatible',
+      request_timeout = 3,
+      throttle = 1500, -- Increase to reduce costs and avoid rate limits
+      debounce = 600, -- Increase to reduce costs and avoid rate limits
+      provider_options = {
+        openai_compatible = {
+          api_key = 'COMPLETION_OPENAI_API_KEY',
+          end_point = vim.env.COMPLETION_OPENAI_API_BASE .. '/v1/chat/completions',
+          model = vim.env.COMPLETION_MODEL,
+          name = 'Openrouter',
+          optional = {
+            max_tokens = 2000,
+            top_p = 0.9,
+            provider = {
+              -- Prioritize throughput for faster completion
+              sort = 'throughput',
+            },
+          },
+        },
+      },
+    }
+  end,
+}
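Note the asymmetry in the new provider block: end_point and model are resolved eagerly through vim.env when the spec loads, while api_key is passed as the name of an environment variable, presumably for minuet to look up at request time. A hypothetical pre-flight check (not part of the commit) that warns when the expected variables are missing:

-- Hypothetical helper, illustration only: warn at startup instead of
-- failing silently the first time a completion request goes out.
for _, name in ipairs { 'COMPLETION_OPENAI_API_KEY', 'COMPLETION_OPENAI_API_BASE', 'COMPLETION_MODEL' } do
  if vim.env[name] == nil or vim.env[name] == '' then
    vim.notify(string.format('minuet: $%s is not set', name), vim.log.levels.WARN)
  end
end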