diff --git a/.env.example b/.env.example
deleted file mode 100644
index fe7e08e..0000000
--- a/.env.example
+++ /dev/null
@@ -1 +0,0 @@
-AIHUBMIX_API_KEY=
diff --git a/lua/plugins/aider.lua b/lua/plugins/aider.lua
index ddf086d..9ba68c4 100644
--- a/lua/plugins/aider.lua
+++ b/lua/plugins/aider.lua
@@ -21,10 +21,10 @@ return {
     -- Example nvim-tree.lua integration if needed
     { '<leader>a+', '<cmd>AiderTreeAddFile<cr>', desc = 'Add File from Tree to Aider', ft = 'NvimTree' },
     { '<leader>a-', '<cmd>AiderTreeDropFile<cr>', desc = 'Drop File from Tree from Aider', ft = 'NvimTree' },
-    { '<leader>am4', change_model_function('gpt-4.1'), desc = 'Switch aider model to GPT-4.1' },
-    { '<leader>amo', change_model_function('openai/o4-mini'), desc = 'Switch aider model to o4-mini' },
-    { '<leader>amg', change_model_function('openai/gemini-2.5-pro'), desc = 'Switch aider model to Gemini 2.5 Pro' },
-    { '<leader>ams', change_model_function('openai/claude-sonnet-4'), desc = 'Switch aider model to Claude Sonnet 4' },
+    { '<leader>am4', change_model_function 'gpt-4.1', desc = 'Switch aider model to GPT-4.1' },
+    { '<leader>amo', change_model_function 'openai/o4-mini', desc = 'Switch aider model to o4-mini' },
+    { '<leader>amg', change_model_function 'openai/gemini-2.5-pro', desc = 'Switch aider model to Gemini 2.5 Pro' },
+    { '<leader>ams', change_model_function 'openai/claude-sonnet-4', desc = 'Switch aider model to Claude Sonnet 4' },
   },
   dependencies = {
     'folke/snacks.nvim',
@@ -52,7 +52,7 @@ return {
       aider_cmd = 'aider',
       args = {
         '--config=$HOME/.config/aider/aider.yaml',
-        '--env-file=$(pwd)/.aider.env',
+        '--env-file=$(pwd)/aider.env',
         '--watch',
         '--architect',
       },
diff --git a/lua/plugins/avante.lua b/lua/plugins/avante.lua
deleted file mode 100644
index 5d931c1..0000000
--- a/lua/plugins/avante.lua
+++ /dev/null
@@ -1,75 +0,0 @@
-return {}
--- require('helpers').edit_cf('pa', '/lua/plugins/avante.lua')
---
--- return {
---   'yetone/avante.nvim',
---   event = 'VeryLazy',
---   version = false, -- Never set this value to "*"! Never!
---   opts = {
---     -- add any opts here
---     -- for example
---     provider = 'aihubmix',
---     -- cursor_applying_provider = 'aihubmix_llama_versatile',
---     aihubmix = {
---       -- model = 'claude-3-7-sonnet-20250219',
---       model = 'DeepSeek-V3',
---     },
---     openai = {
---       endpoint = 'https://api.openai.com/v1',
---       model = 'gpt-4o', -- your desired model (or use gpt-4o, etc.)
---       timeout = 30000, -- Timeout in milliseconds, increase this for reasoning models
---       temperature = 0,
---       max_completion_tokens = 8192, -- Increase this to include reasoning tokens (for reasoning models)
---       --reasoning_effort = "medium", -- low|medium|high, only used for reasoning models
---     },
---     vendors = {
---       aihubmix_llama_versatile = {
---         __inherited_from = 'openai',
---         api_key_name = 'AIHUBMIX_API_KEY',
---         endpoint = 'https://aihubmix.com/v1',
---         model = 'llama-3.3-70b-versatile',
---       },
---     },
---   },
---   -- if you want to build from source then do `make BUILD_FROM_SOURCE=true`
---   build = 'make',
---   -- build = "powershell -ExecutionPolicy Bypass -File Build.ps1 -BuildFromSource false" -- for windows
---   dependencies = {
---     'nvim-treesitter/nvim-treesitter',
---     'stevearc/dressing.nvim',
---     'nvim-lua/plenary.nvim',
---     'MunifTanjim/nui.nvim',
---     --- The below dependencies are optional,
---     'echasnovski/mini.pick', -- for file_selector provider mini.pick
---     'nvim-telescope/telescope.nvim', -- for file_selector provider telescope
---     'hrsh7th/nvim-cmp', -- autocompletion for avante commands and mentions
---     'ibhagwan/fzf-lua', -- for file_selector provider fzf
---     'nvim-tree/nvim-web-devicons', -- or echasnovski/mini.icons
---     'zbirenbaum/copilot.lua', -- for providers='copilot'
---     {
---       -- support for image pasting
---       'HakonHarnes/img-clip.nvim',
---       event = 'VeryLazy',
---       opts = {
---         -- recommended settings
---         default = {
---           embed_image_as_base64 = false,
---           prompt_for_file_name = false,
---           drag_and_drop = {
---             insert_mode = true,
---           },
---           -- required for Windows users
---           use_absolute_path = true,
---         },
---       },
---     },
---     {
---       -- Make sure to set this up properly if you have lazy=true
---       'MeanderingProgrammer/render-markdown.nvim',
---       opts = {
---         file_types = { 'markdown', 'Avante' },
---       },
---       ft = { 'markdown', 'Avante' },
---     },
---   },
--- }
diff --git a/lua/plugins/blink.lua b/lua/plugins/blink.lua
index 0ae36fa..26b3e38 100644
--- a/lua/plugins/blink.lua
+++ b/lua/plugins/blink.lua
@@ -51,7 +51,7 @@ return {
     },
 
     sources = {
-      default = { 'codecompanion', 'lsp', 'path', 'snippets', 'lazydev' },
+      default = { 'lsp', 'path', 'snippets', 'lazydev' },
       providers = {
         lazydev = { module = 'lazydev.integrations.blink', score_offset = 100 },
         -- avante = { module = 'blink-cmp-avante', name = 'Avante', opts = {} },
diff --git a/lua/plugins/codecompanion.lua b/lua/plugins/codecompanion.lua
index 9facee3..5efe1c9 100644
--- a/lua/plugins/codecompanion.lua
+++ b/lua/plugins/codecompanion.lua
@@ -2,117 +2,94 @@ vim.cmd [[cab cc CodeCompanion]]
 
 return {
   'olimorris/codecompanion.nvim',
-  opts = function(_, opts)
-    opts.adapters = {
-      gpt = function()
-        return require('codecompanion.adapters').extend('copilot', {
-          schema = {
-            model = {
-              default = 'gpt-4.1',
-            },
-            max_tokens = {
-              default = 1000000,
-            },
-          },
-        })
-      end,
-      flash = function()
-        return require('codecompanion.adapters').extend('copilot', {
-          schema = {
-            model = {
-              default = 'gemini-2.0-flash-001',
-            },
-            max_tokens = {
-              default = 1000000,
-            },
-          },
-        })
-      end,
-      gemini = function()
-        return require('codecompanion.adapters').extend('copilot', {
-          schema = {
-            model = {
-              default = 'gemini-2.5-pro',
-            },
-            max_tokens = {
-              default = 1000000,
-            },
-          },
-        })
-      end,
-      sonnet = function()
-        return require('codecompanion.adapters').extend('copilot', {
-          schema = {
-            model = {
-              default = 'claude-3.7-sonnet',
-            },
-            max_tokens = {
-              default = 1000000,
-            },
-          },
-        })
-      end,
-    }
-
-    opts.display = {
-      chat = {
-        show_settings = true,
-        start_in_insert_mode = false,
-      },
-    }
-
-    opts.strategies = {
-      chat = {
-        adapter = 'gpt',
-        slash_commands = {
-          -- codebase = require('vectorcode.integrations').codecompanion.chat.make_slash_command(),
-        },
-        tools = {
-          -- vectorcode = {
-          --   description = 'Run VectorCode to retrieve the project context.',
-          --   callback = require('vectorcode.integrations').codecompanion.chat.make_tool(),
-          -- },
-          ['cmd_runner'] = {
-            opts = {
-              requires_approval = false,
-            },
-          },
-        },
-        roles = {
-          ---@type string|fun(adapter: CodeCompanion.Adapter): string
-          llm = function(adapter)
-            return 'CodeCompanion (' .. adapter.formatted_name .. ': ' .. adapter.parameters.model .. ')'
+  config = function()
+    require('codecompanion').setup {
+      adapters = {
+        http = {
+          openrouter = function()
+            return require('codecompanion.adapters').extend('openai_compatible', {
+              env = {
+                url = vim.env.DEFAULT_OPENAI_API_BASE,
+                api_key = vim.env.DEFAULT_OPENAI_API_KEY,
+                chat_url = '/v1/chat/completions',
+              },
+              schema = {
+                model = {
+                  default = vim.env.DEFAULT_AI_MODEL,
+                },
+                max_tokens = {
+                  default = 1000000,
+                },
+              },
+            })
           end,
         },
-        keymaps = {
-          send = {
-            modes = { n = '', i = '' },
+      },
+      display = {
+        chat = {
+          show_settings = true,
+          start_in_insert_mode = false,
+        },
+      },
+      strategies = {
+        chat = {
+          adapter = 'openrouter',
+          slash_commands = {
+            -- codebase = require('vectorcode.integrations').codecompanion.chat.make_slash_command(),
           },
-          close = {
-            modes = { n = '', i = '' },
+          tools = {
+            -- vectorcode = {
+            --   description = 'Run VectorCode to retrieve the project context.',
+            --   callback = require('vectorcode.integrations').codecompanion.chat.make_tool(),
+            -- },
+            ['cmd_runner'] = {
+              opts = {
+                requires_approval = false,
+              },
+            },
+          },
+          roles = {
+            ---@type string|fun(adapter: CodeCompanion.Adapter): string
+            llm = function(adapter)
+              return 'CodeCompanion (' .. adapter.formatted_name .. ': ' .. adapter.parameters.model .. ')'
+            end,
+          },
+          keymaps = {
+            send = {
+              modes = { n = '', i = '' },
+            },
+            close = {
+              modes = { n = '', i = '' },
+            },
+          },
+        },
+        inline = {
+          adapter = {
+            name = 'openrouter',
+            model = vim.env.FAST_MODEL,
+          },
+        },
+        cmd = {
+          adapter = {
+            name = 'openrouter',
+            model = vim.env.FAST_MODEL,
           },
         },
       },
-      inline = {
-        adapter = 'flash',
-      },
-    }
-
-    opts.extensions = {
-      mcphub = {
-        callback = 'mcphub.extensions.codecompanion',
-        opts = {
-          show_result_in_chat = true,
-          make_vars = true,
-          make_slash_commands = true,
+      extensions = {
+        mcphub = {
+          callback = 'mcphub.extensions.codecompanion',
+          opts = {
+            show_result_in_chat = true,
+            make_vars = true,
+            make_slash_commands = true,
+          },
         },
       },
-    }
-
-    opts.system_prompt = function(opts)
-      local language = opts.language or 'English'
-      return string.format(
-        [[You are an AI programming assistant named "CodeCompanion". You are currently plugged into the Neovim text editor on a user's machine.
+      system_prompt = function(opts)
+        local language = opts.language or 'English'
+        return string.format(
+          [[You are an AI programming assistant named "CodeCompanion". You are currently plugged into the Neovim text editor on a user's machine.
 
 Your core tasks include:
 - Answering general programming questions.
@@ -147,54 +124,53 @@ When given a task:
 3. End your response with a short suggestion for the next user turn that directly supports continuing the conversation.
 4. Provide exactly one complete reply per conversation turn.
 5. If necessary, execute multiple tools in a single turn.]],
-        language
-      )
-    end
-
-    opts.prompt_library = {
-      ['Code Expert'] = {
-        strategy = 'chat',
-        description = 'Get some special advice from an LLM.',
-        opts = {
-          mapping = '<leader>ce',
-          modes = { 'v' },
-          short_name = 'expert',
-          auto_submit = true,
-          stop_context_insertion = true,
-          user_prompt = true,
-        },
-        prompts = {
-          {
-            role = 'system',
-            content = function(context)
-              return 'I want you to act as a senior '
-                .. context.filetype
-                .. ' developer. I will ask you specific questions and I want you to return concise explanations and codeblock examples.'
-            end,
-          },
-          {
-            role = 'user',
-            content = function(context)
-              local text = require('codecompanion.helpers.actions').get_code(context.start_line, context.end_line)
+          language
+        )
+      end,
+      prompt_library = {
+        ['Code Expert'] = {
+          strategy = 'chat',
+          description = 'Get some special advice from an LLM.',
+          opts = {
+            mapping = '<leader>ce',
+            modes = { 'v' },
+            short_name = 'expert',
+            auto_submit = true,
+            stop_context_insertion = true,
+            user_prompt = true,
+          },
+          prompts = {
+            {
+              role = 'system',
+              content = function(context)
+                return 'I want you to act as a senior '
+                  .. context.filetype
+                  .. ' developer. I will ask you specific questions and I want you to return concise explanations and codeblock examples.'
+              end,
+            },
+            {
+              role = 'user',
+              content = function(context)
+                local text = require('codecompanion.helpers.actions').get_code(context.start_line, context.end_line)
 
-              return 'I have the following code:\n\n```' .. context.filetype .. '\n' .. text .. '\n```\n\n'
-            end,
-            opts = {
-              contains_code = true,
-            },
-          },
-        },
-      },
-      ['Games Master'] = {
-        strategy = 'chat',
-        description = 'A personal Games Master Assistant.',
-        opts = {
-          user_prompt = false,
-        },
-        prompts = {
-          {
-            role = 'system',
-            content = [[
+                return 'I have the following code:\n\n```' .. context.filetype .. '\n' .. text .. '\n```\n\n'
+              end,
+              opts = {
+                contains_code = true,
+              },
+            },
+          },
+        },
+        ['Games Master'] = {
+          strategy = 'chat',
+          description = 'A personal Games Master Assistant.',
+          opts = {
+            user_prompt = false,
+          },
+          prompts = {
+            {
+              role = 'system',
+              content = [[
 You are a personal Games Master Assistant. You are currently plugged in to the Neovim text editor on a user's machine.
 
 Your core tasks include:
@@ -228,58 +204,58 @@ When given a task:
 2. Provide exactly one complete reply per conversation turn.
 3. If necessary, execute multiple tools in a single turn.
 ]],
-          },
-          {
-            role = 'user',
-            content = '',
-          },
-        },
-      },
-      ['PHPStan Fixer'] = {
-        strategy = 'workflow',
-        description = 'Use a workflow to fix PHPStan errors until there are none left.',
-        opts = {
-          short_name = 'phpstan',
-        },
-        prompts = {
-          {
-            {
-              name = 'Run PHPStan',
-              role = 'user',
-              opts = { auto_submit = false },
-              content = function()
-                -- Enable turbo mode!!!
-                vim.g.codecompanion_auto_tool_mode = true
+            },
+            {
+              role = 'user',
+              content = '',
+            },
+          },
+        },
+        ['PHPStan Fixer'] = {
+          strategy = 'workflow',
+          description = 'Use a workflow to fix PHPStan errors until there are none left.',
+          opts = {
+            short_name = 'phpstan',
+          },
+          prompts = {
+            {
+              {
+                name = 'Run PHPStan',
+                role = 'user',
+                opts = { auto_submit = false },
+                content = function()
+                  -- Enable turbo mode!!!
+                  vim.g.codecompanion_auto_tool_mode = true
 
-                return [[PHPStan is a static analysis tool for PHP. It is currently reporting errors in the code. Your task is to fix these errors and run PHPStan again until there are no errors left.
+                  return [[PHPStan is a static analysis tool for PHP. It is currently reporting errors in the code. Your task is to fix these errors and run PHPStan again until there are no errors left.
 
 First of all use the @cmd_runner tool to run the `composer type-check` command. This will run PHPStan and output type errors in the code.]]
-              end,
-            },
-          },
-          {
-            {
-              name = 'Fetch files',
-              role = 'user',
-              opts = { auto_submit = false },
-              content = function()
-                -- Enable turbo mode!!!
-                vim.g.codecompanion_auto_tool_mode = true
+                end,
+              },
+            },
+            {
+              {
+                name = 'Fetch files',
+                role = 'user',
+                opts = { auto_submit = false },
+                content = function()
+                  -- Enable turbo mode!!!
+                  vim.g.codecompanion_auto_tool_mode = true
 
-                return 'PHPStan has reported errors. Look at the output and see where the files are reported. Use the @mcp tool to read all the offending files so you can implement the fixes.'
-              end,
-            },
-          },
-          {
-            {
-              name = 'Fix errors and run',
-              role = 'user',
-              opts = { auto_submit = false },
-              content = function()
-                -- Enable turbo mode!!!
-                vim.g.codecompanion_auto_tool_mode = true
+                  return 'PHPStan has reported errors. Look at the output and see where the files are reported. Use the @mcp tool to read all the offending files so you can implement the fixes.'
+                end,
+              },
+            },
+            {
+              {
+                name = 'Fix errors and run',
+                role = 'user',
+                opts = { auto_submit = false },
+                content = function()
+                  -- Enable turbo mode!!!
+                  vim.g.codecompanion_auto_tool_mode = true
 
-                return [[### Instructions
+                  return [[### Instructions
 Now you have the errors and the appropriate context you can fix the errors.
 
 ### Steps to Follow
@@ -311,31 +287,32 @@ We'll repeat this cycle until there are no errors. Ensure no deviations from the
 - pick will create an array from T with only the keys in TKeys (e.g. pick will create array{a: int, b: int})
 - except works like pick only it excludes the keys in TKeys (e.g. except will create array{c: int})
 - union creates a union array of T and U (e.g. union will create array{a: int, b: int})]]
-              end,
-            },
-          },
-          {
-            {
-              name = 'Repeat On Failure',
-              role = 'user',
-              opts = { auto_submit = false },
-              -- Scope this prompt to the cmd_runner tool
-              condition = function()
-                return _G.codecompanion_current_tool == 'cmd_runner'
-              end,
-              -- Repeat until the tests pass, as indicated by the testing flag
-              -- which the cmd_runner tool sets on the chat buffer
-              repeat_until = function(chat)
-                -- Check if the last message in the chat buffer contains "[ERROR] found"
-                local messages = chat.messages
-                local last_message = messages[#messages]
-                if last_message and last_message.role == 'assistant' then
-                  local content = last_message.content
-                  return not content:find('[ERROR] found', 1, true)
-                end
-                return true
-              end,
-              content = 'PHPStan is still reporting errors. Edit the code to fix the errors and run PHPStan again.',
+                end,
+              },
+            },
+            {
+              {
+                name = 'Repeat On Failure',
+                role = 'user',
+                opts = { auto_submit = false },
+                -- Scope this prompt to the cmd_runner tool
+                condition = function()
+                  return _G.codecompanion_current_tool == 'cmd_runner'
+                end,
+                -- Repeat until the tests pass, as indicated by the testing flag
+                -- which the cmd_runner tool sets on the chat buffer
+                repeat_until = function(chat)
+                  -- Check if the last message in the chat buffer contains "[ERROR] found"
+                  local messages = chat.messages
+                  local last_message = messages[#messages]
+                  if last_message and last_message.role == 'assistant' then
+                    local content = last_message.content
+                    return not content:find('[ERROR] found', 1, true)
+                  end
+                  return true
+                end,
+                content = 'PHPStan is still reporting errors. Edit the code to fix the errors and run PHPStan again.',
+              },
+            },
           },
         },
       },
diff --git a/lua/plugins/copilot.lua b/lua/plugins/copilot.lua
index cd65cef..83b2a93 100644
--- a/lua/plugins/copilot.lua
+++ b/lua/plugins/copilot.lua
@@ -1,21 +1,22 @@
-return {
-  'zbirenbaum/copilot.lua',
-  event = 'InsertEnter',
-  config = function()
-    require('copilot').setup {
-      suggestion = {
-        enabled = true,
-        auto_trigger = true,
-        keymap = {
-          accept = '',
-        },
-      },
-      filetypes = {
-        yaml = true,
-        markdown = true,
-        gitcommit = true,
-        gitrebase = true,
-      },
-    }
-  end,
-}
+return {}
+-- return {
+--   'zbirenbaum/copilot.lua',
+--   event = 'InsertEnter',
+--   config = function()
+--     require('copilot').setup {
+--       suggestion = {
+--         enabled = true,
+--         auto_trigger = true,
+--         keymap = {
+--           accept = '',
+--         },
+--       },
+--       filetypes = {
+--         yaml = true,
+--         markdown = true,
+--         gitcommit = true,
+--         gitrebase = true,
+--       },
+--     }
+--   end,
+-- }
diff --git a/lua/plugins/minuet.lua b/lua/plugins/minuet.lua
new file mode 100644
index 0000000..3f10a8c
--- /dev/null
+++ b/lua/plugins/minuet.lua
@@ -0,0 +1,45 @@
+-- AI code completion
+-- A
+return {
+  'milanglacier/minuet-ai.nvim',
+  dependencies = { 'nvim-lua/plenary.nvim' },
+  config = function()
+    require('minuet').setup {
+      virtualtext = {
+        auto_trigger_ft = { 'lua' },
+        keymap = {
+          accept = '<A-A>',
+          accept_line = '<A-a>',
+          -- accept n lines (prompts for number)
+          -- e.g. "A-z 2 CR" will accept 2 lines
+          accept_n_lines = '<A-z>',
+          -- Cycle to prev completion item, or manually invoke completion
+          prev = '<A-[>',
+          -- Cycle to next completion item, or manually invoke completion
+          next = '<A-]>',
+          dismiss = '<A-e>',
+        },
+      },
+      provider = 'openai_compatible',
+      request_timeout = 3,
+      throttle = 1500, -- Increase to reduce costs and avoid rate limits
+      debounce = 600, -- Increase to reduce costs and avoid rate limits
+      provider_options = {
+        openai_compatible = {
+          api_key = 'COMPLETION_OPENAI_API_KEY',
+          end_point = vim.env.COMPLETION_OPENAI_API_BASE .. '/v1/chat/completions',
+          model = vim.env.COMPLETION_MODEL,
+          name = 'Openrouter',
+          optional = {
+            max_tokens = 2000,
+            top_p = 0.9,
+            provider = {
+              -- Prioritize throughput for faster completion
+              sort = 'throughput',
+            },
+          },
+        },
+      },
+    }
+  end,
+}
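
Note: with .env.example removed, nothing in the repo documents the variables the new config reads from the environment: DEFAULT_OPENAI_API_BASE, DEFAULT_OPENAI_API_KEY, DEFAULT_AI_MODEL, and FAST_MODEL in codecompanion.lua; COMPLETION_OPENAI_API_BASE, COMPLETION_OPENAI_API_KEY, and COMPLETION_MODEL in minuet.lua; plus the per-project aider.env file passed to aider. Below is a minimal sketch of a startup check that could stand in for the deleted example file; the module path and the helper itself are hypothetical and not part of this diff.

-- lua/helpers/env-check.lua (hypothetical helper, not part of the diff above)
-- Warn at startup when one of the AI-related variables is unset, since the
-- adapters fail in less obvious ways when vim.env returns nil.
local required = {
  'DEFAULT_OPENAI_API_BASE',
  'DEFAULT_OPENAI_API_KEY',
  'DEFAULT_AI_MODEL',
  'FAST_MODEL',
  'COMPLETION_OPENAI_API_BASE',
  'COMPLETION_OPENAI_API_KEY',
  'COMPLETION_MODEL',
}

local missing = {}
for _, name in ipairs(required) do
  if vim.env[name] == nil or vim.env[name] == '' then
    table.insert(missing, name)
  end
end

if #missing > 0 then
  vim.notify('Unset AI env vars: ' .. table.concat(missing, ', '), vim.log.levels.WARN)
end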