AI plugins: drop avante, disable copilot, add minuet-ai, move CodeCompanion to an env-driven openai_compatible (OpenRouter) adapter
@@ -1 +0,0 @@
- AIHUBMIX_API_KEY=
@@ -21,10 +21,10 @@ return {
  -- Example nvim-tree.lua integration if needed
  { '<leader>a+', '<cmd>AiderTreeAddFile<cr>', desc = 'Add File from Tree to Aider', ft = 'NvimTree' },
  { '<leader>a-', '<cmd>AiderTreeDropFile<cr>', desc = 'Drop File from Tree from Aider', ft = 'NvimTree' },
- { '<leader>am4', change_model_function('gpt-4.1'), desc = 'Switch aider model to GPT-4.1' },
- { '<leader>amo', change_model_function('openai/o4-mini'), desc = 'Switch aider model to o4-mini' },
- { '<leader>amg', change_model_function('openai/gemini-2.5-pro'), desc = 'Switch aider model to Gemini 2.5 Pro' },
- { '<leader>ams', change_model_function('openai/claude-sonnet-4'), desc = 'Switch aider model to Claude Sonnet 4' },
+ { '<leader>am4', change_model_function 'gpt-4.1', desc = 'Switch aider model to GPT-4.1' },
+ { '<leader>amo', change_model_function 'openai/o4-mini', desc = 'Switch aider model to o4-mini' },
+ { '<leader>amg', change_model_function 'openai/gemini-2.5-pro', desc = 'Switch aider model to Gemini 2.5 Pro' },
+ { '<leader>ams', change_model_function 'openai/claude-sonnet-4', desc = 'Switch aider model to Claude Sonnet 4' },
  },
  dependencies = {
  'folke/snacks.nvim',
@@ -52,7 +52,7 @@ return {
  aider_cmd = 'aider',
  args = {
  '--config=$HOME/.config/aider/aider.yaml',
- '--env-file=$(pwd)/.aider.env',
+ '--env-file=$(pwd)/aider.env',
  '--watch',
  '--architect',
  },
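Note: the keymaps above call a change_model_function helper that is defined outside this hunk, so its body is not shown here. A minimal sketch of what such a helper might look like, assuming the aider plugin exposes some command for sending text to the running aider session (the AiderSend name below is a placeholder assumption, not something this commit shows):

-- Hypothetical sketch, not part of this commit: build a callback that switches
-- aider's main model via its /model slash-command. Replace AiderSend with the
-- real "send to aider" command of whichever aider plugin is in use.
local function change_model_function(model)
  return function()
    vim.cmd('AiderSend /model ' .. model)
    vim.notify('aider model set to ' .. model, vim.log.levels.INFO)
  end
end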
@@ -1,75 +0,0 @@
- return {}
- -- require('helpers').edit_cf('pa', '/lua/plugins/avante.lua')
- --
- -- return {
- --   'yetone/avante.nvim',
- --   event = 'VeryLazy',
- --   version = false, -- Never set this value to "*"! Never!
- --   opts = {
- --     -- add any opts here
- --     -- for example
- --     provider = 'aihubmix',
- --     -- cursor_applying_provider = 'aihubmix_llama_versatile',
- --     aihubmix = {
- --       -- model = 'claude-3-7-sonnet-20250219',
- --       model = 'DeepSeek-V3',
- --     },
- --     openai = {
- --       endpoint = 'https://api.openai.com/v1',
- --       model = 'gpt-4o', -- your desired model (or use gpt-4o, etc.)
- --       timeout = 30000, -- Timeout in milliseconds, increase this for reasoning models
- --       temperature = 0,
- --       max_completion_tokens = 8192, -- Increase this to include reasoning tokens (for reasoning models)
- --       --reasoning_effort = "medium", -- low|medium|high, only used for reasoning models
- --     },
- --     vendors = {
- --       aihubmix_llama_versatile = {
- --         __inherited_from = 'openai',
- --         api_key_name = 'AIHUBMIX_API_KEY',
- --         endpoint = 'https://aihubmix.com/v1',
- --         model = 'llama-3.3-70b-versatile',
- --       },
- --     },
- --   },
- --   -- if you want to build from source then do `make BUILD_FROM_SOURCE=true`
- --   build = 'make',
- --   -- build = "powershell -ExecutionPolicy Bypass -File Build.ps1 -BuildFromSource false" -- for windows
- --   dependencies = {
- --     'nvim-treesitter/nvim-treesitter',
- --     'stevearc/dressing.nvim',
- --     'nvim-lua/plenary.nvim',
- --     'MunifTanjim/nui.nvim',
- --     --- The below dependencies are optional,
- --     'echasnovski/mini.pick', -- for file_selector provider mini.pick
- --     'nvim-telescope/telescope.nvim', -- for file_selector provider telescope
- --     'hrsh7th/nvim-cmp', -- autocompletion for avante commands and mentions
- --     'ibhagwan/fzf-lua', -- for file_selector provider fzf
- --     'nvim-tree/nvim-web-devicons', -- or echasnovski/mini.icons
- --     'zbirenbaum/copilot.lua', -- for providers='copilot'
- --     {
- --       -- support for image pasting
- --       'HakonHarnes/img-clip.nvim',
- --       event = 'VeryLazy',
- --       opts = {
- --         -- recommended settings
- --         default = {
- --           embed_image_as_base64 = false,
- --           prompt_for_file_name = false,
- --           drag_and_drop = {
- --             insert_mode = true,
- --           },
- --           -- required for Windows users
- --           use_absolute_path = true,
- --         },
- --       },
- --     },
- --     {
- --       -- Make sure to set this up properly if you have lazy=true
- --       'MeanderingProgrammer/render-markdown.nvim',
- --       opts = {
- --         file_types = { 'markdown', 'Avante' },
- --       },
- --       ft = { 'markdown', 'Avante' },
- --     },
- --   },
- -- }
@@ -51,7 +51,7 @@ return {
  },

  sources = {
- default = { 'codecompanion', 'lsp', 'path', 'snippets', 'lazydev' },
+ default = { 'lsp', 'path', 'snippets', 'lazydev' },
  providers = {
  lazydev = { module = 'lazydev.integrations.blink', score_offset = 100 },
  -- avante = { module = 'blink-cmp-avante', name = 'Avante', opts = {} },
@@ -2,13 +2,20 @@ vim.cmd [[cab cc CodeCompanion]]

  return {
  'olimorris/codecompanion.nvim',
- opts = function(_, opts)
- opts.adapters = {
- gpt = function()
- return require('codecompanion.adapters').extend('copilot', {
+ config = function()
+ require('codecompanion').setup {
+ adapters = {
+ http = {
+ openrouter = function()
+ return require('codecompanion.adapters').extend('openai_compatible', {
+ env = {
+ url = vim.env.DEFAULT_OPENAI_API_BASE,
+ api_key = vim.env.DEFAULT_OPENAI_API_KEY,
+ chat_url = '/v1/chat/completions',
+ },
  schema = {
  model = {
- default = 'gpt-4.1',
+ default = vim.env.DEFAULT_AI_MODEL,
  },
  max_tokens = {
  default = 1000000,
@@ -16,54 +23,17 @@ return {
  },
  })
  end,
- flash = function()
- return require('codecompanion.adapters').extend('copilot', {
- schema = {
- model = {
- default = 'gemini-2.0-flash-001',
- },
- max_tokens = {
- default = 1000000,
- },
- },
- })
- end,
- gemini = function()
- return require('codecompanion.adapters').extend('copilot', {
- schema = {
- model = {
- default = 'gemini-2.5-pro',
- },
- max_tokens = {
- default = 1000000,
- },
- },
- })
- end,
- sonnet = function()
- return require('codecompanion.adapters').extend('copilot', {
- schema = {
- model = {
- default = 'claude-3.7-sonnet',
- },
- max_tokens = {
- default = 1000000,
- },
- },
- })
- end,
- }
-
- opts.display = {
+ display = {
  chat = {
  show_settings = true,
  start_in_insert_mode = false,
  },
  }

- opts.strategies = {
+ },
+ strategies = {
  chat = {
- adapter = 'gpt',
+ adapter = 'openrouter',
  slash_commands = {
  -- codebase = require('vectorcode.integrations').codecompanion.chat.make_slash_command(),
  },
@@ -94,11 +64,19 @@ return {
  },
  },
  inline = {
- adapter = 'flash',
+ adapter = {
+ name = 'openrouter',
+ model = vim.env.FAST_MODEL,
  },
- }
-
- opts.extensions = {
+ },
+ cmd = {
+ adapter = {
+ name = 'openrouter',
+ model = vim.env.FAST_MODEL,
+ },
+ },
+ },
+ extensions = {
  mcphub = {
  callback = 'mcphub.extensions.codecompanion',
  opts = {
@@ -107,9 +85,8 @@ return {
  make_slash_commands = true,
  },
  },
- }
-
- opts.system_prompt = function(opts)
+ },
+ system_prompt = function(opts)
  local language = opts.language or 'English'
  return string.format(
  [[You are an AI programming assistant named "CodeCompanion". You are currently plugged into the Neovim text editor on a user's machine.
@@ -149,9 +126,8 @@ When given a task:
  5. If necessary, execute multiple tools in a single turn.]],
  language
  )
- end
-
- opts.prompt_library = {
+ end,
+ prompt_library = {
  ['Code Expert'] = {
  strategy = 'chat',
  description = 'Get some special advice from an LLM.',
@@ -340,6 +316,7 @@ We'll repeat this cycle until there are no errors. Ensure no deviations from the
  },
  },
  },
+ },
  }
  end,
  dependencies = {
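The new CodeCompanion adapter is driven entirely by environment variables (DEFAULT_OPENAI_API_BASE, DEFAULT_OPENAI_API_KEY, DEFAULT_AI_MODEL, FAST_MODEL). A small optional startup check, sketched here as a suggestion rather than anything this commit contains:

-- Optional sketch, not part of this commit: warn once at startup if any of the
-- environment variables the new CodeCompanion config reads are unset.
for _, name in ipairs { 'DEFAULT_OPENAI_API_BASE', 'DEFAULT_OPENAI_API_KEY', 'DEFAULT_AI_MODEL', 'FAST_MODEL' } do
  if vim.env[name] == nil or vim.env[name] == '' then
    vim.notify(('codecompanion: $%s is not set'):format(name), vim.log.levels.WARN)
  end
end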
@@ -1,21 +1,22 @@
- return {
- 'zbirenbaum/copilot.lua',
- event = 'InsertEnter',
- config = function()
- require('copilot').setup {
- suggestion = {
- enabled = true,
- auto_trigger = true,
- keymap = {
- accept = '<Tab>',
- },
- },
- filetypes = {
- yaml = true,
- markdown = true,
- gitcommit = true,
- gitrebase = true,
- },
- }
- end,
- }
+ return {}
+ -- return {
+ --   'zbirenbaum/copilot.lua',
+ --   event = 'InsertEnter',
+ --   config = function()
+ --     require('copilot').setup {
+ --       suggestion = {
+ --         enabled = true,
+ --         auto_trigger = true,
+ --         keymap = {
+ --           accept = '<Tab>',
+ --         },
+ --       },
+ --       filetypes = {
+ --         yaml = true,
+ --         markdown = true,
+ --         gitcommit = true,
+ --         gitrebase = true,
+ --       },
+ --     }
+ --   end,
+ -- }
lua/plugins/minuet.lua (new file, 45 lines)
@@ -0,0 +1,45 @@
+ -- AI code completion
+ -- A
+ return {
+ 'milanglacier/minuet-ai.nvim',
+ dependencies = { 'nvim-lua/plenary.nvim' },
+ config = function()
+ require('minuet').setup {
+ virtualtext = {
+ auto_trigger_ft = { 'lua' },
+ keymap = {
+ accept = '<A-A>',
+ accept_line = '<A-a>',
+ -- accept n lines (prompts for number)
+ -- e.g. "A-z 2 CR" will accept 2 lines
+ accept_n_lines = '<A-z>',
+ -- Cycle to prev completion item, or manually invoke completion
+ prev = '<A-[>',
+ -- Cycle to next completion item, or manually invoke completion
+ next = '<A-]>',
+ dismiss = '<A-e>',
+ },
+ },
+ provider = 'openai_compatible',
+ request_timeout = 3,
+ throttle = 1500, -- Increase to reduce costs and avoid rate limits
+ debounce = 600, -- Increase to reduce costs and avoid rate limits
+ provider_options = {
+ openai_compatible = {
+ api_key = 'COMPLETION_OPENAI_API_KEY',
+ end_point = vim.env.COMPLETION_OPENAI_API_BASE .. '/v1/chat/completions',
+ model = vim.env.COMPLETION_MODEL,
+ name = 'Openrouter',
+ optional = {
+ max_tokens = 2000,
+ top_p = 0.9,
+ provider = {
+ -- Prioritize throughput for faster completion
+ sort = 'throughput',
+ },
+ },
+ },
+ },
+ }
+ end,
+ }
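One detail worth noting in the new minuet spec: end_point is built with vim.env.COMPLETION_OPENAI_API_BASE .. '/v1/chat/completions', so if that variable is unset the config function errors at load time with "attempt to concatenate a nil value". A hedged sketch of a more forgiving variant, using only keys that appear in the commit (the early warning path is an assumption, not part of the commit):

-- Hypothetical variant, not part of this commit: avoid the nil-concatenation
-- error when COMPLETION_OPENAI_API_BASE is unset by warning and skipping setup.
local base = vim.env.COMPLETION_OPENAI_API_BASE
if base == nil or base == '' then
  vim.notify('minuet: $COMPLETION_OPENAI_API_BASE is not set', vim.log.levels.WARN)
else
  require('minuet').setup {
    provider = 'openai_compatible',
    provider_options = {
      openai_compatible = {
        api_key = 'COMPLETION_OPENAI_API_KEY',
        end_point = base .. '/v1/chat/completions',
        model = vim.env.COMPLETION_MODEL,
        name = 'Openrouter',
      },
    },
  }
end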