Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,34 @@ Now you can select text and run it with command `:AIEdit /grammar`.

You can also combine roles `:AI /o1-mini /grammar helo world!`

Alternatively, `g:vim_ai_roles_config_file` can point to a directory.
Each `*.md` file in that directory is loaded as one role, where the file name is the role name.
Use markdown frontmatter to configure role options.
The markdown body is added to `options.initial_prompt` as a `>>> system` block.

```vim
let g:vim_ai_roles_config_file = '/path/to/my/roles'
```

```markdown
---
model: openai:gpt-5.2-high
temperature: 0.3
chat.max_tokens: 1200
---
fix spelling and grammar
```

The example above maps to role config values:

```ini
provider = openai
options.model = gpt-5.2
options.reasoning_effort = high
options.temperature = 0.3
options.initial_prompt = >>> system\n\nfix spelling and grammar
```

See [roles-example.ini](./roles-example.ini) for more examples.

## Reference
Expand Down
22 changes: 22 additions & 0 deletions doc/vim-ai.txt
Original file line number Diff line number Diff line change
Expand Up @@ -275,6 +275,28 @@ Roles are defined in the `.ini` file: >

let g:vim_ai_roles_config_file = '/path/to/my/roles.ini'

Alternatively, `g:vim_ai_roles_config_file` can point to a directory with
markdown files (`*.md`). Each file is loaded as one role, where the file name
is the role name. Markdown frontmatter defines the options, and the markdown
body is added to `options.initial_prompt` as a `>>> system` block: >

let g:vim_ai_roles_config_file = '/path/to/my/roles'

---
model: openai:gpt-5.2-high
temperature: 0.3
chat.max_tokens: 1200
---
fix spelling and grammar

This maps to role values: >

provider = openai
options.model = gpt-5.2
options.reasoning_effort = high
options.temperature = 0.3
options.initial_prompt = >>> system\n\nfix spelling and grammar

Example of a role: >

[grammar]
Expand Down
124 changes: 122 additions & 2 deletions py/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
import traceback
import configparser
import base64
import re

utils_py_imported = True

Expand Down Expand Up @@ -335,15 +336,134 @@ def enhance_roles_with_custom_function(roles):
else:
roles.update(vim.eval(roles_config_function + "()"))

def _parse_markdown_frontmatter(content, role_file_path):
lines = content.splitlines()
if not lines or lines[0].strip() != '---':
return {}, content.strip()

header = {}
end_index = -1
for index, raw_line in enumerate(lines[1:], start=1):
line = raw_line.strip()
if line == '---':
end_index = index
break
if not line or line.startswith('#'):
continue
if ':' not in line:
raise Exception(f"Invalid markdown header in role file: {role_file_path}")
key, value = line.split(':', 1)
value = value.strip()
if len(value) >= 2 and value[0] == value[-1] and value[0] in ('"', "'"):
value = value[1:-1]
header[key.strip()] = value

if end_index == -1:
raise Exception(f"Missing closing markdown header in role file: {role_file_path}")

prompt = '\n'.join(lines[end_index + 1:]).strip()
return header, prompt

def _parse_model_header_value(value):
parsed_provider = ''
parsed_model = value.strip()

provider_match = re.match(r'^([a-zA-Z0-9_-]+):(.*)$', parsed_model)
if provider_match:
parsed_provider = provider_match.group(1)
parsed_model = provider_match.group(2).strip()

reasoning_effort = ''
for suffix, effort in [('-high', 'high'), ('-medium', 'medium'), ('-low', 'low')]:
if parsed_model.endswith(suffix):
parsed_model = parsed_model[:-len(suffix)]
reasoning_effort = effort
break

return parsed_provider, parsed_model, reasoning_effort

def _make_markdown_section_name(role_name, key):
chunks = key.split('.', 1)
if len(chunks) > 1 and chunks[0] in ('chat', 'complete', 'edit', 'image'):
return f"{role_name}.{chunks[0]}", chunks[1]
return role_name, key

def _as_system_initial_prompt(text):
text = text.strip()
if not text:
return ''
if text.startswith('>>>'):
return text
return f">>> system\n\n{text}"

def _parse_markdown_role_file(role_name, role_file_path):
    """Parse one markdown role file into configparser-style sections.

    Frontmatter keys become role options (`model` is expanded into
    provider / model / reasoning_effort) and the markdown body is appended
    to ``options.initial_prompt`` as a ``>>> system`` block.

    Returns a dict of section name -> {key: value}, suitable for
    ``configparser.read_dict``.
    """
    with open(role_file_path, 'r', encoding='utf-8') as file:
        content = file.read()

    header, prompt = _parse_markdown_frontmatter(content, role_file_path)
    sections = {}

    for key, value in header.items():
        section_name, parsed_key = _make_markdown_section_name(role_name, key)
        section = sections.setdefault(section_name, {})

        if parsed_key == 'model':
            # `model` may encode provider and effort, e.g. "openai:gpt-5.2-high".
            provider, model, reasoning_effort = _parse_model_header_value(value)
            if provider:
                section['provider'] = provider
            section['options.model'] = model
            if reasoning_effort:
                section['options.reasoning_effort'] = reasoning_effort
        elif parsed_key == 'provider':
            section['provider'] = value
        elif parsed_key == 'prompt':
            section['prompt'] = value
        elif parsed_key.startswith('options.') or parsed_key.startswith('ui.'):
            # Fully-qualified keys are passed through unchanged.
            section[parsed_key] = value
        else:
            # Bare keys (e.g. "temperature") are shortcuts for options.
            section[f"options.{parsed_key}"] = value

    if prompt:
        section = sections.setdefault(role_name, {})
        parsed_prompt = _as_system_initial_prompt(prompt)
        existing_initial_prompt = section.get('options.initial_prompt', '').strip()
        if existing_initial_prompt:
            # Keep an explicitly configured initial_prompt and append the body.
            section['options.initial_prompt'] = f"{existing_initial_prompt}\n\n{parsed_prompt}"
        else:
            section['options.initial_prompt'] = parsed_prompt

    return sections

def _read_roles_from_markdown_directory(roles_dir_path):
    """Load every ``*.md``/``*.markdown`` file in the directory as a role.

    The file name (without extension) is the role name. Returns a mapping
    of section name -> options dict, suitable for ``configparser.read_dict``.
    """
    role_files = []
    for pattern in ('*.md', '*.markdown'):
        role_files.extend(sorted(glob.glob(os.path.join(roles_dir_path, pattern))))

    roles = {}
    for path in role_files:
        # A directory named e.g. "foo.md" is not a role file.
        if os.path.isdir(path):
            continue
        name = os.path.splitext(os.path.basename(path))[0]
        roles.update(_parse_markdown_role_file(name, path))
    return roles

def read_role_files():
    """Read role definitions: the plugin's default ini plus the user config.

    ``g:vim_ai_roles_config_file`` may point at an ini file or at a
    directory of markdown role files. Returns a populated ConfigParser.
    Raises when the configured path does not exist.
    """
    plugin_root = vim.eval("s:plugin_root")
    default_roles_config_path = str(os.path.join(plugin_root, "roles-default.ini"))
    roles_config_path = os.path.expanduser(vim.eval("g:vim_ai_roles_config_file"))
    if not os.path.exists(roles_config_path):
        raise Exception(f"Role config file does not exist: {roles_config_path}")

    # Role prompts can contain '%' (for example "60 % shorter"), so interpolation
    # must be disabled to avoid ConfigParser ValueError.
    # NOTE: removed a leftover first ConfigParser (interpolation enabled) that
    # read both configs and was then discarded — dead work that could raise on
    # '%' in prompts before the interpolation-free parser ever ran.
    roles = configparser.ConfigParser(interpolation=None)
    roles.read([default_roles_config_path])
    if os.path.isdir(roles_config_path):
        roles.read_dict(_read_roles_from_markdown_directory(roles_config_path))
    else:
        roles.read([roles_config_path])
    return roles

def save_b64_to_file(path, b64_data):
Expand Down
48 changes: 48 additions & 0 deletions tests/context_test.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,10 @@
from context import make_ai_context, make_prompt
from unittest.mock import patch
import vim
import os

dirname = os.path.dirname(__file__)
markdown_roles_dir = os.path.join(dirname, 'resources/roles-md')

default_config = {
"options": {
Expand Down Expand Up @@ -246,3 +250,47 @@ def test_role_config_all_params():
assert actual_options['top_logprobs'] == '5'
assert actual_options['top_p'] == '0.9'

def test_markdown_role_header_model_mapping():
    """Frontmatter `model: openai:gpt-5.2-high` expands into provider/model/effort."""
    original_eval = vim.eval

    def fake_eval(cmd):
        # Redirect only the roles-config lookup to the markdown fixtures dir.
        if cmd == 'g:vim_ai_roles_config_file':
            return markdown_roles_dir
        return original_eval(cmd)

    with patch('vim.eval', side_effect=fake_eval):
        context = make_ai_context({
            'config_default': default_config,
            'config_extension': {},
            'user_instruction': '/markdown-role hello',
            'user_selection': '',
            'command_type': 'chat',
        })

    config = context['config']
    options = config['options']
    assert config['provider'] == 'openai'
    assert options['model'] == 'gpt-5.2'
    assert options['reasoning_effort'] == 'high'
    assert options['temperature'] == '0.3'
    assert options['max_tokens'] == '1200'
    assert options['initial_prompt'] == '>>> system\n\nmarkdown role prompt'
    assert context['prompt'] == 'hello'

def test_markdown_image_role_header_mapping():
    """`image.*` frontmatter keys land in the image-specific role section."""
    original_eval = vim.eval

    def fake_eval(cmd):
        # Redirect only the roles-config lookup to the markdown fixtures dir.
        if cmd == 'g:vim_ai_roles_config_file':
            return markdown_roles_dir
        return original_eval(cmd)

    with patch('vim.eval', side_effect=fake_eval):
        actual_context = make_ai_context({
            'config_default': default_image_config,
            'config_extension': {},
            'user_instruction': '/markdown-image describe this image',
            'user_selection': '',
            'command_type': 'image',
        })

    config = actual_context['config']
    assert config['provider'] == 'openai'
    assert config['options']['model'] == 'gpt-image-1'
    assert config['options']['size'] == '1024x1024'

def test_markdown_role_prompt_with_percent_sign():
    """A '%' in the markdown prompt must survive (interpolation disabled)."""
    original_eval = vim.eval

    def fake_eval(cmd):
        # Redirect only the roles-config lookup to the markdown fixtures dir.
        if cmd == 'g:vim_ai_roles_config_file':
            return markdown_roles_dir
        return original_eval(cmd)

    with patch('vim.eval', side_effect=fake_eval):
        context = make_ai_context({
            'config_default': default_config,
            'config_extension': {},
            'user_instruction': '/markdown-role-percent hello',
            'user_selection': '',
            'command_type': 'chat',
        })

    expected = '>>> system\n\nRewrite with 60 % fewer words.'
    assert context['config']['options']['initial_prompt'] == expected
5 changes: 5 additions & 0 deletions tests/resources/roles-md/markdown-image.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
image.model: openai:gpt-image-1
image.size: 1024x1024
---
image role prompt
4 changes: 4 additions & 0 deletions tests/resources/roles-md/markdown-role-percent.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
---
model: openai:gpt-5.2-high
---
Rewrite with 60 % fewer words.
6 changes: 6 additions & 0 deletions tests/resources/roles-md/markdown-role.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
---
model: openai:gpt-5.2-high
temperature: 0.3
chat.max_tokens: 1200
---
markdown role prompt
18 changes: 18 additions & 0 deletions tests/roles_test.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,10 @@
from roles import load_ai_role_names
import os
from unittest.mock import patch
import vim

dirname = os.path.dirname(__file__)
markdown_roles_dir = os.path.join(dirname, 'resources/roles-md')

def test_role_completion():
role_names = load_ai_role_names('complete')
Expand Down Expand Up @@ -31,3 +37,15 @@ def test_role_chat_only():
def test_explicit_image_roles():
    """Image command exposes exactly the explicitly image-capable roles."""
    names = load_ai_role_names('image')
    assert set(names) == {'hd-image', 'hd', 'natural'}

def test_load_markdown_roles_from_directory():
    """Roles defined as markdown files in a directory are discovered by name."""
    original_eval = vim.eval

    def fake_eval(cmd):
        if cmd == 'g:vim_ai_roles_config_file':
            return markdown_roles_dir
        return original_eval(cmd)

    with patch('vim.eval', side_effect=fake_eval):
        names = load_ai_role_names('chat')
    assert 'markdown-role' in names

def test_markdown_image_role_names():
    """A markdown role with `image.*` options shows up for the image command."""
    original_eval = vim.eval

    def fake_eval(cmd):
        if cmd == 'g:vim_ai_roles_config_file':
            return markdown_roles_dir
        return original_eval(cmd)

    with patch('vim.eval', side_effect=fake_eval):
        names = load_ai_role_names('image')
    assert 'markdown-image' in names