Compare commits

..

No commits in common. "chore/add-issue-templates" and "feat/highlights" have entirely different histories.

26 changed files with 467 additions and 612 deletions

View file

@ -1,78 +0,0 @@
name: Bug Report
description: Report a bug
title: 'bug: '
labels: [bug]
body:
- type: checkboxes
attributes:
label: Prerequisites
options:
- label:
I have searched [existing
issues](https://github.com/barrettruth/cp.nvim/issues)
required: true
- label: I have updated to the latest version
required: true
- type: textarea
attributes:
label: 'Neovim version'
description: 'Output of `nvim --version`'
render: text
validations:
required: true
- type: input
attributes:
label: 'Operating system'
placeholder: 'e.g. Arch Linux, macOS 15, Ubuntu 24.04'
validations:
required: true
- type: textarea
attributes:
label: Description
description: What happened? What did you expect?
validations:
required: true
- type: textarea
attributes:
label: Steps to reproduce
description: Minimal steps to trigger the bug
value: |
1.
2.
3.
validations:
required: true
- type: textarea
attributes:
label: 'Health check'
description: 'Output of `:checkhealth cp`'
render: text
- type: textarea
attributes:
label: Minimal reproduction
description: |
Save the script below as `repro.lua`, edit if needed, and run:
```
nvim -u repro.lua
```
Confirm the bug reproduces with this config before submitting.
render: lua
value: |
vim.env.LAZY_STDPATH = '.repro'
load(vim.fn.system('curl -s https://raw.githubusercontent.com/folke/lazy.nvim/main/bootstrap.lua'))()
require('lazy.nvim').setup({
spec = {
{
'barrett-ruth/cp.nvim',
opts = {},
},
},
})
validations:
required: true

View file

@ -1,5 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Questions
url: https://github.com/barrettruth/cp.nvim/discussions
about: Ask questions and discuss ideas

View file

@ -1,30 +0,0 @@
name: Feature Request
description: Suggest a feature
title: 'feat: '
labels: [enhancement]
body:
- type: checkboxes
attributes:
label: Prerequisites
options:
- label:
I have searched [existing
issues](https://github.com/barrettruth/cp.nvim/issues)
required: true
- type: textarea
attributes:
label: Problem
description: What problem does this solve?
validations:
required: true
- type: textarea
attributes:
label: Proposed solution
validations:
required: true
- type: textarea
attributes:
label: Alternatives considered

View file

@ -1,112 +0,0 @@
name: ci
on:
workflow_call:
pull_request:
branches: [main]
push:
branches: [main]
jobs:
changes:
runs-on: ubuntu-latest
outputs:
lua: ${{ steps.changes.outputs.lua }}
python: ${{ steps.changes.outputs.python }}
steps:
- uses: actions/checkout@v4
- uses: dorny/paths-filter@v3
id: changes
with:
filters: |
lua:
- 'lua/**'
- 'spec/**'
- 'plugin/**'
- 'after/**'
- 'ftdetect/**'
- '*.lua'
- '.luarc.json'
- 'stylua.toml'
- 'selene.toml'
python:
- 'scripts/**'
- 'scrapers/**'
- 'tests/**'
- 'pyproject.toml'
- 'uv.lock'
lua-format:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.lua == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: JohnnyMorganz/stylua-action@v4
with:
token: ${{ secrets.GITHUB_TOKEN }}
version: 2.1.0
args: --check .
lua-lint:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.lua == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: NTBBloodbath/selene-action@v1.0.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --display-style quiet .
lua-typecheck:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.lua == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: mrcjkb/lua-typecheck-action@v0
with:
checklevel: Warning
directories: lua
configpath: .luarc.json
python-format:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.python == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: astral-sh/setup-uv@v4
- run: uv tool install ruff
- run: ruff format --check .
python-lint:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.python == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: astral-sh/setup-uv@v4
- run: uv tool install ruff
- run: ruff check .
python-typecheck:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.python == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: astral-sh/setup-uv@v4
- run: uv sync --dev
- run: uvx ty check .
python-test:
runs-on: ubuntu-latest
needs: changes
if: ${{ needs.changes.outputs.python == 'true' }}
steps:
- uses: actions/checkout@v4
- uses: astral-sh/setup-uv@v4
- run: uv sync --dev
- run: uv run camoufox fetch
- run: uv run pytest tests/ -v

View file

@ -1,21 +1,18 @@
name: luarocks
name: Release
on:
push:
tags:
- 'v*'
- '*'
workflow_dispatch:
jobs:
ci:
uses: ./.github/workflows/ci.yaml
publish:
needs: ci
publish-luarocks:
name: Publish to LuaRocks
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: nvim-neorocks/luarocks-tag-release@v7
- name: Publish to LuaRocks
uses: nvim-neorocks/luarocks-tag-release@v7
env:
LUAROCKS_API_KEY: ${{ secrets.LUAROCKS_API_KEY }}

View file

@ -1,4 +1,4 @@
name: quality
name: Code Quality
on:
pull_request:

View file

@ -1,4 +1,4 @@
name: tests
name: Tests
on:
pull_request:

View file

@ -25,7 +25,7 @@ repos:
hooks:
- id: prettier
name: prettier
files: \.(md|toml|ya?ml|sh)$
files: \.(md|,toml,yaml,sh)$
- repo: local
hooks:

View file

@ -19,15 +19,6 @@ https://github.com/user-attachments/assets/e81d8dfb-578f-4a79-9989-210164fc0148
- **Language agnostic**: Works with any language
- **Diff viewer**: Compare expected vs actual output with 3 diff modes
## Installation
Install using your package manager of choice or via
[luarocks](https://luarocks.org/modules/barrettruth/cp.nvim):
```
luarocks install cp.nvim
```
## Optional Dependencies
- [uv](https://docs.astral.sh/uv/) for problem scraping

View file

@ -2,7 +2,7 @@ rockspec_format = '3.0'
package = 'cp.nvim'
version = 'scm-1'
source = { url = 'git://github.com/barrettruth/cp.nvim' }
source = { url = 'git://github.com/barrett-ruth/cp.nvim' }
build = { type = 'builtin' }
test_dependencies = {

View file

@ -205,66 +205,71 @@ Debug Builds ~
==============================================================================
CONFIGURATION *cp-config*
Configuration is done via `vim.g.cp_config`. Set this before using the plugin:
Here's an example configuration with lazy.nvim:
>lua
vim.g.cp_config = {
languages = {
cpp = {
extension = 'cc',
commands = {
build = { 'g++', '-std=c++17', '{source}', '-o', '{binary}',
'-fdiagnostics-color=always' },
run = { '{binary}' },
debug = { 'g++', '-std=c++17', '-fsanitize=address,undefined',
'{source}', '-o', '{binary}' },
{
'barrett-ruth/cp.nvim',
cmd = 'CP',
build = 'uv sync',
opts = {
languages = {
cpp = {
extension = 'cc',
commands = {
build = { 'g++', '-std=c++17', '{source}', '-o', '{binary}',
'-fdiagnostics-color=always' },
run = { '{binary}' },
debug = { 'g++', '-std=c++17', '-fsanitize=address,undefined',
'{source}', '-o', '{binary}' },
},
},
python = {
extension = 'py',
commands = {
run = { 'python', '{source}' },
debug = { 'python', '{source}' },
},
},
},
python = {
extension = 'py',
commands = {
run = { 'python', '{source}' },
debug = { 'python', '{source}' },
platforms = {
cses = {
enabled_languages = { 'cpp', 'python' },
default_language = 'cpp',
overrides = {
cpp = { extension = 'cpp', commands = { build = { ... } } }
},
},
atcoder = {
enabled_languages = { 'cpp', 'python' },
default_language = 'cpp',
},
codeforces = {
enabled_languages = { 'cpp', 'python' },
default_language = 'cpp',
},
},
},
platforms = {
cses = {
enabled_languages = { 'cpp', 'python' },
default_language = 'cpp',
overrides = {
cpp = { extension = 'cpp', commands = { build = { ... } } }
open_url = true,
debug = false,
ui = {
ansi = true,
run = {
width = 0.3,
next_test_key = '<c-n>', -- or nil to disable
prev_test_key = '<c-p>', -- or nil to disable
},
},
atcoder = {
enabled_languages = { 'cpp', 'python' },
default_language = 'cpp',
},
codeforces = {
enabled_languages = { 'cpp', 'python' },
default_language = 'cpp',
},
},
open_url = true,
debug = false,
ui = {
ansi = true,
run = {
width = 0.3,
next_test_key = '<c-n>', -- or nil to disable
prev_test_key = '<c-p>', -- or nil to disable
},
panel = {
diff_modes = { 'side-by-side', 'git', 'vim' },
max_output_lines = 50,
},
diff = {
git = {
args = { 'diff', '--no-index', '--word-diff=plain',
'--word-diff-regex=.', '--no-prefix' },
panel = {
diff_mode = 'vim',
max_output_lines = 50,
},
diff = {
git = {
args = { 'diff', '--no-index', '--word-diff=plain',
'--word-diff-regex=.', '--no-prefix' },
},
},
picker = 'telescope',
},
picker = 'telescope',
},
}
}
<
@ -274,7 +279,7 @@ the default; per-platform overrides can tweak 'extension' or 'commands'.
For example, to run CodeForces contests with Python by default:
>lua
vim.g.cp_config = {
{
platforms = {
codeforces = {
default_language = 'python',
@ -285,7 +290,7 @@ For example, to run CodeForces contests with Python by default:
Any language is supported provided the proper configuration. For example, to
run CSES problems with Rust using the single schema:
>lua
vim.g.cp_config = {
{
languages = {
rust = {
extension = 'rs',
@ -373,10 +378,8 @@ run CSES problems with Rust using the single schema:
*cp.PanelConfig*
Fields: ~
{diff_modes} (string[], default: {'side-by-side', 'git', 'vim'})
List of diff modes to cycle through with 't' key.
First element is the default mode.
Valid modes: 'side-by-side', 'git', 'vim'.
{diff_mode} (string, default: "none") Diff backend: "none",
"vim", or "git".
{max_output_lines} (number, default: 50) Maximum lines of test output.
*cp.DiffConfig*
@ -848,20 +851,17 @@ PANEL KEYMAPS *cp-panel-keys*
<c-n> Navigate to next test case
<c-p> Navigate to previous test case
t Cycle through configured diff modes (see |cp.PanelConfig|)
t Cycle through diff modes: none → git → vim
q Exit panel and restore layout
<c-q> Exit interactive terminal and restore layout
Diff Modes ~
Three diff modes are available:
Three diff backends are available:
side-by-side Expected and actual output shown side-by-side (default)
vim Built-in vim diff (always available)
git Character-level git word-diff (requires git, more precise)
Configure which modes to cycle through via |cp.PanelConfig|.diff_modes.
The first element is used as the default mode.
none Nothing
vim Built-in vim diff (default, always available)
git Character-level git word-diff (requires git, more precise)
The git backend shows character-level changes with [-removed-] and {+added+}
markers.

View file

@ -18,7 +18,7 @@
---@field overrides? table<string, CpPlatformOverrides>
---@class PanelConfig
---@field diff_modes string[]
---@field diff_mode "none"|"vim"|"git"
---@field max_output_lines integer
---@class DiffGitConfig
@ -173,7 +173,7 @@ M.defaults = {
add_test_key = 'ga',
save_and_exit_key = 'q',
},
panel = { diff_modes = { 'side-by-side', 'git', 'vim' }, max_output_lines = 50 },
panel = { diff_mode = 'none', max_output_lines = 50 },
diff = {
git = {
args = { 'diff', '--no-index', '--word-diff=plain', '--word-diff-regex=.', '--no-prefix' },
@ -305,24 +305,7 @@ function M.setup(user_config)
vim.validate({
hooks = { cfg.hooks, { 'table' } },
ui = { cfg.ui, { 'table' } },
debug = { cfg.debug, { 'boolean', 'nil' }, true },
open_url = { cfg.open_url, { 'boolean', 'nil' }, true },
filename = { cfg.filename, { 'function', 'nil' }, true },
scrapers = {
cfg.scrapers,
function(v)
if type(v) ~= 'table' then
return false
end
for _, s in ipairs(v) do
if not vim.tbl_contains(constants.PLATFORMS, s) then
return false
end
end
return true
end,
('one of {%s}'):format(table.concat(constants.PLATFORMS, ',')),
},
before_run = { cfg.hooks.before_run, { 'function', 'nil' }, true },
before_debug = { cfg.hooks.before_debug, { 'function', 'nil' }, true },
setup_code = { cfg.hooks.setup_code, { 'function', 'nil' }, true },
@ -330,23 +313,14 @@ function M.setup(user_config)
setup_io_output = { cfg.hooks.setup_io_output, { 'function', 'nil' }, true },
})
local layouts = require('cp.ui.layouts')
vim.validate({
ansi = { cfg.ui.ansi, 'boolean' },
diff_modes = {
cfg.ui.panel.diff_modes,
diff_mode = {
cfg.ui.panel.diff_mode,
function(v)
if type(v) ~= 'table' then
return false
end
for _, mode in ipairs(v) do
if not layouts.DIFF_MODES[mode] then
return false
end
end
return true
return vim.tbl_contains({ 'none', 'vim', 'git' }, v)
end,
('one of {%s}'):format(table.concat(vim.tbl_keys(layouts.DIFF_MODES), ',')),
"diff_mode must be 'none', 'vim', or 'git'",
},
max_output_lines = {
cfg.ui.panel.max_output_lines,
@ -356,14 +330,6 @@ function M.setup(user_config)
'positive integer',
},
git = { cfg.ui.diff.git, { 'table' } },
git_args = { cfg.ui.diff.git.args, is_string_list, 'string[]' },
width = {
cfg.ui.run.width,
function(v)
return type(v) == 'number' and v > 0 and v <= 1
end,
'decimal between 0 and 1',
},
next_test_key = {
cfg.ui.run.next_test_key,
function(v)
@ -417,13 +383,6 @@ function M.setup(user_config)
end,
'nil or non-empty string',
},
picker = {
cfg.ui.picker,
function(v)
return v == nil or v == 'telescope' or v == 'fzf-lua'
end,
"nil, 'telescope', or 'fzf-lua'",
},
})
for id, lang in pairs(cfg.languages) do
@ -484,18 +443,7 @@ function M.get_language_for_platform(platform_id, language_id)
}
end
local platform_effective = cfg.runtime.effective[platform_id]
if not platform_effective then
return {
valid = false,
error = string.format(
'No runtime config for platform %s (plugin not initialized)',
platform_id
),
}
end
local effective = platform_effective[language_id]
local effective = cfg.runtime.effective[platform_id][language_id]
if not effective then
return {
valid = false,

View file

@ -11,25 +11,25 @@ if vim.fn.has('nvim-0.10.0') == 0 then
return {}
end
local user_config = {}
local config = nil
local initialized = false
local function ensure_initialized()
if initialized then
return
end
local user_config = vim.g.cp_config or {}
local config = config_module.setup(user_config)
config_module.set_current_config(config)
initialized = true
end
---@return nil
function M.handle_command(opts)
ensure_initialized()
local commands = require('cp.commands')
commands.handle_command(opts)
end
function M.setup(opts)
opts = opts or {}
user_config = opts
config = config_module.setup(user_config)
config_module.set_current_config(config)
initialized = true
end
function M.is_initialized()
return initialized
end

View file

@ -177,16 +177,6 @@ function M.compile_problem(debug, on_complete)
local language = state.get_language() or config.platforms[platform].default_language
local eff = config.runtime.effective[platform][language]
local source_file = state.get_source_file()
if source_file then
local buf = vim.fn.bufnr(source_file)
if buf ~= -1 and vim.api.nvim_buf_is_loaded(buf) and vim.bo[buf].modified then
vim.api.nvim_buf_call(buf, function()
vim.cmd.write({ mods = { silent = true, noautocmd = true } })
end)
end
end
local compile_config = (debug and eff.commands.debug) or eff.commands.build
if not compile_config then
@ -194,8 +184,6 @@ function M.compile_problem(debug, on_complete)
return
end
require('cp.utils').ensure_dirs()
local binary = debug and state.get_debug_file() or state.get_binary_file()
local substitutions = { source = state.get_source_file(), binary = binary }

View file

@ -186,7 +186,7 @@ function M.scrape_all_tests(platform, contest_id, callback)
return
end
vim.schedule(function()
require('cp.utils').ensure_dirs()
vim.system({ 'mkdir', '-p', 'build', 'io' }):wait()
local config = require('cp.config')
local base_name = config.default_filename(contest_id, ev.problem_id)
for i, t in ipairs(ev.tests) do

View file

@ -82,7 +82,7 @@ local function start_tests(platform, contest_id, problems)
return not vim.tbl_isempty(cache.get_test_cases(platform, contest_id, p.id))
end, problems)
if cached_len ~= #problems then
logger.log(('Fetching %s/%s problem tests...'):format(cached_len, #problems))
logger.log(('Fetching problem test data... (%d/%d)'):format(cached_len, #problems))
scraper.scrape_all_tests(platform, contest_id, function(ev)
local cached_tests = {}
if not ev.interactive and vim.tbl_isempty(ev.tests) then

View file

@ -3,13 +3,7 @@ local M = {}
local helpers = require('cp.helpers')
local utils = require('cp.utils')
M.DIFF_MODES = {
['side-by-side'] = 'side-by-side',
vim = 'vim',
git = 'git',
}
local function create_side_by_side_layout(parent_win, expected_content, actual_content)
local function create_none_diff_layout(parent_win, expected_content, actual_content)
local expected_buf = utils.create_buffer_with_options()
local actual_buf = utils.create_buffer_with_options()
helpers.clearcol(expected_buf)
@ -27,13 +21,8 @@ local function create_side_by_side_layout(parent_win, expected_content, actual_c
vim.api.nvim_set_option_value('filetype', 'cp', { buf = expected_buf })
vim.api.nvim_set_option_value('filetype', 'cp', { buf = actual_buf })
local label = M.DIFF_MODES['side-by-side']
vim.api.nvim_set_option_value(
'winbar',
('expected (diff: %s)'):format(label),
{ win = expected_win }
)
vim.api.nvim_set_option_value('winbar', ('actual (diff: %s)'):format(label), { win = actual_win })
vim.api.nvim_set_option_value('winbar', 'Expected', { win = expected_win })
vim.api.nvim_set_option_value('winbar', 'Actual', { win = actual_win })
local expected_lines = vim.split(expected_content, '\n', { plain = true, trimempty = true })
local actual_lines = vim.split(actual_content, '\n', { plain = true })
@ -44,7 +33,6 @@ local function create_side_by_side_layout(parent_win, expected_content, actual_c
return {
buffers = { expected_buf, actual_buf },
windows = { expected_win, actual_win },
mode = 'side-by-side',
cleanup = function()
pcall(vim.api.nvim_win_close, expected_win, true)
pcall(vim.api.nvim_win_close, actual_win, true)
@ -72,13 +60,8 @@ local function create_vim_diff_layout(parent_win, expected_content, actual_conte
vim.api.nvim_set_option_value('filetype', 'cp', { buf = expected_buf })
vim.api.nvim_set_option_value('filetype', 'cp', { buf = actual_buf })
local label = M.DIFF_MODES.vim
vim.api.nvim_set_option_value(
'winbar',
('expected (diff: %s)'):format(label),
{ win = expected_win }
)
vim.api.nvim_set_option_value('winbar', ('actual (diff: %s)'):format(label), { win = actual_win })
vim.api.nvim_set_option_value('winbar', 'Expected', { win = expected_win })
vim.api.nvim_set_option_value('winbar', 'Actual', { win = actual_win })
local expected_lines = vim.split(expected_content, '\n', { plain = true, trimempty = true })
local actual_lines = vim.split(actual_content, '\n', { plain = true })
@ -100,7 +83,6 @@ local function create_vim_diff_layout(parent_win, expected_content, actual_conte
return {
buffers = { expected_buf, actual_buf },
windows = { expected_win, actual_win },
mode = 'vim',
cleanup = function()
pcall(vim.api.nvim_win_close, expected_win, true)
pcall(vim.api.nvim_win_close, actual_win, true)
@ -121,8 +103,7 @@ local function create_git_diff_layout(parent_win, expected_content, actual_conte
vim.api.nvim_win_set_buf(diff_win, diff_buf)
vim.api.nvim_set_option_value('filetype', 'cp', { buf = diff_buf })
local label = M.DIFF_MODES.git
vim.api.nvim_set_option_value('winbar', ('diff: %s'):format(label), { win = diff_win })
vim.api.nvim_set_option_value('winbar', 'Expected vs Actual', { win = diff_win })
local diff_backend = require('cp.ui.diff')
local backend = diff_backend.get_best_backend('git')
@ -140,7 +121,6 @@ local function create_git_diff_layout(parent_win, expected_content, actual_conte
return {
buffers = { diff_buf },
windows = { diff_win },
mode = 'git',
cleanup = function()
pcall(vim.api.nvim_win_close, diff_win, true)
pcall(vim.api.nvim_buf_delete, diff_buf, { force = true })
@ -163,7 +143,6 @@ local function create_single_layout(parent_win, content)
return {
buffers = { buf },
windows = { win },
mode = 'single',
cleanup = function()
pcall(vim.api.nvim_win_close, win, true)
pcall(vim.api.nvim_buf_delete, buf, { force = true })
@ -174,14 +153,12 @@ end
function M.create_diff_layout(mode, parent_win, expected_content, actual_content)
if mode == 'single' then
return create_single_layout(parent_win, actual_content)
elseif mode == 'side-by-side' then
return create_side_by_side_layout(parent_win, expected_content, actual_content)
elseif mode == 'none' then
return create_none_diff_layout(parent_win, expected_content, actual_content)
elseif mode == 'git' then
return create_git_diff_layout(parent_win, expected_content, actual_content)
elseif mode == 'vim' then
return create_vim_diff_layout(parent_win, expected_content, actual_content)
else
return create_side_by_side_layout(parent_win, expected_content, actual_content)
return create_vim_diff_layout(parent_win, expected_content, actual_content)
end
end
@ -214,13 +191,12 @@ function M.update_diff_panes(
actual_content = actual_content
end
local default_mode = config.ui.panel.diff_modes[1]
local desired_mode = is_compilation_failure and 'single' or (current_mode or default_mode)
local desired_mode = is_compilation_failure and 'single' or config.ui.panel.diff_mode
local highlight = require('cp.ui.highlight')
local diff_namespace = highlight.create_namespace()
local ansi_namespace = vim.api.nvim_create_namespace('cp_ansi_highlights')
if current_diff_layout and current_diff_layout.mode ~= desired_mode then
if current_diff_layout and current_mode ~= desired_mode then
local saved_pos = vim.api.nvim_win_get_cursor(0)
current_diff_layout.cleanup()
current_diff_layout = nil
@ -275,7 +251,7 @@ function M.update_diff_panes(
ansi_namespace
)
end
elseif desired_mode == 'side-by-side' then
elseif desired_mode == 'none' then
local expected_lines = vim.split(expected_content, '\n', { plain = true, trimempty = true })
local actual_lines = vim.split(actual_content, '\n', { plain = true })
utils.update_buffer_content(current_diff_layout.buffers[1], expected_lines, {})

View file

@ -13,7 +13,6 @@ local utils = require('cp.utils')
local current_diff_layout = nil
local current_mode = nil
local io_view_running = false
function M.disable()
local active_panel = state.get_active_panel()
@ -391,8 +390,6 @@ function M.ensure_io_view()
return
end
require('cp.utils').ensure_dirs()
local source_file = state.get_source_file()
if source_file then
local source_file_abs = vim.fn.fnamemodify(source_file, ':p')
@ -625,12 +622,6 @@ local function render_io_view_results(io_state, test_indices, mode, combined_res
end
function M.run_io_view(test_indices_arg, debug, mode)
if io_view_running then
logger.log('Tests already running', vim.log.levels.WARN)
return
end
io_view_running = true
logger.log(('%s tests...'):format(debug and 'Debugging' or 'Running'), vim.log.levels.INFO, true)
mode = mode or 'combined'
@ -642,7 +633,6 @@ function M.run_io_view(test_indices_arg, debug, mode)
'No platform/contest/problem configured. Use :CP <platform> <contest> [...] first.',
vim.log.levels.ERROR
)
io_view_running = false
return
end
@ -650,7 +640,6 @@ function M.run_io_view(test_indices_arg, debug, mode)
local contest_data = cache.get_contest_data(platform, contest_id)
if not contest_data or not contest_data.index_map then
logger.log('No test cases available.', vim.log.levels.ERROR)
io_view_running = false
return
end
@ -667,13 +656,11 @@ function M.run_io_view(test_indices_arg, debug, mode)
local combined = cache.get_combined_test(platform, contest_id, problem_id)
if not combined then
logger.log('No combined test available', vim.log.levels.ERROR)
io_view_running = false
return
end
else
if not run.load_test_cases() then
logger.log('No test cases available', vim.log.levels.ERROR)
io_view_running = false
return
end
end
@ -694,7 +681,6 @@ function M.run_io_view(test_indices_arg, debug, mode)
),
vim.log.levels.WARN
)
io_view_running = false
return
end
end
@ -712,7 +698,6 @@ function M.run_io_view(test_indices_arg, debug, mode)
local io_state = state.get_io_view_state()
if not io_state then
io_view_running = false
return
end
@ -726,7 +711,6 @@ function M.run_io_view(test_indices_arg, debug, mode)
execute.compile_problem(debug, function(compile_result)
if not vim.api.nvim_buf_is_valid(io_state.output_buf) then
io_view_running = false
return
end
@ -746,7 +730,6 @@ function M.run_io_view(test_indices_arg, debug, mode)
local ns = vim.api.nvim_create_namespace('cp_io_view_compile_error')
utils.update_buffer_content(io_state.output_buf, lines, highlights, ns)
io_view_running = false
return
end
@ -754,7 +737,6 @@ function M.run_io_view(test_indices_arg, debug, mode)
local combined = cache.get_combined_test(platform, contest_id, problem_id)
if not combined then
logger.log('No combined test found', vim.log.levels.ERROR)
io_view_running = false
return
end
@ -763,21 +745,18 @@ function M.run_io_view(test_indices_arg, debug, mode)
run.run_combined_test(debug, function(result)
if not result then
logger.log('Failed to run combined test', vim.log.levels.ERROR)
io_view_running = false
return
end
if vim.api.nvim_buf_is_valid(io_state.output_buf) then
render_io_view_results(io_state, test_indices, mode, result, combined.input)
end
io_view_running = false
end)
else
run.run_all_test_cases(test_indices, debug, nil, function()
if vim.api.nvim_buf_is_valid(io_state.output_buf) then
render_io_view_results(io_state, test_indices, mode, nil, nil)
end
io_view_running = false
end)
end
end)
@ -880,9 +859,6 @@ function M.toggle_panel(panel_opts)
end
local function refresh_panel()
if state.get_active_panel() ~= 'run' then
return
end
if not test_buffers.tab_buf or not vim.api.nvim_buf_is_valid(test_buffers.tab_buf) then
return
end
@ -908,10 +884,6 @@ function M.toggle_panel(panel_opts)
vim.cmd.normal({ 'zz', bang = true })
end)
end
if test_windows.tab_win and vim.api.nvim_win_is_valid(test_windows.tab_win) then
vim.api.nvim_set_current_win(test_windows.tab_win)
end
end
local function navigate_test_case(delta)
@ -928,15 +900,15 @@ function M.toggle_panel(panel_opts)
M.toggle_panel()
end, { buffer = buf, silent = true })
vim.keymap.set('n', 't', function()
local modes = config.ui.panel.diff_modes
local modes = { 'none', 'git', 'vim' }
local current_idx = 1
for i, mode in ipairs(modes) do
if current_mode == mode then
if config.ui.panel.diff_mode == mode then
current_idx = i
break
end
end
current_mode = modes[(current_idx % #modes) + 1]
config.ui.panel.diff_mode = modes[(current_idx % #modes) + 1]
refresh_panel()
end, { buffer = buf, silent = true })
vim.keymap.set('n', '<c-n>', function()
@ -970,9 +942,6 @@ function M.toggle_panel(panel_opts)
local function finalize_panel()
vim.schedule(function()
if state.get_active_panel() ~= 'run' then
return
end
if config.ui.ansi then
require('cp.ui.ansi').setup_highlight_groups()
end

View file

@ -262,8 +262,4 @@ function M.cwd_executables()
return out
end
function M.ensure_dirs()
vim.system({ 'mkdir', '-p', 'build', 'io' }):wait()
end
return M

0
new
View file

View file

@ -266,31 +266,43 @@ class AtcoderScraper(BaseScraper):
return "atcoder"
async def scrape_contest_metadata(self, contest_id: str) -> MetadataResult:
try:
rows = await asyncio.to_thread(_scrape_tasks_sync, contest_id)
async def impl(cid: str) -> MetadataResult:
try:
rows = await asyncio.to_thread(_scrape_tasks_sync, cid)
except requests.HTTPError as e:
if e.response is not None and e.response.status_code == 404:
return self._create_metadata_error(
f"No problems found for contest {cid}", cid
)
raise
problems = _to_problem_summaries(rows)
if not problems:
return self._metadata_error(
f"No problems found for contest {contest_id}"
return self._create_metadata_error(
f"No problems found for contest {cid}", cid
)
return MetadataResult(
success=True,
error="",
contest_id=contest_id,
contest_id=cid,
problems=problems,
url=f"https://atcoder.jp/contests/{contest_id}/tasks/{contest_id}_%s",
)
except Exception as e:
return self._metadata_error(str(e))
return await self._safe_execute("metadata", impl, contest_id)
async def scrape_contest_list(self) -> ContestListResult:
try:
contests = await _fetch_all_contests_async()
async def impl() -> ContestListResult:
try:
contests = await _fetch_all_contests_async()
except Exception as e:
return self._create_contests_error(str(e))
if not contests:
return self._contests_error("No contests found")
return self._create_contests_error("No contests found")
return ContestListResult(success=True, error="", contests=contests)
except Exception as e:
return self._contests_error(str(e))
return await self._safe_execute("contests", impl)
async def stream_tests_for_category_async(self, category_id: str) -> None:
rows = await asyncio.to_thread(_scrape_tasks_sync, category_id)

View file

@ -1,8 +1,9 @@
import asyncio
import sys
from abc import ABC, abstractmethod
from typing import Any, Awaitable, Callable, ParamSpec, cast
from .models import CombinedTest, ContestListResult, MetadataResult, TestsResult
from .models import ContestListResult, MetadataResult, TestsResult
P = ParamSpec("P")
class BaseScraper(ABC):
@ -19,65 +20,57 @@ class BaseScraper(ABC):
@abstractmethod
async def stream_tests_for_category_async(self, category_id: str) -> None: ...
def _usage(self) -> str:
name = self.platform_name
return f"Usage: {name}.py metadata <id> | tests <id> | contests"
def _create_metadata_error(
self, error_msg: str, contest_id: str = ""
) -> MetadataResult:
return MetadataResult(
success=False,
error=f"{self.platform_name}: {error_msg}",
contest_id=contest_id,
problems=[],
url="",
)
def _metadata_error(self, msg: str) -> MetadataResult:
return MetadataResult(success=False, error=msg, url="")
def _create_tests_error(
self, error_msg: str, problem_id: str = "", url: str = ""
) -> TestsResult:
from .models import CombinedTest
def _tests_error(self, msg: str) -> TestsResult:
return TestsResult(
success=False,
error=msg,
problem_id="",
error=f"{self.platform_name}: {error_msg}",
problem_id=problem_id,
combined=CombinedTest(input="", expected=""),
tests=[],
timeout_ms=0,
memory_mb=0,
interactive=False,
)
def _contests_error(self, msg: str) -> ContestListResult:
return ContestListResult(success=False, error=msg)
def _create_contests_error(self, error_msg: str) -> ContestListResult:
return ContestListResult(
success=False,
error=f"{self.platform_name}: {error_msg}",
contests=[],
)
async def _run_cli_async(self, args: list[str]) -> int:
if len(args) < 2:
print(self._metadata_error(self._usage()).model_dump_json())
return 1
mode = args[1]
match mode:
case "metadata":
if len(args) != 3:
print(self._metadata_error(self._usage()).model_dump_json())
return 1
result = await self.scrape_contest_metadata(args[2])
print(result.model_dump_json())
return 0 if result.success else 1
case "tests":
if len(args) != 3:
print(self._tests_error(self._usage()).model_dump_json())
return 1
await self.stream_tests_for_category_async(args[2])
return 0
case "contests":
if len(args) != 2:
print(self._contests_error(self._usage()).model_dump_json())
return 1
result = await self.scrape_contest_list()
print(result.model_dump_json())
return 0 if result.success else 1
case _:
print(
self._metadata_error(
f"Unknown mode: {mode}. {self._usage()}"
).model_dump_json()
)
return 1
def run_cli(self) -> None:
sys.exit(asyncio.run(self._run_cli_async(sys.argv)))
async def _safe_execute(
self,
operation: str,
func: Callable[P, Awaitable[Any]],
*args: P.args,
**kwargs: P.kwargs,
):
try:
return await func(*args, **kwargs)
except Exception as e:
if operation == "metadata":
contest_id = cast(str, args[0]) if args else ""
return self._create_metadata_error(str(e), contest_id)
elif operation == "tests":
problem_id = cast(str, args[1]) if len(args) > 1 else ""
return self._create_tests_error(str(e), problem_id)
elif operation == "contests":
return self._create_contests_error(str(e))
else:
raise

View file

@ -1,8 +1,8 @@
#!/usr/bin/env python3
import asyncio
import json
import re
import sys
from typing import Any
import httpx
@ -10,11 +10,13 @@ from scrapling.fetchers import Fetcher
from .base import BaseScraper
from .models import (
CombinedTest,
ContestListResult,
ContestSummary,
MetadataResult,
ProblemSummary,
TestCase,
TestsResult,
)
BASE_URL = "https://www.codechef.com"
@ -60,40 +62,42 @@ class CodeChefScraper(BaseScraper):
return "codechef"
async def scrape_contest_metadata(self, contest_id: str) -> MetadataResult:
try:
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient() as client:
try:
data = await fetch_json(
client, API_CONTEST.format(contest_id=contest_id)
)
if not data.get("problems"):
return self._metadata_error(
f"No problems found for contest {contest_id}"
except httpx.HTTPStatusError as e:
return self._create_metadata_error(
f"Failed to fetch contest {contest_id}: {e}", contest_id
)
problems = []
for problem_code, problem_data in data["problems"].items():
if problem_data.get("category_name") == "main":
problems.append(
ProblemSummary(
id=problem_code,
name=problem_data.get("name", problem_code),
)
)
return MetadataResult(
success=True,
error="",
contest_id=contest_id,
problems=problems,
url=f"{BASE_URL}/{contest_id}",
if not data.get("problems"):
return self._create_metadata_error(
f"No problems found for contest {contest_id}", contest_id
)
except Exception as e:
return self._metadata_error(f"Failed to fetch contest {contest_id}: {e}")
problems = []
for problem_code, problem_data in data["problems"].items():
if problem_data.get("category_name") == "main":
problems.append(
ProblemSummary(
id=problem_code,
name=problem_data.get("name", problem_code),
)
)
return MetadataResult(
success=True,
error="",
contest_id=contest_id,
problems=problems,
url=f"{BASE_URL}/{contest_id}",
)
async def scrape_contest_list(self) -> ContestListResult:
async with httpx.AsyncClient() as client:
try:
data = await fetch_json(client, API_CONTESTS_ALL)
except httpx.HTTPStatusError as e:
return self._contests_error(f"Failed to fetch contests: {e}")
return self._create_contests_error(f"Failed to fetch contests: {e}")
all_contests = data.get("future_contests", []) + data.get(
"past_contests", []
)
@ -106,7 +110,7 @@ class CodeChefScraper(BaseScraper):
num = int(match.group(1))
max_num = max(max_num, num)
if max_num == 0:
return self._contests_error("No Starters contests found")
return self._create_contests_error("No Starters contests found")
contests = []
sem = asyncio.Semaphore(CONNECTIONS)
@ -248,5 +252,68 @@ class CodeChefScraper(BaseScraper):
print(json.dumps(payload), flush=True)
async def main_async() -> int:
if len(sys.argv) < 2:
result = MetadataResult(
success=False,
error="Usage: codechef.py metadata <contest_id> OR codechef.py tests <contest_id> OR codechef.py contests",
url="",
)
print(result.model_dump_json())
return 1
mode: str = sys.argv[1]
scraper = CodeChefScraper()
if mode == "metadata":
if len(sys.argv) != 3:
result = MetadataResult(
success=False,
error="Usage: codechef.py metadata <contest_id>",
url="",
)
print(result.model_dump_json())
return 1
contest_id = sys.argv[2]
result = await scraper.scrape_contest_metadata(contest_id)
print(result.model_dump_json())
return 0 if result.success else 1
if mode == "tests":
if len(sys.argv) != 3:
tests_result = TestsResult(
success=False,
error="Usage: codechef.py tests <contest_id>",
problem_id="",
combined=CombinedTest(input="", expected=""),
tests=[],
timeout_ms=0,
memory_mb=0,
)
print(tests_result.model_dump_json())
return 1
contest_id = sys.argv[2]
await scraper.stream_tests_for_category_async(contest_id)
return 0
if mode == "contests":
if len(sys.argv) != 2:
contest_result = ContestListResult(
success=False, error="Usage: codechef.py contests"
)
print(contest_result.model_dump_json())
return 1
contest_result = await scraper.scrape_contest_list()
print(contest_result.model_dump_json())
return 0 if contest_result.success else 1
result = MetadataResult(
success=False,
error=f"Unknown mode: {mode}. Use 'metadata <contest_id>', 'tests <contest_id>', or 'contests'",
url="",
)
print(result.model_dump_json())
return 1
def main() -> None:
    """Synchronous entry point: run the async CLI and exit with its code."""
    exit_code = asyncio.run(main_async())
    sys.exit(exit_code)
if __name__ == "__main__":
CodeChefScraper().run_cli()
main()

View file

@ -4,6 +4,7 @@ import asyncio
import json
import logging
import re
import sys
from typing import Any
import requests
@ -12,11 +13,13 @@ from scrapling.fetchers import Fetcher
from .base import BaseScraper
from .models import (
CombinedTest,
ContestListResult,
ContestSummary,
MetadataResult,
ProblemSummary,
TestCase,
TestsResult,
)
# suppress scrapling logging - https://github.com/D4Vinci/Scrapling/issues/31)
@ -86,14 +89,14 @@ def _extract_samples(block: Tag) -> tuple[list[TestCase], bool]:
if not st:
return [], False
input_pres: list[Tag] = [
inp.find("pre")
for inp in st.find_all("div", class_="input")
input_pres: list[Tag] = [ # type: ignore[misc]
inp.find("pre") # type: ignore[misc]
for inp in st.find_all("div", class_="input") # type: ignore[union-attr]
if isinstance(inp, Tag) and inp.find("pre")
]
output_pres: list[Tag] = [
out.find("pre")
for out in st.find_all("div", class_="output")
out.find("pre") # type: ignore[misc]
for out in st.find_all("div", class_="output") # type: ignore[union-attr]
if isinstance(out, Tag) and out.find("pre")
]
input_pres = [p for p in input_pres if isinstance(p, Tag)]
@ -206,46 +209,49 @@ class CodeforcesScraper(BaseScraper):
return "codeforces"
async def scrape_contest_metadata(self, contest_id: str) -> MetadataResult:
try:
problems = await asyncio.to_thread(
_scrape_contest_problems_sync, contest_id
)
async def impl(cid: str) -> MetadataResult:
problems = await asyncio.to_thread(_scrape_contest_problems_sync, cid)
if not problems:
return self._metadata_error(
f"No problems found for contest {contest_id}"
return self._create_metadata_error(
f"No problems found for contest {cid}", cid
)
return MetadataResult(
success=True,
error="",
contest_id=contest_id,
contest_id=cid,
problems=problems,
url=f"https://codeforces.com/contest/{contest_id}/problem/%s",
)
except Exception as e:
return self._metadata_error(str(e))
return await self._safe_execute("metadata", impl, contest_id)
async def scrape_contest_list(self) -> ContestListResult:
try:
r = requests.get(API_CONTEST_LIST_URL, timeout=TIMEOUT_SECONDS)
r.raise_for_status()
data = r.json()
if data.get("status") != "OK":
return self._contests_error("Invalid API response")
async def impl() -> ContestListResult:
try:
r = requests.get(API_CONTEST_LIST_URL, timeout=TIMEOUT_SECONDS)
r.raise_for_status()
data = r.json()
if data.get("status") != "OK":
return self._create_contests_error("Invalid API response")
contests: list[ContestSummary] = []
for c in data["result"]:
if c.get("phase") != "FINISHED":
continue
cid = str(c["id"])
name = c["name"]
contests.append(ContestSummary(id=cid, name=name, display_name=name))
contests: list[ContestSummary] = []
for c in data["result"]:
if c.get("phase") != "FINISHED":
continue
cid = str(c["id"])
name = c["name"]
contests.append(
ContestSummary(id=cid, name=name, display_name=name)
)
if not contests:
return self._contests_error("No contests found")
if not contests:
return self._create_contests_error("No contests found")
return ContestListResult(success=True, error="", contests=contests)
except Exception as e:
return self._contests_error(str(e))
return ContestListResult(success=True, error="", contests=contests)
except Exception as e:
return self._create_contests_error(str(e))
return await self._safe_execute("contests", impl)
async def stream_tests_for_category_async(self, category_id: str) -> None:
html = await asyncio.to_thread(_fetch_problems_html, category_id)
@ -275,5 +281,73 @@ class CodeforcesScraper(BaseScraper):
)
async def main_async() -> int:
if len(sys.argv) < 2:
result = MetadataResult(
success=False,
error="Usage: codeforces.py metadata <contest_id> OR codeforces.py tests <contest_id> OR codeforces.py contests",
url="",
)
print(result.model_dump_json())
return 1
mode: str = sys.argv[1]
scraper = CodeforcesScraper()
if mode == "metadata":
if len(sys.argv) != 3:
result = MetadataResult(
success=False,
error="Usage: codeforces.py metadata <contest_id>",
url="",
)
print(result.model_dump_json())
return 1
contest_id = sys.argv[2]
result = await scraper.scrape_contest_metadata(contest_id)
print(result.model_dump_json())
return 0 if result.success else 1
if mode == "tests":
if len(sys.argv) != 3:
tests_result = TestsResult(
success=False,
error="Usage: codeforces.py tests <contest_id>",
problem_id="",
combined=CombinedTest(input="", expected=""),
tests=[],
timeout_ms=0,
memory_mb=0,
)
print(tests_result.model_dump_json())
return 1
contest_id = sys.argv[2]
await scraper.stream_tests_for_category_async(contest_id)
return 0
if mode == "contests":
if len(sys.argv) != 2:
contest_result = ContestListResult(
success=False, error="Usage: codeforces.py contests"
)
print(contest_result.model_dump_json())
return 1
contest_result = await scraper.scrape_contest_list()
print(contest_result.model_dump_json())
return 0 if contest_result.success else 1
result = MetadataResult(
success=False,
error="Unknown mode. Use 'metadata <contest_id>', 'tests <contest_id>', or 'contests'",
url="",
)
print(result.model_dump_json())
return 1
def main() -> None:
    """Synchronous entry point: run the async CLI and exit with its code."""
    exit_code = asyncio.run(main_async())
    sys.exit(exit_code)
if __name__ == "__main__":
CodeforcesScraper().run_cli()
main()

View file

@ -3,17 +3,20 @@
import asyncio
import json
import re
import sys
from typing import Any
import httpx
from .base import BaseScraper
from .models import (
CombinedTest,
ContestListResult,
ContestSummary,
MetadataResult,
ProblemSummary,
TestCase,
TestsResult,
)
BASE_URL = "https://cses.fi"
@ -258,5 +261,73 @@ class CSESScraper(BaseScraper):
print(json.dumps(payload), flush=True)
async def main_async() -> int:
if len(sys.argv) < 2:
result = MetadataResult(
success=False,
error="Usage: cses.py metadata <category_id> OR cses.py tests <category> OR cses.py contests",
url="",
)
print(result.model_dump_json())
return 1
mode: str = sys.argv[1]
scraper = CSESScraper()
if mode == "metadata":
if len(sys.argv) != 3:
result = MetadataResult(
success=False,
error="Usage: cses.py metadata <category_id>",
url="",
)
print(result.model_dump_json())
return 1
category_id = sys.argv[2]
result = await scraper.scrape_contest_metadata(category_id)
print(result.model_dump_json())
return 0 if result.success else 1
if mode == "tests":
if len(sys.argv) != 3:
tests_result = TestsResult(
success=False,
error="Usage: cses.py tests <category>",
problem_id="",
combined=CombinedTest(input="", expected=""),
tests=[],
timeout_ms=0,
memory_mb=0,
)
print(tests_result.model_dump_json())
return 1
category = sys.argv[2]
await scraper.stream_tests_for_category_async(category)
return 0
if mode == "contests":
if len(sys.argv) != 2:
contest_result = ContestListResult(
success=False, error="Usage: cses.py contests"
)
print(contest_result.model_dump_json())
return 1
contest_result = await scraper.scrape_contest_list()
print(contest_result.model_dump_json())
return 0 if contest_result.success else 1
result = MetadataResult(
success=False,
error=f"Unknown mode: {mode}. Use 'metadata <category>', 'tests <category>', or 'contests'",
url="",
)
print(result.model_dump_json())
return 1
def main() -> None:
    """Synchronous entry point: run the async CLI and exit with its code."""
    exit_code = asyncio.run(main_async())
    sys.exit(exit_code)
if __name__ == "__main__":
CSESScraper().run_cli()
main()

View file

@ -232,35 +232,33 @@ def run_scraper_offline(fixture_text):
case _:
raise AssertionError(f"Unknown scraper: {scraper_name}")
scraper_classes = {
"cses": "CSESScraper",
"atcoder": "AtcoderScraper",
"codeforces": "CodeforcesScraper",
"codechef": "CodeChefScraper",
}
def _run(scraper_name: str, mode: str, *args: str):
mod_path = ROOT / "scrapers" / f"{scraper_name}.py"
ns = _load_scraper_module(mod_path, scraper_name)
offline_fetches = _make_offline_fetches(scraper_name)
if scraper_name == "codeforces":
fetchers.Fetcher.get = offline_fetches["Fetcher.get"]
fetchers.Fetcher.get = offline_fetches["Fetcher.get"] # type: ignore[assignment]
requests.get = offline_fetches["requests.get"]
elif scraper_name == "atcoder":
ns._fetch = offline_fetches["_fetch"]
ns._get_async = offline_fetches["_get_async"]
elif scraper_name == "cses":
httpx.AsyncClient.get = offline_fetches["__offline_fetch_text"]
httpx.AsyncClient.get = offline_fetches["__offline_fetch_text"] # type: ignore[assignment]
elif scraper_name == "codechef":
httpx.AsyncClient.get = offline_fetches["__offline_get_async"]
fetchers.Fetcher.get = offline_fetches["Fetcher.get"]
httpx.AsyncClient.get = offline_fetches["__offline_get_async"] # type: ignore[assignment]
fetchers.Fetcher.get = offline_fetches["Fetcher.get"] # type: ignore[assignment]
scraper_class = getattr(ns, scraper_classes[scraper_name])
scraper = scraper_class()
main_async = getattr(ns, "main_async")
assert callable(main_async), f"main_async not found in {scraper_name}"
argv = [str(mod_path), mode, *args]
rc, out = _capture_stdout(scraper._run_cli_async(argv))
old_argv = sys.argv
sys.argv = argv
try:
rc, out = _capture_stdout(main_async())
finally:
sys.argv = old_argv
json_lines: list[Any] = []
for line in (_line for _line in out.splitlines() if _line.strip()):