Merge main into feat/io/view-togggle

Resolved conflicts:
- scrapers/atcoder.py: kept defensive if tests else '' checks
- scrapers/codechef.py: kept defensive if tests else '' checks
- tests/test_scrapers.py: kept comprehensive validation from main
- lua/cp/ui/views.lua: removed misplaced navigation code from loop
This commit is contained in:
Barrett Ruth 2025-11-05 23:01:04 -05:00
commit 0e778a128e
7 changed files with 197 additions and 62 deletions

View file

@@ -2,7 +2,7 @@ minimum_pre_commit_version: '3.5.0'
repos:
- repo: https://github.com/JohnnyMorganz/StyLua
rev: v2.1.0
rev: v2.3.1
hooks:
- id: stylua-github
name: stylua (Lua formatter)
@@ -10,7 +10,7 @@ repos:
pass_filenames: true
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.6.9
rev: v0.14.3
hooks:
- id: ruff-format
name: ruff (format)
@@ -30,7 +30,7 @@ repos:
pass_filenames: false
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.1.0
rev: v4.0.0-alpha.8
hooks:
- id: prettier
name: prettier (format markdown)

View file

@@ -34,15 +34,30 @@ COMMANDS *cp-commands*
:CP codeforces 1933 --lang python
<
View Commands ~
:CP run [--debug] [n]
:CP run [all|n|n,m,...] [--debug]
Run tests in I/O view (see |cp-io-view|).
Lightweight split showing test verdicts.
Without [n]: runs all tests, shows verdict summary
With [n]: runs test n, shows detailed output
Execution modes:
• :CP run Combined: single execution with all tests
(auto-switches to individual when multiple samples)
• :CP run all Individual: N separate executions
• :CP run n Individual: run test n only
• :CP run n,m,... Individual: run specific tests (e.g. nth and mth)
--debug: Use debug build (builds to build/<name>.dbg)
Combined mode runs all test inputs in one execution (matching
platform behavior for multi-test problems). When a problem has
multiple independent sample test cases, :CP run auto-switches to
individual mode to run each sample separately.
Examples: >
:CP run " All tests
:CP run --debug 2 " Test 2, debug build
:CP run " Combined: all tests, one execution
:CP run all " Individual: all tests, N executions
:CP run 2 " Individual: test 2 only
:CP run 1,3,5 " Individual: tests 1, 3, and 5
:CP run all --debug " Individual with debug build
<
:CP panel [--debug] [n]
Open full-screen test panel (see |cp-panel|).
@@ -536,10 +551,27 @@ Example: Setting up and solving AtCoder contest ABC324
I/O VIEW *cp-io-view*
The I/O view provides lightweight test feedback in persistent side splits.
All test outputs are concatenated with verdict summaries at the bottom.
Test outputs are concatenated with verdict summaries at the bottom.
The |cp-panel| offers more fine-grained analysis with diff modes.
Access the I/O view with :CP run [n]
Execution Modes ~
The I/O view supports two execution modes:
Combined Mode (:CP run with single sample)
• Single execution with all test inputs concatenated
• Matches platform behavior (e.g. Codeforces multi-test format)
• Shows one verdict for the entire execution
• Input split: All test inputs concatenated
• Output split: Single program output + verdict
• Used when problem has one sample containing multiple test cases
Individual Mode (:CP run all / :CP run n / :CP run n,m,...)
• Separate execution for each test case
• Per-test verdicts for debugging
• Input split: Selected test inputs concatenated
• Output split: All test outputs concatenated + per-test verdicts
• Auto-selected when problem has multiple independent samples
Layout ~
@@ -552,7 +584,7 @@ The I/O view appears as 30% width splits on the right side: >
│ │ 7 714 │
│ Solution Code │ │
│ │ Test 1: WA | 212.07/2000 ms | 1/512 MB |...│
│ │ Test 2: WA | 81.94/2000 ms | 1/512 MB |...│
│ │ Test 2: WA | 81.94/2000 ms | 1/512 MB |...│
│ ├─────────────────────────────────────────────┤
│ │ Input (Bottom Split) │
│ │ 1 2 3 │
@@ -561,7 +593,7 @@ The I/O view appears as 30% width splits on the right side: >
└──────────────────────────┴─────────────────────────────────────────────┘
<
The output split shows:
1. Concatenated test outputs (separated by blank lines)
1. Program output (raw, preserving all formatting)
2. Space-aligned verdict summary with:
- Test number and status (AC/WA/TLE/MLE/RTE with color highlighting)
- Runtime: actual/limit in milliseconds
@@ -570,8 +602,10 @@ The output split shows:
Usage ~
:CP run Run all tests
:CP run 3 Run test 3 only
:CP run Combined mode: all tests in one execution
:CP run all Individual mode: all tests separately
:CP run 3 Individual mode: test 3 only
:CP run 1,3,5 Individual mode: specific tests (1, 3, and 5)
Navigation ~

View file

@@ -17,8 +17,11 @@ local actions = constants.ACTIONS
---@field problem_id? string
---@field interactor_cmd? string
---@field test_index? integer
---@field test_indices? integer[]
---@field mode? string
---@field debug? boolean
---@field language? string
---@field subcommand? string
--- Turn raw args into normalized structure to later dispatch
---@param args string[] The raw command-line mode args
@@ -75,7 +78,7 @@ local function parse_command(args)
return { type = 'action', action = 'edit', test_index = test_index }
elseif first == 'run' or first == 'panel' then
local debug = false
local test_index = nil
local test_indices = nil
local mode = 'combined'
if #args == 2 then
@@ -84,20 +87,39 @@ local function parse_command(args)
elseif args[2] == 'all' then
mode = 'individual'
else
local idx = tonumber(args[2])
if not idx then
return {
type = 'error',
message = ("Invalid argument '%s': expected test number, 'all', or --debug"):format(
args[2]
),
}
if args[2]:find(',') then
local indices = {}
for num in args[2]:gmatch('[^,]+') do
local idx = tonumber(num)
if not idx or idx < 1 or idx ~= math.floor(idx) then
return {
type = 'error',
message = ("Invalid test index '%s' in list"):format(num),
}
end
table.insert(indices, idx)
end
if #indices == 0 then
return { type = 'error', message = 'No valid test indices provided' }
end
test_indices = indices
mode = 'individual'
else
local idx = tonumber(args[2])
if not idx then
return {
type = 'error',
message = ("Invalid argument '%s': expected test number(s), 'all', or --debug"):format(
args[2]
),
}
end
if idx < 1 or idx ~= math.floor(idx) then
return { type = 'error', message = ("'%s' is not a valid test index"):format(idx) }
end
test_indices = { idx }
mode = 'individual'
end
if idx < 1 or idx ~= math.floor(idx) then
return { type = 'error', message = ("'%s' is not a valid test index"):format(idx) }
end
test_index = idx
mode = 'individual'
end
elseif #args == 3 then
if args[2] == 'all' then
@@ -109,6 +131,30 @@ local function parse_command(args)
}
end
debug = true
elseif args[2]:find(',') then
local indices = {}
for num in args[2]:gmatch('[^,]+') do
local idx = tonumber(num)
if not idx or idx < 1 or idx ~= math.floor(idx) then
return {
type = 'error',
message = ("Invalid test index '%s' in list"):format(num),
}
end
table.insert(indices, idx)
end
if #indices == 0 then
return { type = 'error', message = 'No valid test indices provided' }
end
if args[3] ~= '--debug' then
return {
type = 'error',
message = ("Invalid argument '%s': expected --debug"):format(args[3]),
}
end
test_indices = indices
mode = 'individual'
debug = true
else
local idx = tonumber(args[2])
if not idx then
@@ -126,21 +172,23 @@ local function parse_command(args)
message = ("Invalid argument '%s': expected --debug"):format(args[3]),
}
end
test_index = idx
test_indices = { idx }
mode = 'individual'
debug = true
end
elseif #args > 3 then
return {
type = 'error',
message = 'Too many arguments. Usage: :CP ' .. first .. ' [all|test_num] [--debug]',
message = 'Too many arguments. Usage: :CP '
.. first
.. ' [all|test_num[,test_num...]] [--debug]',
}
end
return {
type = 'action',
action = first,
test_index = test_index,
test_indices = test_indices,
debug = debug,
mode = mode,
}
@@ -221,9 +269,12 @@ function M.handle_command(opts)
if cmd.action == 'interact' then
ui.toggle_interactive(cmd.interactor_cmd)
elseif cmd.action == 'run' then
ui.run_io_view(cmd.test_index, cmd.debug, cmd.mode)
ui.run_io_view(cmd.test_indices, cmd.debug, cmd.mode)
elseif cmd.action == 'panel' then
ui.toggle_panel({ debug = cmd.debug, test_index = cmd.test_index })
ui.toggle_panel({
debug = cmd.debug,
test_index = cmd.test_indices and cmd.test_indices[1] or nil,
})
elseif cmd.action == 'next' then
setup.navigate_problem(1, cmd.language)
elseif cmd.action == 'prev' then

View file

@@ -274,10 +274,25 @@ local function save_all_tests()
local is_multi_test = contest_data.problems[contest_data.index_map[problem_id]].multi_test
or false
-- Generate combined test from individual test cases
local combined_input = table.concat(
vim.tbl_map(function(tc)
return tc.input
end, edit_state.test_cases),
'\n'
)
local combined_expected = table.concat(
vim.tbl_map(function(tc)
return tc.expected
end, edit_state.test_cases),
'\n'
)
cache.set_test_cases(
platform,
contest_id,
problem_id,
{ input = combined_input, expected = combined_expected },
edit_state.test_cases,
edit_state.constraints and edit_state.constraints.timeout_ms or 0,
edit_state.constraints and edit_state.constraints.memory_mb or 0,

View file

@@ -405,7 +405,7 @@ function M.ensure_io_view()
end
end
function M.run_io_view(test_index, debug, mode)
function M.run_io_view(test_indices_arg, debug, mode)
mode = mode or 'combined'
local platform, contest_id, problem_id =
@@ -425,6 +425,13 @@ function M.run_io_view(test_index, debug, mode)
return
end
if mode == 'combined' then
local test_cases = cache.get_test_cases(platform, contest_id, problem_id)
if test_cases and #test_cases > 1 then
mode = 'individual'
end
end
M.ensure_io_view()
local run = require('cp.runner.run')
@@ -447,19 +454,21 @@ function M.run_io_view(test_index, debug, mode)
if mode == 'individual' then
local test_state = run.get_panel_state()
if test_index then
if test_index < 1 or test_index > #test_state.test_cases then
logger.log(
string.format(
'Test %d does not exist (only %d tests available)',
test_index,
#test_state.test_cases
),
vim.log.levels.WARN
)
return
if test_indices_arg then
for _, idx in ipairs(test_indices_arg) do
if idx < 1 or idx > #test_state.test_cases then
logger.log(
string.format(
'Test %d does not exist (only %d tests available)',
idx,
#test_state.test_cases
),
vim.log.levels.WARN
)
return
end
end
test_indices = { test_index }
test_indices = test_indices_arg
else
for i = 1, #test_state.test_cases do
test_indices[i] = i
@@ -512,6 +521,11 @@ function M.run_io_view(test_index, debug, mode)
if mode == 'combined' then
local combined = cache.get_combined_test(platform, contest_id, problem_id)
if not combined then
logger.log('No combined test found', vim.log.levels.ERROR)
return
end
run.load_test_cases()
local result = run.run_combined_test(debug)

View file

@@ -71,7 +71,7 @@ def _retry_after_requests(details):
on_backoff=_retry_after_requests,
)
def _fetch(url: str) -> str:
r = _session.get(url, headers=HEADERS, timeout=TIMEOUT_SECONDS)
r = _session.get(url, headers=HEADERS, timeout=TIMEOUT_SECONDS, verify=False)
if r.status_code in RETRY_STATUS:
raise requests.HTTPError(response=r)
r.raise_for_status()
@@ -243,7 +243,8 @@ def _to_problem_summaries(rows: list[dict[str, str]]) -> list[ProblemSummary]:
async def _fetch_all_contests_async() -> list[ContestSummary]:
async with httpx.AsyncClient(
limits=httpx.Limits(max_connections=100, max_keepalive_connections=100)
limits=httpx.Limits(max_connections=100, max_keepalive_connections=100),
verify=False,
) as client:
first_html = await _get_async(client, ARCHIVE_URL)
last = _parse_last_page(first_html)

View file

@@ -58,18 +58,38 @@ def test_scraper_offline_fixture_matrix(run_scraper_offline, scraper, mode):
assert len(objs) >= 1, "No test objects returned"
validated_any = False
for obj in objs:
assert "problem_id" in obj, "Missing problem_id"
assert obj["problem_id"] != "", "Empty problem_id"
assert "combined" in obj, "Missing combined field"
assert isinstance(obj["combined"], dict), "combined must be a dict"
assert "input" in obj["combined"], "Missing combined.input"
assert "expected" in obj["combined"], "Missing combined.expected"
assert "tests" in obj, "Missing tests field"
assert isinstance(obj["tests"], list), "tests must be a list"
assert "timeout_ms" in obj, "Missing timeout_ms"
assert "memory_mb" in obj, "Missing memory_mb"
assert "interactive" in obj, "Missing interactive"
assert "multi_test" in obj, "Missing multi_test field"
assert isinstance(obj["multi_test"], bool), "multi_test must be bool"
validated_any = True
if "success" in obj and "tests" in obj and "problem_id" in obj:
tr = TestsResult.model_validate(obj)
assert tr.problem_id != ""
assert isinstance(tr.tests, list)
assert hasattr(tr, "combined"), "Missing combined field"
assert tr.combined is not None, "combined field is None"
assert hasattr(tr.combined, "input"), "combined missing input"
assert hasattr(tr.combined, "expected"), "combined missing expected"
assert isinstance(tr.combined.input, str), "combined.input not string"
assert isinstance(tr.combined.expected, str), (
"combined.expected not string"
)
assert hasattr(tr, "multi_test"), "Missing multi_test field"
assert isinstance(tr.multi_test, bool), "multi_test not boolean"
validated_any = True
else:
assert "problem_id" in obj
assert "tests" in obj and isinstance(obj["tests"], list)
assert (
"timeout_ms" in obj and "memory_mb" in obj and "interactive" in obj
)
assert "combined" in obj, "Missing combined field in raw JSON"
assert isinstance(obj["combined"], dict), "combined not a dict"
assert "input" in obj["combined"], "combined missing input key"
assert "expected" in obj["combined"], "combined missing expected key"
assert isinstance(obj["combined"]["input"], str), (
"combined.input not string"
)
assert isinstance(obj["combined"]["expected"], str), (
"combined.expected not string"
)
assert "multi_test" in obj, "Missing multi_test field in raw JSON"
assert isinstance(obj["multi_test"], bool), "multi_test not boolean"
validated_any = True
assert validated_any, "No valid tests payloads validated"