Merge pull request #77 from barrett-ruth/feat/cses-categories

CSES Categories
This commit is contained in:
Barrett Ruth 2025-09-20 20:16:34 +02:00 committed by GitHub
commit b992e1e635
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 562 additions and 99 deletions

View file

@ -326,7 +326,7 @@ CSES ~
URL format: https://cses.fi/problemset/task/1068
CSES (Code Submission Evaluation System) is organized by problem categories
rather than traditional contests. All problems are accessible individually.
rather than traditional contests. Problems are grouped by topic and difficulty.
Platform characteristics:
• Organization: Category-based (Introductory, Sorting, Dynamic Programming)
@ -337,17 +337,17 @@ Platform characteristics:
In terms of cp.nvim, this corresponds to:
- Platform: cses
- Contest ID: Problem ID (1068) - used as both contest and problem identifier
- Problem ID: nil (not applicable for CSES structure)
- Contest ID: Category name (introductory_problems, sorting_and_searching)
- Problem ID: Problem number (1068, 1640)
Usage examples: >
:CP cses 1068 " Set up problem 1068 from CSES
:CP 1070 " Switch to problem 1070 (if CSES context loaded)
:CP next " Navigate to next problem in CSES sequence
:CP cses dynamic_programming 1633 " Set up problem 1633 from DP category
<
Note: CSES problems are treated as individual
Note: Both category and problem ID are required
entities rather than contest problems.
==============================================================================
COMPLETE WORKFLOW EXAMPLE *cp-example*
Example: Setting up and solving AtCoder contest ABC324

View file

@ -700,12 +700,11 @@ local function parse_command(args)
}
elseif #filtered_args == 2 then
if first == 'cses' then
return {
type = 'cses_problem',
platform = first,
problem = filtered_args[2],
language = language,
}
logger.log(
'CSES requires both category and problem ID. Usage: :CP cses <category> <problem_id>',
vim.log.levels.ERROR
)
return { type = 'error' }
else
return {
type = 'contest_setup',
@ -851,22 +850,6 @@ function M.handle_command(opts)
return
end
if cmd.type == 'cses_problem' then
if set_platform(cmd.platform) then
if vim.tbl_contains(config.scrapers, cmd.platform) then
local metadata_result = scrape.scrape_contest_metadata(cmd.platform, '')
if not metadata_result.success then
logger.log(
'failed to load contest metadata: ' .. (metadata_result.error or 'unknown error'),
vim.log.levels.WARN
)
end
end
setup_problem(cmd.problem, nil, cmd.language)
end
return
end
if cmd.type == 'problem_switch' then
if state.platform == 'cses' then
setup_problem(cmd.problem, nil, cmd.language)

View file

@ -89,29 +89,16 @@ function M.scrape_contest_metadata(platform, contest_id)
local plugin_path = get_plugin_path()
local args
if platform == 'cses' then
args = {
'uv',
'run',
'--directory',
plugin_path,
'-m',
'scrapers.' .. platform,
'metadata',
}
else
args = {
'uv',
'run',
'--directory',
plugin_path,
'-m',
'scrapers.' .. platform,
'metadata',
contest_id,
}
end
local args = {
'uv',
'run',
'--directory',
plugin_path,
'-m',
'scrapers.' .. platform,
'metadata',
contest_id,
}
local result = vim
.system(args, {
@ -140,12 +127,7 @@ function M.scrape_contest_metadata(platform, contest_id)
return data
end
local problems_list
if platform == 'cses' then
problems_list = data.categories and data.categories['CSES Problem Set'] or {}
else
problems_list = data.problems or {}
end
local problems_list = data.problems or {}
cache.set_contest_data(platform, contest_id, problems_list)
return {
@ -223,7 +205,7 @@ function M.scrape_problem(ctx)
'-m',
'scrapers.' .. ctx.contest,
'tests',
ctx.contest_id,
ctx.problem_id,
}
else
args = {

View file

@ -3,12 +3,20 @@
import json
import re
import sys
import time
from dataclasses import asdict
import requests
from bs4 import BeautifulSoup, Tag
from .models import MetadataResult, ProblemSummary, TestCase, TestsResult
from .models import (
ContestListResult,
ContestSummary,
MetadataResult,
ProblemSummary,
TestCase,
TestsResult,
)
def extract_problem_limits(soup: BeautifulSoup) -> tuple[int, float]:
@ -159,11 +167,78 @@ def scrape(url: str) -> list[TestCase]:
return []
def scrape_contests() -> list[ContestSummary]:
# Walks the paginated AtCoder contest archive (at most 15 pages) and
# returns one ContestSummary per contest row found. Paging stops early
# as soon as a page has no contest table / body / rows; a page that
# errors is logged and skipped, but later pages are still attempted.
contests = []
max_pages = 15
for page in range(1, max_pages + 1):
try:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}
url = f"https://atcoder.jp/contests/archive?page={page}"
response = requests.get(url, headers=headers, timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.text, "html.parser")
table = soup.find("table", class_="table")
# No results table on this page: treat as end of archive and stop
# paging (these breaks exit the page loop, not just the row loop).
if not table:
break
tbody = table.find("tbody")
if not tbody or not isinstance(tbody, Tag):
break
rows = tbody.find_all("tr")
if not rows:
break
for row in rows:
cells = row.find_all("td")
if len(cells) < 2:
continue
# cells[1] is read as the contest-name column; the <a> href's
# last path segment becomes the contest id -- assumes the
# archive table layout, TODO confirm against live markup.
contest_cell = cells[1]
link = contest_cell.find("a")
if not link or not link.get("href"):
continue
href = link.get("href")
contest_id = href.split("/")[-1]
name = link.get_text().strip()
# Build a shorter display name for the three common series
# (ABC/ARC/AGC); any other contest keeps its full name.
display_name = name
if "AtCoder Beginner Contest" in name:
match = re.search(r"AtCoder Beginner Contest (\d+)", name)
if match:
display_name = f"Beginner Contest {match.group(1)} (ABC)"
elif "AtCoder Regular Contest" in name:
match = re.search(r"AtCoder Regular Contest (\d+)", name)
if match:
display_name = f"Regular Contest {match.group(1)} (ARC)"
elif "AtCoder Grand Contest" in name:
match = re.search(r"AtCoder Grand Contest (\d+)", name)
if match:
display_name = f"Grand Contest {match.group(1)} (AGC)"
contests.append(
ContestSummary(id=contest_id, name=name, display_name=display_name)
)
# Throttle between archive pages to be polite to atcoder.jp.
time.sleep(0.5)
except Exception as e:
# Best-effort: log the failing page to stderr and keep going.
print(f"Failed to scrape page {page}: {e}", file=sys.stderr)
continue
return contests
def main() -> None:
if len(sys.argv) < 2:
result = MetadataResult(
success=False,
error="Usage: atcoder.py metadata <contest_id> OR atcoder.py tests <contest_id> <problem_letter>",
error="Usage: atcoder.py metadata <contest_id> OR atcoder.py tests <contest_id> <problem_letter> OR atcoder.py contests",
)
print(json.dumps(asdict(result)))
sys.exit(1)
@ -264,10 +339,27 @@ def main() -> None:
)
print(json.dumps(asdict(tests_result)))
elif mode == "contests":
if len(sys.argv) != 2:
contest_result = ContestListResult(
success=False, error="Usage: atcoder.py contests"
)
print(json.dumps(asdict(contest_result)))
sys.exit(1)
contests = scrape_contests()
if not contests:
contest_result = ContestListResult(success=False, error="No contests found")
print(json.dumps(asdict(contest_result)))
sys.exit(1)
contest_result = ContestListResult(success=True, error="", contests=contests)
print(json.dumps(asdict(contest_result)))
else:
result = MetadataResult(
success=False,
error=f"Unknown mode: {mode}. Use 'metadata' or 'tests'",
error=f"Unknown mode: {mode}. Use 'metadata', 'tests', or 'contests'",
)
print(json.dumps(asdict(result)))
sys.exit(1)

View file

@ -7,7 +7,14 @@ from dataclasses import asdict
import cloudscraper
from bs4 import BeautifulSoup, Tag
from .models import MetadataResult, ProblemSummary, TestCase, TestsResult
from .models import (
ContestListResult,
ContestSummary,
MetadataResult,
ProblemSummary,
TestCase,
TestsResult,
)
def scrape(url: str) -> list[TestCase]:
@ -218,11 +225,54 @@ def scrape_sample_tests(url: str) -> list[TestCase]:
return scrape(url)
def scrape_contests() -> list[ContestSummary]:
    """Fetch recent Codeforces contests via the ``contest.list`` API.

    Returns:
        Up to 100 ``ContestSummary`` entries (the API lists newest first),
        or an empty list on any API/network failure.
    """
    # BUGFIX: the original placed `import re` inside the "Educational"
    # branch only. Since a function-scope import makes `re` a local name,
    # processing a "Codeforces Round ... (Div. N)" or "Global Round"
    # contest before any Educational round raised UnboundLocalError,
    # which the broad `except` below swallowed, silently returning [].
    import re

    try:
        scraper = cloudscraper.create_scraper()
        response = scraper.get("https://codeforces.com/api/contest.list", timeout=10)
        response.raise_for_status()
        data = response.json()
        if data["status"] != "OK":
            return []

        contests = []
        for contest in data["result"]:
            contest_id = str(contest["id"])
            name = contest["name"]

            # Shorten the well-known round families for display; anything
            # else keeps its full API name.
            display_name = name
            if "Educational Codeforces Round" in name:
                match = re.search(r"Educational Codeforces Round (\d+)", name)
                if match:
                    display_name = f"Educational Round {match.group(1)}"
            elif "Codeforces Round" in name and "Div" in name:
                match = re.search(r"Codeforces Round (\d+) \(Div\. (\d+)\)", name)
                if match:
                    display_name = f"Round {match.group(1)} (Div. {match.group(2)})"
            elif "Codeforces Global Round" in name:
                match = re.search(r"Codeforces Global Round (\d+)", name)
                if match:
                    display_name = f"Global Round {match.group(1)}"

            contests.append(
                ContestSummary(id=contest_id, name=name, display_name=display_name)
            )
        return contests[:100]  # Limit to recent 100 contests
    except Exception as e:
        # Best-effort: log to stderr and degrade to an empty list.
        print(f"Failed to fetch contests: {e}", file=sys.stderr)
        return []
def main() -> None:
if len(sys.argv) < 2:
result = MetadataResult(
success=False,
error="Usage: codeforces.py metadata <contest_id> OR codeforces.py tests <contest_id> <problem_letter>",
error="Usage: codeforces.py metadata <contest_id> OR codeforces.py tests <contest_id> <problem_letter> OR codeforces.py contests",
)
print(json.dumps(asdict(result)))
sys.exit(1)
@ -316,9 +366,27 @@ def main() -> None:
)
print(json.dumps(asdict(tests_result)))
elif mode == "contests":
if len(sys.argv) != 2:
contest_result = ContestListResult(
success=False, error="Usage: codeforces.py contests"
)
print(json.dumps(asdict(contest_result)))
sys.exit(1)
contests = scrape_contests()
if not contests:
contest_result = ContestListResult(success=False, error="No contests found")
print(json.dumps(asdict(contest_result)))
sys.exit(1)
contest_result = ContestListResult(success=True, error="", contests=contests)
print(json.dumps(asdict(contest_result)))
else:
result = MetadataResult(
success=False, error=f"Unknown mode: {mode}. Use 'metadata' or 'tests'"
success=False,
error=f"Unknown mode: {mode}. Use 'metadata', 'tests', or 'contests'",
)
print(json.dumps(asdict(result)))
sys.exit(1)

View file

@ -11,6 +11,85 @@ from bs4 import BeautifulSoup, Tag
from .models import MetadataResult, ProblemSummary, TestCase, TestsResult
def normalize_category_name(category_name: str) -> str:
    """Turn a human-readable CSES category title into its slug form.

    Lowercases the title, then substitutes underscores for spaces and the
    word "and" for ampersands, e.g. ``"Sorting and Searching"`` ->
    ``"sorting_and_searching"``.
    """
    slug = category_name.lower()
    for needle, replacement in ((" ", "_"), ("&", "and")):
        slug = slug.replace(needle, replacement)
    return slug
def denormalize_category_name(category_id: str) -> str:
    """Map a category slug back to its official CSES display title.

    Known slugs are resolved through a fixed lookup table; an unknown slug
    falls back to replacing underscores with spaces and title-casing.
    """
    known_titles = {
        "introductory_problems": "Introductory Problems",
        "sorting_and_searching": "Sorting and Searching",
        "dynamic_programming": "Dynamic Programming",
        "graph_algorithms": "Graph Algorithms",
        "range_queries": "Range Queries",
        "tree_algorithms": "Tree Algorithms",
        "mathematics": "Mathematics",
        "string_algorithms": "String Algorithms",
        "geometry": "Geometry",
        "advanced_techniques": "Advanced Techniques",
    }
    fallback = category_id.replace("_", " ").title()
    return known_titles.get(category_id, fallback)
def scrape_category_problems(category_id: str) -> list[ProblemSummary]:
# Scrapes https://cses.fi/problemset/ and returns the problems listed
# under the category whose slug is `category_id`, sorted numerically by
# problem id. Returns [] when the category is absent or on any error.
category_name = denormalize_category_name(category_id)
try:
problemset_url = "https://cses.fi/problemset/"
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}
response = requests.get(problemset_url, headers=headers, timeout=10)
response.raise_for_status()
soup = BeautifulSoup(response.text, "html.parser")
current_category = None
problems = []
target_found = False
# Single pass over headings and lists in document order: a heading
# switches the current category; <ul> contents are only harvested
# while the target category is the active one.
for element in soup.find_all(["h1", "h2", "ul"]):
if not isinstance(element, Tag):
continue
if element.name in ["h1", "h2"]:
text = element.get_text(strip=True)
# Skip page/site titles, which are not category headings.
if not text or text.startswith("CSES") or text == "CSES Problem Set":
continue
# A different heading after the target was found means the
# target section has ended -- stop scanning.
if target_found and current_category != text:
break
current_category = text
# Case-insensitive match against the denormalized display name.
if text.lower() == category_name.lower():
target_found = True
elif element.name == "ul" and current_category and target_found:
problem_links = element.find_all(
"a", href=lambda x: x and "/problemset/task/" in x
)
for link in problem_links:
href = link.get("href", "")
if not href:
continue
# Task URLs end in the numeric problem id; non-numeric or
# unnamed entries are ignored.
problem_id = href.split("/")[-1]
problem_name = link.get_text(strip=True)
if not problem_id.isdigit() or not problem_name:
continue
problems.append(ProblemSummary(id=problem_id, name=problem_name))
problems.sort(key=lambda x: int(x.id))
return problems
except Exception as e:
# Best-effort: log to stderr and degrade to an empty list.
print(f"Failed to scrape CSES category {category_id}: {e}", file=sys.stderr)
return []
def parse_problem_url(problem_input: str) -> str | None:
if problem_input.startswith("https://cses.fi/problemset/task/"):
return problem_input
@ -94,21 +173,39 @@ def scrape_all_problems() -> dict[str, list[ProblemSummary]]:
soup = BeautifulSoup(response.text, "html.parser")
all_categories: dict[str, list[ProblemSummary]] = {}
problem_links = soup.find_all(
"a", href=lambda x: x and "/problemset/task/" in x
)
print(f"Found {len(problem_links)} problem links", file=sys.stderr)
current_category = None
for element in soup.find_all(["h1", "a"]):
current_category = process_problem_element(
element, current_category, all_categories
)
for element in soup.find_all(["h1", "h2", "ul"]):
if not isinstance(element, Tag):
continue
if element.name in ["h1", "h2"]:
text = element.get_text(strip=True)
if text and not text.startswith("CSES") and text != "CSES Problem Set":
current_category = text
if current_category not in all_categories:
all_categories[current_category] = []
print(f"Found category: {current_category}", file=sys.stderr)
elif element.name == "ul" and current_category:
problem_links = element.find_all(
"a", href=lambda x: x and "/problemset/task/" in x
)
for link in problem_links:
href = link.get("href", "")
if href:
problem_id = href.split("/")[-1]
problem_name = link.get_text(strip=True)
if problem_id.isdigit() and problem_name:
problem = ProblemSummary(id=problem_id, name=problem_name)
all_categories[current_category].append(problem)
for category in all_categories:
all_categories[category].sort(key=lambda x: int(x.id))
print(f"Found {len(all_categories)} categories", file=sys.stderr)
print(
f"Found {len(all_categories)} categories with {sum(len(probs) for probs in all_categories.values())} problems",
file=sys.stderr,
)
return all_categories
except Exception as e:
@ -170,7 +267,7 @@ def main() -> None:
if len(sys.argv) < 2:
result = MetadataResult(
success=False,
error="Usage: cses.py metadata OR cses.py tests <problem_id_or_url>",
error="Usage: cses.py metadata <category_id> OR cses.py tests <problem_id_or_url>",
)
print(json.dumps(asdict(result)))
sys.exit(1)
@ -178,25 +275,26 @@ def main() -> None:
mode: str = sys.argv[1]
if mode == "metadata":
if len(sys.argv) != 2:
if len(sys.argv) != 3:
result = MetadataResult(
success=False,
error="Usage: cses.py metadata",
error="Usage: cses.py metadata <category_id>",
)
print(json.dumps(asdict(result)))
sys.exit(1)
all_categories: dict[str, list[ProblemSummary]] = scrape_all_problems()
category_id = sys.argv[2]
problems = scrape_category_problems(category_id)
if not all_categories:
if not problems:
result = MetadataResult(
success=False,
error="Failed to scrape CSES problem categories",
error=f"No problems found for category: {category_id}",
)
print(json.dumps(asdict(result)))
sys.exit(1)
return
result = MetadataResult(success=True, error="", categories=all_categories)
result = MetadataResult(success=True, error="", problems=problems)
print(json.dumps(asdict(result)))
elif mode == "tests":

View file

@ -13,6 +13,13 @@ class ProblemSummary:
name: str
@dataclass
class ContestSummary:
# Platform identifier for the contest (e.g. "abc350", "1951").
id: str
# Full contest name as reported by the platform.
name: str
# Shortened, human-friendly variant of `name` produced by the scrapers.
display_name: str
@dataclass
class ScrapingResult:
success: bool
@ -26,6 +33,11 @@ class MetadataResult(ScrapingResult):
categories: dict[str, list[ProblemSummary]] = field(default_factory=dict)
@dataclass
class ContestListResult(ScrapingResult):
# Contests discovered by a scraper's "contests" mode; empty on failure.
contests: list[ContestSummary] = field(default_factory=list)
@dataclass
class TestsResult(ScrapingResult):
problem_id: str

View file

@ -96,7 +96,7 @@ describe('cp command parsing', function()
end)
it('handles cses problem command', function()
local opts = { fargs = { 'cses', '1234' } }
local opts = { fargs = { 'cses', 'sorting_and_searching', '1234' } }
assert.has_no_errors(function()
cp.handle_command(opts)

View file

@ -214,7 +214,7 @@ describe('cp.scrape', function()
end)
it('constructs correct command for cses metadata', function()
scrape.scrape_contest_metadata('cses', 'problemset')
scrape.scrape_contest_metadata('cses', 'sorting_and_searching')
local metadata_call = nil
for _, call in ipairs(mock_system_calls) do
@ -227,7 +227,7 @@ describe('cp.scrape', function()
assert.is_not_nil(metadata_call)
assert.equals('uv', metadata_call.cmd[1])
assert.is_true(vim.tbl_contains(metadata_call.cmd, 'metadata'))
assert.is_false(vim.tbl_contains(metadata_call.cmd, 'problemset'))
assert.is_true(vim.tbl_contains(metadata_call.cmd, 'sorting_and_searching'))
end)
it('handles subprocess execution failure', function()
@ -380,8 +380,8 @@ describe('cp.scrape', function()
it('constructs correct command for cses problem tests', function()
test_context.contest = 'cses'
test_context.contest_id = '1001'
test_context.problem_id = nil
test_context.contest_id = 'sorting_and_searching'
test_context.problem_id = '1001'
scrape.scrape_problem(test_context)
@ -396,7 +396,7 @@ describe('cp.scrape', function()
assert.is_not_nil(tests_call)
assert.is_true(vim.tbl_contains(tests_call.cmd, 'tests'))
assert.is_true(vim.tbl_contains(tests_call.cmd, '1001'))
assert.is_false(vim.tbl_contains(tests_call.cmd, 'a'))
assert.is_false(vim.tbl_contains(tests_call.cmd, 'sorting_and_searching'))
end)
end)

View file

@ -1,6 +1,7 @@
from unittest.mock import Mock
from scrapers.atcoder import scrape, scrape_contest_problems
from scrapers.models import ProblemSummary
from scrapers.atcoder import scrape, scrape_contest_problems, scrape_contests
from scrapers.models import ContestSummary, ProblemSummary
def test_scrape_success(mocker, mock_atcoder_html):
@ -49,3 +50,81 @@ def test_scrape_network_error(mocker):
result = scrape("https://atcoder.jp/contests/abc350/tasks/abc350_a")
assert result == []
def test_scrape_contests_success(mocker):
"""Page 1 serves two archive rows (ABC/ARC); every later page is empty,
so exactly two summaries with shortened display names come back."""
def mock_get_side_effect(url, **kwargs):
if "page=1" in url:
mock_response = Mock()
mock_response.text = """
<table class="table table-default table-striped table-hover table-condensed table-bordered small">
<thead>
<tr>
<th>Start Time</th>
<th>Contest Name</th>
<th>Duration</th>
<th>Rated Range</th>
</tr>
</thead>
<tbody>
<tr>
<td>2025-01-15 21:00:00+0900</td>
<td><a href="/contests/abc350">AtCoder Beginner Contest 350</a></td>
<td>01:40</td>
<td> - 1999</td>
</tr>
<tr>
<td>2025-01-14 21:00:00+0900</td>
<td><a href="/contests/arc170">AtCoder Regular Contest 170</a></td>
<td>02:00</td>
<td>1000 - 2799</td>
</tr>
</tbody>
</table>
"""
return mock_response
else:
# Return empty page for all other pages
mock_response = Mock()
mock_response.text = "<html><body>No table found</body></html>"
return mock_response
mocker.patch("scrapers.atcoder.requests.get", side_effect=mock_get_side_effect)
# Patch out the inter-page throttle so the test stays fast.
mocker.patch("scrapers.atcoder.time.sleep")
result = scrape_contests()
assert len(result) == 2
assert result[0] == ContestSummary(
id="abc350",
name="AtCoder Beginner Contest 350",
display_name="Beginner Contest 350 (ABC)",
)
assert result[1] == ContestSummary(
id="arc170",
name="AtCoder Regular Contest 170",
display_name="Regular Contest 170 (ARC)",
)
def test_scrape_contests_no_table(mocker):
"""An archive page without a contest table stops paging immediately,
yielding an empty contest list."""
mock_response = Mock()
mock_response.text = "<html><body>No table found</body></html>"
mocker.patch("scrapers.atcoder.requests.get", return_value=mock_response)
mocker.patch("scrapers.atcoder.time.sleep")
result = scrape_contests()
assert result == []
def test_scrape_contests_network_error(mocker):
"""Network failures on every page are swallowed per-page and the
function degrades to an empty list rather than raising."""
mocker.patch(
"scrapers.atcoder.requests.get", side_effect=Exception("Network error")
)
mocker.patch("scrapers.atcoder.time.sleep")
result = scrape_contests()
assert result == []

View file

@ -1,6 +1,7 @@
from unittest.mock import Mock
from scrapers.codeforces import scrape, scrape_contest_problems
from scrapers.models import ProblemSummary
from scrapers.codeforces import scrape, scrape_contest_problems, scrape_contests
from scrapers.models import ContestSummary, ProblemSummary
def test_scrape_success(mocker, mock_codeforces_html):
@ -51,3 +52,66 @@ def test_scrape_network_error(mocker):
result = scrape("https://codeforces.com/contest/1900/problem/A")
assert result == []
def test_scrape_contests_success(mocker):
"""A stubbed contest.list API response produces one summary per entry
with the expected shortened display names."""
# NOTE(review): the fixture happens to list an Educational round first;
# worth also covering a response that starts with a Div-only round.
mock_scraper = Mock()
mock_response = Mock()
mock_response.json.return_value = {
"status": "OK",
"result": [
{"id": 1951, "name": "Educational Codeforces Round 168 (Rated for Div. 2)"},
{"id": 1950, "name": "Codeforces Round 936 (Div. 2)"},
{"id": 1949, "name": "Codeforces Global Round 26"},
],
}
mock_scraper.get.return_value = mock_response
mocker.patch(
"scrapers.codeforces.cloudscraper.create_scraper", return_value=mock_scraper
)
result = scrape_contests()
assert len(result) == 3
assert result[0] == ContestSummary(
id="1951",
name="Educational Codeforces Round 168 (Rated for Div. 2)",
display_name="Educational Round 168",
)
assert result[1] == ContestSummary(
id="1950",
name="Codeforces Round 936 (Div. 2)",
display_name="Round 936 (Div. 2)",
)
assert result[2] == ContestSummary(
id="1949", name="Codeforces Global Round 26", display_name="Global Round 26"
)
def test_scrape_contests_api_error(mocker):
"""A non-OK API status yields an empty list rather than raising."""
mock_scraper = Mock()
mock_response = Mock()
mock_response.json.return_value = {"status": "FAILED", "result": []}
mock_scraper.get.return_value = mock_response
mocker.patch(
"scrapers.codeforces.cloudscraper.create_scraper", return_value=mock_scraper
)
result = scrape_contests()
assert result == []
def test_scrape_contests_network_error(mocker):
"""A transport-level exception is swallowed and an empty list returned."""
mock_scraper = Mock()
mock_scraper.get.side_effect = Exception("Network error")
mocker.patch(
"scrapers.codeforces.cloudscraper.create_scraper", return_value=mock_scraper
)
result = scrape_contests()
assert result == []

View file

@ -1,5 +1,12 @@
from unittest.mock import Mock
from scrapers.cses import scrape, scrape_all_problems
from scrapers.cses import (
denormalize_category_name,
normalize_category_name,
scrape,
scrape_all_problems,
scrape_category_problems,
)
from scrapers.models import ProblemSummary
@ -19,12 +26,19 @@ def test_scrape_success(mocker, mock_cses_html):
def test_scrape_all_problems(mocker):
mock_response = Mock()
mock_response.text = """
<h1>Introductory Problems</h1>
<a href="/problemset/task/1068">Weird Algorithm</a>
<a href="/problemset/task/1083">Missing Number</a>
<h1>Sorting and Searching</h1>
<a href="/problemset/task/1084">Apartments</a>
<div class="content">
<h1>Introductory Problems</h1>
<ul>
<li><a href="/problemset/task/1068">Weird Algorithm</a></li>
<li><a href="/problemset/task/1083">Missing Number</a></li>
</ul>
<h1>Sorting and Searching</h1>
<ul>
<li><a href="/problemset/task/1084">Apartments</a></li>
</ul>
</div>
"""
mock_response.raise_for_status = Mock()
mocker.patch("scrapers.cses.requests.get", return_value=mock_response)
@ -45,3 +59,74 @@ def test_scrape_network_error(mocker):
result = scrape("https://cses.fi/problemset/task/1068")
assert result == []
def test_normalize_category_name():
"""Display titles map to lowercase snake_case slugs."""
assert normalize_category_name("Sorting and Searching") == "sorting_and_searching"
assert normalize_category_name("Dynamic Programming") == "dynamic_programming"
assert normalize_category_name("Graph Algorithms") == "graph_algorithms"
def test_denormalize_category_name():
"""Known slugs resolve back to their official CSES display titles."""
assert denormalize_category_name("sorting_and_searching") == "Sorting and Searching"
assert denormalize_category_name("dynamic_programming") == "Dynamic Programming"
assert denormalize_category_name("graph_algorithms") == "Graph Algorithms"
def test_scrape_category_problems_success(mocker):
"""Only problems under the requested category heading are returned,
sorted numerically; neighboring categories are excluded."""
mock_response = Mock()
mock_response.text = """
<div class="content">
<h1>General</h1>
<ul>
<li><a href="/problemset/task/1000">Test Problem</a></li>
</ul>
<h1>Sorting and Searching</h1>
<ul>
<li><a href="/problemset/task/1640">Sum of Two Values</a></li>
<li><a href="/problemset/task/1643">Maximum Subarray Sum</a></li>
</ul>
<h1>Dynamic Programming</h1>
<ul>
<li><a href="/problemset/task/1633">Dice Combinations</a></li>
</ul>
</div>
"""
mock_response.raise_for_status = Mock()
mocker.patch("scrapers.cses.requests.get", return_value=mock_response)
result = scrape_category_problems("sorting_and_searching")
assert len(result) == 2
assert result[0].id == "1640"
assert result[0].name == "Sum of Two Values"
assert result[1].id == "1643"
assert result[1].name == "Maximum Subarray Sum"
def test_scrape_category_problems_not_found(mocker):
"""A slug that matches no heading on the page yields an empty list."""
mock_response = Mock()
mock_response.text = """
<div class="content">
<h1>Some Other Category</h1>
<ul>
<li><a href="/problemset/task/1000">Test Problem</a></li>
</ul>
</div>
"""
mock_response.raise_for_status = Mock()
mocker.patch("scrapers.cses.requests.get", return_value=mock_response)
result = scrape_category_problems("nonexistent_category")
assert result == []
def test_scrape_category_problems_network_error(mocker):
"""Request failures are swallowed and an empty list returned."""
mocker.patch("scrapers.cses.requests.get", side_effect=Exception("Network error"))
result = scrape_category_problems("sorting_and_searching")
assert result == []