fix(ci): format

This commit is contained in:
Barrett Ruth 2025-09-14 00:05:23 -05:00
parent c1c9674503
commit 8394065169
3 changed files with 17 additions and 16 deletions

View file

@@ -42,13 +42,12 @@ def scrape_contest_problems(contest_id: str):
# Extract problem letter from task name or URL # Extract problem letter from task name or URL
task_id = task_href.split("/")[-1] if task_href else "" task_id = task_href.split("/")[-1] if task_href else ""
if task_id.startswith(contest_id + "_"): if task_id.startswith(contest_id + "_"):
problem_letter = task_id[len(contest_id) + 1:] problem_letter = task_id[len(contest_id) + 1 :]
if problem_letter and task_name: if problem_letter and task_name:
problems.append({ problems.append(
"id": problem_letter.lower(), {"id": problem_letter.lower(), "name": task_name}
"name": task_name )
})
problems.sort(key=lambda x: x["id"]) problems.sort(key=lambda x: x["id"])
return problems return problems
@@ -174,7 +173,9 @@ def main():
if test_cases: if test_cases:
combined_input = ( combined_input = (
str(len(test_cases)) + "\n" + "\n".join(tc["input"] for tc in test_cases) str(len(test_cases))
+ "\n"
+ "\n".join(tc["input"] for tc in test_cases)
) )
combined_output = "\n".join(tc["output"] for tc in test_cases) combined_output = "\n".join(tc["output"] for tc in test_cases)
test_cases = [{"input": combined_input, "output": combined_output}] test_cases = [{"input": combined_input, "output": combined_output}]

View file

@@ -70,7 +70,9 @@ def scrape_contest_problems(contest_id: str):
soup = BeautifulSoup(response.text, "html.parser") soup = BeautifulSoup(response.text, "html.parser")
problems = [] problems = []
problem_links = soup.find_all("a", href=lambda x: x and f"/contest/{contest_id}/problem/" in x) problem_links = soup.find_all(
"a", href=lambda x: x and f"/contest/{contest_id}/problem/" in x
)
for link in problem_links: for link in problem_links:
href = link.get("href", "") href = link.get("href", "")
@@ -79,10 +81,7 @@ def scrape_contest_problems(contest_id: str):
problem_name = link.get_text(strip=True) problem_name = link.get_text(strip=True)
if problem_letter and problem_name and len(problem_letter) == 1: if problem_letter and problem_name and len(problem_letter) == 1:
problems.append({ problems.append({"id": problem_letter, "name": problem_name})
"id": problem_letter,
"name": problem_name
})
problems.sort(key=lambda x: x["id"]) problems.sort(key=lambda x: x["id"])

View file

@@ -29,7 +29,9 @@ def scrape_all_problems():
all_categories = {} all_categories = {}
# Find all problem links first # Find all problem links first
problem_links = soup.find_all("a", href=lambda x: x and "/problemset/task/" in x) problem_links = soup.find_all(
"a", href=lambda x: x and "/problemset/task/" in x
)
print(f"Found {len(problem_links)} problem links", file=sys.stderr) print(f"Found {len(problem_links)} problem links", file=sys.stderr)
# Group by categories - look for h1 elements that precede problem lists # Group by categories - look for h1 elements that precede problem lists
@@ -45,10 +47,9 @@ def scrape_all_problems():
problem_name = element.get_text(strip=True) problem_name = element.get_text(strip=True)
if problem_id.isdigit() and problem_name and current_category: if problem_id.isdigit() and problem_name and current_category:
all_categories[current_category].append({ all_categories[current_category].append(
"id": problem_id, {"id": problem_id, "name": problem_name}
"name": problem_name )
})
# Sort problems in each category # Sort problems in each category
for category in all_categories: for category in all_categories: