feat: caching

Barrett Ruth 2025-09-13 23:46:37 -05:00
parent 64c7559c78
commit 40117c2cf1
10 changed files with 764 additions and 175 deletions
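
The new "metadata" mode gives each scraper a machine-readable problem list, which is presumably what the caching added in this commit consumes. Below is a minimal sketch of how a caller might cache that JSON; the cache directory, TTL, and function name are illustrative assumptions, not code from this commit:

    # Hypothetical consumer-side cache for the scrapers' metadata JSON.
    # CACHE_DIR, TTL_SECONDS, and cached_metadata are assumed names.
    import json
    import subprocess
    import time
    from pathlib import Path

    CACHE_DIR = Path.home() / ".cache" / "cp-scrapers"  # assumed location
    TTL_SECONDS = 24 * 60 * 60  # assumed: refresh contest metadata daily

    def cached_metadata(script: str, *args: str) -> dict:
        CACHE_DIR.mkdir(parents=True, exist_ok=True)
        key = "-".join([Path(script).stem, *args])
        cache_file = CACHE_DIR / f"{key}.json"
        if cache_file.exists() and time.time() - cache_file.stat().st_mtime < TTL_SECONDS:
            return json.loads(cache_file.read_text())
        proc = subprocess.run(
            ["python", script, "metadata", *args],
            capture_output=True,
            text=True,
        )
        data = json.loads(proc.stdout)
        if data.get("success"):
            cache_file.write_text(proc.stdout)  # only cache successful scrapes
        return data

For example, cached_metadata("atcoder.py", "abc300") would hit the network once a day and serve the cached problem list otherwise.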

atcoder.py

@@ -12,6 +12,52 @@ def parse_problem_url(contest_id: str, problem_letter: str) -> str:
    return f"https://atcoder.jp/contests/{contest_id}/tasks/{task_id}"


def scrape_contest_problems(contest_id: str):
    try:
        contest_url = f"https://atcoder.jp/contests/{contest_id}/tasks"
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
        }
        response = requests.get(contest_url, headers=headers, timeout=10)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, "html.parser")
        problems = []
        task_table = soup.find("table", class_="table")
        if not task_table:
            return []

        rows = task_table.find_all("tr")[1:]  # Skip header row
        for row in rows:
            cells = row.find_all("td")
            if len(cells) >= 2:
                task_link = cells[1].find("a")
                if task_link:
                    task_name = task_link.get_text(strip=True)
                    task_href = task_link.get("href", "")
                    # Extract the problem letter from the task URL,
                    # e.g. /contests/abc300/tasks/abc300_a -> "a"
                    task_id = task_href.split("/")[-1] if task_href else ""
                    if task_id.startswith(contest_id + "_"):
                        problem_letter = task_id[len(contest_id) + 1:]
                        if problem_letter and task_name:
                            problems.append({
                                "id": problem_letter.lower(),
                                "name": task_name,
                            })

        problems.sort(key=lambda x: x["id"])
        return problems
    except Exception as e:
        print(f"Failed to scrape AtCoder contest problems: {e}", file=sys.stderr)
        return []


def scrape(url: str) -> list[tuple[str, str]]:
    try:
        headers = {
@@ -57,54 +103,98 @@ def scrape(url: str) -> list[tuple[str, str]]:


def main():
    if len(sys.argv) < 2:
        result = {
            "success": False,
            "error": "Usage: atcoder.py metadata <contest_id> OR atcoder.py tests <contest_id> <problem_letter>",
        }
        print(json.dumps(result))
        sys.exit(1)

    mode = sys.argv[1]

    if mode == "metadata":
        if len(sys.argv) != 3:
            result = {
                "success": False,
                "error": "Usage: atcoder.py metadata <contest_id>",
            }
            print(json.dumps(result))
            sys.exit(1)

        contest_id = sys.argv[2]
        problems = scrape_contest_problems(contest_id)
        if not problems:
            result = {
                "success": False,
                "error": f"No problems found for contest {contest_id}",
            }
            print(json.dumps(result))
            sys.exit(1)

        result = {
            "success": True,
            "contest_id": contest_id,
            "problems": problems,
        }
        print(json.dumps(result))
    elif mode == "tests":
        if len(sys.argv) != 4:
            result = {
                "success": False,
                "error": "Usage: atcoder.py tests <contest_id> <problem_letter>",
            }
            print(json.dumps(result))
            sys.exit(1)

        contest_id = sys.argv[2]
        problem_letter = sys.argv[3]
        problem_id = contest_id + problem_letter.lower()
        url = parse_problem_url(contest_id, problem_letter)
        print(f"Scraping: {url}", file=sys.stderr)

        tests = scrape(url)
        if not tests:
            result = {
                "success": False,
                "error": f"No tests found for {contest_id} {problem_letter}",
                "problem_id": problem_id,
                "url": url,
            }
            print(json.dumps(result))
            sys.exit(1)

        test_cases = []
        for input_data, output_data in tests:
            test_cases.append({"input": input_data, "output": output_data})
        # Fold all samples into a single test case: count header, then inputs.
        if test_cases:
            combined_input = (
                str(len(test_cases)) + "\n" + "\n".join(tc["input"] for tc in test_cases)
            )
            combined_output = "\n".join(tc["output"] for tc in test_cases)
            test_cases = [{"input": combined_input, "output": combined_output}]
        result = {
            "success": True,
            "problem_id": problem_id,
            "url": url,
            "test_cases": test_cases,
        }
        print(json.dumps(result))
    else:
        result = {
            "success": False,
            "error": f"Unknown mode: {mode}. Use 'metadata' or 'tests'",
        }
        print(json.dumps(result))
        sys.exit(1)


if __name__ == "__main__":
    main()
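
The tests mode above merges every scraped sample into one test case: the combined input is the sample count followed by the concatenated inputs, and the combined output is the concatenated expected outputs. A small worked example of that transformation (the sample data is made up):

    # Two fabricated AtCoder samples, as (input, output) pairs.
    tests = [("1 2", "3"), ("5 7", "12")]
    test_cases = [{"input": i, "output": o} for i, o in tests]

    combined_input = str(len(test_cases)) + "\n" + "\n".join(tc["input"] for tc in test_cases)
    combined_output = "\n".join(tc["output"] for tc in test_cases)

    # combined_input  == "2\n1 2\n5 7"  (count header, then each sample's input)
    # combined_output == "3\n12"

This matches the common multi-test convention where the solution reads the number of cases first, so all samples can be checked in one run.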

codeforces.py

@@ -60,51 +60,135 @@ def parse_problem_url(contest_id: str, problem_letter: str) -> str:
    )


def scrape_contest_problems(contest_id: str):
    try:
        contest_url = f"https://codeforces.com/contest/{contest_id}"
        scraper = cloudscraper.create_scraper()
        response = scraper.get(contest_url, timeout=10)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, "html.parser")
        problems = []
        problem_links = soup.find_all(
            "a", href=lambda x: x and f"/contest/{contest_id}/problem/" in x
        )
        for link in problem_links:
            href = link.get("href", "")
            if f"/contest/{contest_id}/problem/" in href:
                problem_letter = href.split("/")[-1].lower()
                problem_name = link.get_text(strip=True)
                if problem_letter and problem_name and len(problem_letter) == 1:
                    problems.append({
                        "id": problem_letter,
                        "name": problem_name,
                    })

        problems.sort(key=lambda x: x["id"])

        # Each problem is linked more than once on the contest page,
        # so keep only the first entry per id.
        seen = set()
        unique_problems = []
        for p in problems:
            if p["id"] not in seen:
                seen.add(p["id"])
                unique_problems.append(p)
        return unique_problems
    except Exception as e:
        print(f"Failed to scrape contest problems: {e}", file=sys.stderr)
        return []


def scrape_sample_tests(url: str):
    print(f"Scraping: {url}", file=sys.stderr)
    return scrape(url)
def main():
    if len(sys.argv) < 2:
        result = {
            "success": False,
            "error": "Usage: codeforces.py metadata <contest_id> OR codeforces.py tests <contest_id> <problem_letter>",
        }
        print(json.dumps(result))
        sys.exit(1)

    mode = sys.argv[1]

    if mode == "metadata":
        if len(sys.argv) != 3:
            result = {
                "success": False,
                "error": "Usage: codeforces.py metadata <contest_id>",
            }
            print(json.dumps(result))
            sys.exit(1)

        contest_id = sys.argv[2]
        problems = scrape_contest_problems(contest_id)
        if not problems:
            result = {
                "success": False,
                "error": f"No problems found for contest {contest_id}",
            }
            print(json.dumps(result))
            sys.exit(1)

        result = {
            "success": True,
            "contest_id": contest_id,
            "problems": problems,
        }
        print(json.dumps(result))
    elif mode == "tests":
        if len(sys.argv) != 4:
            result = {
                "success": False,
                "error": "Usage: codeforces.py tests <contest_id> <problem_letter>",
            }
            print(json.dumps(result))
            sys.exit(1)

        contest_id = sys.argv[2]
        problem_letter = sys.argv[3]
        problem_id = contest_id + problem_letter.lower()
        url = parse_problem_url(contest_id, problem_letter)

        tests = scrape_sample_tests(url)
        if not tests:
            result = {
                "success": False,
                "error": f"No tests found for {contest_id} {problem_letter}",
                "problem_id": problem_id,
                "url": url,
            }
            print(json.dumps(result))
            sys.exit(1)

        test_cases = []
        for input_data, output_data in tests:
            test_cases.append({"input": input_data, "output": output_data})
        result = {
            "success": True,
            "problem_id": problem_id,
            "url": url,
            "test_cases": test_cases,
        }
        print(json.dumps(result))
    else:
        result = {
            "success": False,
            "error": f"Unknown mode: {mode}. Use 'metadata' or 'tests'",
        }
        print(json.dumps(result))
        sys.exit(1)


if __name__ == "__main__":
    main()
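
All three scripts share the same stdout contract: exactly one JSON object, with diagnostics going to stderr and exit code 1 on failure. For reference, a successful metadata call prints an object shaped like the following (contest id and names fabricated for illustration):

    # Shape of the metadata JSON on success; values are made up.
    example = {
        "success": True,
        "contest_id": "1700",
        "problems": [
            {"id": "a", "name": "Example Problem A"},
            {"id": "b", "name": "Example Problem B"},
        ],
    }
    # On failure the scripts print {"success": False, "error": "..."} and exit 1.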

cses.py

@@ -15,6 +15,53 @@ def parse_problem_url(problem_input: str) -> str | None:
    return None


def scrape_all_problems():
    try:
        problemset_url = "https://cses.fi/problemset/"
        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
        }
        response = requests.get(problemset_url, headers=headers, timeout=10)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, "html.parser")
        all_categories = {}

        # Find all problem links first
        problem_links = soup.find_all("a", href=lambda x: x and "/problemset/task/" in x)
        print(f"Found {len(problem_links)} problem links", file=sys.stderr)

        # Group by category: each h1 heading precedes the problem list it introduces.
        current_category = None
        for element in soup.find_all(["h1", "a"]):
            if element.name == "h1":
                current_category = element.get_text().strip()
                if current_category not in all_categories:
                    all_categories[current_category] = []
            elif element.name == "a" and "/problemset/task/" in element.get("href", ""):
                href = element.get("href", "")
                problem_id = href.split("/")[-1]
                problem_name = element.get_text(strip=True)
                if problem_id.isdigit() and problem_name and current_category:
                    all_categories[current_category].append({
                        "id": problem_id,
                        "name": problem_name,
                    })

        # Sort problems within each category by numeric id.
        for category in all_categories:
            all_categories[category].sort(key=lambda x: int(x["id"]))

        print(f"Found {len(all_categories)} categories", file=sys.stderr)
        return all_categories
    except Exception as e:
        print(f"Failed to scrape CSES problems: {e}", file=sys.stderr)
        return {}


def scrape(url: str) -> list[tuple[str, str]]:
    try:
        headers = {
@@ -57,56 +104,98 @@ def scrape(url: str) -> list[tuple[str, str]]:


def main():
    if len(sys.argv) < 2:
        result = {
            "success": False,
            "error": "Usage: cses.py metadata OR cses.py tests <problem_id_or_url>",
        }
        print(json.dumps(result))
        sys.exit(1)

    mode = sys.argv[1]

    if mode == "metadata":
        if len(sys.argv) != 2:
            result = {
                "success": False,
                "error": "Usage: cses.py metadata",
            }
            print(json.dumps(result))
            sys.exit(1)

        all_categories = scrape_all_problems()
        if not all_categories:
            result = {
                "success": False,
                "error": "Failed to scrape CSES problem categories",
            }
            print(json.dumps(result))
            sys.exit(1)

        result = {
            "success": True,
            "categories": all_categories,
        }
        print(json.dumps(result))
    elif mode == "tests":
        if len(sys.argv) != 3:
            result = {
                "success": False,
                "error": "Usage: cses.py tests <problem_id_or_url>",
            }
            print(json.dumps(result))
            sys.exit(1)

        problem_input = sys.argv[2]
        url = parse_problem_url(problem_input)
        if not url:
            result = {
                "success": False,
                "error": f"Invalid problem input: {problem_input}. Use either problem ID (e.g., 1068) or full URL",
                "problem_id": problem_input if problem_input.isdigit() else None,
            }
            print(json.dumps(result))
            sys.exit(1)

        tests = scrape(url)
        problem_id = (
            problem_input if problem_input.isdigit() else problem_input.split("/")[-1]
        )
        if not tests:
            result = {
                "success": False,
                "error": f"No tests found for {problem_input}",
                "problem_id": problem_id,
                "url": url,
            }
            print(json.dumps(result))
            sys.exit(1)

        test_cases = []
        for input_data, output_data in tests:
            test_cases.append({"input": input_data, "output": output_data})
        result = {
            "success": True,
            "problem_id": problem_id,
            "url": url,
            "test_cases": test_cases,
        }
        print(json.dumps(result))
    else:
        result = {
            "success": False,
            "error": f"Unknown mode: {mode}. Use 'metadata' or 'tests'",
        }
        print(json.dumps(result))
        sys.exit(1)


if __name__ == "__main__":
    main()
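
Unlike the contest scrapers, cses.py metadata returns problems grouped by category rather than a flat list. A consumer that wants lookup by problem id can flatten the mapping; a sketch under the output structure shown above (the category and problem entries here are illustrative samples, not scraped data):

    # Flatten the CSES categories mapping into an id -> (category, name) index.
    categories = {
        "Introductory Problems": [
            {"id": "1068", "name": "Weird Algorithm"},
        ],
        "Sorting and Searching": [
            {"id": "1621", "name": "Distinct Numbers"},
        ],
    }

    index = {
        p["id"]: (category, p["name"])
        for category, problems in categories.items()
        for p in problems
    }
    print(index["1068"])  # ('Introductory Problems', 'Weird Algorithm')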