fix(ci): update typing

parent 8394065169
commit c1c1194945

4 changed files with 83 additions and 85 deletions
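Four files change: a Lua health-check module for the plugin, plus three Python scrapers that name themselves atcoder.py, codeforces.py, and cses.py in their usage strings. The Python changes add explicit variable and return annotations, presumably so a static type check in CI passes cleanly; the checker configuration itself is not part of this diff. The recurring dict[str, str | bool] annotation is the one non-obvious choice, so here is a minimal standalone sketch of why that union is needed:

import json

# The scripts' error payload mixes a bool flag with str messages; a plain
# dict[str, str] would fail type checking, so the annotation widens the
# value type to the union str | bool. The usage string is shortened here.
result: dict[str, str | bool] = {
    "success": False,
    "error": "Usage: atcoder.py metadata <contest_id>",
}
print(json.dumps(result))  # {"success": false, "error": "Usage: ..."}

Note that the str | bool and str | None unions (PEP 604) evaluated in annotations require Python 3.10+, and the built-in generics list[...] / dict[...] (PEP 585) require 3.9+, unless the scripts defer evaluation with from __future__ import annotations. The Lua hunk comes first, then the three scrapers.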
@@ -60,7 +60,6 @@ local function check_luasnip()
end

local function check_config()
-  local cp = require("cp")
  vim.health.ok("Plugin ready")

  if vim.g.cp and vim.g.cp.platform then
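That first hunk is the Lua health check: check_config no longer grabs a local cp = require("cp") handle, which is apparently unused within the visible context since the vim.health calls stand on their own. The remaining three files are the Python scrapers; atcoder.py is next.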
@@ -8,14 +8,14 @@ from bs4 import BeautifulSoup


 def parse_problem_url(contest_id: str, problem_letter: str) -> str:
-    task_id = f"{contest_id}_{problem_letter}"
+    task_id: str = f"{contest_id}_{problem_letter}"
     return f"https://atcoder.jp/contests/{contest_id}/tasks/{task_id}"


-def scrape_contest_problems(contest_id: str):
+def scrape_contest_problems(contest_id: str) -> list[dict[str, str]]:
     try:
-        contest_url = f"https://atcoder.jp/contests/{contest_id}/tasks"
-        headers = {
+        contest_url: str = f"https://atcoder.jp/contests/{contest_id}/tasks"
+        headers: dict[str, str] = {
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
         }

@@ -23,7 +23,7 @@ def scrape_contest_problems(contest_id: str):
         response.raise_for_status()

         soup = BeautifulSoup(response.text, "html.parser")
-        problems = []
+        problems: list[dict[str, str]] = []

         task_table = soup.find("table", class_="table")
         if not task_table:
@@ -36,13 +36,13 @@ def scrape_contest_problems(contest_id: str):
             if len(cells) >= 2:
                 task_link = cells[1].find("a")
                 if task_link:
-                    task_name = task_link.get_text(strip=True)
-                    task_href = task_link.get("href", "")
+                    task_name: str = task_link.get_text(strip=True)
+                    task_href: str = task_link.get("href", "")

                     # Extract problem letter from task name or URL
-                    task_id = task_href.split("/")[-1] if task_href else ""
+                    task_id: str = task_href.split("/")[-1] if task_href else ""
                     if task_id.startswith(contest_id + "_"):
-                        problem_letter = task_id[len(contest_id) + 1 :]
+                        problem_letter: str = task_id[len(contest_id) + 1 :]

                         if problem_letter and task_name:
                             problems.append(
@@ -59,7 +59,7 @@ def scrape_contest_problems(contest_id: str):

 def scrape(url: str) -> list[tuple[str, str]]:
     try:
-        headers = {
+        headers: dict[str, str] = {
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
         }

@@ -68,7 +68,7 @@ def scrape(url: str) -> list[tuple[str, str]]:

         soup = BeautifulSoup(response.text, "html.parser")

-        tests = []
+        tests: list[tuple[str, str]] = []

         sample_headers = soup.find_all(
             "h3", string=lambda x: x and "sample" in x.lower() if x else False
@@ -84,8 +84,10 @@ def scrape(url: str) -> list[tuple[str, str]]:
                 if "output" in next_header.get_text().lower():
                     output_pre = next_header.find_next("pre")
                     if output_pre:
-                        input_text = input_pre.get_text().strip().replace("\r", "")
-                        output_text = (
+                        input_text: str = (
+                            input_pre.get_text().strip().replace("\r", "")
+                        )
+                        output_text: str = (
                             output_pre.get_text().strip().replace("\r", "")
                         )
                         if input_text and output_text:
@@ -101,16 +103,16 @@ def scrape(url: str) -> list[tuple[str, str]]:
     return []


-def main():
+def main() -> None:
     if len(sys.argv) < 2:
-        result = {
+        result: dict[str, str | bool] = {
             "success": False,
             "error": "Usage: atcoder.py metadata <contest_id> OR atcoder.py tests <contest_id> <problem_letter>",
         }
         print(json.dumps(result))
         sys.exit(1)

-    mode = sys.argv[1]
+    mode: str = sys.argv[1]

     if mode == "metadata":
         if len(sys.argv) != 3:
@@ -121,8 +123,8 @@ def main():
             print(json.dumps(result))
             sys.exit(1)

-        contest_id = sys.argv[2]
-        problems = scrape_contest_problems(contest_id)
+        contest_id: str = sys.argv[2]
+        problems: list[dict[str, str]] = scrape_contest_problems(contest_id)

         if not problems:
             result = {
@@ -148,14 +150,14 @@ def main():
             print(json.dumps(result))
             sys.exit(1)

-        contest_id = sys.argv[2]
-        problem_letter = sys.argv[3]
-        problem_id = contest_id + problem_letter.lower()
+        contest_id: str = sys.argv[2]
+        problem_letter: str = sys.argv[3]
+        problem_id: str = contest_id + problem_letter.lower()

-        url = parse_problem_url(contest_id, problem_letter)
+        url: str = parse_problem_url(contest_id, problem_letter)
         print(f"Scraping: {url}", file=sys.stderr)

-        tests = scrape(url)
+        tests: list[tuple[str, str]] = scrape(url)

         if not tests:
             result = {
@@ -167,17 +169,17 @@ def main():
             print(json.dumps(result))
             sys.exit(1)

-        test_cases = []
+        test_cases: list[dict[str, str]] = []
         for input_data, output_data in tests:
             test_cases.append({"input": input_data, "output": output_data})

         if test_cases:
-            combined_input = (
+            combined_input: str = (
                 str(len(test_cases))
                 + "\n"
                 + "\n".join(tc["input"] for tc in test_cases)
             )
-            combined_output = "\n".join(tc["output"] for tc in test_cases)
+            combined_output: str = "\n".join(tc["output"] for tc in test_cases)
             test_cases = [{"input": combined_input, "output": combined_output}]

         result = {
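That completes atcoder.py; the codeforces.py hunks are next. The densest change above is the test-combining block at the end of main(), so here is a self-contained sketch of what the annotated combined_input / combined_output logic does, using made-up sample data:

# Fold every sample test into one multi-test case, prefixing the inputs
# with their count, exactly as the hunk above does.
test_cases: list[dict[str, str]] = [
    {"input": "1 2", "output": "3"},
    {"input": "5 7", "output": "12"},
]
combined_input: str = (
    str(len(test_cases)) + "\n" + "\n".join(tc["input"] for tc in test_cases)
)
combined_output: str = "\n".join(tc["output"] for tc in test_cases)
test_cases = [{"input": combined_input, "output": combined_output}]
print(test_cases)  # [{'input': '2\n1 2\n5 7', 'output': '3\n12'}]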
@@ -7,14 +7,14 @@ import cloudscraper
 from bs4 import BeautifulSoup


-def scrape(url: str):
+def scrape(url: str) -> list[tuple[str, str]]:
     try:
         scraper = cloudscraper.create_scraper()
         response = scraper.get(url, timeout=10)
         response.raise_for_status()

         soup = BeautifulSoup(response.text, "html.parser")
-        tests = []
+        tests: list[tuple[str, str]] = []

         input_sections = soup.find_all("div", class_="input")
         output_sections = soup.find_all("div", class_="output")
@@ -24,8 +24,8 @@ def scrape(url: str):
         out_pre = out_section.find("pre")

         if inp_pre and out_pre:
-            input_lines = []
-            output_lines = []
+            input_lines: list[str] = []
+            output_lines: list[str] = []

             for line_div in inp_pre.find_all("div", class_="test-example-line"):
                 input_lines.append(line_div.get_text().strip())
@@ -60,33 +60,33 @@ def parse_problem_url(contest_id: str, problem_letter: str) -> str:
     )


-def scrape_contest_problems(contest_id: str):
+def scrape_contest_problems(contest_id: str) -> list[dict[str, str]]:
     try:
-        contest_url = f"https://codeforces.com/contest/{contest_id}"
+        contest_url: str = f"https://codeforces.com/contest/{contest_id}"
         scraper = cloudscraper.create_scraper()
         response = scraper.get(contest_url, timeout=10)
         response.raise_for_status()

         soup = BeautifulSoup(response.text, "html.parser")
-        problems = []
+        problems: list[dict[str, str]] = []

         problem_links = soup.find_all(
             "a", href=lambda x: x and f"/contest/{contest_id}/problem/" in x
         )

         for link in problem_links:
-            href = link.get("href", "")
+            href: str = link.get("href", "")
             if f"/contest/{contest_id}/problem/" in href:
-                problem_letter = href.split("/")[-1].lower()
-                problem_name = link.get_text(strip=True)
+                problem_letter: str = href.split("/")[-1].lower()
+                problem_name: str = link.get_text(strip=True)

                 if problem_letter and problem_name and len(problem_letter) == 1:
                     problems.append({"id": problem_letter, "name": problem_name})

         problems.sort(key=lambda x: x["id"])

-        seen = set()
-        unique_problems = []
+        seen: set[str] = set()
+        unique_problems: list[dict[str, str]] = []
         for p in problems:
             if p["id"] not in seen:
                 seen.add(p["id"])
@@ -99,21 +99,21 @@ def scrape_contest_problems(contest_id: str):
     return []


-def scrape_sample_tests(url: str):
+def scrape_sample_tests(url: str) -> list[tuple[str, str]]:
     print(f"Scraping: {url}", file=sys.stderr)
     return scrape(url)


-def main():
+def main() -> None:
     if len(sys.argv) < 2:
-        result = {
+        result: dict[str, str | bool] = {
             "success": False,
             "error": "Usage: codeforces.py metadata <contest_id> OR codeforces.py tests <contest_id> <problem_letter>",
         }
         print(json.dumps(result))
         sys.exit(1)

-    mode = sys.argv[1]
+    mode: str = sys.argv[1]

     if mode == "metadata":
         if len(sys.argv) != 3:
@@ -124,8 +124,8 @@ def main():
             print(json.dumps(result))
             sys.exit(1)

-        contest_id = sys.argv[2]
-        problems = scrape_contest_problems(contest_id)
+        contest_id: str = sys.argv[2]
+        problems: list[dict[str, str]] = scrape_contest_problems(contest_id)

         if not problems:
             result = {
@@ -151,12 +151,12 @@ def main():
             print(json.dumps(result))
             sys.exit(1)

-        contest_id = sys.argv[2]
-        problem_letter = sys.argv[3]
-        problem_id = contest_id + problem_letter.lower()
+        contest_id: str = sys.argv[2]
+        problem_letter: str = sys.argv[3]
+        problem_id: str = contest_id + problem_letter.lower()

-        url = parse_problem_url(contest_id, problem_letter)
-        tests = scrape_sample_tests(url)
+        url: str = parse_problem_url(contest_id, problem_letter)
+        tests: list[tuple[str, str]] = scrape_sample_tests(url)

         if not tests:
             result = {
@@ -168,7 +168,7 @@ def main():
             print(json.dumps(result))
             sys.exit(1)

-        test_cases = []
+        test_cases: list[dict[str, str]] = []
         for input_data, output_data in tests:
             test_cases.append({"input": input_data, "output": output_data})

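That wraps codeforces.py; the cses.py hunks follow. One small pattern worth isolating from the file above is the seen: set[str] / unique_problems pair, which de-duplicates problem links after sorting. The hunk cuts off right after seen.add(p["id"]), so the trailing append below is an assumption about the line just outside the hunk:

problems: list[dict[str, str]] = [
    {"id": "a", "name": "Problem A"},
    {"id": "a", "name": "Problem A"},  # contest pages can list a problem twice
    {"id": "b", "name": "Problem B"},
]
problems.sort(key=lambda x: x["id"])

# Keep the first occurrence of each id, preserving sorted order.
seen: set[str] = set()
unique_problems: list[dict[str, str]] = []
for p in problems:
    if p["id"] not in seen:
        seen.add(p["id"])
        unique_problems.append(p)  # assumed continuation beyond the hunk

print(unique_problems)  # [{'id': 'a', ...}, {'id': 'b', ...}]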
@@ -15,10 +15,10 @@ def parse_problem_url(problem_input: str) -> str | None:
     return None


-def scrape_all_problems():
+def scrape_all_problems() -> dict[str, list[dict[str, str]]]:
     try:
-        problemset_url = "https://cses.fi/problemset/"
-        headers = {
+        problemset_url: str = "https://cses.fi/problemset/"
+        headers: dict[str, str] = {
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
         }

@@ -26,32 +26,29 @@ def scrape_all_problems():
         response.raise_for_status()

         soup = BeautifulSoup(response.text, "html.parser")
-        all_categories = {}
+        all_categories: dict[str, list[dict[str, str]]] = {}

-        # Find all problem links first
         problem_links = soup.find_all(
             "a", href=lambda x: x and "/problemset/task/" in x
         )
         print(f"Found {len(problem_links)} problem links", file=sys.stderr)

-        # Group by categories - look for h1 elements that precede problem lists
-        current_category = None
+        current_category: str | None = None
         for element in soup.find_all(["h1", "a"]):
             if element.name == "h1":
                 current_category = element.get_text().strip()
                 if current_category not in all_categories:
                     all_categories[current_category] = []
             elif element.name == "a" and "/problemset/task/" in element.get("href", ""):
-                href = element.get("href", "")
-                problem_id = href.split("/")[-1]
-                problem_name = element.get_text(strip=True)
+                href: str = element.get("href", "")
+                problem_id: str = href.split("/")[-1]
+                problem_name: str = element.get_text(strip=True)

                 if problem_id.isdigit() and problem_name and current_category:
                     all_categories[current_category].append(
                         {"id": problem_id, "name": problem_name}
                     )

-        # Sort problems in each category
         for category in all_categories:
             all_categories[category].sort(key=lambda x: int(x["id"]))

@@ -65,7 +62,7 @@ def scrape_all_problems():

 def scrape(url: str) -> list[tuple[str, str]]:
     try:
-        headers = {
+        headers: dict[str, str] = {
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
         }

@@ -74,13 +71,13 @@ def scrape(url: str) -> list[tuple[str, str]]:

         soup = BeautifulSoup(response.text, "html.parser")

-        tests = []
+        tests: list[tuple[str, str]] = []
         example_header = soup.find("h1", string="Example")

         if example_header:
             current = example_header.find_next_sibling()
-            input_text = None
-            output_text = None
+            input_text: str | None = None
+            output_text: str | None = None

             while current:
                 if current.name == "p" and "Input:" in current.get_text():
@@ -104,16 +101,16 @@ def scrape(url: str) -> list[tuple[str, str]]:
     return []


-def main():
+def main() -> None:
     if len(sys.argv) < 2:
-        result = {
+        result: dict[str, str | bool] = {
             "success": False,
             "error": "Usage: cses.py metadata OR cses.py tests <problem_id_or_url>",
         }
         print(json.dumps(result))
         sys.exit(1)

-    mode = sys.argv[1]
+    mode: str = sys.argv[1]

     if mode == "metadata":
         if len(sys.argv) != 2:
@@ -124,7 +121,7 @@ def main():
             print(json.dumps(result))
             sys.exit(1)

-        all_categories = scrape_all_problems()
+        all_categories: dict[str, list[dict[str, str]]] = scrape_all_problems()

         if not all_categories:
             result = {
@@ -149,8 +146,8 @@ def main():
             print(json.dumps(result))
             sys.exit(1)

-        problem_input = sys.argv[2]
-        url = parse_problem_url(problem_input)
+        problem_input: str = sys.argv[2]
+        url: str | None = parse_problem_url(problem_input)

         if not url:
             result = {
@@ -161,9 +158,9 @@ def main():
             print(json.dumps(result))
             sys.exit(1)

-        tests = scrape(url)
+        tests: list[tuple[str, str]] = scrape(url)

-        problem_id = (
+        problem_id: str = (
             problem_input if problem_input.isdigit() else problem_input.split("/")[-1]
         )

@@ -177,7 +174,7 @@ def main():
             print(json.dumps(result))
             sys.exit(1)

-        test_cases = []
+        test_cases: list[dict[str, str]] = []
         for input_data, output_data in tests:
             test_cases.append({"input": input_data, "output": output_data})

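Finally, the only str | None flow in the set: cses.py's parse_problem_url can return None, and the if not url: guard above is what lets a checker narrow url to plain str before scrape(url) is called. A minimal sketch of that narrowing, with a hypothetical trimmed-down helper (the real one is not fully shown in this diff):

def parse_problem_url(problem_input: str) -> str | None:
    # Hypothetical simplification of the cses.py helper: accept a bare
    # numeric id or a full task URL, reject anything else.
    if problem_input.isdigit():
        return f"https://cses.fi/problemset/task/{problem_input}"
    if "cses.fi/problemset/task/" in problem_input:
        return problem_input
    return None

url: str | None = parse_problem_url("1068")  # example id
if not url:
    raise SystemExit("invalid problem id")
# Past the guard, a type checker narrows `url` from str | None to str.
print(url)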