cleanup

commit 7711788d3d (parent 52c50cde79)

1 changed file with 43 additions and 87 deletions

scrapers/cses.py | 130 (+43 −87)
@@ -46,7 +46,6 @@ def snake_to_title(name: str) -> str:
         "vs",
         "via",
     }
 
     words: list[str] = name.split("_")
     n = len(words)
 
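A note on the surrounding function, since only fragments of it appear in this hunk: snake_to_title maps a category id such as "introductory_problems" to the page heading "Introductory Problems", keeping minor words like "vs" and "via" lowercase when they fall in the middle of a title. A hypothetical reconstruction consistent with the visible lines (the exact minor-word set beyond "vs"/"via" is an assumption):

    def snake_to_title(name: str) -> str:
        # Hypothetical sketch: only the set's tail and the two lines
        # below it are visible in this diff.
        minor = {"a", "an", "and", "of", "the", "to", "vs", "via"}
        words: list[str] = name.split("_")
        n = len(words)
        return " ".join(
            w if (0 < i < n - 1 and w in minor) else w.capitalize()
            for i, w in enumerate(words)
        )

    # snake_to_title("introductory_problems") -> "Introductory Problems"
    # snake_to_title("graph_algorithms")      -> "Graph Algorithms"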
@@ -85,21 +84,16 @@ def make_request(url: str, headers: dict) -> requests.Response:
 
 def scrape_category_problems(category_id: str) -> list[ProblemSummary]:
     category_name = snake_to_title(category_id)
 
     try:
         problemset_url = "https://cses.fi/problemset/"
         headers = {
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
         }
 
         response = make_request(problemset_url, headers)
 
         soup = BeautifulSoup(response.text, "html.parser")
 
         current_category = None
         problems = []
         target_found = False
 
         for element in soup.find_all(["h1", "h2", "ul"]):
             if not isinstance(element, Tag):
                 continue
 
@@ -107,14 +101,11 @@ def scrape_category_problems(category_id: str) -> list[ProblemSummary]:
                 text = element.get_text(strip=True)
                 if not text or text.startswith("CSES") or text == "CSES Problem Set":
                     continue
 
                 if target_found and current_category != text:
                     break
 
                 current_category = text
                 if text.lower() == category_name.lower():
                     target_found = True
 
             elif element.name == "ul" and current_category and target_found:
                 problem_links = element.find_all(
                     "a", href=lambda x: x and "/problemset/task/" in x
 
@@ -123,17 +114,12 @@ def scrape_category_problems(category_id: str) -> list[ProblemSummary]:
                     href = link.get("href", "")
                     if not href:
                         continue
 
                     problem_id = href.split("/")[-1]
                     problem_name = link.get_text(strip=True)
 
                     if not problem_id.isdigit() or not problem_name:
                         continue
 
                     problems.append(ProblemSummary(id=problem_id, name=problem_name))
 
         return problems
 
     except Exception as e:
         print(f"Failed to scrape CSES category {category_id}: {e}", file=sys.stderr)
         return []
 
@@ -141,7 +127,7 @@ def scrape_category_problems(category_id: str) -> list[ProblemSummary]:
 
 def parse_problem_url(problem_input: str) -> str | None:
     if problem_input.startswith("https://cses.fi/problemset/task/"):
-        return problem_input
+        return problem_input.rstrip("/")
     elif problem_input.isdigit():
         return f"https://cses.fi/problemset/task/{problem_input}"
     return None
 
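The new rstrip("/") matters because later code extracts the numeric id from the end of the URL, and a trailing slash would otherwise leave an empty last segment. A quick standalone check against the function as shown above:

    # Assumes parse_problem_url as defined in the hunk above.
    assert parse_problem_url("1068") == "https://cses.fi/problemset/task/1068"
    assert (
        parse_problem_url("https://cses.fi/problemset/task/1068/")
        == "https://cses.fi/problemset/task/1068"
    )
    assert parse_problem_url("not-a-task") is None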
@@ -150,33 +136,26 @@ def parse_problem_url(problem_input: str) -> str | None:
 
 def extract_problem_limits(soup: BeautifulSoup) -> tuple[int, float]:
     timeout_ms = None
     memory_mb = None
 
     constraints_ul = soup.find("ul", class_="task-constraints")
     if not constraints_ul or not isinstance(constraints_ul, Tag):
         raise ValueError("Could not find task-constraints section")
 
     for li in constraints_ul.find_all("li"):
         text = li.get_text()
 
         if "Time limit:" in text:
             match = re.search(r"Time limit:\s*(\d+(?:\.\d+)?)\s*s", text)
             if match:
                 seconds = float(match.group(1))
                 timeout_ms = int(seconds * 1000)
 
         if "Memory limit:" in text:
             match = re.search(r"Memory limit:\s*(\d+)\s*MB", text)
             if match:
                 memory_mb = float(match.group(1))
 
     if timeout_ms is None:
         raise ValueError("Could not find valid timeout in task-constraints section")
 
     if memory_mb is None:
         raise ValueError(
             "Could not find valid memory limit in task-constraints section"
         )
 
     return timeout_ms, memory_mb
 
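The time-limit pattern accepts fractional seconds ("1.00 s") while the memory pattern expects a whole number of MB, matching how CSES renders its constraints list. A standalone check of the two patterns used above:

    import re

    time_re = r"Time limit:\s*(\d+(?:\.\d+)?)\s*s"
    mem_re = r"Memory limit:\s*(\d+)\s*MB"

    m = re.search(time_re, "Time limit: 1.00 s")
    assert m and int(float(m.group(1)) * 1000) == 1000
    m = re.search(mem_re, "Memory limit: 512 MB")
    assert m and float(m.group(1)) == 512.0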
@@ -186,27 +165,20 @@ def scrape_categories() -> list[ContestSummary]:
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
         }
         response = make_request("https://cses.fi/problemset/", headers)
 
         soup = BeautifulSoup(response.text, "html.parser")
         categories = []
 
         for h2 in soup.find_all("h2"):
             category_name = h2.get_text().strip()
             if category_name == "General":
                 continue
 
             category_id = normalize_category_name(category_name)
 
             display_name = category_name
 
             categories.append(
                 ContestSummary(
                     id=category_id, name=category_name, display_name=display_name
                 )
             )
 
         return categories
 
     except Exception as e:
         print(f"Failed to scrape CSES categories: {e}", file=sys.stderr)
         return []
 
@@ -222,20 +194,15 @@ def process_problem_element(
         if category_name not in all_categories:
             all_categories[category_name] = []
         return category_name
 
     if element.name != "a" or "/problemset/task/" not in element.get("href", ""):
         return current_category
 
     href = element.get("href", "")
     if not href:
         return current_category
 
     problem_id = href.split("/")[-1]
     problem_name = element.get_text(strip=True)
 
     if not (problem_id.isdigit() and problem_name and current_category):
         return current_category
 
     problem = ProblemSummary(id=problem_id, name=problem_name)
     all_categories[current_category].append(problem)
     return current_category
 
@@ -247,13 +214,10 @@ def scrape_all_problems() -> dict[str, list[ProblemSummary]]:
         headers = {
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
         }
 
         response = requests.get(problemset_url, headers=headers, timeout=10)
         response.raise_for_status()
 
         soup = BeautifulSoup(response.text, "html.parser")
         all_categories: dict[str, list[ProblemSummary]] = {}
 
         current_category = None
         for element in soup.find_all(["h1", "h2", "ul"]):
             if not isinstance(element, Tag):
 
@@ -265,7 +229,6 @@ def scrape_all_problems() -> dict[str, list[ProblemSummary]]:
                 if current_category not in all_categories:
                     all_categories[current_category] = []
                 print(f"Found category: {current_category}", file=sys.stderr)
 
             elif element.name == "ul" and current_category:
                 problem_links = element.find_all(
                     "a", href=lambda x: x and "/problemset/task/" in x
 
@@ -275,47 +238,61 @@ def scrape_all_problems() -> dict[str, list[ProblemSummary]]:
                     if href:
                         problem_id = href.split("/")[-1]
                         problem_name = link.get_text(strip=True)
 
                         if problem_id.isdigit() and problem_name:
                             problem = ProblemSummary(id=problem_id, name=problem_name)
                             all_categories[current_category].append(problem)
 
         print(
             f"Found {len(all_categories)} categories with {sum(len(probs) for probs in all_categories.values())} problems",
             file=sys.stderr,
         )
         return all_categories
 
     except Exception as e:
         print(f"Failed to scrape CSES problems: {e}", file=sys.stderr)
         return {}
 
 
-def extract_example_test_case(soup) -> tuple[str, str] | None:
-    example_header = soup.find("h1", string="Example")
-    if not example_header:
-        return None
+def _collect_section_after(header: Tag) -> list[Tag]:
+    out: list[Tag] = []
+    cur = header.find_next_sibling()
+    while cur and not (isinstance(cur, Tag) and cur.name in ("h1", "h2", "h3")):
+        if isinstance(cur, Tag):
+            out.append(cur)
+        cur = cur.find_next_sibling()
+    return out
 
-    current = example_header.find_next_sibling()
-    input_text = None
-    output_text = None
 
-    while current:
-        if current.name == "p" and "Input:" in current.get_text():
-            input_pre = current.find_next_sibling("pre")
-            if input_pre:
-                input_text = input_pre.get_text().strip()
-        elif current.name == "p" and "Output:" in current.get_text():
-            output_pre = current.find_next_sibling("pre")
-            if output_pre:
-                output_text = output_pre.get_text().strip()
-                break
-        current = current.find_next_sibling()
+def extract_example_test_cases(soup: BeautifulSoup) -> list[tuple[str, str]]:
+    example_headers = soup.find_all(
+        lambda t: isinstance(t, Tag)
+        and t.name in ("h1", "h2", "h3")
+        and t.get_text(strip=True).lower().startswith("example")
+    )
+    cases: list[tuple[str, str]] = []
+    for hdr in example_headers:
+        section = _collect_section_after(hdr)
 
-    if not input_text or not output_text:
-        return None
+        def find_labeled(label: str) -> str | None:
+            for node in section:
+                if not isinstance(node, Tag):
+                    continue
+                if node.name in ("p", "h4", "h5", "h6"):
+                    txt = node.get_text(strip=True).lower().rstrip(":")
+                    if txt == label:
+                        pre = node.find_next_sibling("pre")
+                        if pre:
+                            return pre.get_text().strip()
+            return None
 
-    return (input_text, output_text)
+        inp = find_labeled("input")
+        out = find_labeled("output")
+        if not inp or not out:
+            pres = [n for n in section if isinstance(n, Tag) and n.name == "pre"]
+            if len(pres) >= 2:
+                inp = inp or pres[0].get_text().strip()
+                out = out or pres[1].get_text().strip()
+        if inp and out:
+            cases.append((inp, out))
+    return cases
 
 
 def scrape(url: str) -> list[TestCase]:
 
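The rewrite walks every "Example" heading rather than only the first h1, and falls back to positional pre pairs when the Input:/Output: labels are missing. A small illustration (the HTML below is illustrative, not a real CSES page):

    from bs4 import BeautifulSoup

    html = """
    <h1>Example</h1>
    <p>Input:</p><pre>5
    1 2 3 4 5</pre>
    <p>Output:</p><pre>15</pre>
    <h1>Example 2</h1>
    <pre>3
    1 1 1</pre><pre>3</pre>
    """
    soup = BeautifulSoup(html, "html.parser")
    # With the functions above, both sections are picked up: the first via
    # the Input:/Output: labels, the second via the two-<pre> fallback.
    # extract_example_test_cases(soup)
    # -> [("5\n1 2 3 4 5", "15"), ("3\n1 1 1", "3")]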
@@ -323,18 +300,10 @@ def scrape(url: str) -> list[TestCase]:
         headers = {
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
         }
 
         response = make_request(url, headers)
 
         soup = BeautifulSoup(response.text, "html.parser")
+        pairs = extract_example_test_cases(soup)
+        return [TestCase(input=inp, expected=out) for (inp, out) in pairs]
 
-        test_case = extract_example_test_case(soup)
-        if not test_case:
-            return []
-
-        input_text, output_text = test_case
-        return [TestCase(input=input_text, expected=output_text)]
 
     except Exception as e:
         print(f"Error scraping CSES: {e}", file=sys.stderr)
         return []
 
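With the extractor returning a list, scrape() now yields one TestCase per example section instead of at most one, and an empty list doubles as the "no examples found" signal. Assuming TestCase(input=..., expected=...) as used elsewhere in this file:

    pairs = [("1 2", "3"), ("4 5", "9")]
    tests = [TestCase(input=inp, expected=out) for (inp, out) in pairs]
    # len(tests) == 2; the old code would have kept only the first example.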
@@ -361,7 +330,6 @@ class CSESScraper(BaseScraper):
             return func(*args)
         except Exception as e:
             error_msg = f"{self.platform_name}: {str(e)}"
 
             if operation == "metadata":
                 return MetadataResult(success=False, error=error_msg)
             elif operation == "tests":
 
@@ -400,21 +368,18 @@ class CSESScraper(BaseScraper):
                 timeout_ms=0,
                 memory_mb=0,
             )
 
         tests = scrape(url)
+        m = re.search(r"/task/(\d+)", url)
         actual_problem_id = (
-            problem_id if problem_id.isdigit() else problem_id.split("/")[-1]
+            problem_id if problem_id.isdigit() else (m.group(1) if m else "")
         )
 
         headers = {
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
         }
         response = requests.get(url, headers=headers, timeout=10)
         response.raise_for_status()
 
         soup = BeautifulSoup(response.text, "html.parser")
         timeout_ms, memory_mb = extract_problem_limits(soup)
 
         if not tests:
             return TestsResult(
                 success=False,
 
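Pulling the id with re.search(r"/task/(\d+)", url) is sturdier than problem_id.split("/")[-1]: it still finds the number when the URL carries a trailing slash, and it degrades to an empty id instead of a wrong one when no number is present. A standalone illustration (task_id is a hypothetical helper name):

    import re

    def task_id(url: str) -> str:
        # Mirrors the id extraction in the hunk above.
        m = re.search(r"/task/(\d+)", url)
        return m.group(1) if m else ""

    assert task_id("https://cses.fi/problemset/task/1068") == "1068"
    assert task_id("https://cses.fi/problemset/task/1068/") == "1068"
    assert task_id("https://cses.fi/problemset/") == ""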
@@ -425,7 +390,6 @@ class CSESScraper(BaseScraper):
                 timeout_ms=timeout_ms,
                 memory_mb=memory_mb,
             )
 
         return TestsResult(
             success=True,
             error="",
 
@@ -453,10 +417,8 @@ def main() -> None:
         )
         print(json.dumps(asdict(result)))
         sys.exit(1)
 
     mode: str = sys.argv[1]
     scraper = CSESScraper()
 
     if mode == "metadata":
         if len(sys.argv) != 3:
             result = MetadataResult(
 
@@ -465,13 +427,11 @@ def main() -> None:
             )
             print(json.dumps(asdict(result)))
             sys.exit(1)
 
         category_id = sys.argv[2]
         result = scraper.scrape_contest_metadata(category_id)
         print(json.dumps(asdict(result)))
         if not result.success:
             sys.exit(1)
 
     elif mode == "tests":
         if len(sys.argv) != 4:
             tests_result = TestsResult(
 
@@ -485,14 +445,12 @@ def main() -> None:
             )
             print(json.dumps(asdict(tests_result)))
             sys.exit(1)
 
         category = sys.argv[2]
         problem_id = sys.argv[3]
         tests_result = scraper.scrape_problem_tests(category, problem_id)
         print(json.dumps(asdict(tests_result)))
         if not tests_result.success:
             sys.exit(1)
 
     elif mode == "contests":
         if len(sys.argv) != 2:
             contest_result = ContestListResult(
 
@@ -500,12 +458,10 @@ def main() -> None:
             )
             print(json.dumps(asdict(contest_result)))
             sys.exit(1)
 
         contest_result = scraper.scrape_contest_list()
         print(json.dumps(asdict(contest_result)))
         if not contest_result.success:
             sys.exit(1)
 
     else:
         result = MetadataResult(
             success=False,