Merge pull request #29 from barrett-ruth/fix/cache

fix caching/scraper
This commit is contained in:
Barrett Ruth 2025-09-15 03:22:36 +02:00 committed by GitHub
commit 1f066348d0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 30 additions and 15 deletions

View file

@@ -1,6 +1,6 @@
local M = {}
local cache_file = vim.fn.stdpath("data") .. "/cp-contest-cache.json"
local cache_file = vim.fn.stdpath("data") .. "/cp-nvim.json"
local cache_data = {}
local function get_expiry_date(platform)

View file

@@ -11,6 +11,11 @@ local function ensure_io_directory()
vim.fn.mkdir("io", "p")
end
--- Best-effort reachability probe: returns true when the network appears up.
-- Sends a single ICMP echo to 8.8.8.8 with a 3-unit timeout via the system
-- `ping` binary and treats exit code 0 as success.
-- NOTE(review): `-W` means seconds on GNU/Linux ping but milliseconds on
-- macOS — confirm the intended target platforms.
local function check_internet_connectivity()
  -- vim.system() raises (rather than returning a failed result) when the
  -- executable is missing; pcall keeps a ping-less machine from crashing
  -- and reports "no connection" instead.
  local ok, result = pcall(function()
    return vim.system({ "ping", "-c", "1", "-W", "3", "8.8.8.8" }, { text = true }):wait()
  end)
  return ok and result.code == 0
end
local function setup_python_env()
local plugin_path = get_plugin_path()
local venv_dir = plugin_path .. "/.venv"
@@ -50,6 +55,13 @@ function M.scrape_contest_metadata(platform, contest_id)
}
end
if not check_internet_connectivity() then
return {
success = false,
error = "No internet connection available",
}
end
if not setup_python_env() then
return {
success = false,
@@ -119,6 +131,14 @@ function M.scrape_problem(ctx)
}
end
if not check_internet_connectivity() then
return {
success = false,
problem_id = ctx.problem_name,
error = "No internet connection available",
}
end
if not setup_python_env() then
return {
success = false,

View file

@@ -27,20 +27,15 @@ def scrape(url: str) -> list[tuple[str, str]]:
input_lines: list[str] = []
output_lines: list[str] = []
for line_div in inp_pre.find_all("div", class_="test-example-line"):
input_lines.append(line_div.get_text().strip())
input_text_raw = inp_pre.get_text().strip().replace("\r", "")
input_lines = [
line.strip() for line in input_text_raw.split("\n") if line.strip()
]
output_divs = out_pre.find_all("div", class_="test-example-line")
if not output_divs:
output_text_raw = out_pre.get_text().strip().replace("\r", "")
output_lines = [
line.strip()
for line in output_text_raw.split("\n")
if line.strip()
]
else:
for line_div in output_divs:
output_lines.append(line_div.get_text().strip())
output_text_raw = out_pre.get_text().strip().replace("\r", "")
output_lines = [
line.strip() for line in output_text_raw.split("\n") if line.strip()
]
if input_lines and output_lines:
input_text = "\n".join(input_lines)
@@ -80,7 +75,7 @@ def scrape_contest_problems(contest_id: str) -> list[dict[str, str]]:
problem_letter: str = href.split("/")[-1].lower()
problem_name: str = link.get_text(strip=True)
if problem_letter and problem_name and len(problem_letter) == 1:
if problem_letter and problem_name:
problems.append({"id": problem_letter, "name": problem_name})
problems.sort(key=lambda x: x["id"])