diff --git a/.prettierrc b/.prettierrc new file mode 100644 index 0000000..6aa4462 --- /dev/null +++ b/.prettierrc @@ -0,0 +1,11 @@ +{ + "plugins": ["prettier-plugin-astro"], + "overrides": [ + { + "files": "*.astro", + "options": { + "parser": "astro", + } + } + ] +} diff --git a/package.json b/package.json index 61272c8..af8ff52 100644 --- a/package.json +++ b/package.json @@ -15,6 +15,7 @@ "remark-math": "^6.0.0" }, "devDependencies": { - "prettier": "^3.5.3" + "prettier": "^3.5.3", + "prettier-plugin-astro": "^0.14.1" } } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 3b30353..962d6ec 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -24,6 +24,9 @@ importers: prettier: specifier: ^3.5.3 version: 3.5.3 + prettier-plugin-astro: + specifier: ^0.14.1 + version: 0.14.1 packages: @@ -1208,6 +1211,10 @@ packages: resolution: {integrity: sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A==} engines: {node: ^10 || ^12 || >=14} + prettier-plugin-astro@0.14.1: + resolution: {integrity: sha512-RiBETaaP9veVstE4vUwSIcdATj6dKmXljouXc/DDNwBSPTp8FRkLGDSGFClKsAFeeg+13SB0Z1JZvbD76bigJw==} + engines: {node: ^14.15.0 || >=16.0.0} + prettier@3.5.3: resolution: {integrity: sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw==} engines: {node: '>=14'} @@ -1315,6 +1322,12 @@ packages: engines: {node: '>=18.0.0', npm: '>=8.0.0'} hasBin: true + s.color@0.0.15: + resolution: {integrity: sha512-AUNrbEUHeKY8XsYr/DYpl+qk5+aM+DChopnWOPEzn8YKzOhv4l2zH6LzZms3tOZP3wwdOyc0RmTciyi46HLIuA==} + + sass-formatter@0.7.9: + resolution: {integrity: sha512-CWZ8XiSim+fJVG0cFLStwDvft1VI7uvXdCNJYXhDvowiv+DsbD1nXLiQ4zrE5UBvj5DWZJ93cwN0NX5PMsr1Pw==} + semver@7.7.2: resolution: {integrity: sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==} engines: {node: '>=10'} @@ -1373,6 +1386,9 @@ packages: style-to-object@1.0.8: resolution: {integrity: 
sha512-xT47I/Eo0rwJmaXC4oilDGDWLohVhR6o/xAQcPQN8q6QBuZVL8qMYL85kLmST5cPjAorwvqIA4qXTRQoYHaL6g==} + suf-log@2.5.3: + resolution: {integrity: sha512-KvC8OPjzdNOe+xQ4XWJV2whQA0aM1kGVczMQ8+dStAO6KfEB140JEVQ9dE76ONZ0/Ylf67ni4tILPJB41U0eow==} + tiny-inflate@1.0.3: resolution: {integrity: sha512-pkY1fj1cKHb2seWDy0B16HeWyczlJA9/WW3u3c4z/NiWDsO3DOU5D7nhTLE9CF0yXv/QZFY7sEJmj24dK+Rrqw==} @@ -3220,6 +3236,12 @@ snapshots: picocolors: 1.1.1 source-map-js: 1.2.1 + prettier-plugin-astro@0.14.1: + dependencies: + '@astrojs/compiler': 2.12.0 + prettier: 3.5.3 + sass-formatter: 0.7.9 + prettier@3.5.3: {} prismjs@1.30.0: {} @@ -3430,6 +3452,12 @@ snapshots: '@rollup/rollup-win32-x64-msvc': 4.41.0 fsevents: 2.3.3 + s.color@0.0.15: {} + + sass-formatter@0.7.9: + dependencies: + suf-log: 2.5.3 + semver@7.7.2: {} sharp@0.33.5: @@ -3518,6 +3546,10 @@ snapshots: dependencies: inline-style-parser: 0.2.4 + suf-log@2.5.3: + dependencies: + s.color: 0.0.15 + tiny-inflate@1.0.3: {} tinyexec@0.3.2: {} diff --git a/public/pfp.webp b/public/pfp.webp deleted file mode 100644 index 1ff7301..0000000 Binary files a/public/pfp.webp and /dev/null differ diff --git a/public/posts/from-github-pages-to-aws/website-design.webp b/public/posts/from-github-pages-to-aws/website-design.webp new file mode 100644 index 0000000..e69de29 diff --git a/public/posts/website-design.webp b/public/posts/website-design.webp deleted file mode 100644 index 374fdcb..0000000 Binary files a/public/posts/website-design.webp and /dev/null differ diff --git a/public/scripts/dateHeadings.js b/public/scripts/dateHeadings.js new file mode 100644 index 0000000..55e9a53 --- /dev/null +++ b/public/scripts/dateHeadings.js @@ -0,0 +1,18 @@ +document.addEventListener("DOMContentLoaded", function () { + const katexParagraphs = document.querySelectorAll("article p"); + + katexParagraphs.forEach((p) => { + const katexSpan = p.querySelector(".katex"); + if (!katexSpan) return; + + const clone = p.cloneNode(true); + 
clone.querySelector(".katex").remove(); + const textWithoutKatex = clone.textContent.trim(); + + if (textWithoutKatex === "") { + p.style.display = "flex"; + p.style.justifyContent = "center"; + p.style.margin = "1.5rem 0"; + } + }); +}); diff --git a/public/styles/common.css b/public/styles/common.css index fd7a3a9..20ac7af 100644 --- a/public/styles/common.css +++ b/public/styles/common.css @@ -54,3 +54,9 @@ li { .terminal-container { font-family: "Courier New", monospace; } + +pre { + font-feature-settings: + "liga" 0, + "calt" 0; +} diff --git a/public/styles/mdx.css b/public/styles/mdx.css deleted file mode 100644 index e8c62a0..0000000 --- a/public/styles/mdx.css +++ /dev/null @@ -1,63 +0,0 @@ -article h1, article h2, article h3, -.post-article h1, .post-article h2, .post-article h3 { - font-weight: normal; - position: relative; -} - -article h1, .post-article h1 { - padding-left: 1.5em; -} - -article h2, .post-article h2 { - padding-left: 2em; -} - -article h3, .post-article h3 { - padding-left: 2.5em; -} - -article h1::before, .post-article h1::before { - content: "#"; -} - -article h2::before, .post-article h2::before { - content: "##"; -} - -article h3::before, .post-article h3::before { - content: "###"; -} - -article h1::before, article h2::before, article h3::before, -.post-article h1::before, .post-article h2::before, .post-article h3::before { - position: absolute; - left: 0; - color: var(--topic-color, #000); - margin-right: 0.5em; -} - -article img { - display: block; - margin: 2rem auto; - max-width: 100%; - height: auto; -} - -article pre { - padding: 1rem; - overflow-x: auto; -} - -/* Heading with date styling */ -article h2.heading-with-date, article h3.heading-with-date { - display: flex; - justify-content: space-between; - align-items: center; -} - -article h2.heading-with-date .date, article h3.heading-with-date .date { - font-size: 0.8em; - color: #000; - font-weight: normal; - margin-left: 1rem; -} diff --git a/public/styles/post.css 
b/public/styles/posts.css similarity index 58% rename from public/styles/post.css rename to public/styles/posts.css index 6cd985c..80dcd42 100644 --- a/public/styles/post.css +++ b/public/styles/posts.css @@ -39,17 +39,6 @@ li { width: 80%; } -.post-meta { - margin-left: 100px; - font-size: 1.3em; -} - -h2 .post-meta { - float: right; - margin-left: 20px; - font-size: 1em; -} - .post-title { font-weight: normal; font-size: 2.2em; @@ -59,33 +48,10 @@ h2 .post-meta { display: inline-block; } -:not(pre) > code { - font-family: "Courier New", Courier, monospace; - padding: 2px 4px; - margin: 0 5px; - font-size: 0.95em; - white-space: nowrap; - border: 1px solid #e1e1e1; -} - -code, -pre { - border-radius: 4px; - background: #f4f4f4 !important; -} - -pre { - border: 1px solid #e1e1e1; -} - -pre * { - background: #f4f4f4 !important; -} - .post-title::before { content: ""; position: absolute; - background-color: var(--topic-color); + background-color: var(--topic-color, #000); height: 30px; width: 2px; bottom: -10px; @@ -95,34 +61,110 @@ pre * { .post-title::after { content: ""; position: absolute; - background-color: var(--topic-color); + background-color: var(--topic-color, #000); width: 200px; height: 2px; bottom: -10px; left: -20px; } +.post-meta { + font-size: 1.3em; + color: #000; + margin-left: 100px; +} + .post-article { font-size: 1.5em; line-height: 1.5em; padding-bottom: 50px; } -.post-article h2 { - font-weight: normal; -} - -.post-article h3 { - font-weight: normal; -} - .post-article a { text-decoration: underline; } -.language-python, -.language-py, -.language-cc, -.language-cpp { +article h1, +article h2, +article h3, +.post-article h1, +.post-article h2, +.post-article h3 { + font-weight: normal; + position: relative; +} + +article h1, +.post-article h1 { + padding-left: 1.5em; +} + +article h2, +.post-article h2 { + padding-left: 2em; +} + +article h3, +.post-article h3 { + padding-left: 2.5em; +} + +article h1::before, +.post-article h1::before { + 
content: "#"; +} + +article h2::before, +.post-article h2::before { + content: "##"; +} + +article h3::before, +.post-article h3::before { + content: "###"; +} + +article h1::before, +article h2::before, +article h3::before, +.post-article h1::before, +.post-article h2::before, +.post-article h3::before { + position: absolute; + left: 0; + color: var(--topic-color, #000); + margin-right: 0.5em; +} + +article img { + display: block; + margin: 2rem auto; + max-width: 100%; + height: auto; +} + +article pre { + padding: 1rem; + overflow-x: auto; + border-radius: 4px; + background: #f4f4f4 !important; + border: 1px solid #e1e1e1; +} + +pre * { + background: #f4f4f4 !important; +} + +:not(pre) > code { + font-family: "Courier New", Courier, monospace; + padding: 4px; + margin: 0 5px; + white-space: nowrap; + border: 1px solid #e1e1e1; + border-radius: 4px; + background: #f4f4f4 !important; +} + +.astro-code { font-size: 0.8em !important; } diff --git a/src/components/Footer.astro b/src/components/Footer.astro index 28e758f..9654327 100644 --- a/src/components/Footer.astro +++ b/src/components/Footer.astro @@ -1,11 +1,14 @@ --- + --- @@ -28,4 +31,4 @@ margin-left: 25px; text-decoration: none; } - \ No newline at end of file + diff --git a/src/components/Header.astro b/src/components/Header.astro index 51a9574..915d5ea 100644 --- a/src/components/Header.astro +++ b/src/components/Header.astro @@ -5,7 +5,7 @@ const isHome = path === "/" || path === "/index.html"; // Determine topic from path function getTopic() { const pathname = path.split("/"); - + if (pathname.length === 2 && pathname[1].endsWith(".html")) { return "/" + pathname[1].replace(".html", ""); } else if (pathname.length >= 3) { @@ -47,8 +47,9 @@ const promptText = topic ? 
`barrett@ruth:~$ ${topic}` : "barrett@ruth:~$"; clearing = false; return; } - - const topicLength = terminalPrompt.innerHTML.length - TERMINAL_PROMPT.length; + + const topicLength = + terminalPrompt.innerHTML.length - TERMINAL_PROMPT.length; let i = 0; function removeChar() { @@ -69,19 +70,19 @@ const promptText = topic ? `barrett@ruth:~$ ${topic}` : "barrett@ruth:~$"; e.preventDefault(); clearPrompt(500, () => (window.location.href = "/")); } - - document.addEventListener('DOMContentLoaded', () => { + + document.addEventListener("DOMContentLoaded", () => { window.TERMINAL_PROMPT = TERMINAL_PROMPT; window.clearPrompt = clearPrompt; window.goHome = goHome; - + const homeLink = document.querySelector('header a[href="/"]'); if (homeLink) { const path = window.location.pathname; const isHome = path === "/" || path === "/index.html"; - + if (isHome) { - homeLink.addEventListener('click', (e) => { + homeLink.addEventListener("click", (e) => { e.preventDefault(); const topics = document.querySelectorAll(".topic a"); topics.forEach((topic) => { @@ -92,7 +93,7 @@ const promptText = topic ? `barrett@ruth:~$ ${topic}` : "barrett@ruth:~$"; clearPrompt(500); }); } else { - homeLink.addEventListener('click', goHome); + homeLink.addEventListener("click", goHome); } } }); diff --git a/src/content/posts/algorithms/competitive-programming-log.mdx b/src/content/posts/algorithms/competitive-programming-log.mdx index bb57824..f3ff8f1 100644 --- a/src/content/posts/algorithms/competitive-programming-log.mdx +++ b/src/content/posts/algorithms/competitive-programming-log.mdx @@ -1,6 +1,6 @@ --- title: "competitive programming log" -date: "2025-05-14" +date: "14/05/2025" useKatex: true --- @@ -9,7 +9,7 @@ useKatex: true First contest in a while. Implementation lacked heavily but solving harder problems made easier problems easier. - A: rust immediately showed with the brute force. Since $n\cdot m\leq 25$, at most $\lceil\log_2(25)\rceil=5$ concatenations need to be made. 
**Slow down and consider constrains**. -- B: Similarly, mathematical insight and just *playing with parameters*/quantifying the effects of operations is important. **Test your conjectures, they may be right/helpful** (ie. "I must maintain the shortest length thread"). +- B: Similarly, mathematical insight and just _playing with parameters_/quantifying the effects of operations is important. **Test your conjectures, they may be right/helpful** (ie. "I must maintain the shortest length thread"). - C: implementation weak. Simplify, step back, simplify, continuously. I stopped considering altering the grid and used a pair of coordinates but deriving the others inline, _if you trust your mathematics_, is way simpler. - D: formalize your answer better. Understand prime factorization more. Improve/memorize asymptotic bounds of factoring and prime counting. Don't overcomplicate the problem—here, I erroneously thought it was asking for minimum operations, not possibility. In reality, all this problem asks is: "Are the total number of each factor greater than one divisible by $n$?" - E: dp rust. Simplify your thought process—look back into SRTBOT (i.e. define the subproblem). If the subproblems depend rightward, iterate right to left. That simple. @@ -61,7 +61,7 @@ From now on, prioritize actually _learning_ and problem-solving in a pressurized ~1300 performance. Consistently finishing E, getting F occasionally. -- B: took me way too long because I just wanted to code. Wasted 5 minutes when I know better—patience and a formula got me there in 30 seconds. +- B: took me way too long because I just wanted to code. Wasted 5 minutes when I know better—patience and a formula got me there in 30 seconds. > Impatience is holding me back. Lack of discipline and focus is holding me back. It doesn't just harm my ranking problem-solving ability but it _prevents me from improving_. 
@@ -69,7 +69,7 @@ From now on, prioritize actually _learning_ and problem-solving in a pressurized - D: misunderstood the problem statement twice. Polya and Zeitz both advise to fully understand exactly what the problem statement is asking, whether through direct analysis or examples. Then, I messed up the brute force. However, I did notice that raw recursion would not TLE based on the fact that each number can be divided in only a few ways. - E: I knew a number didn't have that many divisors but the implementation took me too long. **Pay closer attention to the constraints and just solve the problem.**. Spend a bit more time on implementation, _even if you know a way that works_. Here, I knew an approach to code but it was easily error-prone. Immediately came up with the idea but was unable to express it in code. This means I did not fully understand the problem, namely with some core mistakes:1. The tree it not necessarily binary - C's cannot have children, so tracking depth/available nodes to fill per level is crucial - Coming up with the expression $a=c+1$ (and ensuring the input conforms to that) is a lot easier than cusotm coding logic to ensure levels are filled out properly. The core problem is that I lack the mathematical prowess to be certain of what exactly I need to check after I make that assertion and why. In this case, it means that the number of C's is appropriate, _so I never even need to check them_—I just need to make sure that the rest of the solution is valid. + Coming up with the expression $a=c+1$ (and ensuring the input conforms to that) is a lot easier than custom coding logic to ensure levels are filled out properly. The core problem is that I lack the mathematical prowess to be certain of what exactly I need to check after I make that assertion and why. In this case, it means that the number of C's is appropriate, _so I never even need to check them_—I just need to make sure that the rest of the solution is valid. ## [1017 (div. 
4)](https://codeforces.com/contest/2094) 14/04/2025 @@ -77,8 +77,8 @@ Decent contest. Lost focus near the end and was not paying attention at the star 1. D: submitted what I knew to be incorrect. Can't account for laziness. 2. E: long long overflow. Laziness. - F: lost focus here and did not prove the correctness. Was confused by the grid—simply breaking down and _experimenting_ in terms of simple patterns would help isolate that a mutation after $m%K==0$ is key. Then, a subsequent rigorous proof of the modulo shift would help.4. G: gave up after a few insights and did not persevere to find the simple mathematical equation for updating the score on reversal although I got the deque intution. **Sometimes, there's nothing to learn besides improving your discipline.** - My math intuition needs to improve. I see something hard (i.e. dividing out a number repeatedly) then think "this is impossible" rather than "this seems hard, but is it feasible? Is it computationally practical?" In this case, I know the solution rests on the fact of only a logarithmic (?) amount of numbers can end up dividing $a$i$$. Time to upsolve later. + F: lost focus here and did not prove the correctness. Was confused by the grid—simply breaking down and _experimenting_ in terms of simple patterns would help isolate that a mutation after $m%K==0$ is key. Then, a subsequent rigorous proof of the modulo shift would help.4. G: gave up after a few insights and did not persevere to find the simple mathematical equation for updating the score on reversal although I got the deque intuition. **Sometimes, there's nothing to learn besides improving your discipline.** + My math intuition needs to improve. I see something hard (i.e. dividing out a number repeatedly) then think "this is impossible" rather than "this seems hard, but is it feasible? Is it computationally practical?" In this case, I know the solution rests on the fact of only a logarithmic (?) amount of numbers can end up dividing $a_i$. 
Time to upsolve later. ## [799 (div. 4)](https://codeforces.com/contest/1692) 10/04/2025 @@ -145,7 +145,7 @@ Div. 4 to practice implemenation skills + mathematical observations thanks to [P - B: typo, costing a few minutes. Go slower. Declare variables. Think consistently through approach. - C: paused an found a general implementation as Zeitz advises (starting with a general, non-mathematical solution: "The solution is the earliest to get either both 1s at once or each over two strings"). **I still rushed**, incorrectly computing the result as `min(first, second)` instead of `first + second`. -- D: I looked up the recurrence relation $T(n)=T(n/3)+T(2n/3)$ to ensure it was sub-linear time. +- D: I looked up the recurrence relation $T(n)=T(n/3)+T(2n/3)$ to ensure it was sub-linear time. Gain the mathematical skills to both analyze and derive (i.e. if you forget) recurrence relations. @@ -178,10 +178,10 @@ I also just plug in $ceil$ and $floor$ until I find the right answer (I'm not ly _Everything I did here was wrong and this problem showed an embarrassingly fundamental flaw in my practice strategy._ - Namely, I should divide up practice time into: +Namely, I should divide up practice time into: - 1. Contests, emphasizing speed and implementation - 2. Single problems, emphasizing specific learning objectives +1. Contests, emphasizing speed and implementation +2. Single problems, emphasizing specific learning objectives In this problem, I immediately saw the application of the lazy segment tree but decided to hold off on it, failing to find the simpler prefix-XOR solution. Therefore, I not only wasted my time, but also cemented in unrealistic practice (I would never do this in a real contest) and worsened my virtual contest performance. As for the prefix-XOR solution, focusing on just one/zero corresponding elements and **walking through small examples** (i.e. "what happens when $l=r$?") would've help me pick up the pattern. 
diff --git a/src/content/posts/algorithms/extrema-circular-buffer.mdx b/src/content/posts/algorithms/extrema-circular-buffer.mdx index 22c7184..0bc2878 100644 --- a/src/content/posts/algorithms/extrema-circular-buffer.mdx +++ b/src/content/posts/algorithms/extrema-circular-buffer.mdx @@ -1,6 +1,6 @@ --- title: "extrema circular buffer" -date: "2024-07-30" +date: "30/07/2024" useKatex: true --- diff --git a/src/content/posts/algorithms/leetcode-daily.mdx b/src/content/posts/algorithms/leetcode-daily.mdx index cf281b4..832aefe 100644 --- a/src/content/posts/algorithms/leetcode-daily.mdx +++ b/src/content/posts/algorithms/leetcode-daily.mdx @@ -1,44 +1,73 @@ --- title: "leetcode daily" -date: "2024-04-13" +date: "13/04/2024" useKatex: true --- -## [count good numbers](https://leetcode.com/problems/count-good-numbers/submissions/1605647445/?envType=daily-question&envId=2025-04-13) 04/13/2024 +# [count good numbers](https://leetcode.com/problems/count-good-numbers/submissions/1605647445/?envType=daily-question&envId=2025-04-13) 04/13/2024 -### understanding the problem +## understanding the problem -p is a combinatoric problem at heart. You have some slots for evens and some for primes, with a limited number of choices for each. Leverage the multiplication rule, which states that if you have $n$ slots with $x$ choices, you get $x^n$ possible outcomes. +This is a combinatoric problem at heart. You have some slots for evens and some for primes, with a limited number of choices for each. Leverage the multiplication rule, which states that if you have $n$ slots with $x$ choices, you get $x^n$ possible outcomes. -### doing it +## doing it -So, what's the answer? If we know which slots we have and the number of choices for them, we're done. Since this is leetcode, they don't let you think—they just give you the answer. You have 2 types of slots (even and odd indices) with 5 (${0,2,4,6,8}$) and 4 (${2,3,5,7}$) choices respectively. 
Therefore, the answer is: $5^{\text{# even slots}}\cdot4^{\text{# odd slots}}$ By counting or with small cases, we have $\lceil\frac{n}{2}\rceil$ even slots and $\lfloor\frac{n}{2}\rfloor$ odd slots. Let's submit it!And.... TLE. Checking _everything_ when you submit your code—in this case, constraint $n\leq 10^{16}$ informs us of something suspect. In the worst case, $\frac{n}{2}\approx n^{14}$. This is far too many multiplications, so we can leverage binary exponentiation instead (and probably should've been the whole time!). Don't forget the mod. +So, what's the answer? If we know which slots we have and the number of choices for them, we're done. Since this is leetcode, they don't let you think—they just give you the answer. You have 2 types of slots (even and odd indices) with 5 (${0,2,4,6,8}$) and 4 (${2,3,5,7}$) choices respectively. Therefore, the answer is: $5^{\text{# even slots}}\cdot4^{\text{# odd slots}}$ By counting or with small cases, we have $\lceil\frac{n}{2}\rceil$ even slots and $\lfloor\frac{n}{2}\rfloor$ odd slots. Let's submit it! -## [minimum number of operations to make array distinct](https://leetcode.com/problems/minimum-number-of-operations-to-make-elements-in-array-distinc) 04/09/2024 +And.... TLE. Checking _everything_ when you submit your code—in this case, constraint $n\leq 10^{16}$ informs us of something suspect. In the worst case, $\frac{n}{2}\approx n^{14}$. This is far too many multiplications, so we can leverage binary exponentiation instead (and probably should've been the whole time!). Don't forget the mod. 
-### understanding the problem +```cpp +class Solution { +public: + static constexpr long long MOD = 1e9 + 7; + long long mpow(long long a, long long b, long long mod=MOD) { + long long ans = 1; + while (b > 0) { + if (b & 1) { + ans = (ans * a) % MOD; + } + a = (a * a) % MOD; + b >>= 1; + } + return ans; + } + int countGoodNumbers(long long n) { + long long even_slots = (n + 1) / 2, odd_slots = n / 2; + return (mpow(5, even_slots) * mpow(4, odd_slots)) % MOD; + } +}; +``` + +# [minimum number of operations to make array distinct](https://leetcode.com/problems/minimum-number-of-operations-to-make-elements-in-array-distinc) 04/09/2024 + +## understanding the problem You can remove elements in groups of 3 _solely_ from the beginning of the array. Perform this operation until there are no more duplicates left, returning the number of times you had to perform the operation. -### solution: rephrase the question +## solution: rephrase the question Definitionally, you remove the _last_ duplicate. If such duplicate is at 0-indexed `i`, it belongs to the $\lceil \frac{i + 1}{3}\rceil$th chunk of 3 (i.e. operation). Find the last duplicate by leveraging a frequency map and iterating backwards through the input. 
-### asymptotic complexity +## asymptotic complexity The solution is optimal, considering the least amount of elements possible in: -Time Complexity: $O(n)$ Space Complexity: $\Theta(1)$ +- Time Complexity: $O(n)$ +- Space Complexity: $\Theta(1)$ -## [count the number of fair pairs](https://leetcode.com/problems/count-the-number-of-fair-pairs/) 09/13/2024 +# [count the number of fair pairs](https://leetcode.com/problems/count-the-number-of-fair-pairs/) 09/13/2024 -### problem statement +## problem statement -Given an array `nums` of integers and upper/lower integer bounds `upper`/`lower` respectively, return the number of unique valid index pairs such that: $i\neq j,lower\leq nums$i$+nums$j$\leq upper$ +Given an array `nums` of integers and upper/lower integer bounds `upper`/`lower` respectively, return the number of unique valid index pairs such that: -### understanding the problem +$$i\neq j,lower\leq nums[i]+nums[j]\leq upper$$ -This is another sleeper daily in which a bit of thinking in the beginning pays dividends. Intuitively, I think it makes sense to reduce the “dimensionality” of the problem. Choosing both `i` and `j` concurrently seems tricky, so let's assume we've found a valid `i`. What must be true? Well: $i\neq j,lower-nums_i\leq nums_j\leq upper-nums_i$ +## understanding the problem + +This is another sleeper daily in which a bit of thinking in the beginning pays dividends. Intuitively, I think it makes sense to reduce the “dimensionality” of the problem. Choosing both `i` and `j` concurrently seems tricky, so let's assume we've found a valid `i`. What must be true? Well: + +$$i\neq j,lower-nums[i]\leq nums[j]\leq upper-nums[i]$$ It doesn't seem like we've made much progress. If nums is a sequence of random integers, _there's truly no way to find all `j` satisfying this condition efficiently_. @@ -50,54 +79,94 @@ So, it would be nice to sort `nums` to find such `j` relatively quickly. 
However Let's consider our solution a bit more before implementing it: -Is the approach feasible? We're sorting `nums` then binary searching over it considering all `i`, which will take around $O(nlg(n))$ time. `len(nums)`$\leq10^5$, so this is fine.* How do we avoid double-counting? The logic so far makes no effort. If we consider making all pairs with indices *less than* `i` for all `i` left-to-right, we'll be considering all valid pairs with no overlap. This is a common pattern—take a moment to justify it to yourself. -*Exactly* how many elements do we count? Okay, we're considering some rightmost index `i` and we've found upper and lower index bounds `j` and `k` respectively. We can pair `nums[j]` with all elements up to an including `nums[k]` (besides `nums[j]`). There are exactly $k-j$ of these. If the indexing confuses you, draw it out and prove it to yourself.* How do we get our final answer? Accumulate all `k-j` for all `i`. +- Is the approach feasible? We're sorting `nums` then binary searching over it considering all `i`, which will take around $O(nlg(n))$ time. `len(nums)`$\leq10^5$, so this is fine. +- How do we avoid double-counting? The logic so far makes no effort. If we consider making all pairs with indices _less than_ `i` for all `i` left-to-right, we'll be considering all valid pairs with no overlap. This is a common pattern—take a moment to justify it to yourself. +- _Exactly_ how many elements do we count? Okay, we're considering some rightmost index `i` and we've found upper and lower index bounds `j` and `k` respectively. We can pair `nums[j]` with all elements up to an including `nums[k]` (besides `nums[j]`). There are exactly $k-j$ of these. If the indexing confuses you, draw it out and prove it to yourself.\* How do we get our final answer? Accumulate all `k-j` for all `i`. -### carrying out the plan +## carrying out the plan The following approach implements our logic quite elegantly and directly. 
The third and fourth arguments to the `bisect` calls specify `lo` (inclusive) and `hi` (exclusive) bounds for our search space, mirroring the criteria that we search across all indices $\lt i$. -### optimizing the approach +```python +def countFairPairs(self, nums, lower, upper): + nums.sort() + ans = 0 + + for i, num in enumerate(nums): + k = bisect_left(nums, lower - num, 0, i) + j = bisect_right(nums, upper - num, 0, i) + + ans += k - j + + return ans +``` + +## optimizing the approach If we interpret the criteria this way, the above approach is relatively efficient. To improve this approach, we'll need to reinterpret the constraints. Forget about the indexing and consider the constraint in aggregate. We want to find all $i,j$ with $x=nums$i$+nums$j$$ such that $i\neq j,lower\leq x\leq upper$. We _still_ need to reduce the “dimensionality” of the problem—there are just too many moving parts to consider at once. This seems challening. Let's simplify the problem to identify helpful ideas: pretend `lower` does not exist (and, of course, that `nums` is sorted). -We're looking for all index pairs with sum $\leq upper$. And behold: (almost) two sum in the wild. This can be accomplished with a two-pointers approach—this post is getting quite long so we'll skip over why this is the case—but the main win here is that we can solve this simplified version of our problem in $O(n)$.Are we any closer to actually solving the problem? Now, we have the count of index pairs $\leq upper$. Is this our answer? No—some may be too small, namely, with sum $\lt lower$. Let's exclude those by running our two-pointer approach with and upper bound of $lower-1$ (we want to include $lower$). Now, our count reflects the total number of index pairs with a sum in our interval bound. +We're looking for all index pairs with sum $\leq upper$. And behold: (almost) two sum in the wild. 
This can be accomplished with a two-pointers approach—this post is getting quite long so we'll skip over why this is the case—but the main win here is that we can solve this simplified version of our problem in $O(n)$. + +Are we any closer to actually solving the problem? Now, we have the count of index pairs $\leq upper$. Is this our answer? No—some may be too small, namely, with sum $\lt lower$. Let's exclude those by running our two-pointer approach with and upper bound of $lower-1$ (we want to include $lower$). Now, our count reflects the total number of index pairs with a sum in our interval bound. Note that this really is just running a prefix sum/using the “inclusion-exclusion” principle/however you want to phrase it. -### some more considerations +```python +def countFairPairs(self, nums, lower, upper): + nums.sort() + ans = 0 + + def pairs_leq(x: int) -> int: + pairs = 0 + l, r = 0, len(nums) - 1 + while l < r: + if nums[l] + nums[r] <= x: + pairs += r - l + l += 1 + else: + r -= 1 + return pairs + + return pairs_leq(upper) - pairs_leq(lower - 1) +``` + +## some more considerations The second approach is _asymptotically_ equivalent. However, it's still worth considering for two reasons: 1. If an interviewer says “assume `nums` is sorted” or “how can we do better?”—you're cooked. 2. (Much) more importantly, it's extremely valuable to be able to _reconceptualize_ a problem and look at it from different angles. Not being locked in on a solution shows perseverance, curiosity, and strong problem-solving abilities. -### asymptotic complexity +## asymptotic complexity -Time Complexity: $O(nlg(n))$ for both—$O(n)$ if `nums` is sorted with respect to the second approach.Space Complexity: $\Theta(1)$ for both. +- Time Complexity: $O(nlg(n))$ for both—$O(n)$ if `nums` is sorted with + respect to the second approach. +- Space Complexity: $\Theta(1)$ for both. 
-## [most beautiful item for each query](https://leetcode.com/problems/most-beautiful-item-for-each-query/description/) 09/12/2024 +# [most beautiful item for each query](https://leetcode.com/problems/most-beautiful-item-for-each-query/description/) 09/12/2024 -### problem statement +## problem statement Given an array `items` of $(price, beauty)$ tuples, answer each integer query of $queries$. The answer to some `query[i]` is the maximum beauty of an item with $price\leq$`items[i][0]`. -### understanding the problem +## understanding the problem Focus on one aspect of the problem at a time. To answer a query, we need to have considered: 1. Items with a non-greater price 2. The beauty of all such items -Given some query, how can we _efficiently_ identify the “last” item with an acceptable price? Leverage the most common pre-processing algorithm: sorting. Subsequently, we can binary search `items` (keyed by price, of course) to identify all considerable items in $O(lg(n))$.Great. Now we need to find the item with the largest beauty. Naïvely considering all the element is a _correct_ approach—but is it correct? Considering our binary search $O(lg(n))$ and beauty search $O(n)$ across $\Theta(n)$ queries with `len(items)<=len(queries)`$\leq10^5$, an $O(n^2lg(n))$ approach is certainly unacceptable. +Given some query, how can we _efficiently_ identify the “last” item with an acceptable price? Leverage the most common pre-processing algorithm: sorting. Subsequently, we can binary search `items` (keyed by price, of course) to identify all considerable items in $O(lg(n))$. + +Great. Now we need to find the item with the largest beauty. Naïvely considering all the element is a _correct_ approach—but is it correct? Considering our binary search $O(lg(n))$ and beauty search $O(n)$ across $\Theta(n)$ queries with `len(items)<=len(queries)`$\leq10^5$, an $O(n^2lg(n))$ approach is certainly unacceptable. Consider alternative approaches to responding to our queries. 
It is clear that answering them in-order yields no benefit (i.e. we have to consider each item all over again, per query)—could we answer them in another order to save computations? Visualizing our items from left-to-right, we're interested in both increasing beauty and prices. If we can scan our items left to right, we can certainly “accumulate” a running maximal beauty. We can leverage sorting once again to answer our queries left-to-right, then re-order them appropriately before returning a final answer. Sorting both `queries` and `items` with a linear scan will take $O(nlg(n))$ time, meeting the constraints. -### carrying out the plan +## carrying out the plan A few specifics need to be understood before coding up the approach: @@ -105,17 +174,51 @@ A few specifics need to be understood before coding up the approach: - The linear scan: accumulate a running maximal beauty, starting at index `0`. For some query `query`, we want to consider all items with price less than or equal to `query`. Therefore, loop until this condition is _violated_— the previous index will represent the last considered item. - Edge cases: it's perfectly possible the last considered item is invalid (consider a query cheaper than the cheapest item). Return `0` as specified by the problem constraints. -### asymptotic complexity +```cpp +std::vector<int> maximumBeauty(std::vector<std::vector<int>>& items, std::vector<int>& queries) { + std::sort(items.begin(), items.end()); + std::vector<std::pair<int, size_t>> sorted_queries; + sorted_queries.reserve(queries.size()); + for (size_t i = 0; i < queries.size(); ++i) { + sorted_queries.emplace_back(queries[i], i); + } + std::sort(sorted_queries.begin(), sorted_queries.end()); -Let `n=len(items)` and `m=len(queries)`. There may be more items than queries, or vice versa. Note that a “looser” upper bound can be found by analyzing the runtime in terms of $max\{n,m\}$.Time Complexity: $O(nlg(n)+mlg(m)+m)\in O(nlg(n)+mlg(m))$. 
An argument can be made that because `queries[i],items[i][{0,1}]`$\leq10^9$, radix sort can be leveraged to achieve a time complexity of $O(d \cdot (n + k + m + k))\in O(9\cdot (n + m))\in O(n+m)$.Space Complexity: $\Theta(1)$, considering that $O(m)$ space must be allocated. If `queries`/`items` cannot be modified in-place, increase the space complexity by $m$/$n$ respectively. + int beauty = items[0][1]; + size_t i = 0; + std::vector<int> ans(queries.size()); -## [shortest subarray with or at least k ii](https://leetcode.com/problems/shortest-subarray-with-or-at-least-k-ii/description/) 09/11/2024 + for (const auto [query, index] : sorted_queries) { + while (i < items.size() && items[i][0] <= query) { + beauty = std::max(beauty, items[i][1]); + ++i; + } + ans[index] = i > 0 && items[i - 1][0] <= query ? beauty : 0; + } -### problem statement + return ans; +} +``` + +## asymptotic complexity + +Let `n=len(items)` and `m=len(queries)`. There may be more items than queries, or vice versa. Note that a “looser” upper bound can be found by analyzing the runtime in terms of $max\{n,m\}$. + +- Time Complexity: $O(nlg(n)+mlg(m)+m)\in O(nlg(n)+mlg(m))$. An argument + can be made that because `queries[i],items[i][{0,1}]`$\leq10^9$, radix sort + can be leveraged to achieve a time complexity of $O(d \cdot (n + k + m + + k))\in O(9\cdot (n + m))\in O(n+m)$. +- Space Complexity: $\Theta(1)$, considering that $O(m)$ space must be + allocated. If `queries`/`items` cannot be modified in-place, increase the + space complexity by $m$/$n$ respectively. + +# [shortest subarray with or at least k ii](https://leetcode.com/problems/shortest-subarray-with-or-at-least-k-ii/description/) 09/11/2024 + +## problem statement Given an array of non-negative integers $num$ and some $k$, find the length of the shortest non-empty subarray of nums such that its element-wise bitwise OR is greater than or equal to $k$—return -1 if no such array exists. 
-### developing an approach +## developing an approach Another convoluted, uninspired bitwise-oriented daily. @@ -129,35 +232,68 @@ Now, how many times do we remove? While the element-wise bitwise OR of `xs` is $ This approach is generally called a variable-sized “sliding window”. Every element of `nums` is only added (considered in the element-wise bitwise OR) or removed (discard) one time, yielding an asymptotically linear time complexity. In other words, this is a realistic approach for our constraints. -### carrying out the plan +## carrying out the plan Plugging in our algorithm to my sliding window framework: +```python +def minimumSubarrayLength(self, nums, k): + # provide a sentinel for "no window found" + ans = sys.maxsize + window = deque() + l = 0 + + # expand the window by default + for r in range(len(nums)): + # consider `nums[r]` + window.append(nums[r]) + # shrink window while valid + while l <= r and reduce(operator.or_, window) >= k: + ans = min(ans, r - l + 1) + window.popleft() + l += 1 + + # normalize to -1 as requested + return -1 if ans == sys.maxsize else ans +``` + Done, right? No. TLE. If you thought this solution would work, you move too fast. Consider _every_ aspect of an algorithm before implementing it. In this case, we (I) overlooked one core question: -1. _How do we maintain our element-wise bitwise OR_? +> How do we maintain our element-wise bitwise OR? -Calculating it by directly maintaining a window of length $n$ takes $n$ time—with a maximum window size of $n$, this solution is $O(n^2)$.Let's try again. Adding an element is simple—OR it to some cumulative value. Removing an element, not so much. Considering some $x$ to remove, we only unset one of its bits from our aggregated OR if it's the “last” one of these bits set across all numbers contributing to our aggregated value.Thus, to maintain our aggregate OR, we want to map bit “indices” to counts. A hashmap (dictionary) or static array will do just find. 
Adding/removing some $x$ will increment/decrement each the counter's bit count at its respective position. I like to be uselessly specific sometimes—choosing the latter approach, how big should our array be? As many bits as represented by the largest of $nums$—(or $k$ itself): $\lfloor \lg({max\{nums,k \})}\rfloor+1$ +Calculating it by directly maintaining a window of length $n$ takes $n$ time—with a maximum window size of $n$, this solution is $O(n^2)$. + +Let's try again. Adding an element is simple—OR it to some cumulative value. Removing an element, not so much. Considering some $x$ to remove, we only unset one of its bits from our aggregated OR if it's the “last” one of these bits set across all numbers contributing to our aggregated value. + +Thus, to maintain our aggregate OR, we want to map bit “indices” to counts. A hashmap (dictionary) or static array will do just fine. Adding/removing some $x$ will increment/decrement each of the counter's bit counts at its respective position. I like to be uselessly specific sometimes—choosing the latter approach, how big should our array be? As many bits as represented by the largest of $nums$—(or $k$ itself): + +$$\lfloor \lg({max\{nums,k \})}\rfloor+1$$ Note that: -Below we use the [change of base formula for logarithms](https://artofproblemsolving.com/wiki/index.php/Change_of_base_formula) because $log_2(x)$ is not available in python.It's certainly possible that $max\{nums, k\}=0$. To avoid the invalid calculation $log(0)$, take the larger of $1$ and this calculation. The number of digits will then (correctly) be $1$ in this special case. +1. Below we use the [change of base formula for logarithms](https://artofproblemsolving.com/wiki/index.php/Change_of_base_formula) because $log_2(x)$ is not available in python. +2. It's certainly possible that $max\{nums, k\}=0$. To avoid the invalid calculation $log(0)$, take the larger of $1$ and this calculation. 
The number of digits will then (correctly) be $1$ in this special case. -### asymptotic complexity +## asymptotic complexity -Note that the size of the frequency map is bounded by $lg\_{2}({10^9})\approx30$.Space Complexity: Thus, the window uses $O(1)$ space.Time Complexity: $\Theta($`len(nums)`$)$ —every element of `nums` is considered at least once and takes $O(1)$ work each to find the element-wise bitwise OR. +Note that the size of the frequency map is bounded by $lg_{2}({10^9})\approx30$. -## [minimum array end](https://leetcode.com/problems/minimum-array-end/) 09/10/2024 +- Space Complexity: Thus, the window uses $O(1)$ space. +- Time Complexity: $\Theta($`len(nums)`$)$ —every element of `nums` is + considered at least once and takes $O(1)$ work each to find the element-wise + bitwise OR. -### problem statement +# [minimum array end](https://leetcode.com/problems/minimum-array-end/) 09/10/2024 + +## problem statement Given some $x$ and $n$, construct a strictly increasing array (say `nums` ) of length $n$ such that `nums[0] & nums[1] ... & nums[n - 1] == x` , where `&` denotes the bitwise AND operator. Finally, return the minimum possible value of `nums[n - 1]`. -### understanding the problem +## understanding the problem The main difficulty in this problem lies in understanding what is being asked (intentionally or not, the phrasing is terrible). Some initial notes: @@ -165,21 +301,50 @@ The main difficulty in this problem lies in understanding what is being asked (i - If the element-wise bitwise AND of an array equals `x` if and only if each element has `x`'s bits set—and no other bit it set by all elements - It makes sense to set `nums[0] == x` to ensure `nums[n - 1]` is minimal -### developing an approach +## developing an approach -An inductive approach is helpful. Consider the natural question: “If I had correctly generated `nums[:i]`”, how could I find `nums[i]`? 
In other words, how can I find the next smallest number such that `nums` 's element-wise bitwise AND is still $x$?Hmm... this is tricky. Let's think of a similar problem to glean some insight: “Given some $x$, how can I find the next smallest number?”. The answer is, of course, add one (bear with me here).We also know that all of `nums[i]` must have at least $x$'s bits set. Therefore, we need to alter the unset bits of `nums[i]`. +An inductive approach is helpful. Consider the natural question: “If I had correctly generated `nums[:i]`”, how could I find `nums[i]`? In other words, _how can I find the next smallest number such that `nums` 's element-wise bitwise AND is still $x$?_ + +Hmm... this is tricky. Let's think of a similar problem to glean some insight: “Given some $x$, how can I find the next smallest number?”. The answer is, of course, add one (bear with me here). + +We also know that all of `nums[i]` must have at least $x$'s bits set. Therefore, we need to alter the unset bits of `nums[i]`. The key insight of this problem is combining these two ideas to answer our question: _Just “add one” to `nums[i - 1]`'s unset bits_. Repeat this to find `nums[n - 1]`. One last piece is missing—how do we know the element-wise bitwise AND is _exactly_ $x$? Because `nums[i > 0]` only sets $x$'s unset bits, every number in `nums` will have at least $x$'s bits set. Further, no other bits will be set because $x$ has them unset. -### carrying out the plan +## carrying out the plan Let's flesh out the remaining parts of the algorithm: - `len(nums) == n` and we initialize `nums[0] == x`. So, we need to “add one” `n - 1` times - How do we carry out the additions? We could iterate $n - 1$ times and simulate them. However, we already know how we want to alter the unset bits of `nums[0]` inductively— (add one) _and_ how many times we want to do this ($n - 1$). 
Because we're adding one $n-1$ times to $x$'s unset bits (right to left, of course), we simply set its unset bits to those of $n - 1$.The implementation is relatively straightfoward. Traverse $x$ from least-to-most significant bit, setting its $i$th unset bit to $n - 1$'s $i$th bit. Use a bitwise mask `mask` to traverse $x$. +- How do we carry out the additions? We could iterate $n - 1$ times and simulate them. However, we already know how we want to alter the unset bits of `nums[0]` inductively— (add one) _and_ how many times we want to do this ($n - 1$). Because we're adding one $n-1$ times to $x$'s unset bits (right to left, of course), we simply set its unset bits to those of $n - 1$. The implementation is relatively straightforward. Traverse $x$ from least-to-most significant bit, setting its $i$th unset bit to $n - 1$'s $i$th bit. Use a bitwise mask `mask` to traverse $x$. -### asymptotic complexity +```cpp +long long minEnd(int n, long long x) { + int bits_to_distribute = n - 1; + long long mask = 1; -Space Complexity: $\Theta(1)$—a constant amount of numeric variables are allocated regardless of $n$ and $x$.Time Complexity: in the worst case, may need to traverse the entirety of $x$ to distribute every bit of $n - 1$ to $x$. This occurs if and only if $x$ is all ones ($\exists k\gt 0 : 2^k-1=x$)). $x$ and $n$ have $lg(x)$ and $lg(n)$ bits respectively, so the solution is $O(lg(x) + lg(n))\in O(log(xn))$. $1\leq x,n\leq 1e8$, so this runtime is bounded by $O(log(1e8^2))\in O(1)$. + while (bits_to_distribute > 0) { + if ((x & mask) == 0) { + if ((bits_to_distribute & 1) == 1) + x |= mask; + bits_to_distribute >>= 1; + } + mask <<= 1; + } + + return x; +} +``` + +## asymptotic complexity + +- Space Complexity: $\Theta(1)$—a constant amount of numeric variables + are allocated regardless of $n$ and $x$. +- Time Complexity: in the worst case, may need to traverse the entirety + of $x$ to distribute every bit of $n - 1$ to $x$. 
This occurs if and only if + $x$ is all ones ($\exists k\gt 0 : 2^k-1=x$)). $x$ and $n$ have $lg(x)$ and + $lg(n)$ bits respectively, so the solution is $O(lg(x) + lg(n))\in + O(log(xn))$. $1\leq x,n\leq 1e8$, so this runtime is bounded by + $O(log(1e8^2))\in O(1)$. diff --git a/src/content/posts/algorithms/models-of-production.mdx b/src/content/posts/algorithms/models-of-production.mdx index 1ab38ab..e0bb64b 100644 --- a/src/content/posts/algorithms/models-of-production.mdx +++ b/src/content/posts/algorithms/models-of-production.mdx @@ -1,6 +1,6 @@ --- title: "models of production" -date: "2024-06-22" +date: "22/06/2024" useKatex: true --- @@ -22,9 +22,9 @@ With: In this simple model, the following statements describe the economy: 1. Output is either saved or consumed; in other words, savings equals investment -2. Capital accumulates according to investment $I_t$ and depreciation $\bar{d}$, beginning with $K_0$ (often called the Law of Capital Motion) -3. Labor $L_t$ is time-independent -4. A savings rate $\bar{s}$ describes the invested portion of total output +2. Capital accumulates according to investment $I_t$ and depreciation $\bar{d}$, beginning with $K_0$ (often called the Law of Capital Motion) +3. Labor $L_t$ is time-independent +4. 
A savings rate $\bar{s}$ describes the invested portion of total output Including the production function, these four ideas encapsulate the Solow Model: @@ -49,7 +49,13 @@ When investment is completely disincentivized by depreciation (in other words, $ Using this equilibrium condition, it follows that: -$$Y_t^*=\bar{A}{K_t^*}^\alpha\bar{L}^{1-\alpha}$$ $$\rightarrow \bar{d}K_t^*=\bar{s}\bar{A}{K_t^*}^\alpha\bar{L}^{1-\alpha}$$ $$\rightarrow K^*=\bar{L}(\frac{\bar{s}\bar{A}}{\bar{d}})^\frac{1}{1-\alpha}$$ $$\rightarrow Y^*=\bar{A}^\frac{1}{1-\alpha}(\frac{\bar{s}}{\bar{d}})^\frac{\alpha}{1-\alpha}\bar{L}$$ +$$Y_t^*=\bar{A}{K_t^*}^\alpha\bar{L}^{1-\alpha}$$ + +$$\rightarrow \bar{d}K_t^*=\bar{s}\bar{A}{K_t^*}^\alpha\bar{L}^{1-\alpha}$$ + +$$\rightarrow K^*=\bar{L}(\frac{\bar{s}\bar{A}}{\bar{d}})^\frac{1}{1-\alpha}$$ + +$$\rightarrow Y^*=\bar{A}^\frac{1}{1-\alpha}(\frac{\bar{s}}{\bar{d}})^\frac{\alpha}{1-\alpha}\bar{L}$$ Thus, the equilibrium intensive form (output per worker) of both capital and output are summarized as follows: @@ -63,8 +69,8 @@ Using both mathematical intuition and manipulating the visualization above, we f - Capital is influenced by workforce size, TFP, and savings rate - Capital output share's $\alpha$ impact on output is twofold: - 1. Directly through capital quantity - 2. Indirectly through TFP + 1. Directly through capital quantity + 2. Indirectly through TFP - Large deviations in capital from steady-state $K^*$ induce net investments of larger magnitude, leading to an accelerated reversion to the steady-state - Economies stagnate at the steady-state $(K^*,Y^*)$—this model provides no avenues for long-run growth. 
@@ -86,7 +92,9 @@ The Romer Model provides an answer by both modeling ideas $A_t$ (analagous to TF The Model divides the world into two parts: - Objects: finite resources, like capital and labor in the Solow Model -- Ideas: infinite, [non-rivalrous](https://en.wikipedia.org/wiki/Rivalry_$economics$) items leveraged in production (note that ideas may be [excludable](blank), though) +- Ideas: infinite, + [non-rivalrous](https://en.wikipedia.org/wiki/Rivalry_$economics$) items + leveraged in production (note that ideas may be [excludable](blank), though) The Romer Models' production function can be modelled as: @@ -196,7 +204,7 @@ $$g_K^*=\bar{s}\frac{Y_t^*}{K_t^*}-\bar{d}=g_Y^*\rightarrow K_t^*=\frac{\bar{s}Y Isolating $Y_t^*$, -$$Y_t^*=A_t^* (\frac{\bar{s}Y_t^*}{g_Y^*+\bar{d}})^\alpha ({(1-\bar{l})\bar{L}})^{1-\alpha}$$ +$$Y_t^*=A_t^* (\frac{\bar{s}Y_t^*}{g_Y^*+\bar{d}})^\alpha ({(1-\bar{l})\bar{L}})^{1-\alpha}$$ $$\rightarrow {Y_t^*}^{1-\alpha}=A_t^*(\frac{\bar{s}}{g_Y^*+\bar{d}})^\alpha({(1-\bar{l})\bar{L}})^{1-\alpha}$$ @@ -206,7 +214,7 @@ $$Y_t^*={(A_0(1+\bar{z}\bar{l}\bar{L})^t})^\frac{1}{1-\alpha}(\frac{\bar{s}}{\fr ### analysis -First looking at the growth rate of output, $g*Y^*=\frac{\bar{z}\bar{l}\bar{L}}{1-\alpha}$, idea-driving factors and an increased allocation of labor to output increase the equilibrium Balanced Growth Path—the _level* of long-run growth. Thus, this model captures the influences of both capital and ideas on economic growth. +First looking at the growth rate of output, $g*Y^*=\frac{\bar{z}\bar{l}\bar{L}}{1-\alpha}$, idea-driving factors and an increased allocation of labor to output increase the equilibrium Balanced Growth Path—the \_level\* of long-run growth. Thus, this model captures the influences of both capital and ideas on economic growth. Looking at $Y_t^*$, ideas have both a direct and indirect effect on output. 
Firstly, ideas raise output because they increase productivity (directly); second, with the introduction of capital stock, ideas also increase capital, in turn increasing output further (indirectly). Mathematically, this is evident in both instances of $g_A^*$ in the formula for output $Y_t^*$—note that $\frac{1}{1-\alpha},\frac{\alpha}{1-\alpha}>0$ for any $\alpha\in(0,1)$, so $\frac{d}{dg_A^*}Y_t^*>0$. diff --git a/src/content/posts/algorithms/practice-makes-perfect.mdx b/src/content/posts/algorithms/practice-makes-perfect.mdx index e9672d1..248bfb6 100644 --- a/src/content/posts/algorithms/practice-makes-perfect.mdx +++ b/src/content/posts/algorithms/practice-makes-perfect.mdx @@ -1,6 +1,6 @@ --- title: "practice makes perfect" -date: "2025-05-07" +date: "07/05/2025" useKatex: true --- diff --git a/src/content/posts/meditations/the-problem-with-cs-curricula.mdx b/src/content/posts/meditations/the-problem-with-cs-curricula.mdx index 7dd822c..7b758cb 100644 --- a/src/content/posts/meditations/the-problem-with-cs-curricula.mdx +++ b/src/content/posts/meditations/the-problem-with-cs-curricula.mdx @@ -1,5 +1,6 @@ --- title: "the problem with cs curricula" +date: "16/05/2025" useKatex: true --- diff --git a/src/content/posts/operating-systems/building-an-os.mdx b/src/content/posts/operating-systems/building-an-os.mdx deleted file mode 100644 index 0061e36..0000000 --- a/src/content/posts/operating-systems/building-an-os.mdx +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: "building an os" -date: "2025-04-15" ---- - -## introduction - -wip diff --git a/src/content/posts/software/designing-this-website.mdx b/src/content/posts/software/designing-this-website.mdx index 8df0340..054cd7d 100644 --- a/src/content/posts/software/designing-this-website.mdx +++ b/src/content/posts/software/designing-this-website.mdx @@ -1,6 +1,6 @@ --- title: "designing this website" -date: "2024-06-18" +date: "18/06/2024" --- ## HTML, JavaScript, and CSS diff --git 
a/src/content/posts/software/from-github-pages-to-aws.mdx b/src/content/posts/software/from-github-pages-to-aws.mdx index 332d959..008d819 100644 --- a/src/content/posts/software/from-github-pages-to-aws.mdx +++ b/src/content/posts/software/from-github-pages-to-aws.mdx @@ -1,6 +1,6 @@ --- title: "from github pages to AWS" -date: "2024-06-15" +date: "15/06/2024" --- ## pages begone @@ -44,7 +44,7 @@ A user request can be modelled as follows: 4. CloudFront checks its edge caches for the requested content. If the content is stale or not cached, CloudFront fetches the content from S3. Otherwise, it uses the cached content from an edge server. 5. CloudFront returns the content to the user's browser. -![system design of my portfolio website](/public/posts/website-design.webp) +![system design of my portfolio website](/public/posts/from-github-pages-to-aws/website-design.webp) ## difficulties diff --git a/src/content/posts/software/hosting-a-git-server.mdx b/src/content/posts/software/hosting-a-git-server.mdx index 2fc561c..04c7775 100644 --- a/src/content/posts/software/hosting-a-git-server.mdx +++ b/src/content/posts/software/hosting-a-git-server.mdx @@ -1,6 +1,6 @@ --- title: "hosting a git server" -date: "2025-05-07" +date: "07/05/2025" --- ## why @@ -28,8 +28,46 @@ I detail self-hosting a git server on an AWS t2.micro instance ("free" for 1 yea 2. `sudo chgrp -R apache /srv/git` 9. To deal with "dubious ownership" issues when cloning with HTTPS, I needed to add **exactly** the following configuration to `/etc/gitconfig`. _No group permission finagling will work_! Git only allows cloning repositories that are owned by the user. If you wish to clone via SSH with, say, user A, this same user must also be employed by your HTTP server to clone the files (customize HTTPD/whatever you're using accordingly). +```gitconfig +[safe] + directory = * +``` + 10. Security-wise, set up TLS/HTTPS with [Let's Encrypt](https://letsencrypt.org/). 
Further, only allow authorized people to actually _push_ to the server. The following is my HTTPD configuration file `/etc/apache/conf.d/git-server.conf` hosting the web ui at the root and clone urls at `/git`: +```apacheconf + + ServerName + + SSLEngine on + SSLCertificateFile /etc/letsencrypt/live//fullchain.pem + SSLCertificateKeyFile /etc/letsencrypt/live//privkey.pem + + SetEnv GIT_PROJECT_ROOT /srv/git + SetEnv REMOTE_USER $REDIRECT_REMOTE_USER + + ScriptAlias /git/ /usr/libexec/git-core/git-http-backend/ + + + Options +ExecCGI -MultiViews +SymLinksIfOwnerMatch + Require all granted + AllowOverride None + + + + AuthType Basic + AuthName "Git Access" + AuthUserFile /srv/git/.htpasswd + Require expr !(%{QUERY_STRING} -strmatch '*service=git-receive-pack*' || %{REQUEST_URI} =~ m#/git-receive-pack$#) + Require valid-user + + ProxyPassMatch ^/git/ ! + ProxyPreserveHost On + ProxyPass / http://127.0.0.1:8000/ + ProxyPassReverse / http://127.0.0.1:8000/ + +``` + 11. There are a variety of choices for web ui, including [cgit](https://git.zx2c4.com/cgit/), [gitweb](https://git-scm.com/docs/gitweb) (I do not recommend this—the scripts are ancient and require manual tuning), and some even heavier options that allow for further customization. I am not a fan of viewing code on the web, so you cannot in [my custom ui](https://git.barrettruth.com). I spin up a simple python server to walk the projects in `/srv/git` and configured a systemd service to run it in the ec2 box: ## lessons diff --git a/src/content/posts/software/my-cp-setup.mdx b/src/content/posts/software/my-cp-setup.mdx index adbbb6e..c97b17a 100644 --- a/src/content/posts/software/my-cp-setup.mdx +++ b/src/content/posts/software/my-cp-setup.mdx @@ -1,6 +1,6 @@ --- title: "my cp setup" -date: "2025-04-15" +date: "15/04/2025" --- Source code [here](https://github.com/barrett-ruth/dots/blob/main/nvim/lua/cp.lua). 
diff --git a/src/layouts/BaseLayout.astro b/src/layouts/BaseLayout.astro index fdfdfec..4435324 100644 --- a/src/layouts/BaseLayout.astro +++ b/src/layouts/BaseLayout.astro @@ -1,6 +1,6 @@ --- -import Header from '../components/Header.astro'; -import Footer from '../components/Footer.astro'; +import Header from "../components/Header.astro"; +import Footer from "../components/Footer.astro"; interface Props { title: string; @@ -9,15 +9,15 @@ interface Props { bodyClass?: string; } -const { - title, - description = "Barrett Ruth's personal website", +const { + title, + description = "Barrett Ruth's personal website", useKatex = false, - bodyClass = "graph-background" + bodyClass = "graph-background", } = Astro.props; --- - + @@ -37,4 +37,4 @@ const {