From f6e28d2f8b657eed21c12142bd487079dc1109bd Mon Sep 17 00:00:00 2001
From: Dinu Blanovschi
Date: Sun, 25 Feb 2024 02:06:09 +0100
Subject: [PATCH 01/20] Quartz sync: Feb 25, 2024, 2:06 AM
---
content/.gitkeep | 0
content/blog/cargo-difftests/index.md | 7 +
.../blog/cargo-difftests/introduction-old.md | 970 ++++++++++++++++++
content/blog/dir-structure.md | 440 ++++++++
content/blog/index.md | 6 +
content/blog/upsilon/index.md | 6 +
content/blog/upsilon/part-1-introduction.md | 96 ++
content/blog/upsilon/part-2-git-over-ssh.md | 46 +
.../blog/upsilon/part-3-upsilon-difftests.md | 173 ++++
content/cargo-difftests/analyze-all.md | 120 +++
.../cargo-difftests/custom-test-harnesses.md | 12 +
content/cargo-difftests/index.md | 40 +
content/cargo-difftests/manual.md | 6 +
content/cargo-difftests/usage.md | 40 +
content/index.md | 6 +
.../prompts/default/getEmailNeg.md | 13 +
.../prompts/default/getEmailPos.md | 13 +
.../textgenerator/prompts/default/getIdeas.md | 12 +
.../prompts/default/getOutline.md | 15 +
.../prompts/default/getParagraph.md | 15 +
.../textgenerator/prompts/default/getTags.md | 14 +
.../prompts/default/getTitles.md | 12 +
.../textgenerator/prompts/default/rewrite.md | 12 +
.../textgenerator/prompts/default/simplify.md | 12 +
.../prompts/default/summarize.md | 12 +
.../prompts/default/summarizeLarge.md | 18 +
.../huggingface/classify-bart-large-mnli.md | 18 +
.../prompts/huggingface/completeTextBloom.md | 11 +
.../prompts/huggingface/summarizeBART.md | 18 +
content/toc.md | 5 +
quartz.config.ts | 6 +-
quartz.layout.ts | 12 +-
quartz/components/Footer.tsx | 4 +-
quartz/static/icon.png | Bin 17368 -> 39442 bytes
34 files changed, 2181 insertions(+), 9 deletions(-)
delete mode 100644 content/.gitkeep
create mode 100644 content/blog/cargo-difftests/index.md
create mode 100644 content/blog/cargo-difftests/introduction-old.md
create mode 100644 content/blog/dir-structure.md
create mode 100644 content/blog/index.md
create mode 100644 content/blog/upsilon/index.md
create mode 100644 content/blog/upsilon/part-1-introduction.md
create mode 100644 content/blog/upsilon/part-2-git-over-ssh.md
create mode 100644 content/blog/upsilon/part-3-upsilon-difftests.md
create mode 100644 content/cargo-difftests/analyze-all.md
create mode 100644 content/cargo-difftests/custom-test-harnesses.md
create mode 100644 content/cargo-difftests/index.md
create mode 100644 content/cargo-difftests/manual.md
create mode 100644 content/cargo-difftests/usage.md
create mode 100644 content/index.md
create mode 100644 content/textgenerator/prompts/default/getEmailNeg.md
create mode 100644 content/textgenerator/prompts/default/getEmailPos.md
create mode 100644 content/textgenerator/prompts/default/getIdeas.md
create mode 100644 content/textgenerator/prompts/default/getOutline.md
create mode 100644 content/textgenerator/prompts/default/getParagraph.md
create mode 100644 content/textgenerator/prompts/default/getTags.md
create mode 100644 content/textgenerator/prompts/default/getTitles.md
create mode 100644 content/textgenerator/prompts/default/rewrite.md
create mode 100644 content/textgenerator/prompts/default/simplify.md
create mode 100644 content/textgenerator/prompts/default/summarize.md
create mode 100644 content/textgenerator/prompts/default/summarizeLarge.md
create mode 100644 content/textgenerator/prompts/huggingface/classify-bart-large-mnli.md
create mode 100644 content/textgenerator/prompts/huggingface/completeTextBloom.md
create mode 100644 content/textgenerator/prompts/huggingface/summarizeBART.md
create mode 100644 content/toc.md
diff --git a/content/.gitkeep b/content/.gitkeep
deleted file mode 100644
index e69de29b..00000000
diff --git a/content/blog/cargo-difftests/index.md b/content/blog/cargo-difftests/index.md
new file mode 100644
index 00000000..79c2b55c
--- /dev/null
+++ b/content/blog/cargo-difftests/index.md
@@ -0,0 +1,7 @@
+---
+title: Cargo difftests blog
+---
+This section of the blog contains a few blog posts which describe the evolution of [[cargo-difftests/index|cargo-difftests]].
+
+- The [[part-3-upsilon-difftests|pre-introduction]] was a post from before `cargo-difftests` was extracted from my other major project at the time, [[blog/upsilon/index|upsilon]], for which it was more or less specifically made.
+- [[introduction-old|The old introduction]] is from when I first announced the tool on r/rust. It has come a *very* long way since then.
\ No newline at end of file
diff --git a/content/blog/cargo-difftests/introduction-old.md b/content/blog/cargo-difftests/introduction-old.md
new file mode 100644
index 00000000..756eaaf4
--- /dev/null
+++ b/content/blog/cargo-difftests/introduction-old.md
@@ -0,0 +1,970 @@
+---
+title: "Cargo difftests: old introduction post"
+date: 2023-02-12
+---
+>[!tip]
+>Edit (2024-02-25): This is only here for historical reasons, and to show how much `cargo-difftests` has changed since it was first released. Although the basic commands are still almost the same, `cargo-difftests` today has some newer commands which provide an easier and nicer interface to interact with.
+
+A few days ago, I wrote a [[part-3-upsilon-difftests|post about upsilon-difftests]], a tool that tells you about the tests that have changed since the last commit / test run, but somewhat tailored to my [[blog/upsilon/index|upsilon]] project. Since then, I thought it would be nice to extract the core functionality into a few separate crates, and make it available for others to use. In this post, I'll give a quick introduction to the `cargo-difftests` crate, and show you how to get the most out of it.
+
+## TL;DR
+
+`cargo-difftests` works with coverage data, and can tell you which tests have changed since the last commit / based on file system mtimes. In the next section, I will go over how it achieves this, but if you just want the guide to set it up, feel free to skip to [the walkthrough section](#walkthrough).
+
+## How does it work?
+
+Similarly to my [[part-3-upsilon-difftests|upsilon-difftests post]], I would like to ask you to familiarize yourself with [rustc's instrumentation-based source coverage](https://doc.rust-lang.org/rustc/instrument-coverage.html) first, since that is the foundation on which `cargo-difftests` is built.
+
+After that, we can get started.
+
+### `cargo-difftests-testclient`
+
+`cargo-difftests-testclient` is a small crate that is used to generate the file system structure that `cargo-difftests` expects.
+
+It takes a bit of information about the test and a directory, and generates a directory structure that looks like this:
+
+Do note that it will delete the directory if it exists, so make sure you don't have anything important in there.
+
+```
+.
+|- self.json
+|- self.profraw
+|- cargo_difftests_version
+|- ....profraw
+```
+
+- `self.json` contains the information that you pass about the tests.
+- `self.profraw` contains the coverage data for the code of the test binary itself.
+- `cargo_difftests_version` contains the version of `cargo-difftests` that generated the directory.
+- `....profraw` being the other profraw files generated by the binaries the test invokes.
+
+## `cargo-difftests`
+
+After we have the directory structure, we can use `cargo-difftests` to figure out if any of the source files involved in the test have changed.
+
+Under the hood, it will call `rust-profdata merge` to merge all the `.profraw` files into a `.profdata` file, and then call `rust-cov export` to get the coverage data from the `.profdata` file. (`rust-profdata` and `rust-cov` come from [`cargo-binutils`](https://github.com/rust-embedded/cargo-binutils))
+
+Optionally, it can export the coverage data json into a smaller "test index", which contains only the information that it actually uses, and nothing more.
+
+In practice (in upsilon), when the exported coverage data is a json file of about 40 MiBs, the test index comes up to about 20 KiBs, so using that for the analysis is a lot faster, almost instant.
+
+After we have some exported coverage data, we can use it to figure out which source files were involved in the test, and then check if any of them have changed.
+
+>[!note]
+> `cargo-difftests` will use the mtime of the `self.json` file to determine when the test was run.
+
+From the coverage data, and depending on the passed `--algo` option, it will go one of three ways, but for the sake of the explanation, we'll just call the modified files "dirty". If any of the files involved in a test were marked as dirty, then so will the test.
+
+#### `--algo=fs-mtime` (default)
+
+This is the simplest algorithm, and it will just check if any of the source files that have been involved in the test have changed since the test was run (by comparing mtimes). So, the set of dirty files is the set of all files from the repository that have been changed since the test was run.
+
+#### `--algo=git-diff-files`
+
+Very similar to `--algo=fs-mtime`, but it will diff the `HEAD` with the worktree to find out which files have changed. Here, the set of dirty files is the set of all files from the repository that have been changed **since the last commit (and not test run)**.
+
+#### `--algo=git-diff-hunks`
+
+This is a more advanced one. Similarly to `--algo=git-diff-files`, it will diff the `HEAD` with the worktree, but instead of just checking if the file was modified, it will look at the hunks that were modified, and it will try to intersect them with the regions from the coverage data. To try to put it mathematically, the set of dirty files is the set of all files that changed since the last commit, **and** each file has to have at least one diff hunk that intersects any of the regions of the coverage data with a `count > 0`.
+
+It has almost the same drawback as `--algo=git-diff-files`, that is, it cannot know the state of the repository at the last test run, just at the last commit, but when that is indeed the case, it will yield the most accurate results out of the three.
+
+In git, a hunk is a part of a file that was changed. It's not always a single line, but it can be multiple lines. In `libgit2`, it is identified by a tuple like `(old_line_start, old_line_count, new_line_start, new_line_count)`.
+
+When `old_line_count` is 0, it means that the hunk is an addition, and when `new_line_count` is 0, it means that the hunk is a deletion. In other cases, it's a modification.
+
+The way this algorithm works is that it will intersect `(old_line_start..old_line_start + old_line_count)` with the regions from the coverage data, and if there is an intersection, it will mark the file as dirty.
+
+> **This only works if the file was not modified since the last commit when the tests were run.**
+
+##### `--algo=git-diff-hunks` vs. `--algo=git-diff-files`
+
+`--algo=git-diff-hunks` will be more accurate than `--algo=git-diff-files`, assuming the last test run was right after the last commit, when none of the source files have been changed (that is, the worktree was clean), but in the case that the worktree of the repository was dirty when the test was run, `--algo=git-diff-hunks` would ~~catastrophically~~ fail, giving false positives **and** false negatives, while `--algo=git-diff-files` would still yield somewhat correct results, although with a few false positives.
+
+This is just their overview, and we will compare how those algorithms work in practice in the walkthrough, including how `--algo=git-diff-hunks` would fail while `--algo=git-diff-files` would still work (~~for now `git-diff-hunks` is completely broken, and I will update this once it is fixed~~ edit: fixed in `0.1.0-alpha.3`).
+
+## Walkthrough
+
+### Prerequisites
+
+>[!NOTE]
+> Needs rust nightly.
+
+`cargo-difftests` uses `rust-profdata` and `rust-cov` from [`cargo-binutils`](https://github.com/rust-embedded/cargo-binutils) under the hood, so you will need to install that first, along with the llvm-tools themselves:
+
+```bash
+rustup component add llvm-tools-preview
+cargo install cargo-binutils
+```
+
+Now, to install `cargo-difftests`:
+
+```bash
+cargo install cargo-difftests --version 0.1.0-alpha.3
+```
+
+### Setup
+
+Let us start with a new project:
+
+```bash
+cargo new --bin cargo-difftests-sample-project
+```
+
+We will create a new profile called `difftests`, that will use code coverage:
+
+```toml
+# .cargo/config.toml
+[profile.difftests]
+inherits = "dev"
+rustflags = [
+ "-C", "instrument-coverage", # flag required for instrumentation-based code coverage
+ "--cfg", "cargo_difftests", # cfg required for cargo-difftests-testclient,
+ # more on it in a second
+]
+
+[unstable]
+profile-rustflags = true
+```
+
+If we just run `cargo run --profile difftests`, we will get:
+
+```bash
+> cargo run --profile difftests -q
+Hello, world!
+```
+
+And if we `ls`, we should have a `.profraw` file:
+
+```bash
+> ls
+Cargo.lock default_8281569816464993346_0_147888.profraw target/
+Cargo.toml src/
+```
+
+We don't need it, so feel free to delete it:
+
+```bash
+rm default_*.profraw
+```
+
+Great! Now, let's add some functions we can test:
+
+```rust
+// src/lib.rs
+
+pub fn add(a: i32, b: i32) -> i32 {
+ a + b
+}
+
+pub fn sub(a: i32, b: i32) -> i32 {
+ a - b
+}
+
+pub mod advanced_arithmetic;
+
+pub use advanced_arithmetic::*;
+```
+
+```rust
+// src/advanced_arithmetic.rs
+
+pub fn mul(a: i32, b: i32) -> i32 {
+ a * b
+}
+
+pub fn div_unchecked(a: i32, b: i32) -> i32 {
+ a / b
+}
+
+pub fn div(a: i32, b: i32) -> Option<i32> {
+ if b != 0 {
+ Some(div_unchecked(a, b))
+ } else {
+ None
+ }
+}
+```
+
+And now we can add some tests:
+
+```rust
+// tests/tests.rs
+
+use cargo_difftests_sample_project::*;
+
+#[test]
+fn test_add() {
+ assert_eq!(add(1, 2), 3);
+}
+
+#[test]
+fn test_sub() {
+ assert_eq!(sub(3, 2), 1);
+}
+
+#[test]
+fn test_mul() {
+ assert_eq!(mul(2, 3), 6);
+}
+
+#[test]
+fn test_div() {
+ assert_eq!(div(6, 3), Some(2));
+}
+
+#[test]
+fn test_div_2() {
+ assert_eq!(div(6, 0), None);
+}
+```
+
+Running them right now gives us:
+
+```rust
+> cargo t --profile difftests
+ Compiling cargo-difftests-sample-project v0.1.0 (C:\Users\Dinu\samples\cargo-difftests-sample-project)
+ Finished difftests [unoptimized + debuginfo] target(s) in 0.66s
+ Running unittests src\lib.rs (target\difftests\deps\cargo_difftests_sample_project-0fa293eef4b2f5f9.exe)
+
+running 0 tests
+
+test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
+
+ Running unittests src\main.rs (target\difftests\deps\cargo_difftests_sample_project-3c5054455458f422.exe)
+
+running 0 tests
+
+test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
+
+ Running tests\tests.rs (target\difftests\deps\tests-53cb4ce840823521.exe)
+
+running 5 tests
+test test_add ... ok
+test test_div ... ok
+test test_sub ... ok
+test test_mul ... ok
+test test_div_2 ... ok
+
+test result: ok. 5 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
+
+ Doc-tests cargo-difftests-sample-project
+
+running 0 tests
+
+test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.00s
+
+```
+
+And now we should have 3 `.profraw` files:
+
+```bash
+> ls
+Cargo.lock
+Cargo.toml
+default_14538582753082375997_0_149916.profraw
+default_17956391759092769319_0_141152.profraw
+default_323744082823911785_0_145776.profraw
+src/
+target/
+tests/
+```
+
+One came from the unit tests, one from the integration tests in `tests/tests.rs`, and one from the doc tests.
+
+Again, we can go ahead and remove them:
+
+```bash
+rm default_*.profraw
+```
+
+Now, we will need `cargo-difftests-testclient`:
+
+`Cargo.toml`
+
+```toml
+[package]
+name = "cargo-difftests-sample-project"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+
+[dev-dependencies]
+cargo-difftests-testclient = "0.1.0-alpha.3"
+```
+
+And we can go ahead and use it:
+
+`tests/tests.rs`
+
+```rust
+use cargo_difftests_sample_project::*;
+
+fn setup_difftests(test_name: &str) {
+ #[cfg(cargo_difftests)] // the cargo_difftests_testclient crate is empty
+ // without this cfg
+ {
+ // the temporary directory where we will store everything we need.
+ // this should be passed to various `cargo difftests` subcommands as the
+ // `--dir` option.
+ let tmpdir = std::path::PathBuf::from(env!("CARGO_TARGET_TMPDIR"))
+ .join("cargo-difftests").join(test_name);
+ let difftests_env = cargo_difftests_testclient::init(
+ cargo_difftests_testclient::TestDesc {
+ // a "description" of the test.
+ // cargo-difftests doesn't care about what you put here
+ // (except for the bin_path field) but it is your job to use
+ // the data in here to identify the test
+ // and rerun it if needed.
+ // those fields are here to guide you, but you can add any other
+ // fields you might need (see the `other_fields` field below)
+ pkg_name: env!("CARGO_PKG_NAME").to_string(),
+ crate_name: env!("CARGO_CRATE_NAME").to_string(),
+ bin_name: option_env!("CARGO_BIN_NAME").map(ToString::to_string),
+ bin_path: std::env::current_exe().unwrap(),
+ test_name: test_name.to_string(),
+ other_fields: std::collections::HashMap::new(), // any other
+ // fields you might want to add, to identify the test.
+                // (the map is of type HashMap<String, String>)
+ },
+ &tmpdir,
+ ).unwrap();
+ // right now, the difftests_env is not used, but if
+ // spawning children, it is needed to pass some environment variables to
+ // them, like this:
+ //
+ // cmd.envs(difftests_env.env_for_children());
+ }
+}
+
+#[test]
+fn test_add() {
+ setup_difftests("test_add");
+ assert_eq!(add(1, 2), 3);
+}
+
+#[test]
+fn test_sub() {
+ setup_difftests("test_sub");
+ assert_eq!(sub(3, 2), 1);
+}
+
+#[test]
+fn test_mul() {
+ setup_difftests("test_mul");
+ assert_eq!(mul(2, 3), 6);
+}
+
+#[test]
+fn test_div() {
+ setup_difftests("test_div");
+ assert_eq!(div(6, 3), Some(2));
+}
+
+#[test]
+fn test_div_2() {
+ setup_difftests("test_div_2");
+ assert_eq!(div(6, 0), None);
+}
+```
+
+>[!NOTE]
+>For it to work, you need to run the tests in separate processes. `cargo nextest` does that by default, but if you are using `cargo test`, you will need to do that yourself.
+
+Also, the tests should not exit via `::std::process::exit(code)` or other `abort()`-style mechanisms, as that would prevent the coverage data from being written to the `.profraw` file.
+
+Now, we can run the tests:
+
+```bash
+cargo t --profile difftests --test tests test_add
+cargo t --profile difftests --test tests test_sub
+cargo t --profile difftests --test tests test_mul
+cargo t --profile difftests --test tests test_div -- --exact
+cargo t --profile difftests --test tests test_div_2
+```
+
+Now, we finally get to invoke `cargo difftests` for the first time:
+
+```bash
+cargo difftests analyze-all # since we used the default directory
+cargo difftests analyze-all --dir target/tmp/cargo-difftests # explicit
+```
+
+We should get something like this:
+
+```json
+[
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_add",
+ "self_profraw": "target/tmp/cargo-difftests\\test_add\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_add\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_add\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_add\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_add",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_div",
+ "self_profraw": "target/tmp/cargo-difftests\\test_div\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_div\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_div\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_div\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_div",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_div_2",
+ "self_profraw": "target/tmp/cargo-difftests\\test_div_2\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_div_2\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_div_2\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_div_2\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_div_2",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_mul",
+ "self_profraw": "target/tmp/cargo-difftests\\test_mul\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_mul\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_mul\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_mul\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_mul",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_sub",
+ "self_profraw": "target/tmp/cargo-difftests\\test_sub\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_sub\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_sub\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_sub\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_sub",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ }
+]
+```
+
+As you can see, it's quite verbose, but it's also quite easy to see what's going on. We are only interested in the name of the test and the `verdict`.
+
+Verdict is always either `clean` or `dirty`, and then you can use the `test_desc` to get the name of the test to rerun.
+
+Let's touch the file `src/lib.rs` and see what happens:
+
+```bash
+touch src/lib.rs
+```
+
+`cargo difftests analyze-all`:
+
+```json
+[
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_add",
+ "self_profraw": "target/tmp/cargo-difftests\\test_add\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_add\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_add\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_add\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_add",
+ "other_fields": {}
+ },
+ "verdict": "dirty"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_div",
+ "self_profraw": "target/tmp/cargo-difftests\\test_div\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_div\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_div\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_div\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_div",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_div_2",
+ "self_profraw": "target/tmp/cargo-difftests\\test_div_2\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_div_2\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_div_2\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_div_2\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_div_2",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_mul",
+ "self_profraw": "target/tmp/cargo-difftests\\test_mul\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_mul\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_mul\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_mul\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_mul",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_sub",
+ "self_profraw": "target/tmp/cargo-difftests\\test_sub\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_sub\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_sub\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_sub\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_sub",
+ "other_fields": {}
+ },
+ "verdict": "dirty"
+ }
+]
+```
+
+We can see that the `test_add` and `test_sub` tests have the "dirty" `verdict`, while the other tests still have the "clean" verdict. That is because we modified the `src/lib.rs` file (well technically it's still the same, but by mtime rules it is different), and only the `test_add` and `test_sub` tests used code from `src/lib.rs`, while the others didn't. Let's rerun the `test_add` and `test_sub` tests:
+
+```bash
+cargo t --profile difftests --test tests test_add && cargo t --profile difftests --test tests test_sub
+```
+
+Analyzing again:
+
+```bash
+cargo difftests analyze-all
+```
+
+Should give us `"verdict": "clean"` for all the tests.
+
+Similarly, if we were to `touch src/advanced_arithmetic.rs`, we would get the "dirty" `verdict` for the `test_mul`, `test_div` and `test_div_2` tests, but `test_add` and `test_sub` would still be "clean".
+
+I mentioned above that `cargo difftests` used the file system mtime by default to determine if a file was modified. This works well in most cases, but it also has 2 other git-diff based algorithms to choose from:
+
+```bash
+cargo difftests analyze-all --algo git-diff-files
+# and
+cargo difftests analyze-all --algo git-diff-hunks
+```
+
+To be able to use them, you need to have a git repository, with at least one commit, so let's initialize one and commit our files:
+
+```bash
+git init
+git add .
+git commit -m "Initial commit"
+```
+
+In both cases, it's recommended to rerun the tests right after each commit, so let's do that:
+
+```bash
+cargo t --profile difftests --test tests test_add
+cargo t --profile difftests --test tests test_sub
+cargo t --profile difftests --test tests test_mul
+cargo t --profile difftests --test tests test_div -- --exact
+cargo t --profile difftests --test tests test_div_2
+```
+
+#### git-diff-files
+
+What this does is explained above, but let's see it in action.
+
+If we analyze:
+
+```bash
+cargo difftests analyze-all --algo git-diff-files
+```
+
+It should give us clean on all tests.
+
+Let's try adding a few empty lines to `src/lib.rs` and analyzing again:
+
+```bash
+cargo difftests analyze-all --algo git-diff-files
+```
+
+Similarly to the mtime algorithm, we get the "dirty" `verdict` for the `test_add` and `test_sub` tests, but the others are still "clean".
+
+Now, if we remove the empty lines that we added and analyze again:
+
+```bash
+git reset --hard HEAD
+cargo difftests analyze-all --algo git-diff-files
+```
+
+We should get the "clean" `verdict` for all the tests.
+
+#### git-diff-hunks
+
+~~Currently broken. _To be done_.~~ edit: fixed in `0.1.0-alpha.3`.
+
+This algorithm is similar to the `git-diff-files` algorithm, but instead of considering the whole file, it looks only at hunks (groups of lines that were modified). If they were touched by a test, then that test should be considered dirty.
+
+> It's highly recommended you go read the explanation of this in the first part of the blog post before deciding to use this, as it is the most error-prone if not used well, yet can be the most accurate out of all of them.
+
+Let's try it out:
+
+```bash
+git reset --hard HEAD # reset to HEAD
+cargo t --profile difftests --test tests test_add -- --exact
+cargo t --profile difftests --test tests test_sub -- --exact
+cargo t --profile difftests --test tests test_mul -- --exact
+cargo t --profile difftests --test tests test_div -- --exact
+cargo t --profile difftests --test tests test_div_2 -- --exact
+```
+
+Let us edit just `advanced_arithmetic::div_unchecked`:
+
+```rust
+// src/advanced_arithmetic.rs
+
+pub fn mul(a: i32, b: i32) -> i32 {
+ a * b
+}
+
+pub fn div_unchecked(a: i32, b: i32) -> i32 {
+ a / b // b is guaranteed to be != 0 // we modified this line
+}
+
+pub fn div(a: i32, b: i32) -> Option<i32> {
+ if b != 0 {
+ Some(div_unchecked(a, b))
+ } else {
+ None
+ }
+}
+```
+
+And analyze:
+
+```bash
+cargo difftests analyze-all --algo git-diff-hunks
+```
+
+`test_div` should be the only dirty test, as it is the only one that uses `advanced_arithmetic::div_unchecked`. `test_div_2` is not dirty, because `div_unchecked` is only reached if `b != 0`, and that is not the case in `test_div_2`.
+
+The problems arise when the profiling data was not collected in a clean working tree.
+
+For example, let us perform the following steps:
+
+```bash
+git reset --hard HEAD
+```
+
+Edit file:
+
+```rust
+// src/advanced_arithmetic.rs
+
+pub fn mul(a: i32, b: i32) -> i32 {
+ a * b
+}
+
+// add a few empty lines
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+pub fn div_unchecked(a: i32, b: i32) -> i32 {
+ a / b
+}
+
+pub fn div(a: i32, b: i32) -> Option<i32> {
+ if b != 0 {
+ Some(div_unchecked(a, b))
+ } else {
+ None
+ }
+}
+```
+
+Now rerun the tests:
+
+```bash
+cargo t --profile difftests --test tests test_add -- --exact
+cargo t --profile difftests --test tests test_sub -- --exact
+cargo t --profile difftests --test tests test_mul -- --exact
+cargo t --profile difftests --test tests test_div -- --exact
+cargo t --profile difftests --test tests test_div_2 -- --exact
+```
+
+Now if we remove those empty lines, and make `div_unchecked` return `a / b + 1`:
+
+```rust
+// src/advanced_arithmetic.rs
+
+pub fn mul(a: i32, b: i32) -> i32 {
+ a * b
+}
+
+// add a few empty lines here
+pub fn div_unchecked(a: i32, b: i32) -> i32 {
+ a / b + 1
+}
+
+pub fn div(a: i32, b: i32) -> Option<i32> {
+ if b != 0 {
+ Some(div_unchecked(a, b))
+ } else {
+ None
+ }
+}
+```
+
+Now if we rerun the analysis:
+
+```bash
+cargo difftests analyze-all --algo git-diff-hunks
+```
+
+```json
+[
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_add",
+ "self_profraw": "target/tmp/cargo-difftests\\test_add\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_add\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_add\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_add\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_add",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_div",
+ "self_profraw": "target/tmp/cargo-difftests\\test_div\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_div\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_div\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_div\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_div",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_div_2",
+ "self_profraw": "target/tmp/cargo-difftests\\test_div_2\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_div_2\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_div_2\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_div_2\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_div_2",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_mul",
+ "self_profraw": "target/tmp/cargo-difftests\\test_mul\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_mul\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_mul\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_mul\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_mul",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ },
+ {
+ "difftest": {
+ "dir": "target/tmp/cargo-difftests\\test_sub",
+ "self_profraw": "target/tmp/cargo-difftests\\test_sub\\self.profraw",
+ "other_profraws": [],
+ "self_json": "target/tmp/cargo-difftests\\test_sub\\self.json",
+ "profdata_file": "target/tmp/cargo-difftests\\test_sub\\merged.profdata",
+ "exported_profdata_file": "target/tmp/cargo-difftests\\test_sub\\exported.json",
+ "index_data": null
+ },
+ "test_desc": {
+ "pkg_name": "cargo-difftests-sample-project",
+ "crate_name": "tests",
+ "bin_name": null,
+ "bin_path": "C:\\Users\\Dinu\\samples\\cargo-difftests-sample-project\\target\\difftests\\deps\\tests-53cb4ce840823521.exe",
+ "test_name": "test_sub",
+ "other_fields": {}
+ },
+ "verdict": "clean"
+ }
+]
+```
+
+All the tests are considered clean, but that is clearly wrong. This is one of the pitfalls of using `--algo=git-diff-hunks`: It's not accurate when running the tests in a worktree that has uncommitted changes. In this case, `--algo=git-diff-files` would still work, while `--algo=git-diff-hunks` gives flat out incorrect results.
+
+Now the question that would naturally arise:
+
+### Which algorithm should I use?
+
+The answer is: it depends. If you understand and can manage the pitfalls of `git-diff-hunks`, that's the best option. Otherwise, `git-diff-files` is another good option; although it suffers from the same problems as `git-diff-hunks`, they are not as severe. Although it's not as accurate as the `git-diff`-based ones, `fs-mtime` can (almost) _never_ go wrong (_it is actually hard to get it to go wrong_), and is therefore the default, so if you're unsure, just use that.
+
+### Test indexes
+
+The `cargo difftests analyze-all` command can also generate and use test indexes, which are JSON files that contain simpler versions of the extracted profdata files, making subsequent analyze calls a lot faster. In our small sample project, we don't get much of an improvement, but in a larger project it can be significant. For example, in upsilon I got a 23x speedup (from 7s down to 0.3s) when using indexes.
+
+To use them:
+
+```bash
+cargo difftests analyze-all --index-root ... --index-strategy always
+# or
+cargo difftests analyze-all --index-root ... --index-strategy if-available
+```
+
+But note that the `if-available` strategy will only use the index if it exists, and will not generate it if it doesn't.
+
+The `--index-root` argument is the path to the directory where the indexes will be stored.
+
+## Appendix
+
+### Appendix A: Versions
+
+The toolchain used in this guide was `nightly-2023-02-03-x86_64-pc-windows-msvc`.
+
+`cargo-difftests` version: `0.1.0-alpha.3`
+
+### Appendix B: Repository
+
+The repository for this guide can be found [here](https://github.com/dnbln/cargo-difftests-sample-project).
\ No newline at end of file
diff --git a/content/blog/dir-structure.md b/content/blog/dir-structure.md
new file mode 100644
index 00000000..412f878a
--- /dev/null
+++ b/content/blog/dir-structure.md
@@ -0,0 +1,440 @@
+---
+title: Introducing dir-structure
+date: 2023-09-27
+---
+TL;DR: A simple crate to read and write directory structures.
+
+## Motivation
+
+There were a few times when I needed to read or write a directory structure, but I didn't want to use the `std::fs` API directly. If you think about it, it is pretty simple to read, parse and write directory structures, so I went through a lot of crates on `crates.io` that referenced directories, but I didn't really find anything that would do this. So I've decided to write my own.
+
+This blog post serves both as a guide to getting started with `dir-structure`, as well as a bit of documentation on the crate itself.
+
+## Getting started
+
+The crate is available on `crates.io`, so you can just add it to your `Cargo.toml`:
+
+```toml
+[dependencies]
+dir-structure = "0.1"
+```
+
+But before being able to use it, we need to first think of what kind of directory structure we want to model. The particular use case I had in mind was a directory, filled with many other subdirectories, each containing 2 files: `input.txt` and `output.txt`, which would later be used for testing purposes (via a crate like `libtest-mimic` for example), but the directory structure part can be applied to many other use cases.
+
+So an example `tree` would look like this:
+
+```
+root
+├───assignment
+│ input.txt
+│ output.txt
+│
+├───block_with_semis
+│ input.txt
+│ output.txt
+│
+├───call_with_lambdas
+│ input.txt
+│ output.txt
+│
+└───fn
+ input.txt
+ output.txt
+```
+
+We can model it with a simple:
+
+```rust
+#[derive(dir_structure::DirStructure)]
+struct Dir {
+ // a more advanced example will follow, but
+ // for now we will model it with compile-time
+ // known directories.
+
+ // Notice how nested directories are also supported.
+ assignment: InnerDir,
+ block_with_semis: InnerDir,
+ call_with_lambdas: InnerDir,
+ r#fn: InnerDir,
+}
+
+#[derive(dir_structure::DirStructure)]
+struct InnerDir {
+ #[dir_structure(path = "input.txt")]
+ input: String,
+ #[dir_structure(path = "output.txt")]
+ output: String,
+}
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+ use dir_structure::DirStructureItem;
+ let path = std::path::Path::new("root");
+ let mut dir = Dir::read(path)?;
+
+ // and now we can access the fields of Dir as they were loaded above.
+ println!("assignment: {} -> {}", dir.assignment.input, dir.assignment.output);
+ println!("block_with_semis: {} -> {}", dir.block_with_semis.input, dir.block_with_semis.output);
+ println!("call_with_lambdas: {} -> {}", dir.call_with_lambdas.input, dir.call_with_lambdas.output);
+ println!("fn: {} -> {}", dir.r#fn.input, dir.r#fn.output);
+
+ // We can even modify them, and then write back the result.
+ dir.assignment.input = "new input".to_string();
+ dir.assignment.output = "new output".to_string();
+ dir.write(path)?;
+
+ Ok(())
+}
+```
+
+## Advanced usage
+
+### Non-compile-time known directories
+
+In the example above, we knew that the `root` directory always had the same 4 subdirectories, but what if we didn't know that? What if we wanted to read a directory structure that had an arbitrary number of subdirectories, but their contents all shared the same structure?
+
+Well, we can do that really easily too:
+
+```rust
+#[derive(dir_structure::DirStructure)]
+struct Dir {
+ #[dir_structure(path = self)] // will pass the path of the current
+ // directory to the function responsible for reading
+ inner_dirs: dir_structure::DirChildren<InnerDir>,
+}
+
+#[derive(dir_structure::DirStructure)]
+struct InnerDir {
+ #[dir_structure(path = "input.txt")]
+ input: String,
+ #[dir_structure(path = "output.txt")]
+ output: String,
+}
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+ use dir_structure::DirStructureItem;
+ let path = std::path::Path::new("root");
+ let dir = Dir::read(path)?;
+
+ // and now we can access the fields of Dir as they were loaded above.
+ for inner_dir in dir.inner_dirs.iter() {
+ // do something with the inner_dir
+ let name: &std::ffi::OsString = inner_dir.file_name();
+ let InnerDir { input, output } = inner_dir.value();
+ println!("{:?}: {} -> {}", name, input, output);
+ }
+
+ Ok(())
+}
+```
+
+Since by default the `DirStructure` derive macro will use a child directory with the name of the field, we have to explicitly tell it that we want to use the current directory instead. We can do that by adding the `#[dir_structure(path = self)]` attribute.
+
+### Lazy reading of directory contents
+
+In the examples above, we read the directory structure immediately, but what if we had so much data that we didn't want to read it all at once? Well, the library also provides a `DeferredRead` type, which only stores the path, and will read the value later when we explicitly ask it to.
+
+```rust
+#[derive(dir_structure::DirStructure)]
+struct Dir {
+ #[dir_structure(path = self)]
+ inner_dirs: dir_structure::DirChildren<InnerDir>,
+}
+
+#[derive(dir_structure::DirStructure)]
+struct InnerDir {
+ // the DeferredRead type is generic over T, so we can use it
+ // for any type that can be read from a file.
+ #[dir_structure(path = "input.txt")]
+ input: dir_structure::DeferredRead<String>,
+ #[dir_structure(path = "output.txt")]
+ output: dir_structure::DeferredRead<String>,
+}
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+ use dir_structure::DirStructureItem;
+ let path = std::path::Path::new("root");
+ let dir = Dir::read(path)?;
+
+ for inner_dir in dir.inner_dirs.iter() {
+ let name: &std::ffi::OsString = inner_dir.file_name();
+ let InnerDir { input, output } = inner_dir.value();
+ let real_input: String =
+ input.perform_read()?; // actually performs the read
+ let real_output: String = output.perform_read()?;
+ println!("{:?}: {} -> {}", name, real_input, real_output);
+ }
+
+ Ok(())
+}
+```
+
+In this example however, we have to explicitly call `perform_read` on the `DeferredRead` values, which is a bit annoying, as it does no caching of the values. So if we wanted to read the same value multiple times, we would have to call `perform_read` multiple times, which would be inefficient. We will explore an alternative in the next section.
+
+### Lazy and cached reading of directory contents
+
+In the previous example, we had to explicitly call `perform_read` on the `DeferredRead`, but if we have to read it multiple times, it would be inefficient. So we can use the `DeferredReadOrOwn` type, which is also able to cache the value, so that we don't have to read it multiple times.
+
+```rust
+#[derive(dir_structure::DirStructure)]
+struct Dir {
+ #[dir_structure(path = self)]
+ inner_dirs: dir_structure::DirChildren<InnerDir>,
+}
+
+#[derive(dir_structure::DirStructure)]
+struct InnerDir {
+ // DeferredReadOrOwn will defer the read until we call
+ // either `get()` or `perform_and_store_read()` on it.
+ #[dir_structure(path = "input.txt")]
+ input: dir_structure::DeferredReadOrOwn<String>,
+ #[dir_structure(path = "output.txt")]
+ output: dir_structure::DeferredReadOrOwn<String>,
+}
+```
+
+In a nutshell, here is the API:
+
+```rust
+#[derive(Debug, Clone, Hash)]
+pub enum DeferredReadOrOwn<T>
+where
+ T: ReadFrom,
+{
+ Own(T),
+ Deferred(DeferredRead<T>),
+}
+
+impl<T> DeferredReadOrOwn<T>
+where
+ T: ReadFrom,
+{
+ /// Gets the value. If it is not already read, it will read it,
+ /// but without saving it.
+ pub fn get(&self) -> Result<T>
+ where
+ T: Clone, // do note that if T is not Clone, we won't be able to
+ // Clone the value if we have already read it.
+ {
+ // ...
+ }
+
+ /// Performs the read and stores the value. If the value is already read,
+ /// it will just return a reference to it.
+ pub fn perform_and_store_read(&mut self) -> Result<&T> {
+ // ...
+ }
+}
+```
+
+### Reading / writing JSON
+
+With the `json` feature, we can also read and write JSON files using `serde_json`.
+
+```toml
+[dependencies]
+dir-structure = { version = "0.1", features = ["json"] }
+serde = { version = "1", features = ["derive"] }
+```
+
+```rust
+#[derive(dir_structure::DirStructure)]
+struct Dir {
+ #[dir_structure(path = "f.json")]
+ f: dir_structure::json::Json<F>,
+}
+
+#[derive(Debug, serde::Serialize, serde::Deserialize)]
+struct F {
+ a: String,
+ b: String,
+}
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+ use dir_structure::DirStructureItem;
+ let path = std::path::Path::new("root");
+ let mut dir = Dir::read(path)?;
+
+ let f: &F = &dir.f.0;
+ println!("f: {:?}", f);
+
+ let new_f = F {
+ a: "new a".to_string(),
+ b: "new b".to_string(),
+ };
+ dir.f.0 = new_f;
+ dir.write(path)?;
+
+ // now `f.json` contains the JSON of the `new_f` value.
+
+ Ok(())
+}
+```
+
+## Library internals
+
+The whole library works with 2 building-block traits: `ReadFrom` and `WriteTo`.
+
+```rust
+pub trait ReadFrom {
+ fn read_from(path: &Path) -> Result<Self>
+ where
+ Self: Sized;
+}
+
+pub trait WriteTo {
+ fn write_to(&self, path: &Path) -> Result<()>;
+}
+```
+
+The `ReadFrom` trait is implemented for types that can be read from a path, while the `WriteTo` trait is the opposite of that, and is implemented for types that can be written to a path.
+
+They are both implemented for types that represent whole directory structures, as well as for types that represent individual files.
+
+Directory structures are read and written recursively, so if we had a directory structure like in the beginning:
+
+```
+root
+├───assignment
+│ input.txt
+│ output.txt
+│
+├───block_with_semis
+│ input.txt
+│ output.txt
+│
+├───call_with_lambdas
+│ input.txt
+│ output.txt
+│
+└───fn
+ input.txt
+ output.txt
+```
+
+The derive macro for `Dir` and `InnerDir` at the very beginning of this post would generate something along the lines of:
+
+```rust
+struct Dir {
+ assignment: InnerDir,
+ block_with_semis: InnerDir,
+ call_with_lambdas: InnerDir,
+ r#fn: InnerDir,
+}
+
+impl ReadFrom for Dir {
+ fn read_from(path: &std::path::Path) -> dir_structure::Result<Self> {
+ let assignment = InnerDir::read_from(&path.join("assignment"))?;
+ let block_with_semis = InnerDir::read_from(&path.join("block_with_semis"))?;
+ let call_with_lambdas = InnerDir::read_from(&path.join("call_with_lambdas"))?;
+ let r#fn = InnerDir::read_from(&path.join("fn"))?;
+ Ok(Self {
+ assignment,
+ block_with_semis,
+ call_with_lambdas,
+ r#fn,
+ })
+ }
+}
+
+impl WriteTo for Dir {
+ fn write_to(&self, path: &std::path::Path) -> dir_structure::Result<()> {
+ self.assignment.write_to(&path.join("assignment"))?;
+ self.block_with_semis.write_to(&path.join("block_with_semis"))?;
+ self.call_with_lambdas.write_to(&path.join("call_with_lambdas"))?;
+ self.r#fn.write_to(&path.join("fn"))?;
+ Ok(())
+ }
+}
+
+impl DirStructure for Dir {}
+
+struct InnerDir {
+ input: String,
+ output: String,
+}
+
+impl ReadFrom for InnerDir {
+ fn read_from(path: &std::path::Path) -> dir_structure::Result<Self> {
+ let input = String::read_from(&path.join("input.txt"))?;
+ let output = String::read_from(&path.join("output.txt"))?;
+ Ok(Self { input, output })
+ }
+}
+
+impl WriteTo for InnerDir {
+ fn write_to(&self, path: &std::path::Path) -> dir_structure::Result<()> {
+ self.input.write_to(&path.join("input.txt"))?;
+ self.output.write_to(&path.join("output.txt"))?;
+ Ok(())
+ }
+}
+
+impl DirStructure for InnerDir {}
+```
+
+`ReadFrom` and `WriteTo` are really simple traits, and they are implemented for the following types:
+
+- `String`
+- `Vec<u8>`
+- [`FmtWrapper<T> where T: std::fmt::Display + std::str::FromStr`](#fmtwrapper)
+
+### `FmtWrapper`
+
+The `FmtWrapper<T>` type is a newtype around `T`, which implements `ReadFrom` and `WriteTo` using `std::fmt::Display` and `std::str::FromStr`.
+
+It can be used like this:
+
+```rust
+#[derive(dir_structure::DirStructure)]
+struct Dir {
+ // with_newtype = FmtWrapper basically says that we want to use
+ // FmtWrapper instead of u32 for reading and writing, and then
+ // use a few conversion functions to convert to and from FmtWrapper.
+ #[dir_structure(with_newtype = FmtWrapper)]
+ a: u32,
+ #[dir_structure(with_newtype = FmtWrapper)]
+ b: u32,
+}
+```
+
+### `#[dir_structure(with_newtype = T)]`
+
+The specific traits involved in the conversions are:
+
+```rust
+pub trait NewtypeToInner {
+ type Inner;
+
+ fn into_inner(self) -> Self::Inner;
+}
+
+pub trait FromRefForWriter<'a> {
+ /// The inner type to cast.
+ type Inner: ?Sized;
+ /// The reference type to cast to.
+ type Wr: WriteTo + 'a;
+
+ /// Casts the reference to the inner type to a [`WriteTo`]
+ /// reference type.
+ fn from_ref_for_writer(value: &'a Self::Inner) -> Self::Wr;
+}
+```
+
+`NewtypeToInner` is pretty much straight-forward, but `FromRefForWriter` is a bit more complicated. It is used to convert a reference to the inner type to a type that holds said reference and implements `WriteTo`. Essentially it is a newtype around `&'a Self::Inner` which implements `WriteTo`.
+
+Both of those functions are used when we use a `with_newtype` attribute on a field.
+
+In the general case:
+
+```rust
+#[derive(dir_structure::DirStructure)]
+struct Dir {
+ #[dir_structure(with_newtype = T)]
+ field: U,
+}
+```
+
+The following bounds must be satisfied for the `with_newtype` attribute to work:
+
+- `T: NewtypeToInner`
+- `T: for<'a> FromRefForWriter<'a, Inner = U>`
\ No newline at end of file
diff --git a/content/blog/index.md b/content/blog/index.md
new file mode 100644
index 00000000..a2d4a5d6
--- /dev/null
+++ b/content/blog/index.md
@@ -0,0 +1,6 @@
+---
+title: Blog
+---
+- [[blog/upsilon/index|upsilon]]: Blog posts about building a self-hosted git service written in Rust.
+- [[blog/cargo-difftests/index|cargo-difftests]]: section of my blog dedicated to the evolution of [[cargo-difftests/index|cargo-difftests]].
+- [[dir-structure]]: a post introducing `dir-structure`, a crate to model file system directories as simple Rust structs.
\ No newline at end of file
diff --git a/content/blog/upsilon/index.md b/content/blog/upsilon/index.md
new file mode 100644
index 00000000..3c8bae73
--- /dev/null
+++ b/content/blog/upsilon/index.md
@@ -0,0 +1,6 @@
+---
+title: Upsilon blog
+---
+These blog posts are about [upsilon], my self-hosted git-host service written in Rust. Feel free to go over [[part-1-introduction|its introduction post]], in which a simple git over http server is implemented.
+
+[upsilon]: https://github.com/dnbln/upsilon
\ No newline at end of file
diff --git a/content/blog/upsilon/part-1-introduction.md b/content/blog/upsilon/part-1-introduction.md
new file mode 100644
index 00000000..3b30400f
--- /dev/null
+++ b/content/blog/upsilon/part-1-introduction.md
@@ -0,0 +1,96 @@
+---
+title: "Upsilon part I: Introduction"
+date: 2022-12-02
+---
+So, I've been working on a new self-hosted git server for almost a month now, and I would like to share some of the things I learned along the way, and also document the journey in the form of this section of my blog. This is the first part of a series of posts about [upsilon](https://github.com/dnbln/upsilon), my new git server.
+
+As of right now, it can talk the `git://` and `http://` protocols with the `git` client, allowing for `clone`s, `pull`s and `push`es, which is enough to be among git servers what brainfuck is among programming languages, enough to do everything you would technically _need_ from a git server, but **_very_** (and I do mean **_very_**) **_far off_** from what you would expect from one.
+
+## Why?
+
+I always wanted to write a fair bit of developer tooling. This may be just another failed attempt, or it may be the start of something.
+
+I got to learn a lot along the way, and I don't regret it.
+
+Now that the why is out of the way, let's get to the how.
+
+## How?
+
+So, the stock `git` package in most distributions is enough to be used for a full server, but the binaries that we're interested in specifically are `git-daemon` for the `git://` protocol ([guide](https://git-scm.com/book/en/v2/Git-on-the-Server-Git-Daemon) and [documentation](https://git-scm.com/docs/git-daemon)), and `git-http-backend` for the `http://` protocol ([guide](https://git-scm.com/book/en/v2/Git-on-the-Server-Smart-HTTP) and [documentation](https://git-scm.com/docs/git-http-backend)).
+
+### For the `git://` protocol
+
+The `git-daemon` binary is a daemon that listens on a port (by default `9418`) and handles the necessary communication with git clients that connect to it. It is the easier of the two protocols to set up, as that only involves running the daemon somewhere and ensuring it keeps running for as long as we need it.
+
+The guide suggests doing it as a `systemd` service, but the approach I went with was to run it as a child process of the main server process (while still allowing users to disable that and run the daemon manually, should they wish to do so).
+
+[Link](https://github.com/dnbln/upsilon/blob/52921f2250612d936cf42652739731ec54fdd8f5/crates/upsilon-vcs/src/daemon.rs#L40-L50) With some more code to handle overrides, but that's not that important. [Link](https://github.com/dnbln/upsilon/blob/52921f2250612d936cf42652739731ec54fdd8f5/crates/upsilon-vcs/src/daemon.rs#L95-L97)
+
+(`config.get_path()` is the path to the root directory where all the repositories are stored.)
+
+After that, everything we have to do is hold onto that child, until the web server shuts down, and just send it a signal when that happens:
+
+[Link](https://github.com/dnbln/upsilon/blob/52921f2250612d936cf42652739731ec54fdd8f5/crates/upsilon-web/src/git/git_protocol.rs#L45-L52)
+
+But... we are not done, yet...
+
+Every time we create a git repository, we also have to create a magic `git-daemon-export-ok` file, or otherwise the git daemon will refuse to serve it. This is done here:
+
+[Link](https://github.com/dnbln/upsilon/blob/52921f2250612d936cf42652739731ec54fdd8f5/crates/upsilon-vcs/src/lib.rs#L371-L375)
+
+Since there is no authentication on the `git://` protocol, we have to make sure the repository is "public", or otherwise everyone would be able to access it.
+
+Note: although it's actually possible to use the `git://` protocol for pushing, because it's not authenticated that is a very bad idea. If you would like to try it, you can `--enable` the disabled-by-default `git-receive-pack` service (`--enable=git-receive-pack`). But, again, that is a very bad idea. This allows the git daemon to receive packs, which are basically what commits are made out of. This means that _anyone_ (and I mean _anyone_ and _everyone_ who can reach the daemon) can now push to any repository under the base path, provided it has the magic `git-daemon-export-ok` file.
+
+### For the `http://` protocol
+
+Now, this is where the _real_ fun begins.
+
+The `git-http-backend` binary is actually a [CGI](https://en.wikipedia.org/wiki/Common_Gateway_Interface) "script". That is basically an executable file which is run whenever we receive a request on some path. The way CGI works is by passing the path, HTTP method, query string and headers to the executable as environment variables, and then giving the request body to it through the process' `stdin`. The executable then has to return the headers and then the response body through the process' `stdout`, in a similar format to normal HTTP responses, except for a few minor differences, of which the most important is the "Status:" header, which is used to specify the HTTP status code.
+
+All in all, this is the code for setting up the environment of the CGI script:
+
+[Link](https://github.com/dnbln/upsilon/blob/52921f2250612d936cf42652739731ec54fdd8f5/crates/upsilon-vcs/src/http_backend.rs#L232-L276)
+
+And passing along the request body:
+
+[Link](https://github.com/dnbln/upsilon/blob/52921f2250612d936cf42652739731ec54fdd8f5/crates/upsilon-vcs/src/http_backend.rs#L278-L290)
+
+And then parsing the headers:
+
+[Link](https://github.com/dnbln/upsilon/blob/52921f2250612d936cf42652739731ec54fdd8f5/crates/upsilon-vcs/src/http_backend.rs#L292-L330)
+
+Then everything we have to do is relay the response body, which happens in `upsilon_web`:
+
+[Link](https://github.com/dnbln/upsilon/blob/52921f2250612d936cf42652739731ec54fdd8f5/crates/upsilon-web/src/git/git_http_protocol.rs#L353-L371)
+
+As you can see, the http protocol requires a little more involvement from the web server's side, but it's not that bad. We get to do authenticated requests now, which is a must for a git server, especially for pushing.
+
+Note: the `git-http-backend` binary also looks for the `git-daemon-export-ok` file, and if not present it will reject the request. This can be overridden with the `GIT_HTTP_EXPORT_ALL` environment variable, but I personally would not recommend it.
+
+### Authentication under the `http://` protocol
+
+`git-http-backend` does not handle authentication, it's up to us as the middle man to handle it.
+
+Like most http servers, the process of authentication works like this:
+
+- the client sends an unauthenticated request to the server.
+- the server reads the request. If it decides that the request needs to be authenticated, it sends a `401 Unauthorized` response, with a `WWW-Authenticate` header, which contains the authentication scheme (and some other information).
+- the client reads the response, and if it's a `401` with a `WWW-Authenticate` header, it prompts the user to enter the credentials, then performs the request again, passing the credentials along this time.
+- the server reads the request, and now decides that it's authenticated, so it finally performs the actual request.
+
+That is actually straight-forward enough. `git push`es will have a path that ends in `git-receive-pack` or the query string `service=git-receive-pack`, so we can use that to decide whether we need an `Authorization` header. If we do, and don't have that, we send a `401 Unauthorized` response with the header `WWW-Authenticate: Basic`, which will make the git client prompt the user for a user and password (or ask the credential manager for it, but that's beyond the scope of the server). Then, when the git client performs the request again, we can check the `Authorization` header, and if it's valid, we can finally pass along the request to `git-http-backend`.
+
+Most of this happens in `upsilon_web`:
+
+[Link](https://github.com/dnbln/upsilon/blob/52921f2250612d936cf42652739731ec54fdd8f5/crates/upsilon-web/src/git/git_http_protocol.rs#L435-L463)
+
+`auth_if_user_has_necessary_permissions` will error if the user doesn't have the necessary permissions to perform the request, and if that happens, we send a `401 Unauthorized` response with the `WWW-Authenticate: Basic` header, or a `403 Forbidden` if the user was actually authenticated but doesn't have the necessary permissions to perform the request on a _specific_ repo (e.g. on GitHub you can't push to someone else's repository, unless you were invited as a collaborator, giving you write permissions on that repository).
+
+## Further reading
+
+I really recommend reading through the git book and documentation on setting up `git-daemon` and `git-http-backend`, with an emphasis on the [`git-http-backend` documentation](https://git-scm.com/docs/git-http-backend), as that is what most of the code in this post was based upon.
+
+## Where to go from here?
+
+Now that a fair bit of the "core" git backend is done, it's time to start working on the web frontend.
\ No newline at end of file
diff --git a/content/blog/upsilon/part-2-git-over-ssh.md b/content/blog/upsilon/part-2-git-over-ssh.md
new file mode 100644
index 00000000..344c51c4
--- /dev/null
+++ b/content/blog/upsilon/part-2-git-over-ssh.md
@@ -0,0 +1,46 @@
+---
+title: "Upsilon part II: Hallo ssh"
+date: 2023-01-20
+---
+In the [[part-1-introduction|previous blog post]], I went over how to implement a simple git server over HTTP.
+
+After a lot more code (138 commits) and yet still not a single line of frontend, here we are again with another post about [upsilon](https://github.com/dnbln/upsilon), this time explaining how to write a server to serve as a git remote, over the `ssh://` protocol.
+
+## The age-old question: Why?
+
+Well, `ssh://` is a very common protocol to serve git repositories, and although it is maybe not as common as `http(s)`, it is still quite handy and easy to use. For completeness' sake, I wanted to support it, and this was also a fantastic opportunity to learn how to actually write a `ssh` server.
+
+As far as implementation details go, the `ssh` parts of the server are pretty similar to how the server handles the data-store, as in, there are some common operations that are shared between `ssh` servers, and swapping out the actual server implementation is as easy as changing a few configuration variables.
+
+That being said, similar to the data-stores, there's also only one current implementation of it, which is based on [russh](https://github.com/warp-tech/russh). Using OpenSSH `sshd` for the server doesn't seem like that good of an idea, as authentication is a little harder to handle when we don't know who is who, thus giving rise to our custom `ssh` server, and I really do hope I didn't introduce any major security flaws, due to my lack of prior knowledge of the details of the `ssh` protocol itself and how it should be used.
+
+## How?
+
+I'm glad you asked. Looking back on it, I would rank the `ssh` server as somewhere in between `git-daemon` (for the `git://` protocol) and `git-http-backend` (for the `http(s)://` protocol), in terms of difficulty and time required to implement. I didn't spend multiple days banging my head against the wall, asking why the `git` client thought the repository was corrupted, as I did when writing the `http` backend, but still learned quite a bit about `ssh` here... Well you actually learn more when things don't go right, am I right? But ssh had its own fair share of problems too, so let's get to it.
+
+### First things first
+
+The `ssh://` protocol is very different from `http://`, so a few basic things first about how `git` over `ssh://` operates.
+
+- In `ssh`, after all the preliminary checks have been performed (the client checking whether the server is in the `~/.ssh/known_hosts` list), the client authenticates itself (only allowed with ssh keys, no username / password or, god forbid, no authentication at all; if we would use no authentication at all we might as well just use the `git://` protocol, no point in bothering with `ssh://`, heck it even is faster!).
+- After the client has authenticated itself, the `ssh` session is started, which will go on until the client disconnects.
+- In this session, the client can open multiple channels, which are basically just streams of data (the server can open channels too, but for the purposes of `git` that should not happen, or otherwise the git client would be really surprised and confused).
+- On this channel, the client can send some "requests", but for `git` only one of them is relevant: `exec`. This is also what happens when you pass a command to run when you connect with the plain `ssh` command ( e.g. `ssh hello@example.org echo 1`). The `exec` request is used to run a single command on a channel. The ssh server, when it receives one such request, spawns a shell (`sh -c '...'`) and runs the command inside it, piping `stdout` and `stderr` to the channel (to `data` and `extended_data` with `1` for `stdout` and `stderr` respectively), and piping from the channel (`data`) to the shell's `stdin`.
+- On an `exec` request, the server should also send a `success` message, which is basically just a confirmation that the command was started successfully, or `failed` if it failed to start.
+- The actual `git` commands that the `git` client sends are `git-upload-pack`, `git-receive-pack` and `git-upload-archive`, which are the commands that are used to fetch from, push to, and serve `git archive --remote` with a repository respectively. These commands are run on the channel that was opened by the client, and the server will respond with the appropriate data.
+- Those commands are meant to be run inside `git-shell`. As it turns out, it is not available in git-for-windows, so the webserver just says "no" when we ask it to run a `ssh` server on windows.
+- All the commands are "interactive"-ish, as in they all use `stdin` to do their job. This is why we need to pipe from the channel `data` to the shell's `stdin`, or otherwise they just will not work.
+- After the client decides it is done, it sends an `eof` message, after which the server will close the `stdin` pipe, which the corresponding shell process (and thus the git command that was run in said shell process) will pick up on and finish up. After that, the server will go on and send the rest of the data that it has to send (`stdout` and `stderr`), and finally announce to the client the exit code of the command that was run.
+- After that, the `git` client decides it is done, and so it closes the channel, closes the session, and disconnects.
+
+### Actual implementation process
+
+I've said above that I used [russh](https://github.com/warp-tech/russh) for the `ssh` server, which is a "fork" of [thrussh](https://pijul.org/thrussh) (if it can be considered one), both of which are pure-rust ssh server (~~and client, but that's besides the point~~) libraries.
+
+The first thing I did was to figure out how to get the authentication bit right, and `russh` has [a very nice example](https://docs.rs/russh/0.35.0-beta.9/russh/server/index.html) for the server side of things.
+
+After that, I had to figure out how to actually run the `git` commands, which proved to be a little harder on windows (due to the lack of `git-shell`), so I just gave up on ssh on windows for now.
+
+Tests that use `ssh` are also ignored on windows, because of that, but running Ubuntu in WSL2 on windows is a good workaround for this currently.
+
+The whole (current) implementation of the `russh` server is in the `upsilon-ssh-russh` crate.
\ No newline at end of file
diff --git a/content/blog/upsilon/part-3-upsilon-difftests.md b/content/blog/upsilon/part-3-upsilon-difftests.md
new file mode 100644
index 00000000..4f42a75c
--- /dev/null
+++ b/content/blog/upsilon/part-3-upsilon-difftests.md
@@ -0,0 +1,173 @@
+---
+title: "Upsilon part III: upsilon difftests"
+author: dinu
+date: 2023-02-08
+---
+In [[part-2-git-over-ssh|the previous blog post]], I've gone over how to set up git over ssh. In this blog post, I'm gonna do a 180 and talk about something unrelated (at least not directly) to git, but quite important to get right early in a project: testing.
+
+Having the tests take a while to run is not fun, especially with a large testsuite. So, I decided to add a `quick`, on-by-default, option to `xtask test`, and in this post I would like to give an overview of how it works.
+
+From the title, you might take a guess on how it works... I'll give you a second... yep, it uses [instrumentation-based code coverage](https://doc.rust-lang.org/rustc/instrument-coverage.html), looking through the diffs in the worktree (diff from `HEAD..worktree`), and figuring out which code that the test invokes has been modified, or other inputs to the test have changed.
+
+## The yet-even-older-now question: How?
+
+This is implemented in 2 parts:
+
+- Get the coverage data from the tests
+- Use the coverage data to determine which tests are "dirty"
+
+But, a few things we'll need first:
+
+### Dependencies
+
+#### `cargo-binutils`
+
+We'll need [cargo-binutils](https://github.com/rust-embedded/cargo-binutils). It basically gives us a few commands to be able to invoke the llvm tools distributed with `rustc`. Let's install them:
+
+```bash
+rustup component add llvm-tools-preview
+cargo install cargo-binutils
+```
+
+### Get the coverage data from the tests
+
+If you're not familiar with instrumentation-based code coverage, I'd recommend you read the [instrumentation-based code coverage](https://doc.rust-lang.org/rustc/instrument-coverage.html) page from the `rustc` book.
+
+NOTE
+
+For the instrumentation to work and get one `.profraw` file per test, instead of one per test binary, we need to run the tests individually. `cargo nextest` does this by default, but extra care needs to be taken when running the tests with `cargo test`.
+
+To get started, let's create a new cargo profile:
+
+```toml
+# .cargo/config.toml
+[profile.difftests]
+inherits = "dev"
+rustflags = [
+ "-C", "instrument-coverage",
+ "--cfg", "difftests",
+]
+```
+
+Then, at the beginning of every `#[test]` function:
+
+```rust
+#[cfg(difftests)]
+fn difftests_init(tempdir: &Path, test_name: &str) -> std::io::Result<()> {
+ // to handle tests that spawn children that also need to be instrumented
+ extern "C" {
+ // we can call this function to set the filename of the .profraw file
+ // that will be generated at the end of the program.
+ fn __llvm_profile_set_filename(filename: *const std::ffi::c_char);
+ }
+ if tempdir.exists() {
+ std::fs::remove_dir_all(tempdir)?;
+ }
+ std::fs::create_dir_all(tempdir)?;
+ let self_profraw = tempdir.join("self.profraw");
+ let self_profile_file_str = self_profraw.to_str().unwrap();
+ let self_profile_file_str_c = std::ffi::CString::new(self_profile_file_str).unwrap();
+ unsafe {
+ __llvm_profile_set_filename(self_profile_file_str_c.as_ptr());
+ }
+ // this will set the variable for children processes, so that they
+ // can also write to some file in the tempdir.
+ let profraw_path = tempdir.join("%m_%p.profraw");
+ std::env::set_var("LLVM_PROFILE_FILE", profraw_path);
+ // recommended: also store `std::env::current_exe()`:
+ let exe = std::env::current_exe().unwrap();
+ std::fs::write(tempdir.join("self_exe"), &exe)?;
+ // and test_name:
+ std::fs::write(tempdir.join("test_name"), test_name)?;
+ Ok(())
+}
+
+#[cfg(not(difftests))]
+fn difftests_init(_tempdir: &Path, _test_name: &str) -> std::io::Result<()> {
+ Ok(())
+}
+
+#[test]
+fn test_something() {
+ let test_name = "test_something";
+ let tempdir = PathBuf::from(env!("CARGO_TARGET_TMPDIR")).join(test_name);
+ // `CARGO_TARGET_TMPDIR` is set by cargo, and is a temporary directory
+ // where integration tests can store their data. It is not cleaned up,
+ // cargo just creates it and leaves it to us to manage it.
+    // In unit tests you can use some other temporary directories, but we will
+ // need the outputs stored in this dir after the test is over, so do not
+ // perform any cleanup on it!
+ difftests_init(&tempdir, test_name).unwrap();
+ // do the test
+}
+```
+
+Now, if we run:
+
+```bash
+cargo test --profile difftests
+```
+
+NOTE
+
+It is crucial that the test doesn't `abort` / call `std::process::exit` or other similar actions, as they will prevent the `.profraw` file from being written.
+
+We should get a `target/tmp/test_something` directory, filled with a few `.profraw` files (from the children), and a `self.profraw` file for the test itself, among the other files we initialized.
+
+We can now convert those `.profraw` files to a `.profdata` file, which is a format `llvm-cov` can work with:
+
+```bash
+rust-profdata merge -sparse \
+target/tmp/test_something/*.profraw \
+-o target/tmp/test_something/test_something.profdata
+```
+
+Now, after we have the `.profdata` file, we can use `llvm-cov` to get the coverage data:
+
+```bash
+rust-cov export \
+-instr-profile=target/tmp/test_something/test_something.profdata \
+"$(cat target/tmp/test_something/self_exe)" \
+> target/tmp/test_something/test_something.json
+```
+
+After that, it is only a question of what we want to do with the data, which brings us to the next section.
+
+## Use the coverage data to determine which tests are "dirty"
+
+For this, we will need to figure out which tests are "dirty".
+
+### What is a "dirty" test?
+
+For the purposes of this, we shall call a test "dirty" if the source code that is run during the execution of the test has changed, or if any of its inputs have changed.
+
+We will assume that all tests are fully deterministic based on those things alone.
+
+### How to determine if a test is "dirty"?
+
+#### File system mtimes
+
+One could achieve this through many ways, but perhaps the easiest is to use file system mtimes. We can get the mtime of the `self_exe` file, to determine when the test was last run, and compare it to the mtime of all the source / input files that the test used.
+
+For source files, that is easy: just find the regions that have the execution count > 0, and get the mtime of those, then compare it with the time we last ran the test.
+
+For input files, this is a little bit trickier, and you will have to determine how to do it for your specific use case.
+
+For large source files used by many tests, this might cause many tests to be considered "dirty", sometimes unnecessarily, so we can try to be a bit smarter here.
+
+#### `git diff HEAD`
+
+Another way to do this is to use `git diff HEAD` (through `libgit2` APIs of course), which will show us the changes to the worktree since the last commit.
+
+We need a few assumptions:
+
+1. The instrumented tests are run only right after the last commit, in a clean worktree.
+2. Normal tests are run otherwise.
+
+This is a bit more complicated, but it is more accurate when those conditions hold. We can then use the diff from the last commit to determine the changes that were made to the source files, and then we can use the coverage data to determine which tests are affected by those changes.
+
+This is just a bird's eye view of how this can be done, and it is not necessarily the best way to do it, but it is a good starting point.
+
+If you would like to see my current (~~incomplete~~) implementation of this, feel free to check out the [`dev/upsilon-difftests*` crates](https://github.com/dnbln/upsilon/tree/trunk/dev).
+
+Edit (2024-02-25): This was the precursor to [[cargo-difftests/index|cargo-difftests]].
\ No newline at end of file
diff --git a/content/cargo-difftests/analyze-all.md b/content/cargo-difftests/analyze-all.md
new file mode 100644
index 00000000..12c08bed
--- /dev/null
+++ b/content/cargo-difftests/analyze-all.md
@@ -0,0 +1,120 @@
+---
+title: How to use with the analyze-all command
+---
+
+Right now we are only checking tests individually, but by using this command
+you could analyze all the tests in one go. Although that sounds really nice,
+it's output was only ever intended to be read by machines: what it actually
+outputs is a huge JSON string. This section will go over how to analyze that,
+and / or get `cargo-difftests` to actually rerun the tests.
+
+>[!warning]
+> Under construction.
+
+The actual type of the JSON is an array of [AnalyzeAllSingleTestGroup],
+if you would like to parse that.
+
+If you only want to rerun the tests, then maybe this section is for you.
+
+## Automatic test-rerunning
+
+`cargo-difftests analyze-all` accepts an `--action` option, which can
+be either `print` (default), `assert-clean` or `rerun-dirty`.
+
+### `--action=print`
+
+As the name implies, this only prints out the JSON corresponding to
+all the analysis results, and leaves it up to the calling process to
+do something with that.
+
+### `--action=assert-clean`
+
+This action analyzes all the difftest directories that were
+discovered, and if any of them is dirty, then it errors (and
+exits with a non-zero status code). Otherwise, it exits with
+the status code of 0, meaning that all the difftests found
+were clean after analysis.
+
+### `--action=rerun-dirty`
+
+This action analyzes all the difftest directories, and then
+checks the analysis results for the dirty tests. It then
+proceeds to invoke an external command (`--runner` option)
+for all the tests that have to be rerun, and then exits with
+the status code of that external command. Projects with non-trivial
+test pipelines can write special binaries for this purpose, but
+the default of `cargo-difftests-default-runner` should be enough
+for most projects. Writing a custom runner will be covered in a
+later section.
+
+## `cargo-difftests-default-runner`
+
+As previously mentioned, this is the default runner that
+`cargo-difftests` uses. It is a binary that is installed
+alongside `cargo-difftests` by default.
+
+It has a few requirements in order to be usable:
+- It has to be in the `PATH` environment variable.
+- It has to be able to find the `cargo` binary in the `PATH` environment variable.
+### Extra configuration
+
+It then proceeds to invoke the following for each test.
+
+```Bash
+cargo difftests collect-profiling-data --profile <profile> --filter <test-name> --exact <extra-args>
+```
+
+- `<profile>` is the profile that `cargo-difftests` uses to run tests.
+  By default, it is `difftests`, but can be configured using the `CARGO_DIFFTESTS_PROFILE`
+  environment variable.
+%% `<package>` is the name of the package that the test is in.%%
+- `<test-name>` is the name of the test.
+- `<extra-args>` is a list of extra arguments that can be passed to `cargo difftests`,
+  specified in the `CARGO_DIFFTESTS_EXTRA_CARGO_ARGS` environment variable. They are separated
+  by `,`s, and then are passed to `cargo difftests collect-profiling-data` as-is.
+
+## Custom test runners
+
+If the default runner is not enough for your project, you can write your own.
+
+Take a look over [the source code of the default runner][default-runner-source] if
+you would like some inspiration, but the gist of it is that you have to write a rust
+binary, which roughly looks like this:
+
+```Rust
+fn rerunner(
+ invocation: cargo_difftests::test_rerunner_core::TestRerunnerInvocation
+) -> T { // T can be anything, but it has to implement std::process::Termination
+ // rerun invocation tests:
+ for test in invocation.tests() {
+ // do something with the tests
+
+ // get test name
+ let test_name = &test.test_name;
+
+ // rerun the test
+ let status = std::process::Command::new("hyper-complex-test-runner")
+ .arg(test_name)
+ .status()
+ .expect("failed to run hyper-complex-test-runner");
+
+ if !status.success() {
+ std::process::exit(1);
+ }
+ }
+
+ // create T
+ T::default()
+}
+
+cargo_difftests::cargo_difftests_test_rerunner!(rerunner); // will create main
+// which takes care of parsing the invocation and calling rerunner
+```
+
+
+
+>[!warning]
+> Keep in mind that this does not also collect new profiling data. This means that if we analyze the code again, we will still have the old profiling data, and that would mean the test hasn't run since, or at least that's what `cargo-difftests` knows. The `hyper-complex-test-runner` has to invoke `cargo difftests collect-profiling-data` if subsequent calls to `cargo-difftests analyze* / rerun-dirty*` will be made, or otherwise they *will* return incorrect results.
+
+[AnalyzeAllSingleTestGroup]: https://docs.rs/cargo-difftests/latest/cargo_difftests/struct.AnalyzeAllSingleTestGroup.html
+[default-runner-source]: https://github.com/dnbln/cargo-difftests/blob/trunk/cargo-difftests/src/bin/cargo-difftests-default-rerunner.rs
\ No newline at end of file
diff --git a/content/cargo-difftests/custom-test-harnesses.md b/content/cargo-difftests/custom-test-harnesses.md
new file mode 100644
index 00000000..1d2b15ac
--- /dev/null
+++ b/content/cargo-difftests/custom-test-harnesses.md
@@ -0,0 +1,12 @@
+---
+title: Custom test harnesses
+---
+In the cases where custom test harnesses are used, the test harnesses should behave in specific ways for `cargo-difftests` to be able to properly interact with them. Namely:
+- The test harnesses must report the tests when invoked with `--list --format=terse`, in the same way as the default test harness: one per line, with the following format
+```
+<test_name>: test
+```
+- The test harnesses must run the test that it reported with name `test_name` above when using the following list of arguments:
+```
+<test_name> --exact --nocapture
+```
diff --git a/content/cargo-difftests/index.md b/content/cargo-difftests/index.md
new file mode 100644
index 00000000..3b8c8d58
--- /dev/null
+++ b/content/cargo-difftests/index.md
@@ -0,0 +1,40 @@
+---
+title: Cargo difftests
+---
+
+> "Insanity is doing the same thing over and over again and expecting different results."
+>
+> — Unknown author.
+
+
+>[!warning]
+> Documentation currently under construction.
+
+
+`cargo-difftests` is a [selective re-testing framework][selective-retesting-wikipedia] for rust.
+To put it simply, it is a tool that uses LLVM coverage data +
+some information about what has changed since the last test-run
+to find which tests are most likely to have been affected by those
+changes, and therefore need to be rerun.
+
+The underlying assumption is that if a test passed in the past,
+and none of the code executed in the test has been changed since,
+then the result of the test will not change. While there are some
+edge cases to this, it is generally true for most crates out there.
+
+## What is `cargo-difftests`?
+
+Until now, changes to source code pretty much always involved
+rerunning the entire test suite. This can be quite time-consuming
+for big projects with lots of tests, and beginner contributors
+are often unaware of which tests could be influenced by their
+newly-made changes.
+
+This project attempts to do just that: point out the tests
+whose results would most likely change, given some modifications
+to the source code of the project.
+
+For a quick guide on how to use, see [[usage]].
+
+
+[selective-retesting-wikipedia]: https://en.wikipedia.org/wiki/Regression_testing#Regression_test_selection
\ No newline at end of file
diff --git a/content/cargo-difftests/manual.md b/content/cargo-difftests/manual.md
new file mode 100644
index 00000000..06ed9282
--- /dev/null
+++ b/content/cargo-difftests/manual.md
@@ -0,0 +1,6 @@
+---
+title: Manual setup
+---
+There really isn't any manual setup to be done, `cargo-difftests` versions since `0.6.0` work out-of-the-box for most projects; that is unless they use custom test harnesses. Look over [[custom-test-harnesses]] for what to do in such cases.
+
+Refer to [[usage]] for usage of `cargo-difftests`.
\ No newline at end of file
diff --git a/content/cargo-difftests/usage.md b/content/cargo-difftests/usage.md
new file mode 100644
index 00000000..d11d4895
--- /dev/null
+++ b/content/cargo-difftests/usage.md
@@ -0,0 +1,40 @@
+---
+title: Usage
+---
+As is stated in the [`cargo-difftests` README](https://github.com/dnbln/cargo-difftests/blob/trunk/README.md), one could simply use `cargo-difftests` like so:
+
+```bash
+% # collect profiling data
+% cargo difftests collect-profiling-data
+% touch src/advanced_arithmetic.rs # change mtime
+% cargo difftests analyze --dir target/tmp/difftests/tests/test_add
+clean
+% cargo difftests analyze --dir target/tmp/difftests/tests/test_mul
+dirty
+% cargo difftests analyze --dir target/tmp/difftests/tests/test_div
+dirty
+% cargo difftests collect-profiling-data --filter test_mul --exact
+% cargo difftests analyze --dir target/tmp/difftests/tests/test_mul
+clean
+% cargo difftests analyze --dir target/tmp/difftests/tests/test_div
+dirty
+% cargo difftests collect-profiling-data --filter test_div --exact
+% cargo difftests analyze --dir target/tmp/difftests/tests/test_div
+clean
+```
+
+## Recommended workflow
+
+### Initial profiling data collection
+
+```bash
+cargo difftests collect-profiling-data --compile-index --index-root=difftests-index-root --root=target/tmp/difftests
+```
+
+### Analyze, rerun and collect new profiling data
+
+```bash
+CARGO_DIFFTESTS_EXTRA_ARGS='--compile-index,--index-root=difftests-index-root,--root=target/tmp/difftests' cargo difftests rerun-dirty-from-indexes --index-root=difftests-index-root
+```
+
+`cargo difftests rerun-dirty-from-indexes` is basically an alias for `cargo difftests analyze-all-from-index --action=rerun-dirty`; more information about `analyze-all` can be found in [the analyze-all article](analyze-all.md).
diff --git a/content/index.md b/content/index.md
new file mode 100644
index 00000000..7c89aadd
--- /dev/null
+++ b/content/index.md
@@ -0,0 +1,6 @@
+---
+title: Welcome to Dinu's knowledge garden
+---
+My name is Dinu, and here you can find my knowledge garden. I might use this place to store anything ranging from documentation for some of my projects, to random thoughts and everything in between, ~~*or not*~~. You may find every allotrope of Carbon here, charcoal and diamonds alike. Anyway, good luck, and I hope you can find what you came looking for; that being said, may the [table of contents](toc.md) guide you[^1].
+
+[^1]: May or may not have been a Star Wars reference.
diff --git a/content/textgenerator/prompts/default/getEmailNeg.md b/content/textgenerator/prompts/default/getEmailNeg.md
new file mode 100644
index 00000000..09570607
--- /dev/null
+++ b/content/textgenerator/prompts/default/getEmailNeg.md
@@ -0,0 +1,13 @@
+---
+promptId: getEmailNeg
+name: ✉️ Reply to Email negatively 😡
+description: select the email content and negative reply will be generated
+author: Noureddine
+tags: communication, email
+version: 0.0.1
+---
+prompt:
+reply to this email negatively in professional way.
+email:
+{{context}}
+reply:
\ No newline at end of file
diff --git a/content/textgenerator/prompts/default/getEmailPos.md b/content/textgenerator/prompts/default/getEmailPos.md
new file mode 100644
index 00000000..79c45abd
--- /dev/null
+++ b/content/textgenerator/prompts/default/getEmailPos.md
@@ -0,0 +1,13 @@
+---
+promptId: getEmailPos
+name: ✉️ Reply to Email positively 😄
+description: select the email and a positive reply will be generated
+author: Noureddine
+tags: communication, email
+version: 0.0.1
+---
+prompt:
+Reply to this email positively in a professional way.
+email:
+{{context}}
+reply:
\ No newline at end of file
diff --git a/content/textgenerator/prompts/default/getIdeas.md b/content/textgenerator/prompts/default/getIdeas.md
new file mode 100644
index 00000000..ae349ac2
--- /dev/null
+++ b/content/textgenerator/prompts/default/getIdeas.md
@@ -0,0 +1,12 @@
+---
+promptId: getIdeas
+name: 💡BrainStorm Ideas
+description: Brainstorm idea about the context.
+author: Noureddine
+tags: ideas, writing
+version: 0.0.1
+---
+content:
+{{context}}
+prompt:
+brainstorm ideas about this content
\ No newline at end of file
diff --git a/content/textgenerator/prompts/default/getOutline.md b/content/textgenerator/prompts/default/getOutline.md
new file mode 100644
index 00000000..7bf4d80f
--- /dev/null
+++ b/content/textgenerator/prompts/default/getOutline.md
@@ -0,0 +1,15 @@
+---
+promptId: getOutline
+name: 🗒️Generate Outline
+description: Select a title, an outline will be generated for You.
+required_values: title
+author: Noureddine
+tags: writing
+version: 0.0.1
+---
+title:
+{{title}}
+prompt:
+write an outline for a blog for this title.
+outline:
+-
\ No newline at end of file
diff --git a/content/textgenerator/prompts/default/getParagraph.md b/content/textgenerator/prompts/default/getParagraph.md
new file mode 100644
index 00000000..54c8e0a5
--- /dev/null
+++ b/content/textgenerator/prompts/default/getParagraph.md
@@ -0,0 +1,15 @@
+---
+promptId: getParagraph
+name: ✍️ Write paragraph
+description: select one item, a paragraph will be generated.
+required_values: title, outline
+author: Noureddine
+tags: writing
+version: 0.0.1
+---
+title:
+{{title}}
+outline:
+{{outline}}
+
+# {{context}}
\ No newline at end of file
diff --git a/content/textgenerator/prompts/default/getTags.md b/content/textgenerator/prompts/default/getTags.md
new file mode 100644
index 00000000..00691518
--- /dev/null
+++ b/content/textgenerator/prompts/default/getTags.md
@@ -0,0 +1,14 @@
+---
+promptId: getTags
+name: 🏷️Get Tags for Your Content
+description: Select a content and Get suggest Tags for it
+author: Noureddine
+tags: writing, learning
+version: 0.0.1
+max_tokens: 30
+---
+content:
+{{context}}
+prompt:
+suggest tags for the content in markdown format
+tags:
\ No newline at end of file
diff --git a/content/textgenerator/prompts/default/getTitles.md b/content/textgenerator/prompts/default/getTitles.md
new file mode 100644
index 00000000..e3356fad
--- /dev/null
+++ b/content/textgenerator/prompts/default/getTitles.md
@@ -0,0 +1,12 @@
+---
+promptId: getTitles
+name: 🗃️ Get Blog Titles
+description: select a content and list of blog titles will be generated
+author: Noureddine
+tags: writing
+version: 0.0.1
+---
+content:
+{{context}}
+prompt:
+suggest 10 attractive blog titles about this content:
\ No newline at end of file
diff --git a/content/textgenerator/prompts/default/rewrite.md b/content/textgenerator/prompts/default/rewrite.md
new file mode 100644
index 00000000..7f2e3119
--- /dev/null
+++ b/content/textgenerator/prompts/default/rewrite.md
@@ -0,0 +1,12 @@
+---
+promptId: rewrite
+name: ✏️ Rewrite, Paraphrase
+description: select a content and it will be rewritten.
+author: Noureddine
+tags: writing
+version: 0.0.1
+---
+content:
+{{context}}
+prompt:
+rewrite the content to make it more attractive
\ No newline at end of file
diff --git a/content/textgenerator/prompts/default/simplify.md b/content/textgenerator/prompts/default/simplify.md
new file mode 100644
index 00000000..b16cbbd9
--- /dev/null
+++ b/content/textgenerator/prompts/default/simplify.md
@@ -0,0 +1,12 @@
+---
+promptId: simplify
+name: 👼Simplify
+description: select a content and it will be simplified.
+author: Noureddine
+tags: thinking, writing
+version: 0.0.1
+---
+content:
+{{context}}
+prompt:
+make the content very clear and easy to understand
\ No newline at end of file
diff --git a/content/textgenerator/prompts/default/summarize.md b/content/textgenerator/prompts/default/summarize.md
new file mode 100644
index 00000000..de80732b
--- /dev/null
+++ b/content/textgenerator/prompts/default/summarize.md
@@ -0,0 +1,12 @@
+---
+promptId: summarize
+name: 🗞️ Summarize
+description: select a content and it will be summarized.
+author: Noureddine
+tags: writing, thinking, learning
+version: 0.0.1
+---
+content:
+{{context}}
+prompt:
+summarize the content
\ No newline at end of file
diff --git a/content/textgenerator/prompts/default/summarizeLarge.md b/content/textgenerator/prompts/default/summarizeLarge.md
new file mode 100644
index 00000000..96ba7a83
--- /dev/null
+++ b/content/textgenerator/prompts/default/summarizeLarge.md
@@ -0,0 +1,18 @@
+---
+promptId: summarizeLarge
+name: 🗞️ Summarizes Large chunk of text
+description: uses langchain chain to summarize large chunk of text
+author: Noureddine
+tags: writing, thinking, learning
+version: 0.0.1
+mode: insert
+chain.type: map_reduce
+splitter.chunkSize: 1000
+splitter.chunkOverlap: 100
+max_tokens: 1000
+temperature: 0
+chain.maxTokens: 1000
+---
+{{tg_selection}}
+***
+{{output}}
\ No newline at end of file
diff --git a/content/textgenerator/prompts/huggingface/classify-bart-large-mnli.md b/content/textgenerator/prompts/huggingface/classify-bart-large-mnli.md
new file mode 100644
index 00000000..3210a0fa
--- /dev/null
+++ b/content/textgenerator/prompts/huggingface/classify-bart-large-mnli.md
@@ -0,0 +1,18 @@
+---
+promptId: classify-bart-large-mnli
+name: 🪄 classify using bart-large-mnli
+description: You need to specify candidate_labels
+author: Noureddine
+tags: huggingface, text, classification
+version: 0.0.1
+provider: custom
+endpoint: "https://api-inference.huggingface.co/models/facebook/bart-large-mnli"
+headers: '{ "Authorization": "Bearer {{keys.hf}}" }'
+body: '{ "parameters": { "candidate_labels": ["refund", "legal", "faq"] }, "inputs": "{{escp prompt}}" }'
+output: '{{requestResults.labels.[0]}}'
+bodyParams:
+steaming: false
+---
+{{selection}}
+***
+=={{output}}==
\ No newline at end of file
diff --git a/content/textgenerator/prompts/huggingface/completeTextBloom.md b/content/textgenerator/prompts/huggingface/completeTextBloom.md
new file mode 100644
index 00000000..b8ab2f07
--- /dev/null
+++ b/content/textgenerator/prompts/huggingface/completeTextBloom.md
@@ -0,0 +1,11 @@
+---
+promptId: completeTextBloom
+name: 🪄 Complete Text using Bloom Model
+description: select considered context and run the command
+author: Noureddine
+tags: huggingface, text, bloom
+version: 0.0.1
+provider: hf
+model: "bigscience/bloom"
+---
+{{selection}}
\ No newline at end of file
diff --git a/content/textgenerator/prompts/huggingface/summarizeBART.md b/content/textgenerator/prompts/huggingface/summarizeBART.md
new file mode 100644
index 00000000..c9d2bbdf
--- /dev/null
+++ b/content/textgenerator/prompts/huggingface/summarizeBART.md
@@ -0,0 +1,18 @@
+---
+promptId: summarizeBART
+name: 🪄 Summarize Text using BART Facebook
+description: select considered context and run the command
+author: Noureddine
+tags: huggingface, text, summarization
+version: 0.0.3
+provider: custom
+endpoint: "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
+headers: '{ "Authorization": "Bearer {{keys.hf}}" }'
+body: '{ "inputs": "{{escp prompt}}" }'
+output: '{{requestResults.[0].summary_text}}'
+bodyParams:
+steaming: false
+---
+{{selection}}
+***
+{{output}}
\ No newline at end of file
diff --git a/content/toc.md b/content/toc.md
new file mode 100644
index 00000000..049763b4
--- /dev/null
+++ b/content/toc.md
@@ -0,0 +1,5 @@
+---
+title: Table of contents
+---
+- [cargo-difftests](cargo-difftests/index.md): a selective re-test framework for Rust.
+- [blog](blog/index.md): My blog.
\ No newline at end of file
diff --git a/quartz.config.ts b/quartz.config.ts
index 2cdadb74..f032cef0 100644
--- a/quartz.config.ts
+++ b/quartz.config.ts
@@ -8,15 +8,15 @@ import * as Plugin from "./quartz/plugins"
*/
const config: QuartzConfig = {
configuration: {
- pageTitle: "🪴 Quartz 4.0",
+ pageTitle: "Dinu's notes",
enableSPA: true,
enablePopovers: true,
analytics: {
provider: "plausible",
},
locale: "en-US",
- baseUrl: "quartz.jzhao.xyz",
- ignorePatterns: ["private", "templates", ".obsidian"],
+ baseUrl: "notes.dnbln.dev",
+ ignorePatterns: ["private", "templates", ".obsidian", "textgenerator"],
defaultDateType: "created",
theme: {
cdnCaching: true,
diff --git a/quartz.layout.ts b/quartz.layout.ts
index b5a1639e..712e2ae6 100644
--- a/quartz.layout.ts
+++ b/quartz.layout.ts
@@ -1,14 +1,18 @@
import { PageLayout, SharedLayout } from "./quartz/cfg"
import * as Component from "./quartz/components"
+const explorer = Component.Explorer({
+ title: "Knowledge",
+ folderClickBehavior: "link",
+});
+
// components shared across all pages
export const sharedPageComponents: SharedLayout = {
head: Component.Head(),
header: [],
footer: Component.Footer({
links: {
- GitHub: "https://github.com/jackyzha0/quartz",
- "Discord Community": "https://discord.gg/cRFFHYye7t",
+ GitHub: "https://github.com/dnbln/notes",
},
}),
}
@@ -26,7 +30,7 @@ export const defaultContentPageLayout: PageLayout = {
Component.MobileOnly(Component.Spacer()),
Component.Search(),
Component.Darkmode(),
- Component.DesktopOnly(Component.Explorer()),
+ Component.DesktopOnly(explorer),
],
right: [
Component.Graph(),
@@ -43,7 +47,7 @@ export const defaultListPageLayout: PageLayout = {
Component.MobileOnly(Component.Spacer()),
Component.Search(),
Component.Darkmode(),
- Component.DesktopOnly(Component.Explorer()),
+ Component.DesktopOnly(explorer),
],
right: [],
}
diff --git a/quartz/components/Footer.tsx b/quartz/components/Footer.tsx
index 076c3787..6f0c3ef5 100644
--- a/quartz/components/Footer.tsx
+++ b/quartz/components/Footer.tsx
@@ -14,10 +14,10 @@ export default ((opts?: Options) => {
return (
*/}