Mirror of https://github.com/aljazceru/pubky-core.git (synced 2025-12-18 14:34:25 +01:00)
chore(homeserver): Refactor Core (#96)
* first draft
* config2 for the time being
* more refactoring
* write default config if it doesnt exist
* added relays to config
* some refactor
* proper bootstrap nodes and relay config validation
* small comments
* rename module
* renamings
* turn listen_ports to listen_socket
* connected config with homeserver
* cleaned up old config
* cleaned up config_old
* removed old config.example.toml
* cleanup tryfrom conversions
* removed dirs-next
* review cleanup
* extracted default config to its own toml file
* use hostname_validator for rfc1123 domain verification
* Domain struct
* fmt
* small config restructure
* use SignupMode in config and moved it to config dir
* moved and simplified lmdb
* save to switch branches
* lots done already
* missin lock file
* pkarr config
* constants
* app context
* used context in more places
* made homeserver independant
* testing feature
* added datadirmock
* getting the hang about testing
* fixed homeserver core tests
* added HandleHolder
* make the homeserver tasks stop when its dropped
* make server handles optional
* properly cleanup all background tasks
* moved logs
* fixed config default toml
* fmt, clippy
* moved stuff around
* lots of moving and readme
* fixed pkarr republisher tests
* removed docs from include
* fixed and refactored testnet
* make simple_testnet work
* httprelay shutdown
* different testnets
* fixing tests1
* fixing tests
* fixing more tests
* unified pkarr versions
* fixed config with bootstrap nodes and relays
* split up test_republish_on_signin to prevent timing issues
* fixed all tests in e2e?
* fixed multi publisher tests
* fixed pubky-client readme
* fixed testnet readme
* added better errors
* admin error
* fixed tests
* format
* clippy
* cllippy
* fixed testnet ports
* fixed client future issue
* improved testnet
* fixed cache_size pkarr relay issue
* small improvements
* fixed low prio dns record
* removed fixed testnet test due to port conflicts
* fixed browserify issues
* fmt
* clippy
* changed wait for testnet hs admin
* fixed docs clippy issues
* added comments
* moved icann_domain
* renamed datadirs
* implemented default for MockDataDir
* renamed run() to start()
* removed unwraps
* fmt
* fixed rename test
* cleaned up admin trace
* added santity values for periodic backup conf and user keys republisher
* fmt
* fmt
* fixed readme lint
* removed println
* fixed admin server edge to anyhow
* added ipv6 support
* removed unnecessary expects
* renamed testnet
* fmt
* renamed me
* changed import
* fmt
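The net effect for test authors is a new harness API: `Testnet::run()` plus `run_homeserver()` is replaced by an `EphemeralTestnet` that owns a homeserver suite, and the client builder is now reached through `pubky_client_builder()`. A minimal sketch of the new setup, assembled from the updated e2e tests in this diff (the test name itself is illustrative):

```rust
use pkarr::Keypair;
use pubky_testnet::EphemeralTestnet;

#[tokio::test]
async fn refactored_setup_sketch() {
    // One call now brings up a DHT testnet together with a homeserver suite.
    let testnet = EphemeralTestnet::start().await.unwrap();
    let server = testnet.homeserver_suite();

    // The builder was renamed from `client_builder()` to `pubky_client_builder()`.
    let client = testnet.pubky_client_builder().build().unwrap();

    // Sign up a fresh key against the ephemeral homeserver.
    let keypair = Keypair::random();
    client
        .signup(&keypair, &server.public_key(), None)
        .await
        .unwrap();
}
```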
committed by GitHub
parent 6ad1509263
commit 55d52ec4b8
.github/workflows/pr-check.yml (vendored): 2 changes
@@ -114,7 +114,7 @@ jobs:
       - name: Wait for testnet homeserver
         run: |
-          until nc -zv 127.0.0.1 6286; do
+          until nc -zv 127.0.0.1 6288; do
            echo "Waiting for testnet homeserver to be ready..."
            sleep 1
          done
Cargo.lock (generated): 174 changes
@@ -122,6 +122,16 @@ version = "0.7.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
|
||||
|
||||
[[package]]
|
||||
name = "assert-json-diff"
|
||||
version = "2.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-compat"
|
||||
version = "0.2.4"
|
||||
@@ -166,6 +176,12 @@ dependencies = [
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "auto-future"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3c1e7e457ea78e524f48639f551fd79703ac3f2237f5ecccdf4708f8a75ad373"
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.4.0"
|
||||
@@ -286,6 +302,36 @@ dependencies = [
|
||||
"tower-service",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "axum-test"
|
||||
version = "17.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "317c1f4ecc1e68e0ad5decb78478421055c963ce215e736ed97463fa609cd196"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"assert-json-diff",
|
||||
"auto-future",
|
||||
"axum",
|
||||
"bytes",
|
||||
"bytesize",
|
||||
"cookie",
|
||||
"http",
|
||||
"http-body-util",
|
||||
"hyper",
|
||||
"hyper-util",
|
||||
"mime",
|
||||
"pretty_assertions",
|
||||
"reserve-port",
|
||||
"rust-multipart-rfc7578_2",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
"smallvec",
|
||||
"tokio",
|
||||
"tower 0.5.2",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "backtrace"
|
||||
version = "0.3.74"
|
||||
@@ -393,6 +439,12 @@ version = "1.10.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
|
||||
|
||||
[[package]]
|
||||
name = "bytesize"
|
||||
version = "1.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2d2c12f985c78475a6b8d629afd0c360260ef34cfef52efccdcfd31972f81c2e"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.14"
|
||||
@@ -702,6 +754,12 @@ version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c"
|
||||
|
||||
[[package]]
|
||||
name = "diff"
|
||||
version = "0.1.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
|
||||
|
||||
[[package]]
|
||||
name = "digest"
|
||||
version = "0.10.7"
|
||||
@@ -786,9 +844,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "dyn-clone"
|
||||
version = "1.0.18"
|
||||
version = "1.0.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "feeef44e73baff3a26d371801df019877a9866a8c493d315ab00177843314f35"
|
||||
checksum = "1c7a8fb8a9fbf66c1f703fe16184d10ca0ee9d23be5b4436400408ba54a95005"
|
||||
|
||||
[[package]]
|
||||
name = "e2e"
|
||||
@@ -1011,9 +1069,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
|
||||
dependencies = [
|
||||
"futures-core",
|
||||
"futures-io",
|
||||
"futures-macro",
|
||||
"futures-sink",
|
||||
"futures-task",
|
||||
"memchr",
|
||||
"pin-project-lite",
|
||||
"pin-utils",
|
||||
"slab",
|
||||
@@ -1295,22 +1355,6 @@ dependencies = [
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "http-relay"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0043bce2a7f4a4fbc870c4489223052b3aba084ead8a39581ea69d3e9a3e04a9"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"axum",
|
||||
"axum-server",
|
||||
"futures-util",
|
||||
"tokio",
|
||||
"tower-http",
|
||||
"tracing",
|
||||
"url",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "httparse"
|
||||
version = "1.10.0"
|
||||
@@ -1607,9 +1651,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "linux-raw-sys"
|
||||
version = "0.4.15"
|
||||
version = "0.9.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab"
|
||||
checksum = "fe7db12097d22ec582439daf8618b8fdd1a7bef6270e9af3b1ebcd30893cf413"
|
||||
|
||||
[[package]]
|
||||
name = "litemap"
|
||||
@@ -1671,9 +1715,9 @@ checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465"
|
||||
|
||||
[[package]]
|
||||
name = "mainline"
|
||||
version = "5.3.1"
|
||||
version = "5.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2fae24c3d129b92c8cfea92a9e2014052371a2835e4a6d66dfdb00238e389e56"
|
||||
checksum = "c258b001fa52b7270dc1a239b36a9b608b024e68733648c1757b025204fdc248"
|
||||
dependencies = [
|
||||
"crc",
|
||||
"document-features",
|
||||
@@ -1727,6 +1771,16 @@ version = "0.3.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
|
||||
|
||||
[[package]]
|
||||
name = "mime_guess"
|
||||
version = "2.0.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e"
|
||||
dependencies = [
|
||||
"mime",
|
||||
"unicase",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "miniz_oxide"
|
||||
version = "0.8.4"
|
||||
@@ -2043,9 +2097,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "pkarr"
|
||||
version = "3.6.0"
|
||||
version = "3.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7288f55e8981cce659ff14e05bbc0ade2d3015e45601ed4eb8ae8736c55c2a5b"
|
||||
checksum = "e32222ae3d617bf92414db29085f8a959a4515effce916e038e9399a335a0d6d"
|
||||
dependencies = [
|
||||
"async-compat",
|
||||
"base32",
|
||||
@@ -2081,9 +2135,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "pkarr-relay"
|
||||
version = "0.5.7"
|
||||
version = "0.9.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "431e2bb798f7dbc155644552ceef790e66b12a7c1c10ccd34d175148af3ba882"
|
||||
checksum = "63aa8f8cd1693c358a0e9baf5221d56d15123ad8d385631c7277c7560a843fd2"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"axum",
|
||||
@@ -2094,7 +2148,6 @@ dependencies = [
|
||||
"governor",
|
||||
"http",
|
||||
"httpdate",
|
||||
"mainline",
|
||||
"pkarr",
|
||||
"rustls",
|
||||
"serde",
|
||||
@@ -2118,7 +2171,6 @@ dependencies = [
|
||||
"futures-lite",
|
||||
"hex",
|
||||
"pkarr",
|
||||
"pubky-testnet",
|
||||
"rand 0.9.0",
|
||||
"thiserror 2.0.12",
|
||||
"tokio",
|
||||
@@ -2187,6 +2239,16 @@ dependencies = [
|
||||
"zerocopy 0.7.35",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pretty_assertions"
|
||||
version = "1.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d"
|
||||
dependencies = [
|
||||
"diff",
|
||||
"yansi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.93"
|
||||
@@ -2262,11 +2324,14 @@ dependencies = [
|
||||
"axum",
|
||||
"axum-extra",
|
||||
"axum-server",
|
||||
"axum-test",
|
||||
"base32",
|
||||
"bytes",
|
||||
"clap",
|
||||
"dirs",
|
||||
"dyn-clone",
|
||||
"flume",
|
||||
"futures-lite",
|
||||
"futures-util",
|
||||
"heed",
|
||||
"hex",
|
||||
@@ -2277,6 +2342,7 @@ dependencies = [
|
||||
"pkarr-republisher",
|
||||
"postcard",
|
||||
"pubky-common",
|
||||
"reqwest",
|
||||
"serde",
|
||||
"tempfile",
|
||||
"thiserror 2.0.12",
|
||||
@@ -2295,13 +2361,16 @@ name = "pubky-testnet"
|
||||
version = "0.1.2"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"http-relay 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"http-relay",
|
||||
"mainline",
|
||||
"pkarr",
|
||||
"pkarr-relay",
|
||||
"pubky",
|
||||
"pubky-common",
|
||||
"pubky-homeserver",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"url",
|
||||
]
|
||||
@@ -2603,6 +2672,16 @@ dependencies = [
|
||||
"windows-registry",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "reserve-port"
|
||||
version = "2.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "359fc315ed556eb0e42ce74e76f4b1cd807b50fa6307f3de4e51f92dbe86e2d5"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
"thiserror 2.0.12",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ring"
|
||||
version = "0.17.9"
|
||||
@@ -2638,6 +2717,22 @@ dependencies = [
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rust-multipart-rfc7578_2"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bc4bb9e7c9abe5fa5f30c2d8f8fefb9e0080a2c1e3c2e567318d2907054b35d3"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"http",
|
||||
"mime",
|
||||
"mime_guess",
|
||||
"rand 0.9.0",
|
||||
"thiserror 2.0.12",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.24"
|
||||
@@ -2661,9 +2756,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "rustix"
|
||||
version = "0.38.44"
|
||||
version = "1.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154"
|
||||
checksum = "e56a18552996ac8d29ecc3b190b4fdbb2d91ca4ec396de7bbffaf43f3d637e96"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"errno",
|
||||
@@ -3084,11 +3179,10 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tempfile"
|
||||
version = "3.17.1"
|
||||
version = "3.19.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "22e5a0acb1f3f55f65cc4a866c361b2fb2a0ff6366785ae6fbb5f85df07ba230"
|
||||
checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"fastrand",
|
||||
"getrandom 0.3.1",
|
||||
"once_cell",
|
||||
@@ -3463,6 +3557,12 @@ version = "1.18.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f"
|
||||
|
||||
[[package]]
|
||||
name = "unicase"
|
||||
version = "2.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.17"
|
||||
@@ -3965,6 +4065,12 @@ version = "0.5.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51"
|
||||
|
||||
[[package]]
|
||||
name = "yansi"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049"
|
||||
|
||||
[[package]]
|
||||
name = "yoke"
|
||||
version = "0.7.5"
|
||||
|
||||
@@ -13,3 +13,9 @@ resolver = "2"
|
||||
[profile.release]
|
||||
lto = true
|
||||
opt-level = 'z'
|
||||
|
||||
|
||||
[workspace.dependencies]
|
||||
pkarr = { version = "3.7.1" }
|
||||
mainline = { version = "5.4.0" }
|
||||
pkarr-relay = { version = "0.9.1" }
|
||||
@@ -6,8 +6,8 @@ edition = "2021"
|
||||
[dependencies]
|
||||
pubky-testnet = { path = "../pubky-testnet" }
|
||||
pubky-common = { path = "../pubky-common" }
|
||||
tokio = { version = "1.43.0", features = ["full"] }
|
||||
tokio = { version = "1.43.0", features = ["full", "test-util"] }
|
||||
tracing-subscriber = "0.3.19"
|
||||
pkarr = "3.6.0"
|
||||
pkarr = {workspace = true}
|
||||
reqwest = "0.12.15"
|
||||
bytes = "1.10.1"
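The e2e crate also enables tokio's `test-util` feature here. Presumably this is what lets the reworked tests drive the runtime clock instead of sleeping, which matches the commit notes about splitting `test_republish_on_signin` to avoid timing issues. A hypothetical illustration of that pattern (not code from this PR):

```rust
use std::time::Duration;

// `start_paused` and `advance` are only available with tokio's "test-util" feature.
#[tokio::test(start_paused = true)]
async fn clock_control_sketch() {
    let started = tokio::time::Instant::now();
    // Move the mock clock forward deterministically instead of really sleeping.
    tokio::time::advance(Duration::from_secs(5)).await;
    assert!(started.elapsed() >= Duration::from_secs(5));
}
```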
|
||||
@@ -1,15 +1,18 @@
|
||||
use pkarr::Keypair;
|
||||
use pubky_common::capabilities::{Capabilities, Capability};
|
||||
use pubky_testnet::Testnet;
|
||||
use pubky_testnet::{
|
||||
pubky_homeserver::{MockDataDir, SignupMode},
|
||||
EphemeralTestnet, Testnet,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use std::time::Duration;
|
||||
|
||||
#[tokio::test]
|
||||
async fn basic_authn() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let keypair = Keypair::random();
|
||||
|
||||
@@ -50,10 +53,10 @@ async fn basic_authn() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn authz() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let http_relay = testnet.run_http_relay().await.unwrap();
|
||||
let http_relay = testnet.http_relay();
|
||||
let http_relay_url = http_relay.local_link_url();
|
||||
|
||||
let keypair = Keypair::random();
|
||||
@@ -62,13 +65,13 @@ async fn authz() {
|
||||
// Third party app side
|
||||
let capabilities: Capabilities = "/pub/pubky.app/:rw,/pub/foo.bar/file:r".try_into().unwrap();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let pubky_auth_request = client.auth_request(http_relay_url, &capabilities).unwrap();
|
||||
|
||||
// Authenticator side
|
||||
{
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
client
|
||||
.signup(&keypair, &server.public_key(), None)
|
||||
@@ -124,10 +127,10 @@ async fn authz() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn multiple_users() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let first_keypair = Keypair::random();
|
||||
let second_keypair = Keypair::random();
|
||||
@@ -163,10 +166,10 @@ async fn multiple_users() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn authz_timeout_reconnect() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let http_relay = testnet.run_http_relay().await.unwrap();
|
||||
let http_relay = testnet.http_relay();
|
||||
let http_relay_url = http_relay.local_link_url();
|
||||
|
||||
let keypair = Keypair::random();
|
||||
@@ -176,7 +179,7 @@ async fn authz_timeout_reconnect() {
|
||||
let capabilities: Capabilities = "/pub/pubky.app/:rw,/pub/foo.bar/file:r".try_into().unwrap();
|
||||
|
||||
let client = testnet
|
||||
.client_builder()
|
||||
.pubky_client_builder()
|
||||
.request_timeout(Duration::from_millis(1000))
|
||||
.build()
|
||||
.unwrap();
|
||||
@@ -187,14 +190,14 @@ async fn authz_timeout_reconnect() {
|
||||
{
|
||||
let url = pubky_auth_request.url().clone();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
client
|
||||
.signup(&keypair, &server.public_key(), None)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
tokio::spawn(async move {
|
||||
tokio::time::sleep(Duration::from_millis(400)).await;
|
||||
tokio::time::sleep(Duration::from_millis(1000)).await;
|
||||
// loop {
|
||||
client.send_auth_token(&keypair, &url).await.unwrap();
|
||||
// }
|
||||
@@ -245,12 +248,15 @@ async fn authz_timeout_reconnect() {
|
||||
#[tokio::test]
|
||||
async fn test_signup_with_token() {
|
||||
// 1. Start a test homeserver with closed signups (i.e. signup tokens required)
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver_with_signup_tokens().await.unwrap();
|
||||
let mut testnet = Testnet::new().await.unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let admin_password = "admin";
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let mut mock_dir = MockDataDir::test();
|
||||
mock_dir.config_toml.general.signup_mode = SignupMode::TokenRequired;
|
||||
let server = testnet
|
||||
.create_homeserver_suite_with_mock(mock_dir)
|
||||
.await
|
||||
.unwrap();
|
||||
let keypair = Keypair::random();
|
||||
|
||||
// 2. Try to signup with an invalid token "AAAAA" and expect failure.
|
||||
@@ -263,39 +269,7 @@ async fn test_signup_with_token() {
|
||||
);
|
||||
|
||||
// 3. Call the admin endpoint to generate a valid signup token.
|
||||
// The admin endpoint is protected via the header "X-Admin-Password"
|
||||
// and the password we set up above.
|
||||
let admin_url = format!(
|
||||
"https://{}/admin/generate_signup_token",
|
||||
server.public_key()
|
||||
);
|
||||
|
||||
// 3.1. Call the admin endpoint *with a WRONG admin password* to ensure we get 401 UNAUTHORIZED.
|
||||
let wrong_password_response = client
|
||||
.get(&admin_url)
|
||||
.header("X-Admin-Password", "wrong_admin_password")
|
||||
.send()
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
wrong_password_response.status(),
|
||||
StatusCode::UNAUTHORIZED,
|
||||
"Wrong admin password should return 401"
|
||||
);
|
||||
|
||||
// 3.1 Now call the admin endpoint again, this time with the correct password.
|
||||
let admin_response = client
|
||||
.get(&admin_url)
|
||||
.header("X-Admin-Password", admin_password)
|
||||
.send()
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
admin_response.status(),
|
||||
StatusCode::OK,
|
||||
"Admin endpoint should return OK"
|
||||
);
|
||||
let valid_token = admin_response.text().await.unwrap(); // The token string.
|
||||
let valid_token = server.admin().create_signup_token().await.unwrap();
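Pulled together, the rewritten test gates signups behind `SignupMode::TokenRequired` via a `MockDataDir`, and mints tokens through a typed admin handle instead of the hand-rolled `X-Admin-Password` request it replaces. A condensed sketch of the flow; the exact type of the signup-token argument is assumed:

```rust
use pkarr::Keypair;
use pubky_testnet::{
    pubky_homeserver::{MockDataDir, SignupMode},
    Testnet,
};

async fn token_gated_signup_sketch() {
    let mut testnet = Testnet::new().await.unwrap();

    // Configure a homeserver that only accepts signups carrying a token.
    let mut mock_dir = MockDataDir::test();
    mock_dir.config_toml.general.signup_mode = SignupMode::TokenRequired;
    let server = testnet
        .create_homeserver_suite_with_mock(mock_dir)
        .await
        .unwrap();

    // Mint a signup token through the typed admin handle.
    let token = server.admin().create_signup_token().await.unwrap();

    // Sign up with the token (assuming the third argument takes Option<&str>).
    let client = testnet.pubky_client_builder().build().unwrap();
    client
        .signup(&Keypair::random(), &server.public_key(), Some(token.as_str()))
        .await
        .unwrap();
}
```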
|
||||
|
||||
// 4. Now signup with the valid token. Expect success and a session back.
|
||||
let session = client
|
||||
@@ -321,16 +295,17 @@ async fn test_signup_with_token() {
|
||||
// but when a signin happens after the record is “old” (in test, after 1 second),
|
||||
// the record is republished (its timestamp increases).
|
||||
#[tokio::test]
|
||||
async fn test_republish_on_signin() {
|
||||
async fn test_republish_on_signin_old_enough() {
|
||||
// Setup the testnet and run a homeserver.
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
// Create a client that will republish conditionally if a record is older than 1 second
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
// Create a client that will republish conditionally if a record is older than 1ms.
|
||||
let client = testnet
|
||||
.client_builder()
|
||||
.max_record_age(Duration::from_secs(1))
|
||||
.pubky_client_builder()
|
||||
.max_record_age(Duration::from_millis(1))
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let server = testnet.homeserver_suite();
|
||||
let keypair = Keypair::random();
|
||||
|
||||
// Signup publishes a new record.
|
||||
@@ -346,11 +321,62 @@ async fn test_republish_on_signin() {
|
||||
.unwrap();
|
||||
let ts1 = record1.timestamp().as_u64();
|
||||
|
||||
// Immediately sign in. This spawns a background task to update the record
|
||||
// Immediately sign in. This should update the record
|
||||
// with PublishStrategy::IfOlderThan.
|
||||
client.signin(&keypair).await.unwrap();
|
||||
// Wait a short time to let the background task complete.
|
||||
tokio::time::sleep(Duration::from_millis(5)).await;
|
||||
client
|
||||
.signin_and_ensure_record_published(&keypair, true)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let record2 = client
|
||||
.pkarr()
|
||||
.resolve_most_recent(&keypair.public_key())
|
||||
.await
|
||||
.unwrap();
|
||||
let ts2 = record2.timestamp().as_u64();
|
||||
|
||||
// Because the signin happened after max_age(Duration::from_millis(1)),
|
||||
// the record should have been republished.
|
||||
assert_ne!(
|
||||
ts1, ts2,
|
||||
"Record was not republished after threshold exceeded"
|
||||
);
|
||||
}
|
||||
|
||||
// This test verifies that when a signin happens immediately after signup,
|
||||
// the record is not republished on signin (its timestamp remains unchanged)
|
||||
// but when a signin happens after the record is “old” (in test, after 1 second),
|
||||
// the record is republished (its timestamp increases).
|
||||
#[tokio::test]
|
||||
async fn test_republish_on_signin_not_old_enough() {
|
||||
// Setup the testnet and run a homeserver.
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
// Create a client that will republish conditionally if a record is older than 1hr.
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let server = testnet.homeserver_suite();
|
||||
let keypair = Keypair::random();
|
||||
|
||||
// Signup publishes a new record.
|
||||
client
|
||||
.signup(&keypair, &server.public_key(), None)
|
||||
.await
|
||||
.unwrap();
|
||||
// Resolve the record and get its timestamp.
|
||||
let record1 = client
|
||||
.pkarr()
|
||||
.resolve_most_recent(&keypair.public_key())
|
||||
.await
|
||||
.unwrap();
|
||||
let ts1 = record1.timestamp().as_u64();
|
||||
|
||||
// Immediately sign in. This updates the record
|
||||
// with PublishStrategy::IfOlderThan.
|
||||
client
|
||||
.signin_and_ensure_record_published(&keypair, true)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let record2 = client
|
||||
.pkarr()
|
||||
.resolve_most_recent(&keypair.public_key())
|
||||
@@ -364,37 +390,21 @@ async fn test_republish_on_signin() {
|
||||
ts1, ts2,
|
||||
"Record republished too early; timestamps should be equal"
|
||||
);
|
||||
|
||||
// Wait long enough for the record to be considered 'old' (greater than 1 second).
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
// Sign in again. Now the background task should trigger a republish.
|
||||
client.signin(&keypair).await.unwrap();
|
||||
tokio::time::sleep(Duration::from_millis(5)).await;
|
||||
let record3 = client
|
||||
.pkarr()
|
||||
.resolve_most_recent(&keypair.public_key())
|
||||
.await
|
||||
.unwrap();
|
||||
let ts3 = record3.timestamp().as_u64();
|
||||
|
||||
// Now the republished record's timestamp should be greater than before.
|
||||
assert!(
|
||||
ts3 > ts2,
|
||||
"Record was not republished after threshold exceeded"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_republish_homeserver() {
|
||||
// Setup the testnet and run a homeserver.
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let mut testnet = Testnet::new().await.unwrap();
|
||||
let max_record_age = Duration::from_secs(5);
|
||||
|
||||
// Create a client that will republish conditionally if a record is older than 1 second
|
||||
let client = testnet
|
||||
.client_builder()
|
||||
.max_record_age(Duration::from_secs(1))
|
||||
.pubky_client_builder()
|
||||
.max_record_age(max_record_age)
|
||||
.build()
|
||||
.unwrap();
|
||||
let server = testnet.create_homeserver_suite().await.unwrap();
|
||||
let keypair = Keypair::random();
|
||||
|
||||
// Signup publishes a new record.
|
||||
@@ -428,7 +438,7 @@ async fn test_republish_homeserver() {
|
||||
);
|
||||
|
||||
// Wait long enough for the record to be considered 'old'.
|
||||
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
|
||||
tokio::time::sleep(max_record_age).await;
|
||||
// Call republish_homeserver again; now the record should be updated.
|
||||
client
|
||||
.republish_homeserver(&keypair, &server.public_key())
|
||||
|
||||
@@ -1,14 +1,15 @@
|
||||
use pubky_testnet::Testnet;
|
||||
use pubky_testnet::EphemeralTestnet;
|
||||
use reqwest::Method;
|
||||
|
||||
#[tokio::test]
|
||||
async fn http_get_pubky() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let homeserver = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let response = client
|
||||
.get(format!("https://{}/", homeserver.public_key()))
|
||||
.get(format!("https://{}/", server.public_key()))
|
||||
.send()
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -18,12 +19,12 @@ async fn http_get_pubky() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn http_get_icann() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let response = client
|
||||
.request(Default::default(), "https://example.com/")
|
||||
.request(Method::GET, "https://example.com/")
|
||||
.send()
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use bytes::Bytes;
|
||||
use pkarr::Keypair;
|
||||
use pubky_testnet::Testnet;
|
||||
use pubky_testnet::EphemeralTestnet;
|
||||
use reqwest::{Method, StatusCode};
|
||||
|
||||
#[tokio::test]
|
||||
async fn put_get_delete() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let keypair = Keypair::random();
|
||||
|
||||
@@ -37,7 +37,7 @@ async fn put_get_delete() {
|
||||
// Use regular web method to get data from homeserver (with query pubky-host)
|
||||
let regular_url = format!(
|
||||
"{}pub/foo.txt?pubky-host={}",
|
||||
server.url(),
|
||||
server.icann_http_url(),
|
||||
keypair.public_key()
|
||||
);
|
||||
|
||||
@@ -71,10 +71,10 @@ async fn put_get_delete() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn unauthorized_put_delete() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let keypair = Keypair::random();
|
||||
|
||||
@@ -88,7 +88,7 @@ async fn unauthorized_put_delete() {
|
||||
let url = format!("pubky://{public_key}/pub/foo.txt");
|
||||
let url = url.as_str();
|
||||
|
||||
let other_client = testnet.client_builder().build().unwrap();
|
||||
let other_client = testnet.pubky_client_builder().build().unwrap();
|
||||
{
|
||||
let other = Keypair::random();
|
||||
|
||||
@@ -139,10 +139,10 @@ async fn unauthorized_put_delete() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn list() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let keypair = Keypair::random();
|
||||
|
||||
@@ -345,10 +345,10 @@ async fn list() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn list_shallow() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let keypair = Keypair::random();
|
||||
|
||||
@@ -558,10 +558,10 @@ async fn list_shallow() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn list_events() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let keypair = Keypair::random();
|
||||
|
||||
@@ -592,7 +592,7 @@ async fn list_events() {
|
||||
|
||||
let feed_url = format!("https://{}/events/", server.public_key());
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let cursor;
|
||||
|
||||
@@ -657,10 +657,10 @@ async fn list_events() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn read_after_event() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let keypair = Keypair::random();
|
||||
|
||||
@@ -677,7 +677,7 @@ async fn read_after_event() {
|
||||
|
||||
let feed_url = format!("https://{}/events/", server.public_key());
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
{
|
||||
let response = client
|
||||
@@ -710,10 +710,10 @@ async fn read_after_event() {
|
||||
|
||||
#[tokio::test]
|
||||
async fn dont_delete_shared_blobs() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let homeserver = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let homeserver = testnet.homeserver_suite();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let homeserver_pubky = homeserver.public_key();
|
||||
|
||||
@@ -786,10 +786,10 @@ async fn dont_delete_shared_blobs() {
|
||||
#[tokio::test]
|
||||
async fn stream() {
|
||||
// TODO: test better streaming API
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let server = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let server = testnet.homeserver_suite();
|
||||
|
||||
let client = testnet.client_builder().build().unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let keypair = Keypair::random();
|
||||
|
||||
|
||||
@@ -9,6 +9,7 @@ use std::{
|
||||
collections::HashMap,
|
||||
net::{SocketAddr, TcpListener},
|
||||
sync::Arc,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::Result;
|
||||
@@ -23,7 +24,6 @@ use axum::{
|
||||
use axum_server::Handle;
|
||||
use tokio::sync::{oneshot, Mutex};
|
||||
|
||||
use futures_util::TryFutureExt;
|
||||
use tower_http::{cors::CorsLayer, trace::TraceLayer};
|
||||
use url::Url;
|
||||
|
||||
@@ -66,7 +66,6 @@ impl HttpRelayBuilder {
|
||||
/// An implementation of _some_ of [Http relay spec](https://httprelay.io/).
|
||||
pub struct HttpRelay {
|
||||
pub(crate) http_handle: Handle,
|
||||
|
||||
http_address: SocketAddr,
|
||||
}
|
||||
|
||||
@@ -81,19 +80,21 @@ impl HttpRelay {
|
||||
.with_state(shared_state);
|
||||
|
||||
let http_handle = Handle::new();
|
||||
let shutdown_handle = http_handle.clone();
|
||||
|
||||
let http_listener = TcpListener::bind(SocketAddr::from(([0, 0, 0, 0], config.http_port)))?;
|
||||
let http_address = http_listener.local_addr()?;
|
||||
|
||||
tokio::spawn(
|
||||
tokio::spawn(async move {
|
||||
axum_server::from_tcp(http_listener)
|
||||
.handle(http_handle.clone())
|
||||
.serve(app.into_make_service())
|
||||
.map_err(|error| tracing::error!(?error, "HttpRelay http server error")),
|
||||
);
|
||||
.await
|
||||
.map_err(|error| tracing::error!(?error, "HttpRelay http server error"))
|
||||
});
|
||||
|
||||
Ok(Self {
|
||||
http_handle,
|
||||
http_handle: shutdown_handle,
|
||||
http_address,
|
||||
})
|
||||
}
|
||||
@@ -129,8 +130,16 @@ impl HttpRelay {
|
||||
url
|
||||
}
|
||||
|
||||
/// Shut down this http relay server.
|
||||
pub fn shutdown(&self) {
|
||||
/// Gracefully shuts down the HTTP relay.
|
||||
pub async fn shutdown(self) -> anyhow::Result<()> {
|
||||
self.http_handle
|
||||
.graceful_shutdown(Some(Duration::from_secs(1)));
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for HttpRelay {
|
||||
fn drop(&mut self) {
|
||||
self.http_handle.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ categories = ["web-programming"]
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.95"
|
||||
pkarr = "3.5.3"
|
||||
pkarr = {workspace = true}
|
||||
tokio = { version = "1.43.0", features = ["full"] }
|
||||
tracing = "0.1.41"
|
||||
futures-lite = { version = "2.6.0"}
|
||||
@@ -25,6 +25,3 @@ ctrlc = "3.4.5"
|
||||
hex = "0.4.3"
|
||||
rand = "0.9.0"
|
||||
|
||||
[dev-dependencies]
|
||||
pubky-testnet = { path = "../pubky-testnet" }
|
||||
|
||||
|
||||
@@ -187,8 +187,7 @@ impl MultiRepublisher {
|
||||
mod tests {
|
||||
use std::num::NonZeroU8;
|
||||
|
||||
use pkarr::{dns::Name, ClientBuilder, Keypair, PublicKey};
|
||||
use pubky_testnet::Testnet;
|
||||
use pkarr::{dns::Name, Keypair, PublicKey};
|
||||
|
||||
use crate::{multi_republisher::MultiRepublisher, republisher::RepublisherSettings};
|
||||
|
||||
@@ -207,10 +206,9 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn single_key_republish_success() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
// Create testnet pkarr builder
|
||||
let mut pkarr_builder = ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&testnet.bootstrap()).no_relays();
|
||||
let dht = pkarr::mainline::Testnet::new(3).unwrap();
|
||||
let mut pkarr_builder = pkarr::ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&dht.bootstrap).no_relays();
|
||||
let pkarr_client = pkarr_builder.clone().build().unwrap();
|
||||
|
||||
let public_keys = publish_sample_packets(&pkarr_client, 1).await;
|
||||
@@ -219,7 +217,7 @@ mod tests {
|
||||
let mut settings = RepublisherSettings::default();
|
||||
settings
|
||||
.pkarr_client(pkarr_client)
|
||||
.min_sufficient_node_publish_count(NonZeroU8::new(1).unwrap());
|
||||
.min_sufficient_node_publish_count(NonZeroU8::new(3).unwrap());
|
||||
let publisher = MultiRepublisher::new_with_settings(settings, Some(pkarr_builder));
|
||||
let results = publisher.run_serially(public_keys).await.unwrap();
|
||||
let result = results.get(&public_key).unwrap();
|
||||
@@ -231,19 +229,18 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn single_key_republish_insufficient() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
// Create testnet pkarr builder
|
||||
let mut pkarr_builder = ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&testnet.bootstrap()).no_relays();
|
||||
let dht = pkarr::mainline::Testnet::new(3).unwrap();
|
||||
let mut pkarr_builder = pkarr::ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&dht.bootstrap).no_relays();
|
||||
let pkarr_client = pkarr_builder.clone().build().unwrap();
|
||||
let public_keys = publish_sample_packets(&pkarr_client, 1).await;
|
||||
|
||||
let public_keys = publish_sample_packets(&pkarr_client, 1).await;
|
||||
let public_key = public_keys.first().unwrap().clone();
|
||||
|
||||
let mut settings = RepublisherSettings::default();
|
||||
settings
|
||||
.pkarr_client(pkarr_client)
|
||||
.min_sufficient_node_publish_count(NonZeroU8::new(2).unwrap());
|
||||
.min_sufficient_node_publish_count(NonZeroU8::new(4).unwrap());
|
||||
let publisher = MultiRepublisher::new_with_settings(settings, Some(pkarr_builder));
|
||||
let results = publisher.run_serially(public_keys).await.unwrap();
|
||||
let result = results.get(&public_key).unwrap();
|
||||
|
||||
@@ -242,7 +242,6 @@ mod tests {
|
||||
use std::{num::NonZeroU8, time::Duration};
|
||||
|
||||
use pkarr::{dns::Name, Keypair, PublicKey, SignedPacket};
|
||||
use pubky_testnet::Testnet;
|
||||
|
||||
use crate::publisher::{PublishError, Publisher, PublisherSettings};
|
||||
|
||||
@@ -257,12 +256,13 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn single_key_republish_success() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let pubky_client = testnet.client_builder().build().unwrap();
|
||||
let pkarr_client = pubky_client.pkarr().clone();
|
||||
let dht = pkarr::mainline::Testnet::new(3).unwrap();
|
||||
let mut pkarr_builder = pkarr::ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&dht.bootstrap).no_relays();
|
||||
let pkarr_client = pkarr_builder.clone().build().unwrap();
|
||||
let (_, packet) = sample_packet();
|
||||
|
||||
let required_nodes = 1;
|
||||
let required_nodes = 3;
|
||||
let mut settings = PublisherSettings::default();
|
||||
settings
|
||||
.pkarr_client(pkarr_client)
|
||||
@@ -271,17 +271,18 @@ mod tests {
|
||||
let res = publisher.publish_once().await;
|
||||
assert!(res.is_ok());
|
||||
let success = res.unwrap();
|
||||
assert_eq!(success.published_nodes_count, 1);
|
||||
assert_eq!(success.published_nodes_count, 3);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn single_key_republish_insufficient() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let pubky_client = testnet.client_builder().build().unwrap();
|
||||
let pkarr_client = pubky_client.pkarr().clone();
|
||||
let dht = pkarr::mainline::Testnet::new(3).unwrap();
|
||||
let mut pkarr_builder = pkarr::ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&dht.bootstrap).no_relays();
|
||||
let pkarr_client = pkarr_builder.clone().build().unwrap();
|
||||
let (_, packet) = sample_packet();
|
||||
|
||||
let required_nodes = 2;
|
||||
let required_nodes = 4;
|
||||
let mut settings = PublisherSettings::default();
|
||||
settings
|
||||
.pkarr_client(pkarr_client)
|
||||
@@ -296,15 +297,16 @@ mod tests {
|
||||
published_nodes_count,
|
||||
} = err
|
||||
{
|
||||
assert_eq!(published_nodes_count, 1);
|
||||
assert_eq!(published_nodes_count, 3);
|
||||
};
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn retry_delay() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let pubky_client = testnet.client_builder().build().unwrap();
|
||||
let pkarr_client = pubky_client.pkarr().clone();
|
||||
let dht = pkarr::mainline::Testnet::new(3).unwrap();
|
||||
let mut pkarr_builder = pkarr::ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&dht.bootstrap).no_relays();
|
||||
let pkarr_client = pkarr_builder.clone().build().unwrap();
|
||||
let (_, packet) = sample_packet();
|
||||
|
||||
let required_nodes = 1;
|
||||
|
||||
@@ -250,7 +250,6 @@ mod tests {
|
||||
|
||||
use crate::republisher::{Republisher, RepublisherSettings};
|
||||
use pkarr::{dns::Name, Keypair, PublicKey};
|
||||
use pubky_testnet::Testnet;
|
||||
|
||||
async fn publish_sample_packets(client: &pkarr::Client) -> PublicKey {
|
||||
let key = Keypair::random();
|
||||
@@ -269,9 +268,13 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn single_key_republish_success() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let pubky_client = testnet.client_builder().build().unwrap();
|
||||
let pkarr_client = pubky_client.pkarr().clone();
|
||||
let dht = pkarr::mainline::Testnet::new(1).unwrap();
|
||||
let mut pkarr_builder = pkarr::ClientBuilder::default();
|
||||
pkarr_builder
|
||||
.no_default_network()
|
||||
.bootstrap(&dht.bootstrap)
|
||||
.no_relays();
|
||||
let pkarr_client = pkarr_builder.clone().build().unwrap();
|
||||
let public_key = publish_sample_packets(&pkarr_client).await;
|
||||
|
||||
let required_nodes = 1;
|
||||
@@ -288,9 +291,10 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn single_key_republish_missing() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let pubky_client = testnet.client_builder().build().unwrap();
|
||||
let pkarr_client = pubky_client.pkarr().clone();
|
||||
let dht = pkarr::mainline::Testnet::new(1).unwrap();
|
||||
let mut pkarr_builder = pkarr::ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&dht.bootstrap).no_relays();
|
||||
let pkarr_client = pkarr_builder.clone().build().unwrap();
|
||||
let public_key = Keypair::random().public_key();
|
||||
|
||||
let required_nodes = 1;
|
||||
@@ -308,9 +312,10 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn retry_delay() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let pubky_client = testnet.client_builder().build().unwrap();
|
||||
let pkarr_client = pubky_client.pkarr().clone();
|
||||
let dht = pkarr::mainline::Testnet::new(1).unwrap();
|
||||
let mut pkarr_builder = pkarr::ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&dht.bootstrap).no_relays();
|
||||
let pkarr_client = pkarr_builder.clone().build().unwrap();
|
||||
let public_key = Keypair::random().public_key();
|
||||
|
||||
let required_nodes = 1;
|
||||
@@ -337,9 +342,10 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn republish_retry_missing() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let pubky_client = testnet.client_builder().build().unwrap();
|
||||
let pkarr_client = pubky_client.pkarr().clone();
|
||||
let dht = pkarr::mainline::Testnet::new(1).unwrap();
|
||||
let mut pkarr_builder = pkarr::ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&dht.bootstrap).no_relays();
|
||||
let pkarr_client = pkarr_builder.clone().build().unwrap();
|
||||
let public_key = Keypair::random().public_key();
|
||||
|
||||
let required_nodes = 1;
|
||||
@@ -360,9 +366,10 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn republish_with_condition_fail() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let pubky_client = testnet.client_builder().build().unwrap();
|
||||
let pkarr_client = pubky_client.pkarr().clone();
|
||||
let dht = pkarr::mainline::Testnet::new(1).unwrap();
|
||||
let mut pkarr_builder = pkarr::ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&dht.bootstrap).no_relays();
|
||||
let pkarr_client = pkarr_builder.clone().build().unwrap();
|
||||
let public_key = publish_sample_packets(&pkarr_client).await;
|
||||
|
||||
let required_nodes = 1;
|
||||
@@ -383,9 +390,10 @@ mod tests {
|
||||
|
||||
#[tokio::test]
|
||||
async fn republish_with_condition_success() {
|
||||
let testnet = Testnet::run().await.unwrap();
|
||||
let pubky_client = testnet.client_builder().build().unwrap();
|
||||
let pkarr_client = pubky_client.pkarr().clone();
|
||||
let dht = pkarr::mainline::Testnet::new(1).unwrap();
|
||||
let mut pkarr_builder = pkarr::ClientBuilder::default();
|
||||
pkarr_builder.bootstrap(&dht.bootstrap).no_relays();
|
||||
let pkarr_client = pkarr_builder.clone().build().unwrap();
|
||||
let public_key = publish_sample_packets(&pkarr_client).await;
|
||||
|
||||
let required_nodes = 1;
|
||||
|
||||
@@ -23,7 +23,7 @@ wasm-bindgen = "0.2.100"
|
||||
url = "2.5.4"
|
||||
bytes = "^1.10.0"
|
||||
base64 = "0.22.1"
|
||||
pkarr = { version = "3.3.3", features = ["full"] }
|
||||
pkarr = { workspace = true, features = ["full"] }
|
||||
cookie = "0.18.1"
|
||||
tracing = "0.1.41"
|
||||
cookie_store = { version = "0.21.1", default-features = false }
|
||||
@@ -59,7 +59,7 @@ futures-lite = "2.6.0"
|
||||
pubky-testnet = { path = "../pubky-testnet" }
|
||||
tokio = "1.43.0"
|
||||
tracing-subscriber = "0.3.19"
|
||||
mainline = "5.3.1"
|
||||
mainline = { workspace = true }
|
||||
|
||||
[build-dependencies]
|
||||
cfg_aliases = "0.2.1"
|
||||
|
||||
@@ -5,19 +5,16 @@ Rust implementation implementation of [Pubky](https://github.com/pubky/pubky-cor
|
||||
## Quick Start
|
||||
|
||||
```rust
|
||||
use pubky_testnet::Testnet;
|
||||
use pubky::{Client, Keypair};
|
||||
use pubky_testnet::EphemeralTestnet;
|
||||
use pubky::Keypair;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main () {
|
||||
// Mainline Dht testnet and a temporary homeserver for unit testing.
|
||||
let testnet = Testnet::run_with_hardcoded_configurations().await.unwrap();
|
||||
let homeserver = testnet.run_homeserver().await.unwrap();
|
||||
let testnet = EphemeralTestnet::start().await.unwrap();
|
||||
let client = testnet.pubky_client_builder().build().unwrap();
|
||||
|
||||
let client = Client::builder().testnet().build().unwrap();
|
||||
|
||||
// Uncomment the following line instead if you are not just testing.
|
||||
// let client Client::builder().build().unwrap();
|
||||
let homeserver = testnet.homeserver_suite();
|
||||
|
||||
// Generate a keypair
|
||||
let keypair = Keypair::random();
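// Editor's sketch, not part of the diff: the quick-start snippet is truncated here.
// Based on the e2e tests in this PR, signing up against the suite's homeserver would
// be the natural next step, roughly:
//
//     client.signup(&keypair, &homeserver.public_key(), None).await.unwrap();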
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
* @returns {Promise<string>} - The signup token.
|
||||
* @throws Will throw an error if the request fails.
|
||||
*/
|
||||
export async function createSignupToken(client, homeserver_address ="localhost:6286", adminPassword = "admin") {
|
||||
export async function createSignupToken(client, homeserver_address ="localhost:6288", adminPassword = "admin") {
|
||||
const adminUrl = `http://${homeserver_address}/admin/generate_signup_token`;
|
||||
const response = await client.fetch(adminUrl, {
|
||||
method: "GET",
|
||||
@@ -16,7 +16,6 @@ export async function createSignupToken(client, homeserver_address ="localhost:6
|
||||
"X-Admin-Password": adminPassword,
|
||||
},
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to get signup token: ${response.statusText}`);
|
||||
}
|
||||
|
||||
@@ -114,26 +114,46 @@ impl Client {
|
||||
|
||||
/// Signin to a homeserver.
|
||||
/// After a successful signin, a background task is spawned to republish the user's
|
||||
/// PKarr record if it is missing or older than 6 hours. We don't mind if it succeed
|
||||
/// PKarr record if it is missing or older than 1 hour. We don't mind if it succeeds
|
||||
/// or fails. We want signin to return fast.
|
||||
pub async fn signin(&self, keypair: &Keypair) -> Result<Session> {
|
||||
self.signin_and_ensure_record_published(keypair, false)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Signin to a homeserver and ensure the user's PKarr record is published.
|
||||
///
|
||||
/// Same as `signin(keypair)` but gives the option to wait for the pkarr packet to be
|
||||
/// published in sync. `signin(keypair)` does publish the packet async.
|
||||
pub async fn signin_and_ensure_record_published(
|
||||
&self,
|
||||
keypair: &Keypair,
|
||||
publish_sync: bool,
|
||||
) -> Result<Session> {
|
||||
let token = AuthToken::sign(keypair, vec![Capability::root()]);
|
||||
let session = self.signin_with_authtoken(&token).await?;
|
||||
|
||||
// Spawn a background task to republish the record.
|
||||
let client_clone = self.clone();
|
||||
let keypair_clone = keypair.clone();
|
||||
let future = async move {
|
||||
// Resolve the record and republish if existing and older MAX_HOMESERVER_RECORD_AGE_SECS
|
||||
let _ = client_clone
|
||||
.publish_homeserver(&keypair_clone, None, PublishStrategy::IfOlderThan)
|
||||
.await;
|
||||
};
|
||||
if publish_sync {
|
||||
// Wait for the publish to complete.
|
||||
self.publish_homeserver(keypair, None, PublishStrategy::IfOlderThan)
|
||||
.await?;
|
||||
} else {
|
||||
// Spawn a background task to republish the record.
|
||||
let client_clone = self.clone();
|
||||
let keypair_clone = keypair.clone();
|
||||
|
||||
#[cfg(not(wasm_browser))]
|
||||
tokio::spawn(future);
|
||||
#[cfg(wasm_browser)]
|
||||
wasm_bindgen_futures::spawn_local(future);
|
||||
let future = async move {
|
||||
// Resolve the record and republish if existing and older MAX_HOMESERVER_RECORD_AGE_SECS
|
||||
let _ = client_clone
|
||||
.publish_homeserver(&keypair_clone, None, PublishStrategy::IfOlderThan)
|
||||
.await;
|
||||
};
|
||||
// Spawn a background task to republish the record.
|
||||
#[cfg(not(wasm_browser))]
|
||||
tokio::spawn(future);
|
||||
#[cfg(wasm_browser)]
|
||||
wasm_bindgen_futures::spawn_local(future);
|
||||
}
|
||||
|
||||
Ok(session)
|
||||
}
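The practical difference between the two entry points is whether the caller waits for the conditional pkarr republish. A small usage sketch, assuming a built `pubky::Client` and a `pkarr::Keypair` are already in hand:

```rust
// Assumes `client: pubky::Client` and `keypair: pkarr::Keypair` are already set up.
async fn signin_examples(client: &pubky::Client, keypair: &pkarr::Keypair) {
    // Fast path: returns as soon as the session exists; the pkarr record is
    // republished by a background task if it is missing or older than the max age.
    let _session = client.signin(keypair).await.unwrap();

    // Strict path: additionally waits for the conditional republish to finish,
    // which the updated e2e tests rely on instead of sleeping.
    let _session = client
        .signin_and_ensure_record_published(keypair, true)
        .await
        .unwrap();
}
```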
|
||||
|
||||
@@ -14,10 +14,10 @@ impl Client {
|
||||
///
|
||||
/// Differs from [reqwest::Client::request], in that it can make requests to:
|
||||
/// 1. HTTPs URLs with with a [pkarr::PublicKey] as Top Level Domain, by resolving
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// 2. Pubky URLs like `pubky://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
@@ -26,6 +26,7 @@ impl Client {
|
||||
let url = url.as_str();
|
||||
|
||||
if url.starts_with("pubky://") {
|
||||
// Rewrite pubky:// urls to https://_pubky.
|
||||
let url = format!("https://_pubky.{}", url.split_at(8).1);
|
||||
|
||||
return self.http.request(method, url);
|
||||
@@ -42,10 +43,10 @@ impl Client {
|
||||
///
|
||||
/// Differs from [reqwest::Client::get], in that it can make requests to:
|
||||
/// 1. HTTP(s) URLs with with a [pkarr::PublicKey] as Top Level Domain, by resolving
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// 2. Pubky URLs like `pubky://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
@@ -67,10 +68,10 @@ impl Client {
|
||||
///
|
||||
/// Differs from [reqwest::Client::put], in that it can make requests to:
|
||||
/// 1. HTTP(s) URLs with with a [pkarr::PublicKey] as Top Level Domain, by resolving
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// 2. Pubky URLs like `pubky://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
@@ -83,10 +84,10 @@ impl Client {
|
||||
///
|
||||
/// Differs from [reqwest::Client::patch], in that it can make requests to:
|
||||
/// 1. HTTP(s) URLs with with a [pkarr::PublicKey] as Top Level Domain, by resolving
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// 2. Pubky URLs like `pubky://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
@@ -99,10 +100,10 @@ impl Client {
|
||||
///
|
||||
/// Differs from [reqwest::Client::delete], in that it can make requests to:
|
||||
/// 1. HTTP(s) URLs with with a [pkarr::PublicKey] as Top Level Domain, by resolving
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// 2. Pubky URLs like `pubky://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
@@ -115,10 +116,10 @@ impl Client {
|
||||
///
|
||||
/// Differs from [reqwest::Client::head], in that it can make requests to:
|
||||
/// 1. HTTP(s) URLs with a [pkarr::PublicKey] as Top Level Domain, by resolving
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// corresponding endpoints, and verifying TLS certificates accordingly.
|
||||
/// (example: `https://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`)
|
||||
/// 2. Pubky URLs like `pubky://o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
/// by converting the url into `https://_pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy`
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
|
||||
@@ -39,16 +39,19 @@ impl Client {
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
// Calculate the age of the existing record.
|
||||
let packet_age = match existing {
|
||||
Some(ref record) => {
|
||||
let elapsed = Timestamp::now() - record.timestamp();
|
||||
Duration::from_micros(elapsed.as_u64())
|
||||
}
|
||||
None => Duration::from_secs(u64::MAX), // Use max duration if no record exists.
|
||||
};
|
||||
|
||||
// Determine if we should publish based on the given strategy.
|
||||
let should_publish = match strategy {
|
||||
PublishStrategy::Force => true,
|
||||
PublishStrategy::IfOlderThan => match existing {
|
||||
Some(ref record) => {
|
||||
let elapsed = Timestamp::now() - record.timestamp();
|
||||
Duration::from_micros(elapsed.as_u64()) > self.max_record_age
|
||||
}
|
||||
None => true,
|
||||
},
|
||||
PublishStrategy::IfOlderThan => packet_age > self.max_record_age,
|
||||
};
|
||||
|
||||
if should_publish {
|
||||
|
||||
@@ -22,7 +22,7 @@ crypto_secretbox = { version = "0.1.1", features = ["std"] }
|
||||
argon2 = { version = "0.5.3", features = ["std"] }
|
||||
pubky-timestamp = { version = "0.4.0", features = ["full"] }
|
||||
serde = { version = "1.0.217", features = ["derive"] }
|
||||
pkarr = { version = "3.3.3", default-features = false, features = ["keys"] }
|
||||
pkarr = { workspace = true, features = ["keys"] }
|
||||
|
||||
[target.'cfg(target_arch = "wasm32")'.dependencies]
|
||||
js-sys = "0.3.77"
|
||||
|
||||
@@ -30,7 +30,7 @@ pub struct AuthToken {
|
||||
///
|
||||
/// Version 0:
|
||||
/// - Signer is implicitly the same as the root keypair for
|
||||
/// the [AuthToken::pubky], without any delegation.
|
||||
/// the [AuthToken::pubky], without any delegation.
|
||||
/// - Capabilities are only meant for resources on the homeserver.
|
||||
version: u8,
|
||||
/// Timestamp
|
||||
|
||||
@@ -31,7 +31,7 @@ heed = "0.21.0"
|
||||
hex = "0.4.3"
|
||||
httpdate = "1.0.3"
|
||||
postcard = { version = "1.1.1", features = ["alloc"] }
|
||||
pkarr = { version = "3.3.3", features = ["dht", "lmdb-cache", "tls"] }
|
||||
pkarr = { workspace = true, features = ["dht", "lmdb-cache", "tls"] }
|
||||
pubky-common = { version = "0.3.1", path = "../pubky-common" }
|
||||
serde = { version = "1.0.217", features = ["derive"] }
|
||||
tokio = { version = "1.43.0", features = ["full"] }
|
||||
@@ -48,7 +48,12 @@ pkarr-republisher = { version = "0.1.0", path = "../pkarr-republisher" }
|
||||
thiserror = "2.0.12"
|
||||
dirs = "6.0.0"
|
||||
hostname-validator = "1.1.1"
|
||||
axum-test = "17.2.0"
|
||||
tempfile = { version = "3.10.1"}
|
||||
dyn-clone = "1.0.19"
|
||||
reqwest = "0.12.15"
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3.10.1"
|
||||
futures-lite = "2.6.0"
|
||||
|
||||
|
||||
@@ -1,34 +1,92 @@
|
||||
# Pubky Homeserver
|
||||
|
||||
A pubky-core homeserver that acts as users' agent on the Internet, providing data availability and more.
|
||||
A Pubky homeserver that acts as the user's agent on the Internet, providing data availability and more.
|
||||
|
||||
## Usage
|
||||
|
||||
### Library
|
||||
|
||||
You can use the Homeserver as a library in other crates/binaries or for testing purposes.
|
||||
Use the Homeserver as a library in other crates/binaries or for testing purposes.
|
||||
The `HomeserverSuite` comes with all the bells and whistles included.
|
||||
|
||||
```rust
|
||||
use anyhow::Result;
|
||||
use pubky_homeserver::Homeserver;
|
||||
use pubky_homeserver::HomeserverSuite;
use std::path::PathBuf;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let homeserver = unsafe {
|
||||
Homeserver::builder().run().await.unwrap()
|
||||
};
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let server = HomeserverSuite::run_with_data_dir_path(PathBuf::from("~/.pubky")).await?;
|
||||
println!(
|
||||
"Homeserver HTTP listening on {}",
|
||||
server.core().icann_http_url()
|
||||
);
|
||||
println!(
|
||||
"Homeserver Pubky TLS listening on {} and {}",
|
||||
server.core().pubky_tls_dns_url(),
|
||||
server.core().pubky_tls_ip_url()
|
||||
);
|
||||
println!(
|
||||
"Admin server listening on http://{}",
|
||||
server.admin().listen_socket()
|
||||
);
|
||||
tokio::signal::ctrl_c().await?;
|
||||
|
||||
println!("Shutting down Homeserver");
|
||||
|
||||
homeserver.shutdown();
|
||||
println!("Shutting down Homeserver");
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
Run the suite with a temporary directory and your custom config. This is a good way to test the server.
|
||||
|
||||
```rust
|
||||
use anyhow::Result;
|
||||
use pubky_homeserver::{ConfigToml, DataDirMock, HomeserverSuite};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let mut config = ConfigToml::default(); // Use ConfigToml::test() for random ports.
|
||||
// Set config values however you like
|
||||
config.admin.admin_password = "alternative_password".to_string();
|
||||
// Creates a temporary directory that gets cleaned up
|
||||
// as soon as the suite is dropped.
|
||||
let mock_dir = DataDirMock::new(config, None).unwrap();
|
||||
let suite = HomeserverSuite::run_with_data_dir_mock(mock_dir).await.unwrap();
|
||||
Ok(())
}
```
|
||||
|
||||
|
||||
Run the `HomeserverCore` only without the admin server.
|
||||
|
||||
```rust
|
||||
use anyhow::Result;
|
||||
use pubky_homeserver::HomeserverCore;
use std::path::PathBuf;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let mut core = HomeserverCore::from_data_dir_path(PathBuf::from("~/.pubky")).await?;
|
||||
core.listen().await?;
|
||||
println!(
|
||||
"Homeserver HTTP listening on {}",
|
||||
core.icann_http_url()
|
||||
);
|
||||
println!(
|
||||
"Homeserver Pubky TLS listening on {} and {}",
|
||||
core.pubky_tls_dns_url(),
|
||||
core.pubky_tls_ip_url()
|
||||
);
|
||||
Ok(())
}
|
||||
```
|
||||
|
||||
### Binary
|
||||
|
||||
Use `cargo run -- --data-dir=~/.pubky`.
|
||||
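A minimal sketch of building the release binary and running it against a data directory (assuming the binary is named `pubky-homeserver` and that `--data-dir` is the only flag you need; adjust paths to your setup):

```bash
# Build the optimized binary.
cargo build --release

# Run it, pointing at the directory that holds (or will receive) the config and keypair.
./target/release/pubky-homeserver --data-dir=~/.pubky
```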
|
||||
### Signup Token
|
||||
|
||||
If the homeserver is set to require signup tokens, you can create a new signup token using the admin endpoint:
|
||||
|
||||
```rust,ignore
|
||||
let response = pubky_client
|
||||
.get(&format!("https://{homeserver_pubkey}/admin/generate_signup_token"))
|
||||
.get(&format!("https://127.0.0.1:6288/admin/generate_signup_token"))
|
||||
.header("X-Admin-Password", "admin") // Use your admin password. This is testnet default pwd.
|
||||
.send()
|
||||
.await
|
||||
@@ -39,7 +97,7 @@ let signup_token = response.text().await.unwrap();
|
||||
via CLI with `curl`
|
||||
|
||||
```bash
|
||||
curl -X GET "https://<homeserver_ip:port>/admin/generate_signup_token" \
|
||||
curl -X GET "https://127.0.0.1:6288/admin/generate_signup_token" \
|
||||
-H "X-Admin-Password: admin"
|
||||
# Use your admin password. This is testnet default pwd.
|
||||
```
|
||||
@@ -47,7 +105,7 @@ curl -X GET "https://<homeserver_ip:port>/admin/generate_signup_token" \
|
||||
or from JS
|
||||
|
||||
```js
|
||||
const url = "http://${homeserver_address}/admin/generate_signup_token";
|
||||
const url = "http://127.0.0.1:6288/admin/generate_signup_token";
|
||||
const response = await client.fetch(url, {
|
||||
method: "GET",
|
||||
headers: {
|
||||
@@ -55,26 +113,4 @@ const response = await client.fetch(url, {
|
||||
},
|
||||
});
|
||||
const signupToken = await response.text();
|
||||
```
|
||||
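The signup token is then passed along with the signup request. A rough, hypothetical sketch (the exact client call may differ; the `signup_token` query parameter and the serialized `AuthToken` body match the homeserver's `/signup` route):

```rust,ignore
// Sign an auth token with the new user's root keypair and POST it to /signup,
// attaching the signup token as a query parameter.
let auth_token = AuthToken::sign(&keypair, vec![Capability::root()]);
let response = pubky_client
    .post(&format!(
        "https://{homeserver_pubkey}/signup?signup_token={signup_token}"
    ))
    .body(auth_token.serialize())
    .send()
    .await?;
```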
|
||||
### Binary
|
||||
|
||||
Use `cargo run`
|
||||
|
||||
```bash
|
||||
cargo run -- --config=./src/config.toml
|
||||
```
|
||||
|
||||
Or Build first then run from target.
|
||||
|
||||
Build
|
||||
|
||||
```bash
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
Run with an optional config file
|
||||
|
||||
```bash
|
||||
../target/release/pubky-homeserver --config=./src/config.toml
|
||||
```
|
||||
```
|
||||
@@ -20,17 +20,6 @@ pubky_listen_socket = "127.0.0.1:6287"
|
||||
# May be put behind a reverse proxy with TLS enabled.
|
||||
icann_listen_socket = "127.0.0.1:6286"
|
||||
|
||||
# An ICANN domain name is necessary to support legacy browsers
|
||||
#
|
||||
# Make sure to setup a domain name and point it the IP
|
||||
# address of this machine where you are running this server.
|
||||
#
|
||||
# This domain should point to the `<public_ip>:<public_port>`.
|
||||
#
|
||||
# ICANN TLS is not natively supported, so you should be running
|
||||
# a reverse proxy and managing certificates yourself.
|
||||
icann_domain = "example.com"
|
||||
|
||||
|
||||
[admin]
|
||||
# The port number to run the admin HTTP (clear text) server on.
|
||||
@@ -43,10 +32,30 @@ admin_password = "admin"
|
||||
[pkdns]
|
||||
# The public IP address and port of the homeserver pubky_drive_api to be advertised on the DHT.
|
||||
# Must be set to be reachable from the outside.
|
||||
public_socket = "127.0.0.1:6286"
|
||||
public_ip = "127.0.0.1"
|
||||
|
||||
# The pubky tls port in case it differs from the pubky_listen_socket port.
|
||||
# Defaults to the pubky_listen_socket port.
|
||||
public_pubky_tls_port = 6287
|
||||
|
||||
# The icann http port in case it differs from the icann_listen_socket port.
|
||||
# Defaults to the icann_listen_socket port.
|
||||
public_icann_http_port = 80
|
||||
|
||||
# An ICANN domain name is necessary to support legacy browsers
|
||||
#
|
||||
# Make sure to set up a domain name and point it to the IP
|
||||
# address of this machine where you are running this server.
|
||||
#
|
||||
# This domain should point to the `<public_ip>:<public_port>`.
|
||||
#
|
||||
# ICANN TLS is not natively supported, so you should be running
|
||||
# a reverse proxy and managing certificates yourself.
|
||||
icann_domain = "example.com"
|
||||
|
||||
# The interval at which user keys are republished to the DHT.
|
||||
user_keys_republisher_interval = 14400 # 4 hours in seconds
|
||||
# 0 means disabled.
|
||||
user_keys_republisher_interval = 14400 # 4 hours in seconds
|
||||
|
||||
# List of bootstrap nodes for the DHT.
|
||||
# domain:port format.
|
||||
@@ -59,4 +68,11 @@ dht_bootstrap_nodes = [
|
||||
|
||||
# Relay node urls for the DHT.
|
||||
# Improves the availability of pkarr packets.
|
||||
dht_relay_nodes = ["https://relay.pkarr.org", "https://pkarr.pubky.org"]
|
||||
# If not set and no bootstrap nodes are set, the default pkarr relay nodes will be used.
|
||||
dht_relay_nodes = [
|
||||
"https://relay.pkarr.org",
|
||||
"https://pkarr.pubky.org"
|
||||
]
|
||||
|
||||
# Default UDP request timeout for the DHT
|
||||
dht_request_timeout_ms = 2000
|
||||
|
||||
183
pubky-homeserver/src/admin/app.rs
Normal file
183
pubky-homeserver/src/admin/app.rs
Normal file
@@ -0,0 +1,183 @@
|
||||
use std::net::SocketAddr;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use super::routes::{generate_signup_token, root};
|
||||
use super::trace::with_trace_layer;
|
||||
use super::{app_state::AppState, auth_middleware::AdminAuthLayer};
|
||||
use crate::app_context::AppContext;
|
||||
use crate::{AppContextConversionError, MockDataDir, PersistentDataDir};
|
||||
use axum::{routing::get, Router};
|
||||
use axum_server::Handle;
|
||||
use tokio::task::JoinHandle;
|
||||
use tower_http::cors::CorsLayer;
|
||||
|
||||
/// Router for the `/admin` path.
|
||||
/// Admin password required.
|
||||
fn create_admin_router(password: &str) -> Router<AppState> {
|
||||
Router::new()
|
||||
.route(
|
||||
"/generate_signup_token",
|
||||
get(generate_signup_token::generate_signup_token),
|
||||
)
|
||||
.layer(AdminAuthLayer::new(password.to_string()))
|
||||
}
|
||||
|
||||
/// main / router
|
||||
/// This part is not protected by the admin auth middleware
|
||||
fn create_app(state: AppState, password: &str) -> axum::routing::IntoMakeService<Router> {
|
||||
let admin_router = create_admin_router(password);
|
||||
|
||||
let app = Router::new()
|
||||
.nest("/admin", admin_router)
|
||||
.route("/", get(root::root))
|
||||
.with_state(state)
|
||||
.layer(CorsLayer::very_permissive());
|
||||
|
||||
with_trace_layer(app).into_make_service()
|
||||
}
|
||||
|
||||
/// Errors that can occur when building a `AdminServer`.
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum AdminServerBuildError {
|
||||
/// Failed to create admin server.
|
||||
#[error("Failed to create admin server: {0}")]
|
||||
Server(anyhow::Error),
|
||||
|
||||
/// Failed to bootstrap from the data directory.
|
||||
#[error("Failed to boostrap from the data directory: {0}")]
|
||||
DataDir(AppContextConversionError),
|
||||
}
|
||||
|
||||
/// Admin server
|
||||
///
|
||||
/// This server is protected by the admin auth middleware.
|
||||
///
|
||||
/// When dropped, the server will stop.
|
||||
pub struct AdminServer {
|
||||
http_handle: Handle,
|
||||
join_handle: JoinHandle<()>,
|
||||
socket: SocketAddr,
|
||||
password: String,
|
||||
}
|
||||
|
||||
impl AdminServer {
|
||||
/// Create a new admin server from a data directory.
|
||||
pub async fn from_data_dir(data_dir: PersistentDataDir) -> Result<Self, AdminServerBuildError> {
|
||||
let context = AppContext::try_from(data_dir).map_err(AdminServerBuildError::DataDir)?;
|
||||
Self::start(&context).await
|
||||
}
|
||||
|
||||
/// Create a new admin server from a data directory path.
|
||||
pub async fn from_data_dir_path(data_dir_path: PathBuf) -> Result<Self, AdminServerBuildError> {
|
||||
let data_dir = PersistentDataDir::new(data_dir_path);
|
||||
Self::from_data_dir(data_dir).await
|
||||
}
|
||||
|
||||
/// Create a new admin server from a mock data directory.
|
||||
pub async fn from_mock_dir(mock_dir: MockDataDir) -> Result<Self, AdminServerBuildError> {
|
||||
let context = AppContext::try_from(mock_dir).map_err(AdminServerBuildError::DataDir)?;
|
||||
Self::start(&context).await
|
||||
}
|
||||
|
||||
/// Run the admin server.
|
||||
pub async fn start(context: &AppContext) -> Result<Self, AdminServerBuildError> {
|
||||
let password = context.config_toml.admin.admin_password.clone();
|
||||
let state = AppState::new(context.db.clone());
|
||||
let socket = context.config_toml.admin.listen_socket;
|
||||
let app = create_app(state, password.as_str());
|
||||
let listener = std::net::TcpListener::bind(socket)
|
||||
.map_err(|e| AdminServerBuildError::Server(e.into()))?;
|
||||
let socket = listener
|
||||
.local_addr()
|
||||
.map_err(|e| AdminServerBuildError::Server(e.into()))?;
|
||||
let http_handle = Handle::new();
|
||||
let inner_http_handle = http_handle.clone();
|
||||
let join_handle = tokio::spawn(async move {
|
||||
axum_server::from_tcp(listener)
|
||||
.handle(inner_http_handle)
|
||||
.serve(app)
|
||||
.await
|
||||
.unwrap_or_else(|e| tracing::error!("Admin server error: {}", e));
|
||||
});
|
||||
Ok(Self {
|
||||
http_handle,
|
||||
socket,
|
||||
join_handle,
|
||||
password,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the socket address of the admin server.
|
||||
pub fn listen_socket(&self) -> SocketAddr {
|
||||
self.socket
|
||||
}
|
||||
|
||||
/// Create a signup token for the given homeserver.
|
||||
pub async fn create_signup_token(&self) -> anyhow::Result<String> {
|
||||
let admin_socket = self.listen_socket();
|
||||
let url = format!("http://{}/admin/generate_signup_token", admin_socket);
|
||||
let response = reqwest::Client::new()
|
||||
.get(url)
|
||||
.header("X-Admin-Password", &self.password)
|
||||
.send()
|
||||
.await?;
|
||||
let response = response.error_for_status()?;
|
||||
let body = response.text().await?;
|
||||
Ok(body)
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for AdminServer {
|
||||
fn drop(&mut self) {
|
||||
self.http_handle
|
||||
.graceful_shutdown(Some(Duration::from_secs(5)));
|
||||
self.join_handle.abort();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use axum_test::TestServer;
|
||||
|
||||
use crate::persistence::lmdb::LmDB;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_root() {
|
||||
let server = TestServer::new(create_app(AppState::new(LmDB::test()), "test")).unwrap();
|
||||
let response = server.get("/").expect_success().await;
|
||||
response.assert_status_ok();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_generate_signup_token_fail() {
|
||||
let server = TestServer::new(create_app(AppState::new(LmDB::test()), "test")).unwrap();
|
||||
// No password
|
||||
let response = server
|
||||
.get("/admin/generate_signup_token")
|
||||
.expect_failure()
|
||||
.await;
|
||||
response.assert_status_unauthorized();
|
||||
|
||||
// wrong password
|
||||
let response = server
|
||||
.get("/admin/generate_signup_token")
|
||||
.add_header("X-Admin-Password", "wrongpassword")
|
||||
.expect_failure()
|
||||
.await;
|
||||
response.assert_status_unauthorized();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_generate_signup_token_success() {
|
||||
let server = TestServer::new(create_app(AppState::new(LmDB::test()), "test")).unwrap();
|
||||
let response = server
|
||||
.get("/admin/generate_signup_token")
|
||||
.add_header("X-Admin-Password", "test")
|
||||
.expect_success()
|
||||
.await;
|
||||
response.assert_status_ok();
|
||||
}
|
||||
}
|
||||
12
pubky-homeserver/src/admin/app_state.rs
Normal file
12
pubky-homeserver/src/admin/app_state.rs
Normal file
@@ -0,0 +1,12 @@
|
||||
use crate::persistence::lmdb::LmDB;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct AppState {
|
||||
pub(crate) db: LmDB,
|
||||
}
|
||||
|
||||
impl AppState {
|
||||
pub fn new(db: LmDB) -> Self {
|
||||
Self { db }
|
||||
}
|
||||
}
|
||||
8
pubky-homeserver/src/admin/mod.rs
Normal file
8
pubky-homeserver/src/admin/mod.rs
Normal file
@@ -0,0 +1,8 @@
|
||||
mod app;
|
||||
mod app_state;
|
||||
mod auth_middleware;
|
||||
mod routes;
|
||||
mod trace;
|
||||
|
||||
pub use app::AdminServer;
|
||||
pub use app::AdminServerBuildError;
|
||||
@@ -0,0 +1,9 @@
|
||||
use crate::core::Result;
|
||||
use axum::{extract::State, http::StatusCode, response::IntoResponse};
|
||||
|
||||
use super::super::app_state::AppState;
|
||||
|
||||
pub async fn generate_signup_token(State(mut state): State<AppState>) -> Result<impl IntoResponse> {
|
||||
let token = state.db.generate_signup_token()?;
|
||||
Ok((StatusCode::OK, token))
|
||||
}
|
||||
2
pubky-homeserver/src/admin/routes/mod.rs
Normal file
2
pubky-homeserver/src/admin/routes/mod.rs
Normal file
@@ -0,0 +1,2 @@
|
||||
pub(crate) mod generate_signup_token;
|
||||
pub(crate) mod root;
|
||||
6
pubky-homeserver/src/admin/routes/root.rs
Normal file
6
pubky-homeserver/src/admin/routes/root.rs
Normal file
@@ -0,0 +1,6 @@
|
||||
use crate::core::Result;
|
||||
use axum::{http::StatusCode, response::IntoResponse};
|
||||
|
||||
pub async fn root() -> Result<impl IntoResponse> {
|
||||
Ok((StatusCode::OK, "Homeserver - Admin Endpoint"))
|
||||
}
|
||||
40
pubky-homeserver/src/admin/trace.rs
Normal file
40
pubky-homeserver/src/admin/trace.rs
Normal file
@@ -0,0 +1,40 @@
|
||||
use axum::{extract::Request, Router};
|
||||
use tower_http::trace::{
|
||||
DefaultOnFailure, DefaultOnRequest, DefaultOnResponse, OnFailure, OnRequest, OnResponse,
|
||||
TraceLayer,
|
||||
};
|
||||
use tracing::{Level, Span};
|
||||
|
||||
pub fn with_trace_layer(router: Router) -> Router {
|
||||
router.layer(
|
||||
TraceLayer::new_for_http()
|
||||
.make_span_with(move |request: &Request| {
|
||||
let uri = request.uri().to_string();
|
||||
tracing::span!(
|
||||
Level::INFO,
|
||||
"request",
|
||||
method = %request.method(),
|
||||
uri = ?uri,
|
||||
version = ?request.version(),
|
||||
)
|
||||
})
|
||||
.on_request(|request: &Request, span: &Span| {
|
||||
// Use the default behavior for other spans
|
||||
DefaultOnRequest::new().on_request(request, span);
|
||||
})
|
||||
.on_response(
|
||||
|response: &axum::response::Response, latency: std::time::Duration, span: &Span| {
|
||||
// Use the default behavior for other spans
|
||||
DefaultOnResponse::new().on_response(response, latency, span);
|
||||
},
|
||||
)
|
||||
.on_failure(
|
||||
|error: tower_http::classify::ServerErrorsFailureClass,
|
||||
latency: std::time::Duration,
|
||||
span: &Span| {
|
||||
// Use the default behavior for other spans
|
||||
DefaultOnFailure::new().on_failure(error, latency, span);
|
||||
},
|
||||
),
|
||||
)
|
||||
}
|
||||
139
pubky-homeserver/src/app_context.rs
Normal file
139
pubky-homeserver/src/app_context.rs
Normal file
@@ -0,0 +1,139 @@
|
||||
//!
|
||||
//! The application context shared between all components.
|
||||
//! Think of it as a simple Dependency Injection container.
|
||||
//!
|
||||
//! Create with a `DataDir` instance: `AppContext::try_from(data_dir)`
|
||||
//!
|
||||
|
||||
use std::{sync::Arc, time::Duration};
|
||||
|
||||
use pkarr::Keypair;
|
||||
|
||||
use crate::{persistence::lmdb::LmDB, ConfigToml, DataDir, MockDataDir, PersistentDataDir};
|
||||
|
||||
/// Errors that can occur when converting a `DataDir` to an `AppContext`.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum AppContextConversionError {
|
||||
/// Failed to ensure data directory exists and is writable.
|
||||
#[error("Failed to ensure data directory exists and is writable: {0}")]
|
||||
DataDir(anyhow::Error),
|
||||
/// Failed to read or create config file.
|
||||
#[error("Failed to read or create config file: {0}")]
|
||||
Config(anyhow::Error),
|
||||
/// Failed to read or create keypair.
|
||||
#[error("Failed to read or create keypair: {0}")]
|
||||
Keypair(anyhow::Error),
|
||||
/// Failed to open LMDB.
|
||||
#[error("Failed to open LMDB: {0}")]
|
||||
LmDB(anyhow::Error),
|
||||
/// Failed to build pkarr client.
|
||||
#[error("Failed to build pkarr client: {0}")]
|
||||
Pkarr(pkarr::errors::BuildError),
|
||||
}
|
||||
|
||||
/// The application context shared between all components.
|
||||
/// Think of it as a simple Dependency Injection container.
|
||||
///
|
||||
/// Create with a `DataDir` instance: `AppContext::try_from(data_dir)`
|
||||
///
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AppContext {
|
||||
/// A list of all shared resources.
|
||||
pub(crate) db: LmDB,
|
||||
pub(crate) config_toml: ConfigToml,
|
||||
/// Keep data_dir alive. The mock dir will clean up on drop.
|
||||
pub(crate) data_dir: Arc<dyn DataDir>,
|
||||
pub(crate) keypair: Keypair,
|
||||
/// Main pkarr instance. This will automatically turn into a DHT server 15 minutes after startup.
|
||||
/// We need to keep this alive.
|
||||
pub(crate) pkarr_client: pkarr::Client,
|
||||
/// pkarr client builder in case we need to create more instances.
|
||||
/// Comes ready with the correct bootstrap nodes and relays.
|
||||
pub(crate) pkarr_builder: pkarr::ClientBuilder,
|
||||
}
|
||||
|
||||
impl AppContext {
|
||||
/// Create a new AppContext for testing.
|
||||
pub fn test() -> Self {
|
||||
use crate::MockDataDir;
|
||||
let data_dir = MockDataDir::test();
|
||||
Self::try_from(data_dir).expect("failed to build AppContext from DataDirMock")
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<Arc<dyn DataDir>> for AppContext {
|
||||
type Error = AppContextConversionError;
|
||||
|
||||
fn try_from(dir: Arc<dyn DataDir>) -> Result<Self, Self::Error> {
|
||||
dir.ensure_data_dir_exists_and_is_writable()
|
||||
.map_err(AppContextConversionError::DataDir)?;
|
||||
let conf = dir
|
||||
.read_or_create_config_file()
|
||||
.map_err(AppContextConversionError::Config)?;
|
||||
let keypair = dir
|
||||
.read_or_create_keypair()
|
||||
.map_err(AppContextConversionError::Keypair)?;
|
||||
|
||||
let db_path = dir.path().join("data/lmdb");
|
||||
let pkarr_builder = Self::build_pkarr_builder_from_config(&conf);
|
||||
Ok(Self {
|
||||
db: unsafe { LmDB::open(db_path).map_err(AppContextConversionError::LmDB)? },
|
||||
pkarr_client: pkarr_builder
|
||||
.clone()
|
||||
.build()
|
||||
.map_err(AppContextConversionError::Pkarr)?,
|
||||
pkarr_builder,
|
||||
config_toml: conf,
|
||||
keypair,
|
||||
data_dir: dir,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<PersistentDataDir> for AppContext {
|
||||
type Error = AppContextConversionError;
|
||||
|
||||
fn try_from(dir: PersistentDataDir) -> Result<Self, Self::Error> {
|
||||
let arc_dir: Arc<dyn DataDir> = Arc::new(dir);
|
||||
Self::try_from(arc_dir)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<MockDataDir> for AppContext {
|
||||
type Error = AppContextConversionError;
|
||||
|
||||
fn try_from(dir: MockDataDir) -> Result<Self, Self::Error> {
|
||||
let arc_dir: Arc<dyn DataDir> = Arc::new(dir);
|
||||
Self::try_from(arc_dir)
|
||||
}
|
||||
}
|
||||
|
||||
impl AppContext {
|
||||
/// Build the pkarr client builder based on the config.
|
||||
fn build_pkarr_builder_from_config(config_toml: &ConfigToml) -> pkarr::ClientBuilder {
|
||||
let mut builder = pkarr::ClientBuilder::default();
|
||||
if let Some(bootstrap_nodes) = &config_toml.pkdns.dht_bootstrap_nodes {
|
||||
let nodes = bootstrap_nodes
|
||||
.iter()
|
||||
.map(|node| node.to_string())
|
||||
.collect::<Vec<String>>();
|
||||
builder.bootstrap(&nodes);
|
||||
|
||||
// If we set custom bootstrap nodes, we don't want to use the default pkarr relay nodes.
|
||||
// Otherwise, we could end up with a DHT with testnet bootstrap nodes and mainnet relays
|
||||
// which would give very weird results.
|
||||
builder.no_relays();
|
||||
}
|
||||
|
||||
if let Some(relays) = &config_toml.pkdns.dht_relay_nodes {
|
||||
builder
|
||||
.relays(relays)
|
||||
.expect("parameters are already urls and therefore valid.");
|
||||
}
|
||||
if let Some(request_timeout) = &config_toml.pkdns.dht_request_timeout_ms {
|
||||
let duration = Duration::from_millis(request_timeout.get());
|
||||
builder.request_timeout(duration);
|
||||
}
|
||||
builder
|
||||
}
|
||||
}
|
||||
3
pubky-homeserver/src/constants.rs
Normal file
3
pubky-homeserver/src/constants.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
// The default limit of a list api if no `limit` query parameter is provided.
|
||||
pub const DEFAULT_LIST_LIMIT: u16 = 100;
|
||||
pub const DEFAULT_MAX_LIST_LIMIT: u16 = 1000;
|
||||
@@ -1,74 +0,0 @@
|
||||
use super::tables::{Tables, TABLES_COUNT};
|
||||
/// Protecting fields from being mutated by modules in crate::database
|
||||
use crate::core::CoreConfig;
|
||||
use heed::{Env, EnvOpenOptions};
|
||||
use std::{fs, path::PathBuf};
|
||||
|
||||
use super::migrations;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct DB {
|
||||
pub(crate) env: Env,
|
||||
pub(crate) tables: Tables,
|
||||
pub(crate) buffers_dir: PathBuf,
|
||||
pub(crate) max_chunk_size: usize,
|
||||
config: CoreConfig,
|
||||
}
|
||||
|
||||
impl DB {
|
||||
/// # Safety
|
||||
/// DB uses LMDB, [opening][heed::EnvOpenOptions::open] which is marked unsafe,
|
||||
/// because the possible Undefined Behavior (UB) if the lock file is broken.
|
||||
pub unsafe fn open(config: CoreConfig) -> anyhow::Result<Self> {
|
||||
let buffers_dir = config.storage.clone().join("buffers");
|
||||
|
||||
// Cleanup buffers.
|
||||
let _ = fs::remove_dir(&buffers_dir);
|
||||
fs::create_dir_all(&buffers_dir)?;
|
||||
|
||||
let env = unsafe {
|
||||
EnvOpenOptions::new()
|
||||
.max_dbs(TABLES_COUNT)
|
||||
.map_size(config.db_map_size)
|
||||
.open(&config.storage)
|
||||
}?;
|
||||
|
||||
let tables = migrations::run(&env)?;
|
||||
|
||||
let db = DB {
|
||||
env,
|
||||
tables,
|
||||
config,
|
||||
buffers_dir,
|
||||
max_chunk_size: max_chunk_size(),
|
||||
};
|
||||
|
||||
Ok(db)
|
||||
}
|
||||
|
||||
// Create an ephemeral database for testing purposes.
|
||||
pub fn test() -> DB {
|
||||
unsafe { DB::open(CoreConfig::test()).unwrap() }
|
||||
}
|
||||
|
||||
// === Getters ===
|
||||
|
||||
pub fn config(&self) -> &CoreConfig {
|
||||
&self.config
|
||||
}
|
||||
}
|
||||
|
||||
/// calculate optimal chunk size:
|
||||
/// - <https://lmdb.readthedocs.io/en/release/#storage-efficiency-limits>
|
||||
/// - <https://github.com/lmdbjava/benchmarks/blob/master/results/20160710/README.md#test-2-determine-24816-kb-byte-values>
|
||||
fn max_chunk_size() -> usize {
|
||||
let page_size = page_size::get();
|
||||
|
||||
// - 16 bytes Header per page (LMDB)
|
||||
// - Each page has to contain 2 records
|
||||
// - 8 bytes per record (LMDB) (empirically, it seems to be 10 not 8)
|
||||
// - 12 bytes key:
|
||||
// - timestamp : 8 bytes
|
||||
// - chunk index: 4 bytes
|
||||
((page_size - 16) / 2) - (8 + 2) - 12
|
||||
}
|
||||
@@ -1,235 +1,221 @@
|
||||
use std::{path::PathBuf, time::Duration};
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use super::backup::backup_lmdb_periodically;
|
||||
use crate::core::database::DB;
|
||||
use super::key_republisher::HomeserverKeyRepublisher;
|
||||
use super::periodic_backup::PeriodicBackup;
|
||||
use crate::app_context::AppContextConversionError;
|
||||
use crate::core::user_keys_republisher::UserKeysRepublisher;
|
||||
use crate::SignupMode;
|
||||
use crate::persistence::lmdb::LmDB;
|
||||
use crate::{app_context::AppContext, PersistentDataDir};
|
||||
use crate::{DataDir, MockDataDir, SignupMode};
|
||||
use anyhow::Result;
|
||||
use axum::Router;
|
||||
use axum_server::{
|
||||
tls_rustls::{RustlsAcceptor, RustlsConfig},
|
||||
Handle,
|
||||
};
|
||||
use futures_util::TryFutureExt;
|
||||
use pubky_common::auth::AuthVerifier;
|
||||
use tokio::time::sleep;
|
||||
|
||||
pub const DEFAULT_REPUBLISHER_INTERVAL: u64 = 4 * 60 * 60; // 4 hours in seconds
|
||||
|
||||
pub const DEFAULT_STORAGE_DIR: &str = "pubky";
|
||||
pub const DEFAULT_MAP_SIZE: usize = 10995116277760; // 10TB (not = disk-space used)
|
||||
|
||||
pub const DEFAULT_LIST_LIMIT: u16 = 100;
|
||||
pub const DEFAULT_MAX_LIST_LIMIT: u16 = 1000;
|
||||
use std::{
|
||||
net::{SocketAddr, TcpListener},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub(crate) struct AppState {
|
||||
pub(crate) verifier: AuthVerifier,
|
||||
pub(crate) db: DB,
|
||||
pub(crate) admin: AdminConfig,
|
||||
pub(crate) db: LmDB,
|
||||
pub(crate) signup_mode: SignupMode,
|
||||
}
|
||||
|
||||
const INITIAL_DELAY_BEFORE_REPUBLISH: Duration = Duration::from_secs(60);
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
/// Errors that can occur when building a `HomeserverCore`.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum HomeserverBuildError {
|
||||
/// Failed to run the key republisher.
|
||||
#[error("Key republisher error: {0}")]
|
||||
KeyRepublisher(anyhow::Error),
|
||||
/// Failed to run the ICANN web server.
|
||||
#[error("ICANN web server error: {0}")]
|
||||
IcannWebServer(anyhow::Error),
|
||||
/// Failed to run the Pubky TLS web server.
|
||||
#[error("Pubky TLS web server error: {0}")]
|
||||
PubkyTlsServer(anyhow::Error),
|
||||
/// Failed to convert the data directory to an AppContext.
|
||||
#[error("AppContext conversion error: {0}")]
|
||||
AppContext(AppContextConversionError),
|
||||
}
|
||||
|
||||
/// A side-effect-free Core of the [crate::Homeserver].
|
||||
pub struct HomeserverCore {
|
||||
pub(crate) router: Router,
|
||||
#[allow(dead_code)]
|
||||
// Keep this alive. Republishing is stopped when the UserKeysRepublisher is dropped.
|
||||
pub(crate) user_keys_republisher: UserKeysRepublisher,
|
||||
#[allow(dead_code)]
|
||||
// Keep this alive. Republishing is stopped when the HomeserverKeyRepublisher is dropped.
|
||||
pub(crate) key_republisher: HomeserverKeyRepublisher,
|
||||
#[allow(dead_code)] // Keep this alive. Backup is stopped when the PeriodicBackup is dropped.
|
||||
pub(crate) periodic_backup: PeriodicBackup,
|
||||
/// Keep context alive.
|
||||
context: AppContext,
|
||||
pub(crate) icann_http_handle: Handle,
|
||||
pub(crate) pubky_tls_handle: Handle,
|
||||
pub(crate) icann_http_socket: SocketAddr,
|
||||
pub(crate) pubky_tls_socket: SocketAddr,
|
||||
}
|
||||
|
||||
impl HomeserverCore {
|
||||
/// Create a side-effect-free Homeserver core.
|
||||
///
|
||||
/// # Safety
|
||||
/// HomeserverCore uses LMDB, [opening][heed::EnvOpenOptions::open] which is marked unsafe,
|
||||
/// because the possible Undefined Behavior (UB) if the lock file is broken.
|
||||
pub unsafe fn new(config: CoreConfig, admin: AdminConfig) -> Result<Self> {
|
||||
let db = unsafe { DB::open(config.clone())? };
|
||||
/// Create a Homeserver from a data directory path like `~/.pubky`.
|
||||
pub async fn from_persistent_data_dir_path(
|
||||
dir_path: PathBuf,
|
||||
) -> std::result::Result<Self, HomeserverBuildError> {
|
||||
let data_dir = PersistentDataDir::new(dir_path);
|
||||
Self::from_persistent_data_dir(data_dir).await
|
||||
}
|
||||
|
||||
let state = AppState {
|
||||
verifier: AuthVerifier::default(),
|
||||
db: db.clone(),
|
||||
admin,
|
||||
};
|
||||
/// Create a Homeserver from a data directory.
|
||||
pub async fn from_persistent_data_dir(
|
||||
data_dir: PersistentDataDir,
|
||||
) -> std::result::Result<Self, HomeserverBuildError> {
|
||||
Self::from_data_dir(Arc::new(data_dir)).await
|
||||
}
|
||||
|
||||
// Spawn the backup process. This task will run forever.
|
||||
if let Some(backup_interval) = config.lmdb_backup_interval {
|
||||
let backup_path = config.storage.join("backup");
|
||||
tokio::spawn(backup_lmdb_periodically(
|
||||
db.clone(),
|
||||
backup_path,
|
||||
backup_interval,
|
||||
));
|
||||
}
|
||||
/// Create a Homeserver from a mock data directory.
|
||||
pub async fn from_mock_data_dir(
|
||||
mock_dir: MockDataDir,
|
||||
) -> std::result::Result<Self, HomeserverBuildError> {
|
||||
Self::from_data_dir(Arc::new(mock_dir)).await
|
||||
}
|
||||
|
||||
let router = super::routes::create_app(state.clone());
|
||||
/// Run the homeserver with configurations from a data directory.
|
||||
pub(crate) async fn from_data_dir(
|
||||
dir: Arc<dyn DataDir>,
|
||||
) -> std::result::Result<Self, HomeserverBuildError> {
|
||||
let context = AppContext::try_from(dir).map_err(HomeserverBuildError::AppContext)?;
|
||||
Self::new(context).await
|
||||
}
|
||||
|
||||
let user_keys_republisher = UserKeysRepublisher::new(
|
||||
db.clone(),
|
||||
config
|
||||
.user_keys_republisher_interval
|
||||
.unwrap_or(Duration::from_secs(DEFAULT_REPUBLISHER_INTERVAL)),
|
||||
);
|
||||
/// Create a Homeserver from an AppContext.
|
||||
/// - Publishes the homeserver's pkarr packet to the DHT.
|
||||
/// - (Optional) Publishes the user's keys to the DHT.
|
||||
/// - (Optional) Runs a periodic backup of the database.
|
||||
/// - Creates the web server (router) for testing. Use `listen` to start the server.
|
||||
pub async fn new(context: AppContext) -> std::result::Result<Self, HomeserverBuildError> {
|
||||
let router = Self::create_router(&context);
|
||||
let (icann_http_handle, icann_http_socket) =
|
||||
Self::start_icann_http_server(&context, router.clone())
|
||||
.await
|
||||
.map_err(HomeserverBuildError::IcannWebServer)?;
|
||||
let (pubky_tls_handle, pubky_tls_socket) = Self::start_pubky_tls_server(&context, router)
|
||||
.await
|
||||
.map_err(HomeserverBuildError::PubkyTlsServer)?;
|
||||
|
||||
let key_republisher = HomeserverKeyRepublisher::start(
|
||||
&context,
|
||||
icann_http_socket.port(),
|
||||
pubky_tls_socket.port(),
|
||||
)
|
||||
.await
|
||||
.map_err(HomeserverBuildError::KeyRepublisher)?;
|
||||
let user_keys_republisher =
|
||||
UserKeysRepublisher::start_delayed(&context, INITIAL_DELAY_BEFORE_REPUBLISH);
|
||||
let periodic_backup = PeriodicBackup::start(&context);
|
||||
|
||||
let user_keys_republisher_clone = user_keys_republisher.clone();
|
||||
if config.is_user_keys_republisher_enabled() {
|
||||
// Delayed start of the republisher to give time for the homeserver to start.
|
||||
tokio::spawn(async move {
|
||||
sleep(INITIAL_DELAY_BEFORE_REPUBLISH).await;
|
||||
user_keys_republisher_clone.run().await;
|
||||
});
|
||||
}
|
||||
Ok(Self {
|
||||
router,
|
||||
user_keys_republisher,
|
||||
key_republisher,
|
||||
periodic_backup,
|
||||
context,
|
||||
icann_http_handle,
|
||||
pubky_tls_handle,
|
||||
icann_http_socket,
|
||||
pubky_tls_socket,
|
||||
})
|
||||
}
|
||||
|
||||
/// Stop the home server background tasks.
|
||||
#[allow(dead_code)]
|
||||
pub async fn stop(&mut self) {
|
||||
self.user_keys_republisher.stop().await;
|
||||
pub(crate) fn create_router(context: &AppContext) -> Router {
|
||||
let state = AppState {
|
||||
verifier: AuthVerifier::default(),
|
||||
db: context.db.clone(),
|
||||
signup_mode: context.config_toml.general.signup_mode.clone(),
|
||||
};
|
||||
super::routes::create_app(state.clone())
|
||||
}
|
||||
|
||||
/// Start the ICANN HTTP server
|
||||
async fn start_icann_http_server(
|
||||
context: &AppContext,
|
||||
router: Router,
|
||||
) -> Result<(Handle, SocketAddr)> {
|
||||
// Icann http server
|
||||
let http_listener = TcpListener::bind(context.config_toml.drive.icann_listen_socket)?;
|
||||
let http_socket = http_listener.local_addr()?;
|
||||
let http_handle = Handle::new();
|
||||
tokio::spawn(
|
||||
axum_server::from_tcp(http_listener)
|
||||
.handle(http_handle.clone())
|
||||
.serve(router.into_make_service_with_connect_info::<SocketAddr>())
|
||||
.map_err(|error| {
|
||||
tracing::error!(?error, "Homeserver icann http server error");
|
||||
println!("Homeserver icann http server error: {:?}", error);
|
||||
}),
|
||||
);
|
||||
|
||||
Ok((http_handle, http_socket))
|
||||
}
|
||||
|
||||
/// Start the Pubky TLS server
|
||||
async fn start_pubky_tls_server(
|
||||
context: &AppContext,
|
||||
router: Router,
|
||||
) -> Result<(Handle, SocketAddr)> {
|
||||
// Pubky tls server
|
||||
let https_listener = TcpListener::bind(context.config_toml.drive.pubky_listen_socket)?;
|
||||
let https_socket = https_listener.local_addr()?;
|
||||
let https_handle = Handle::new();
|
||||
tokio::spawn(
|
||||
axum_server::from_tcp(https_listener)
|
||||
.acceptor(RustlsAcceptor::new(RustlsConfig::from_config(Arc::new(
|
||||
context.keypair.to_rpk_rustls_server_config(),
|
||||
))))
|
||||
.handle(https_handle.clone())
|
||||
.serve(router.into_make_service_with_connect_info::<SocketAddr>())
|
||||
.map_err(|error| {
|
||||
tracing::error!(?error, "Homeserver pubky tls server error");
|
||||
println!("Homeserver pubky tls server error: {:?}", error);
|
||||
}),
|
||||
);
|
||||
|
||||
Ok((https_handle, https_socket))
|
||||
}
|
||||
|
||||
/// Get the URL of the icann http server.
|
||||
pub fn icann_http_url(&self) -> String {
|
||||
format!("http://{}", self.icann_http_socket)
|
||||
}
|
||||
|
||||
/// Get the URL of the pubky tls server with the Pubky DNS name.
|
||||
pub fn pubky_tls_dns_url(&self) -> String {
|
||||
format!("https://{}", self.context.keypair.public_key())
|
||||
}
|
||||
|
||||
/// Get the URL of the pubky tls server with the Pubky IP address.
|
||||
pub fn pubky_tls_ip_url(&self) -> String {
|
||||
format!("https://{}", self.pubky_tls_socket)
|
||||
}
|
||||
|
||||
/// Shutdown the http and tls servers.
|
||||
pub fn shutdown(&self) {
|
||||
self.icann_http_handle
|
||||
.graceful_shutdown(Some(Duration::from_secs(5)));
|
||||
self.pubky_tls_handle
|
||||
.graceful_shutdown(Some(Duration::from_secs(5)));
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Default)]
|
||||
pub struct AdminConfig {
|
||||
/// The password used to authorize admin endpoints.
|
||||
pub password: Option<String>,
|
||||
/// Determines whether new signups require a valid token.
|
||||
pub signup_mode: SignupMode,
|
||||
}
|
||||
|
||||
impl AdminConfig {
|
||||
pub fn test() -> Self {
|
||||
AdminConfig {
|
||||
password: Some("admin".to_string()),
|
||||
signup_mode: SignupMode::Open,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
/// Database configurations
|
||||
pub struct CoreConfig {
|
||||
/// Path to the storage directory.
|
||||
///
|
||||
/// Defaults to a directory in the OS data directory
|
||||
pub storage: PathBuf,
|
||||
pub db_map_size: usize,
|
||||
|
||||
/// The default limit of a list api if no `limit` query parameter is provided.
|
||||
///
|
||||
/// Defaults to `100`
|
||||
pub default_list_limit: u16,
|
||||
/// The maximum limit of a list api, even if a `limit` query parameter is provided.
|
||||
///
|
||||
/// Defaults to `1000`
|
||||
pub max_list_limit: u16,
|
||||
|
||||
/// The interval at which the user keys republisher runs. None is disabled.
|
||||
///
|
||||
/// Defaults to `60*60*4` (4 hours)
|
||||
pub user_keys_republisher_interval: Option<Duration>,
|
||||
|
||||
/// The interval at which the LMDB backup is performed. None means disabled.
|
||||
pub lmdb_backup_interval: Option<Duration>,
|
||||
}
|
||||
|
||||
impl Default for CoreConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
storage: storage(None)
|
||||
.expect("operating environment provides no directory for application data"),
|
||||
db_map_size: DEFAULT_MAP_SIZE,
|
||||
|
||||
default_list_limit: DEFAULT_LIST_LIMIT,
|
||||
max_list_limit: DEFAULT_MAX_LIST_LIMIT,
|
||||
|
||||
user_keys_republisher_interval: Some(Duration::from_secs(60 * 60 * 4)),
|
||||
|
||||
lmdb_backup_interval: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl CoreConfig {
|
||||
pub fn test() -> Self {
|
||||
let storage = std::env::temp_dir()
|
||||
.join(pubky_common::timestamp::Timestamp::now().to_string())
|
||||
.join(DEFAULT_STORAGE_DIR);
|
||||
|
||||
Self {
|
||||
storage,
|
||||
db_map_size: 10485760,
|
||||
lmdb_backup_interval: None,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_user_keys_republisher_enabled(&self) -> bool {
|
||||
self.user_keys_republisher_interval.is_some()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn storage(storage: Option<String>) -> anyhow::Result<PathBuf> {
|
||||
if let Some(storage) = storage {
|
||||
Ok(PathBuf::from(storage))
|
||||
} else {
|
||||
dirs::home_dir()
|
||||
.map(|dir| dir.join(".pubky/data/lmdb"))
|
||||
.ok_or_else(|| {
|
||||
anyhow::anyhow!("operating environment provides no directory for application data")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use anyhow::Result;
|
||||
use axum::{
|
||||
body::Body,
|
||||
extract::Request,
|
||||
http::{header, Method},
|
||||
response::Response,
|
||||
};
|
||||
use pkarr::Keypair;
|
||||
use pubky_common::{auth::AuthToken, capabilities::Capability};
|
||||
use tower::ServiceExt;
|
||||
|
||||
use super::*;
|
||||
|
||||
impl HomeserverCore {
|
||||
/// Test version of [HomeserverCore::new], using an ephemeral small storage.
|
||||
pub fn test() -> Result<Self> {
|
||||
unsafe { HomeserverCore::new(CoreConfig::test(), AdminConfig::test()) }
|
||||
}
|
||||
|
||||
// === Public Methods ===
|
||||
|
||||
pub async fn create_root_user(&mut self, keypair: &Keypair) -> Result<String> {
|
||||
let auth_token = AuthToken::sign(keypair, vec![Capability::root()]);
|
||||
|
||||
let response = self
|
||||
.call(
|
||||
Request::builder()
|
||||
.uri("/signup")
|
||||
.header("host", keypair.public_key().to_string())
|
||||
.method(Method::POST)
|
||||
.body(Body::from(auth_token.serialize()))
|
||||
.unwrap(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let header_value = response
|
||||
.headers()
|
||||
.get(header::SET_COOKIE)
|
||||
.and_then(|h| h.to_str().ok())
|
||||
.expect("should return a set-cookie header")
|
||||
.to_string();
|
||||
|
||||
Ok(header_value)
|
||||
}
|
||||
|
||||
pub async fn call(&self, request: Request) -> Result<Response> {
|
||||
Ok(self.router.clone().oneshot(request).await?)
|
||||
}
|
||||
impl Drop for HomeserverCore {
|
||||
fn drop(&mut self) {
|
||||
self.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
251
pubky-homeserver/src/core/key_republisher.rs
Normal file
251
pubky-homeserver/src/core/key_republisher.rs
Normal file
@@ -0,0 +1,251 @@
|
||||
//! Background task to republish the homeserver's pkarr packet to the DHT.
|
||||
//!
|
||||
//! This task is started by the [crate::HomeserverCore] and runs until the homeserver is stopped.
|
||||
//!
|
||||
//! The task is responsible for:
|
||||
//! - Republishing the homeserver's pkarr packet to the DHT every hour.
|
||||
//! - Stopping the task when the homeserver is stopped.
|
||||
|
||||
use std::net::IpAddr;
|
||||
|
||||
use anyhow::Result;
|
||||
use pkarr::dns::Name;
|
||||
use pkarr::errors::PublishError;
|
||||
use pkarr::{dns::rdata::SVCB, SignedPacket};
|
||||
|
||||
use crate::app_context::AppContext;
|
||||
use tokio::task::JoinHandle;
|
||||
use tokio::time::{interval, Duration};
|
||||
|
||||
/// Republishes the homeserver's pkarr packet to the DHT every hour.
|
||||
pub struct HomeserverKeyRepublisher {
|
||||
join_handle: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl HomeserverKeyRepublisher {
|
||||
pub async fn start(
|
||||
context: &AppContext,
|
||||
icann_http_port: u16,
|
||||
pubky_tls_port: u16,
|
||||
) -> Result<Self> {
|
||||
let signed_packet = create_signed_packet(context, icann_http_port, pubky_tls_port)?;
|
||||
let join_handle =
|
||||
Self::start_periodic_republish(context.pkarr_client.clone(), &signed_packet).await?;
|
||||
Ok(Self { join_handle })
|
||||
}
|
||||
|
||||
async fn publish_once(
|
||||
client: &pkarr::Client,
|
||||
signed_packet: &SignedPacket,
|
||||
) -> Result<(), PublishError> {
|
||||
let res = client.publish(signed_packet, None).await;
|
||||
if let Err(e) = &res {
|
||||
tracing::warn!(
|
||||
"Failed to publish the homeserver's pkarr packet to the DHT: {}",
|
||||
e
|
||||
);
|
||||
} else {
|
||||
tracing::info!("Published the homeserver's pkarr packet to the DHT.");
|
||||
}
|
||||
res
|
||||
}
|
||||
|
||||
/// Start the periodic republish task which will republish the server packet to the DHT every hour.
|
||||
///
|
||||
/// # Errors
|
||||
/// - Returns an error if the initial publish fails.
|
||||
/// - Returns an error if the periodic republish task is already running.
|
||||
async fn start_periodic_republish(
|
||||
client: pkarr::Client,
|
||||
signed_packet: &SignedPacket,
|
||||
) -> anyhow::Result<JoinHandle<()>> {
|
||||
// Publish once to make sure the packet is published to the DHT before this
|
||||
// function returns.
|
||||
// Returns an error if the packet is not published to the DHT.
|
||||
Self::publish_once(&client, signed_packet).await?;
|
||||
|
||||
// Start the periodic republish task.
|
||||
let signed_packet = signed_packet.clone();
|
||||
let handle = tokio::spawn(async move {
|
||||
let mut interval = interval(Duration::from_secs(60 * 60)); // 1 hour in seconds
|
||||
interval.tick().await; // This ticks immediately. Wait for the first interval before starting the loop.
|
||||
loop {
|
||||
interval.tick().await;
|
||||
let _ = Self::publish_once(&client, &signed_packet).await;
|
||||
}
|
||||
});
|
||||
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
/// Stop the periodic republish task.
|
||||
pub fn stop(&self) {
|
||||
self.join_handle.abort();
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for HomeserverKeyRepublisher {
|
||||
fn drop(&mut self) {
|
||||
self.stop();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_signed_packet(
|
||||
context: &AppContext,
|
||||
local_icann_http_port: u16,
|
||||
local_pubky_tls_port: u16,
|
||||
) -> Result<SignedPacket> {
|
||||
let root_name: Name = "."
|
||||
.try_into()
|
||||
.expect(". is the root domain and always valid");
|
||||
|
||||
let mut signed_packet_builder = SignedPacket::builder();
|
||||
|
||||
let public_ip = context.config_toml.pkdns.public_ip;
|
||||
let public_pubky_tls_port = context
|
||||
.config_toml
|
||||
.pkdns
|
||||
.public_pubky_tls_port
|
||||
.unwrap_or(local_pubky_tls_port);
|
||||
let public_icann_http_port = context
|
||||
.config_toml
|
||||
.pkdns
|
||||
.public_icann_http_port
|
||||
.unwrap_or(local_icann_http_port);
|
||||
|
||||
// `SVCB(HTTPS)` record pointing to the pubky tls port and the public ip address
|
||||
// This is what is used in all applications except for browsers.
|
||||
let mut svcb = SVCB::new(0, root_name.clone());
|
||||
svcb.set_port(public_pubky_tls_port);
|
||||
match &public_ip {
|
||||
IpAddr::V4(ip) => {
|
||||
svcb.set_ipv4hint([ip.to_bits()])?;
|
||||
}
|
||||
IpAddr::V6(ip) => {
|
||||
svcb.set_ipv6hint([ip.to_bits()])?;
|
||||
}
|
||||
};
|
||||
signed_packet_builder = signed_packet_builder.https(root_name.clone(), svcb, 60 * 60);
|
||||
|
||||
// `SVCB` record pointing to the icann http port and the ICANN domain for browsers support.
|
||||
// Low priority to not override the `SVCB(HTTPS)` record.
|
||||
// Why are we doing this?
|
||||
// The pubky-client in the browser can only do regular HTTP(s) requests.
|
||||
// Pubky TLS requests are therefore not possible, so we need to fall back to the ICANN domain.
|
||||
//
|
||||
// TODO: Is it possible to point the SVCB record to the IP address via an `A` record?
|
||||
// This would remove the ICANN domain dependency.
|
||||
if let Some(domain) = &context.config_toml.pkdns.icann_domain {
|
||||
let mut svcb = SVCB::new(10, root_name.clone());
|
||||
|
||||
let http_port_be_bytes = public_icann_http_port.to_be_bytes();
|
||||
if domain.0 == "localhost" {
|
||||
svcb.set_param(
|
||||
pubky_common::constants::reserved_param_keys::HTTP_PORT,
|
||||
&http_port_be_bytes,
|
||||
)?;
|
||||
}
|
||||
svcb.target = domain.0.as_str().try_into()?;
|
||||
signed_packet_builder = signed_packet_builder.https(root_name.clone(), svcb, 60 * 60);
|
||||
}
|
||||
|
||||
// `A` record to the public IP. This is used for regular browser connections.
|
||||
signed_packet_builder = signed_packet_builder.address(root_name.clone(), public_ip, 60 * 60);
|
||||
|
||||
Ok(signed_packet_builder.build(&context.keypair)?)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use futures_lite::StreamExt;
|
||||
use pkarr::extra::endpoints::Endpoint;
|
||||
use std::net::{Ipv4Addr, SocketAddr};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_resolve_https_endpoint_with_pkarr_client() {
|
||||
let context = AppContext::test();
|
||||
let _republisher = HomeserverKeyRepublisher::start(&context, 8080, 8080)
|
||||
.await
|
||||
.unwrap();
|
||||
let pkarr_client = context.pkarr_client.clone();
|
||||
let hs_pubky = context.keypair.public_key();
|
||||
// Make sure the pkarr packet of the hs is resolvable.
|
||||
let _packet = pkarr_client.resolve(&hs_pubky).await.unwrap();
|
||||
// Make sure the pkarr client can resolve the endpoint of the hs.
|
||||
let qname = format!("{}", hs_pubky);
|
||||
let endpoint = pkarr_client
|
||||
.resolve_https_endpoint(qname.as_str())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
endpoint.to_socket_addrs().first().unwrap().clone(),
|
||||
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_endpoints() {
|
||||
let mut context = AppContext::test();
|
||||
context.keypair = pkarr::Keypair::random();
|
||||
let _republisher = HomeserverKeyRepublisher::start(&context, 8080, 8080)
|
||||
.await
|
||||
.unwrap();
|
||||
let pubkey = context.keypair.public_key();
|
||||
|
||||
let client = pkarr::Client::builder().build().unwrap();
|
||||
let packet = client.resolve(&pubkey).await.unwrap();
|
||||
let rr: Vec<&pkarr::dns::ResourceRecord> = packet.all_resource_records().collect();
|
||||
assert_eq!(rr.len(), 3);
|
||||
|
||||
let endpoints: Vec<Endpoint> = client
|
||||
.resolve_https_endpoints(&pubkey.to_z32())
|
||||
.collect()
|
||||
.await;
|
||||
assert_eq!(endpoints.len(), 2);
|
||||
|
||||
//SignedPacket
|
||||
//{
|
||||
// ResourceRecord {
|
||||
// name: Name("8um71us3fyw6h8wbcxb5ar3rwusy1a6u49956ikzojg3gcwd1dty", "54"),
|
||||
// class: IN,
|
||||
// ttl: 3600,
|
||||
// rdata: A(A { address: 574725291 }),
|
||||
// cache_flush: false },
|
||||
//
|
||||
// ResourceRecord {
|
||||
// name: Name("8um71us3fyw6h8wbcxb5ar3rwusy1a6u49956ikzojg3gcwd1dty", "54"),
|
||||
// class: IN,
|
||||
// ttl: 3600,
|
||||
// rdata: HTTPS(HTTPS(SVCB {
|
||||
// priority: 0,
|
||||
// target: Name("", "1"),
|
||||
// params: {3: [24, 143]} })),
|
||||
// cache_flush: false },
|
||||
//
|
||||
// ResourceRecord {
|
||||
// name: Name("8um71us3fyw6h8wbcxb5ar3rwusy1a6u49956ikzojg3gcwd1dty", "54"),
|
||||
// class: IN,
|
||||
// ttl: 3600,
|
||||
// rdata: HTTPS(HTTPS(SVCB {
|
||||
// priority: 10,
|
||||
// target: Name("homeserver.pubky.app", "22"), params: {} })),
|
||||
// cache_flush: false }],
|
||||
//
|
||||
//[
|
||||
// Endpoint {
|
||||
// target: ".",
|
||||
// public_key: PublicKey(8um71us3fyw6h8wbcxb5ar3rwusy1a6u49956ikzojg3gcwd1dty),
|
||||
// port: 6287,
|
||||
// addrs: [34.65.156.171],
|
||||
// params: {3: [24, 143]} },
|
||||
//
|
||||
// Endpoint {
|
||||
// target: "homeserver.pubky.app",
|
||||
// public_key: PublicKey(8um71us3fyw6h8wbcxb5ar3rwusy1a6u49956ikzojg3gcwd1dty),
|
||||
// port: 0,
|
||||
// addrs: [],
|
||||
// params: {} }]
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,3 @@
|
||||
pub mod admin;
|
||||
pub mod authz;
|
||||
pub mod pubky_host;
|
||||
pub mod trace;
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
mod backup;
|
||||
pub mod database;
|
||||
mod error;
|
||||
mod extractors;
|
||||
mod homeserver_core;
|
||||
mod key_republisher;
|
||||
mod layers;
|
||||
mod periodic_backup;
|
||||
mod routes;
|
||||
mod user_keys_republisher;
|
||||
|
||||
pub use error::*;
|
||||
pub use homeserver_core::*;
|
||||
|
||||
@@ -1,10 +1,54 @@
|
||||
use crate::core::database::DB;
|
||||
use crate::{app_context::AppContext, persistence::lmdb::LmDB};
|
||||
use heed::CompactionOption;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
use tokio::time::interval;
|
||||
use tokio::{task::JoinHandle, time::interval};
|
||||
use tracing::{error, info};
|
||||
|
||||
pub(crate) struct PeriodicBackup {
|
||||
handle: Option<JoinHandle<()>>,
|
||||
}
|
||||
|
||||
const BACKUP_INTERVAL_DANGERZONE: Duration = Duration::from_secs(30);
|
||||
|
||||
impl PeriodicBackup {
|
||||
pub fn start(context: &AppContext) -> Self {
|
||||
let backup_interval =
|
||||
Duration::from_secs(context.config_toml.general.lmdb_backup_interval_s);
|
||||
let is_disabled = backup_interval.as_secs() == 0;
|
||||
if is_disabled {
|
||||
tracing::info!("LMDB backup is disabled.");
|
||||
return Self { handle: None };
|
||||
}
|
||||
if backup_interval < BACKUP_INTERVAL_DANGERZONE {
|
||||
tracing::warn!(
|
||||
"The configured LMDB backup interval is less than {}s!.",
|
||||
BACKUP_INTERVAL_DANGERZONE.as_secs(),
|
||||
);
|
||||
}
|
||||
let db = context.db.clone();
|
||||
let backup_path = context.data_dir.path().join("backup");
|
||||
tracing::info!(
|
||||
"Starting LMDB backup with interval {}s",
|
||||
backup_interval.as_secs()
|
||||
);
|
||||
let handle = tokio::spawn(async move {
|
||||
backup_lmdb_periodically(db, backup_path, backup_interval).await;
|
||||
});
|
||||
Self {
|
||||
handle: Some(handle),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for PeriodicBackup {
|
||||
fn drop(&mut self) {
|
||||
if let Some(handle) = self.handle.take() {
|
||||
handle.abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Periodically creates a backup of the LMDB environment every 4 hours.
|
||||
///
|
||||
/// The backup process performs the following steps:
|
||||
@@ -16,7 +60,7 @@ use tracing::{error, info};
|
||||
///
|
||||
/// * `db` - The LMDB database handle.
|
||||
/// * `backup_path` - The base path for the backup file (extensions will be appended).
|
||||
pub async fn backup_lmdb_periodically(db: DB, backup_path: PathBuf, period: Duration) {
|
||||
pub async fn backup_lmdb_periodically(db: LmDB, backup_path: PathBuf, period: Duration) {
|
||||
let mut interval_timer = interval(period);
|
||||
|
||||
interval_timer.tick().await; // Ignore the first tick as it is instant.
|
||||
@@ -48,7 +92,7 @@ pub async fn backup_lmdb_periodically(db: DB, backup_path: PathBuf, period: Dura
|
||||
///
|
||||
/// * `db` - The LMDB database handle.
|
||||
/// * `backup_path` - The base path for the backup file (extensions will be appended).
|
||||
fn do_backup(db: DB, backup_path: PathBuf) {
|
||||
fn do_backup(db: LmDB, backup_path: PathBuf) {
|
||||
// Define file paths for the temporary and final backup files.
|
||||
let final_backup_path = backup_path.with_extension("mdb");
|
||||
let temp_backup_path = backup_path.with_extension("tmp");
|
||||
@@ -90,7 +134,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_do_backup_creates_backup_file() {
|
||||
// Create a test DB instance.
|
||||
let db = DB::test();
|
||||
let db = LmDB::test();
|
||||
|
||||
// Create a temporary directory to store the backup.
|
||||
let temp_dir = tempdir().expect("Failed to create temporary directory");
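For orientation, a minimal crate-internal sketch of how the new PeriodicBackup is meant to be driven; the helper function name is illustrative, and it assumes AppContext::test() is reachable, as it is in the tests elsewhere in this diff.

// Sketch only: exercises the API exactly as defined in the hunk above.
fn periodic_backup_sketch() {
    let context = AppContext::test();
    // general.lmdb_backup_interval_s == 0 disables the task: start() returns Self { handle: None }.
    // Any other value spawns a tokio task running backup_lmdb_periodically(db, path, interval).
    let backup = PeriodicBackup::start(&context);
    // Dropping the struct aborts the JoinHandle, so the task cannot outlive the homeserver core.
    drop(backup);
}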
@@ -1,14 +0,0 @@
use crate::core::{error::Result, layers::admin::AdminAuthLayer, AppState};
use axum::{extract::State, http::StatusCode, response::IntoResponse, routing::get, Router};

pub async fn generate_signup_token(State(mut state): State<AppState>) -> Result<impl IntoResponse> {
    let token = state.db.generate_signup_token()?;
    Ok((StatusCode::OK, token))
}

pub fn router(state: AppState) -> Router<AppState> {
    let admin_password = state.admin.password.unwrap_or_default();
    Router::new()
        .route("/generate_signup_token", get(generate_signup_token))
        .layer(AdminAuthLayer::new(admin_password))
}
@@ -1,6 +1,6 @@
use crate::persistence::lmdb::tables::users::User;
use crate::{
    core::{
        database::tables::users::User,
        error::{Error, Result},
        AppState,
    },
@@ -50,7 +50,7 @@ pub async fn signup(
    txn.commit()?;

    // 3) If signup_mode == token_required, require & validate a `signup_token` param.
    if state.admin.signup_mode == SignupMode::TokenRequired {
    if state.signup_mode == SignupMode::TokenRequired {
        let signup_token_param = params
            .get("signup_token")
            .ok_or_else(|| Error::new(StatusCode::BAD_REQUEST, Some("signup_token required")))?;

@@ -17,7 +17,6 @@ use crate::core::AppState;

use super::layers::{pubky_host::PubkyHostLayer, trace::with_trace_layer};

mod admin;
mod auth;
mod feed;
mod root;
@@ -41,7 +40,6 @@ fn base() -> Router<AppState> {
pub fn create_app(state: AppState) -> Router {
    let app = base()
        .merge(tenants::router(state.clone()))
        .nest("/admin", admin::router(state.clone()))
        .layer(CookieManagerLayer::new())
        .layer(CorsLayer::very_permissive())
        .layer(ServiceBuilder::new().layer(middleware::from_fn(add_server_header)))

@@ -1,5 +1,5 @@
use axum::response::IntoResponse;

pub async fn handler() -> Result<impl IntoResponse, String> {
    Ok("This a Pubky homeserver.".to_string())
    Ok("Pubky Homeserver".to_string())
}

@@ -9,11 +9,11 @@ use pkarr::PublicKey;
use std::str::FromStr;

use crate::core::{
    database::tables::entries::Entry,
    error::{Error, Result},
    extractors::{ListQueryParams, PubkyHost},
    AppState,
};
use crate::persistence::lmdb::tables::entries::Entry;

pub async fn head(
    State(state): State<AppState>,
@@ -190,124 +190,115 @@ impl From<&Entry> for HeaderMap {

#[cfg(test)]
mod tests {
    use axum::{
        body::Body,
        http::{header, Method, Request, StatusCode},
    };
    use axum::http::{header, StatusCode};
    use pkarr::Keypair;
    use pubky_common::{auth::AuthToken, capabilities::Capability};

    use crate::core::HomeserverCore;
    use crate::{app_context::AppContext, core::HomeserverCore};

    pub async fn create_root_user(
        server: &axum_test::TestServer,
        keypair: &Keypair,
    ) -> anyhow::Result<String> {
        let auth_token = AuthToken::sign(keypair, vec![Capability::root()]);
        let body_bytes: axum::body::Bytes = auth_token.serialize().into();
        let response = server
            .post("/signup")
            .add_header("host", keypair.public_key().to_string())
            .bytes(body_bytes)
            .expect_success()
            .await;

        let header_value = response
            .headers()
            .get(header::SET_COOKIE)
            .and_then(|h| h.to_str().ok())
            .expect("should return a set-cookie header")
            .to_string();

        Ok(header_value)
    }

    #[tokio::test]
    async fn if_last_modified() {
        let mut server = HomeserverCore::test().unwrap();
        let context = AppContext::test();
        let router = HomeserverCore::create_router(&context);
        let server = axum_test::TestServer::new(router).unwrap();

        let keypair = Keypair::random();
        let public_key = keypair.public_key();
        let cookie = server.create_root_user(&keypair).await.unwrap().to_string();
        let cookie = create_root_user(&server, &keypair)
            .await
            .unwrap()
            .to_string();

        let data = vec![1_u8, 2, 3, 4, 5];

        let response = server
            .call(
                Request::builder()
                    .header("host", public_key.to_string())
                    .uri("/pub/foo")
                    .method(Method::PUT)
                    .header(header::COOKIE, cookie)
                    .body(Body::from(data))
                    .unwrap(),
            )
            .await
            .unwrap();

        assert_eq!(response.status(), StatusCode::OK);
        server
            .put("/pub/foo")
            .add_header("host", public_key.to_string())
            .add_header(header::COOKIE, cookie)
            .bytes(data.into())
            .expect_success()
            .await;

        let response = server
            .call(
                Request::builder()
                    .header("host", public_key.to_string())
                    .uri("/pub/foo")
                    .method(Method::GET)
                    .body(Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
            .get("/pub/foo")
            .add_header("host", public_key.to_string())
            .expect_success()
            .await;

        let response = server
            .call(
                Request::builder()
                    .header("host", public_key.to_string())
                    .uri("/pub/foo")
                    .method(Method::GET)
                    .header(
                        header::IF_MODIFIED_SINCE,
                        response.headers().get(header::LAST_MODIFIED).unwrap(),
                    )
                    .body(Body::empty())
                    .unwrap(),
            .get("/pub/foo")
            .add_header("host", public_key.to_string())
            .add_header(
                header::IF_MODIFIED_SINCE,
                response.headers().get(header::LAST_MODIFIED).unwrap(),
            )
            .await
            .unwrap();
            .await;

        assert_eq!(response.status(), StatusCode::NOT_MODIFIED);
        response.assert_status(StatusCode::NOT_MODIFIED);
    }

    #[tokio::test]
    async fn if_none_match() {
        let mut server = HomeserverCore::test().unwrap();
        let context = AppContext::test();
        let router = HomeserverCore::create_router(&context);
        let server = axum_test::TestServer::new(router).unwrap();

        let keypair = Keypair::random();
        let public_key = keypair.public_key();

        let cookie = server.create_root_user(&keypair).await.unwrap().to_string();
        let cookie = create_root_user(&server, &keypair)
            .await
            .unwrap()
            .to_string();

        let data = vec![1_u8, 2, 3, 4, 5];

        let response = server
            .call(
                Request::builder()
                    .uri("/pub/foo")
                    .header("host", public_key.to_string())
                    .method(Method::PUT)
                    .header(header::COOKIE, cookie)
                    .body(Body::from(data))
                    .unwrap(),
            )
            .await
            .unwrap();

        assert_eq!(response.status(), StatusCode::OK);
        server
            .put("/pub/foo")
            .add_header("host", public_key.to_string())
            .add_header(header::COOKIE, cookie)
            .bytes(data.into())
            .expect_success()
            .await;

        let response = server
            .call(
                Request::builder()
                    .uri("/pub/foo")
                    .header("host", public_key.to_string())
                    .method(Method::GET)
                    .body(Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
            .get("/pub/foo")
            .add_header("host", public_key.to_string())
            .expect_success()
            .await;

        let response = server
            .call(
                Request::builder()
                    .uri("/pub/foo")
                    .header("host", public_key.to_string())
                    .method(Method::GET)
                    .header(
                        header::IF_NONE_MATCH,
                        response.headers().get(header::ETAG).unwrap(),
                    )
                    .body(Body::empty())
                    .unwrap(),
            .get("/pub/foo")
            .add_header("host", public_key.to_string())
            .add_header(
                header::IF_NONE_MATCH,
                response.headers().get(header::ETAG).unwrap(),
            )
            .await
            .unwrap();
            .await;

        assert_eq!(response.status(), StatusCode::NOT_MODIFIED);
        response.assert_status(StatusCode::NOT_MODIFIED);
    }
}

@@ -1,72 +1,74 @@
use std::{
    collections::HashMap,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
    time::Duration,
};
use std::{collections::HashMap, time::Duration};

use pkarr::PublicKey;
use pkarr_republisher::{
    MultiRepublishResult, MultiRepublisher, RepublisherSettings, ResilientClientBuilderError,
};
use tokio::{
    sync::RwLock,
    task::JoinHandle,
    time::{interval, Instant},
};

use crate::core::database::DB;
use crate::{app_context::AppContext, persistence::lmdb::LmDB};

#[derive(Debug, thiserror::Error)]
pub enum UserKeysRepublisherError {
pub(crate) enum UserKeysRepublisherError {
    #[error(transparent)]
    DB(heed::Error),
    #[error(transparent)]
    Pkarr(ResilientClientBuilderError),
}

const MIN_REPUBLISH_INTERVAL: Duration = Duration::from_secs(30 * 60);

/// Publishes the pkarr keys of all users to the Mainline DHT.
#[derive(Debug, Clone)]
pub struct UserKeysRepublisher {
    db: DB,
    handle: Arc<RwLock<Option<JoinHandle<()>>>>,
    is_running: Arc<AtomicBool>,
    republish_interval: Duration,
pub(crate) struct UserKeysRepublisher {
    handle: Option<JoinHandle<()>>,
}

impl UserKeysRepublisher {
    pub fn new(db: DB, republish_interval: Duration) -> Self {
        Self {
            db,
            handle: Arc::new(RwLock::new(None)),
            is_running: Arc::new(AtomicBool::new(false)),
            republish_interval,
    /// Run the user keys republisher with an initial delay.
    pub fn start_delayed(context: &AppContext, initial_delay: Duration) -> Self {
        let db = context.db.clone();
        let is_disabled = context.config_toml.pkdns.user_keys_republisher_interval == 0;
        if is_disabled {
            tracing::info!("User keys republisher is disabled.");
            return Self { handle: None };
        }
        let mut republish_interval =
            Duration::from_secs(context.config_toml.pkdns.user_keys_republisher_interval);
        if republish_interval < MIN_REPUBLISH_INTERVAL {
            tracing::warn!(
                "The configured user keys republisher interval is less than {}s. To avoid spamming the Mainline DHT, the value is set to {}s.",
                MIN_REPUBLISH_INTERVAL.as_secs(),
                MIN_REPUBLISH_INTERVAL.as_secs()
            );
            republish_interval = MIN_REPUBLISH_INTERVAL;
        }
    }

    /// Run the user keys republisher.
    pub async fn run(&self) {
        tracing::info!(
            "Initialize user keys republisher with interval {:?}",
            self.republish_interval
            "Initialize user keys republisher with an interval of {:?} and an initial delay of {:?}",
            republish_interval,
            initial_delay
        );
        let mut lock = self.handle.write().await;
        if lock.is_some() {
            return;
        }
        let db = self.db.clone();
        let republish_interval = self.republish_interval;
        let handle: JoinHandle<()> =
            tokio::spawn(async move { Self::run_loop(db, republish_interval).await });

        *lock = Some(handle);
        self.is_running.store(true, Ordering::Relaxed);
        if republish_interval < Duration::from_secs(60 * 60) {
            tracing::warn!(
                "User keys republisher interval is less than 60min. This is strongly discouraged "
            );
        }

        let pkarr_builder = context.pkarr_builder.clone();
        let handle = tokio::spawn(async move {
            tokio::time::sleep(initial_delay).await;
            Self::run_loop(db, republish_interval, pkarr_builder).await
        });
        Self {
            handle: Some(handle),
        }
    }

    // Get all user public keys from the database.
    async fn get_all_user_keys(db: DB) -> Result<Vec<PublicKey>, heed::Error> {
    async fn get_all_user_keys(db: LmDB) -> Result<Vec<PublicKey>, heed::Error> {
        let rtxn = db.env.read_txn()?;
        let users = db.tables.users.iter(&rtxn)?;

@@ -83,17 +85,20 @@ impl UserKeysRepublisher {
    ///
    /// - If the database cannot be read, an error is returned.
    /// - If the pkarr keys cannot be republished, an error is returned.
    async fn republish_keys_once(db: DB) -> Result<MultiRepublishResult, UserKeysRepublisherError> {
    async fn republish_keys_once(
        db: LmDB,
        pkarr_builder: pkarr::ClientBuilder,
    ) -> Result<MultiRepublishResult, UserKeysRepublisherError> {
        let keys = Self::get_all_user_keys(db)
            .await
            .map_err(UserKeysRepublisherError::DB)?;
        if keys.is_empty() {
            tracing::info!("No user keys to republish.");
            tracing::debug!("No user keys to republish.");
            return Ok(MultiRepublishResult::new(HashMap::new()));
        }
        let mut settings = RepublisherSettings::default();
        settings.republish_condition(|_| true);
        let republisher = MultiRepublisher::new_with_settings(settings, None);
        let republisher = MultiRepublisher::new_with_settings(settings, Some(pkarr_builder));
        // TODO: Only publish if user points to this home server.
        let results = republisher
            .run(keys, 12)
@@ -103,13 +108,13 @@ impl UserKeysRepublisher {
    }

    /// Internal run loop that publishes all user pkarr keys to the Mainline DHT continuously.
    async fn run_loop(db: DB, republish_interval: Duration) {
    async fn run_loop(db: LmDB, republish_interval: Duration, pkarr_builder: pkarr::ClientBuilder) {
        let mut interval = interval(republish_interval);
        loop {
            interval.tick().await;
            let start = Instant::now();
            tracing::info!("Republishing user keys...");
            let result = match Self::republish_keys_once(db.clone()).await {
            tracing::debug!("Republishing user keys...");
            let result = match Self::republish_keys_once(db.clone(), pkarr_builder.clone()).await {
                Ok(result) => result,
                Err(e) => {
                    tracing::error!("Error republishing user keys: {:?}", e);
@@ -121,7 +126,7 @@ impl UserKeysRepublisher {
                continue;
            }
            if result.missing().is_empty() {
                tracing::info!(
                tracing::debug!(
                    "Republished {} user keys within {:.1}s. {} success, {} missing, {} failed.",
                    result.len(),
                    elapsed.as_secs_f32(),
@@ -141,46 +146,25 @@ impl UserKeysRepublisher {
            }
        }
    }
}

    /// Stop the user keys republisher.
    #[allow(dead_code)]
    pub async fn stop(&mut self) {
        let mut lock = self.handle.write().await;

        if let Some(handle) = lock.take() {
impl Drop for UserKeysRepublisher {
    fn drop(&mut self) {
        if let Some(handle) = self.handle.take() {
            handle.abort();
            *lock = None;
            self.is_running.store(false, Ordering::Relaxed);
        }
    }

    /// Stops the republisher synchronously.
    #[allow(dead_code)]
    pub fn stop_sync(&mut self) {
        let mut lock = self.handle.blocking_write();

        if let Some(handle) = lock.take() {
            handle.abort();
            *lock = None;
            self.is_running.store(false, Ordering::Relaxed);
        }
    }
}

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use crate::core::user_keys_republisher::UserKeysRepublisher;
    use crate::persistence::lmdb::tables::users::User;
    use crate::persistence::lmdb::LmDB;
    use pkarr::Keypair;
    use tokio::time::Instant;

    use crate::core::{
        database::{tables::users::User, DB},
        user_keys_republisher::UserKeysRepublisher,
    };

    async fn init_db_with_users(count: usize) -> DB {
        let db = DB::test();
    async fn init_db_with_users(count: usize) -> LmDB {
        let db = LmDB::test();
        let mut wtxn = db.env.write_txn().unwrap();
        for _ in 0..count {
            let user = User::new();
@@ -195,24 +179,13 @@ mod tests {
    #[tokio::test]
    async fn test_republish_keys_once() {
        let db = init_db_with_users(10).await;
        let result = UserKeysRepublisher::republish_keys_once(db).await.unwrap();
        let pkarr_builder = pkarr::ClientBuilder::default();
        let result = UserKeysRepublisher::republish_keys_once(db, pkarr_builder)
            .await
            .unwrap();
        assert_eq!(result.len(), 10);
        assert_eq!(result.success().len(), 0);
        assert_eq!(result.missing().len(), 10);
        assert_eq!(result.publishing_failed().len(), 0);
    }

    /// Test that the republisher stops instantly.
    #[tokio::test]
    async fn start_and_stop() {
        let mut republisher =
            UserKeysRepublisher::new(init_db_with_users(1000).await, Duration::from_secs(1));
        let start = Instant::now();
        republisher.run().await;
        assert!(republisher.handle.read().await.is_some());
        republisher.stop().await;
        let elapsed = start.elapsed();
        assert!(elapsed < Duration::from_secs(1));
        assert!(republisher.handle.read().await.is_none());
    }
}
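A similar hedged sketch for the reworked republisher above; the delay value is illustrative and, as before, AppContext::test() is assumed to be available to crate-internal code.

// Sketch only: mirrors the start_delayed() signature introduced in this hunk.
async fn republisher_sketch() {
    let context = AppContext::test();
    // pkdns.user_keys_republisher_interval == 0 disables the task entirely; values below
    // MIN_REPUBLISH_INTERVAL (30 minutes) are clamped upwards with a warning.
    let republisher = UserKeysRepublisher::start_delayed(&context, Duration::from_secs(60));
    // As with PeriodicBackup, dropping the handle aborts the background task.
    drop(republisher);
}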
@@ -1,11 +1,11 @@
//!
//! Configuration file for the homeserver.
//!
use super::{domain_port::DomainPort, SignupMode};
use super::{domain_port::DomainPort, Domain, SignupMode};
use serde::{Deserialize, Serialize};
use std::{
    fmt::Debug,
    net::{IpAddr, Ipv4Addr, SocketAddr},
    net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV4},
    num::NonZeroU64,
    str::FromStr,
};
@@ -22,37 +22,59 @@ pub const DEFAULT_CONFIG: &str = include_str!("../../config.default.toml");
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct PkdnsToml {
    /// The public IP address and port of the server to be advertised in the DHT.
    #[serde(default = "default_public_socket")]
    pub public_socket: SocketAddr,
    #[serde(default = "default_public_ip")]
    pub public_ip: IpAddr,

    /// The public port of the Pubky TLS Drive API in case it's different from the listening port.
    #[serde(default)]
    pub public_pubky_tls_port: Option<u16>,

    /// The public port of the regular http API in case it's different from the listening port.
    #[serde(default)]
    pub public_icann_http_port: Option<u16>,

    /// Optional domain name of the regular http API.
    #[serde(default)]
    pub icann_domain: Option<Domain>,

    /// The interval at which the user keys are republished in the DHT.
    /// 0 means disabled.
    #[serde(default = "default_user_keys_republisher_interval")]
    pub user_keys_republisher_interval: NonZeroU64,
    pub user_keys_republisher_interval: u64,

    /// The list of bootstrap nodes for the DHT. If None, the default pkarr bootstrap nodes will be used.
    #[serde(default)]
    #[serde(default = "default_dht_bootstrap_nodes")]
    pub dht_bootstrap_nodes: Option<Vec<DomainPort>>,

    /// The list of relay nodes for the DHT. If None, the default pkarr relay nodes will be used.
    #[serde(default)]
    /// The list of relay nodes for the DHT.
    /// If not set and no bootstrap nodes are set, the default pkarr relay nodes will be used.
    #[serde(default = "default_dht_relay_nodes")]
    pub dht_relay_nodes: Option<Vec<Url>>,

    /// The request timeout for the DHT. If None, the default pkarr request timeout will be used.
    #[serde(default = "default_dht_request_timeout")]
    pub dht_request_timeout_ms: Option<NonZeroU64>,
}

fn default_public_socket() -> SocketAddr {
    let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
    let port = 6286;
    SocketAddr::from((ip, port))
fn default_public_ip() -> IpAddr {
    IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))
}

fn default_user_keys_republisher_interval() -> NonZeroU64 {
fn default_dht_bootstrap_nodes() -> Option<Vec<DomainPort>> {
    None
}

fn default_dht_relay_nodes() -> Option<Vec<Url>> {
    None
}

fn default_dht_request_timeout() -> Option<NonZeroU64> {
    None
}

fn default_user_keys_republisher_interval() -> u64 {
    // 4 hours
    NonZeroU64::new(14400).expect("14400 is a valid non-zero u64")
}

fn default_pubky_drive_listen_socket() -> SocketAddr {
    let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
    let port = 6287;
    SocketAddr::from((ip, port))
    14400
}

/// All configuration related to file drive
@@ -64,15 +86,14 @@ pub struct DriveToml {
    /// The port on which the regular http API will listen.
    #[serde(default = "default_icann_drive_listen_socket")]
    pub icann_listen_socket: SocketAddr,
    /// Optional domain name of the regular http API.
    #[serde(default)]
    pub icann_domain: Option<String>,
}

fn default_pubky_drive_listen_socket() -> SocketAddr {
    SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 6287))
}

fn default_icann_drive_listen_socket() -> SocketAddr {
    let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
    let port = 6286;
    SocketAddr::from((ip, port))
    SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 6286))
}

/// All configuration related to the admin API
@@ -91,9 +112,7 @@ fn default_admin_password() -> String {
}

fn default_admin_listen_socket() -> SocketAddr {
    let ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1));
    let port = 6288;
    SocketAddr::from((ip, port))
    SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 6288))
}

/// All configuration related to the admin API
@@ -151,9 +170,12 @@ impl ConfigToml {
        DEFAULT_CONFIG
            .split("\n")
            .map(|line| {
                let is_not_commented_variable =
                    !line.starts_with("#") && !line.starts_with("[") && line.is_empty();
                if is_not_commented_variable {
                let is_title = line.starts_with("[");
                let is_comment = line.starts_with("#");
                let is_empty = line.is_empty();

                let is_other = !is_title && !is_comment && !is_empty;
                if is_other {
                    format!("# {}", line)
                } else {
                    line.to_string()
@@ -162,11 +184,25 @@ impl ConfigToml {
            .collect::<Vec<String>>()
            .join("\n")
    }

    /// Returns a default config appropriate for testing.
    pub fn test() -> Self {
        let mut config = Self::default();
        // For easy testing, we set the signup mode to open.
        config.general.signup_mode = SignupMode::Open;
        // Set the listen ports to randomly available ports so they don't conflict.
        config.drive.icann_listen_socket = SocketAddr::from(([127, 0, 0, 1], 0));
        config.drive.pubky_listen_socket = SocketAddr::from(([127, 0, 0, 1], 0));
        config.admin.listen_socket = SocketAddr::from(([127, 0, 0, 1], 0));
        config.pkdns.icann_domain =
            Some(Domain::from_str("localhost").expect("localhost is a valid domain"));
        config
    }
}

impl Default for ConfigToml {
    fn default() -> Self {
        DEFAULT_CONFIG
        ConfigToml::default_string()
            .parse()
            .expect("Default config is always valid")
    }
@@ -190,11 +226,12 @@ mod tests {
        let c: ConfigToml = ConfigToml::default();

        assert_eq!(c.general.signup_mode, SignupMode::TokenRequired);
        assert_eq!(c.general.lmdb_backup_interval_s, 0);
        assert_eq!(
            c.drive.icann_listen_socket,
            default_icann_drive_listen_socket()
        );
        assert_eq!(c.drive.icann_domain, Some("example.com".to_string()));
        assert_eq!(c.pkdns.icann_domain, None);

        assert_eq!(
            c.drive.pubky_listen_socket,
@@ -205,27 +242,17 @@ mod tests {
        assert_eq!(c.admin.admin_password, default_admin_password());

        // Verify pkdns config
        assert_eq!(c.pkdns.public_socket, default_public_socket());
        assert_eq!(c.pkdns.public_ip, default_public_ip());
        assert_eq!(c.pkdns.public_pubky_tls_port, None);
        assert_eq!(c.pkdns.public_icann_http_port, None);
        assert_eq!(
            c.pkdns.user_keys_republisher_interval,
            default_user_keys_republisher_interval()
        );
        assert_eq!(
            c.pkdns.dht_bootstrap_nodes,
            Some(vec![
                DomainPort::from_str("router.bittorrent.com:6881").unwrap(),
                DomainPort::from_str("dht.transmissionbt.com:6881").unwrap(),
                DomainPort::from_str("dht.libtorrent.org:25401").unwrap(),
                DomainPort::from_str("relay.pkarr.org:6881").unwrap(),
            ])
        );
        assert_eq!(
            c.pkdns.dht_relay_nodes,
            Some(vec![
                Url::parse("https://relay.pkarr.org").unwrap(),
                Url::parse("https://pkarr.pubky.org").unwrap(),
            ])
        );
        assert_eq!(c.pkdns.dht_bootstrap_nodes, None);
        assert_eq!(c.pkdns.dht_relay_nodes, None);

        assert_eq!(c.pkdns.dht_request_timeout_ms, None);
    }

    #[test]
@@ -233,6 +260,10 @@ mod tests {
        // Sanity check that the default config is valid
        // even when the variables are commented out.
        let s = ConfigToml::default_string();
        let _: ConfigToml = s.parse().expect("Failed to parse config");
        let parsed: ConfigToml = s.parse().expect("Failed to parse config");
        assert_eq!(
            parsed.pkdns.dht_bootstrap_nodes, None,
            "dht_bootstrap_nodes not commented out"
        );
    }
}
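To make the config plumbing above concrete, a small sketch that only restates what the defaults test asserts; nothing here goes beyond the hunk itself.

// Sketch only: uses the constructors shown in this diff.
fn config_sketch() {
    // default_string() comments out every line of config.default.toml that is neither a
    // section header nor a comment, so parsing it falls back to the serde defaults.
    let defaults: ConfigToml = ConfigToml::default_string()
        .parse()
        .expect("default config is always valid");
    assert_eq!(defaults.pkdns.dht_bootstrap_nodes, None);

    // test() opens signups and binds every listener to port 0 to avoid port conflicts.
    let test_config = ConfigToml::test();
    assert_eq!(test_config.admin.listen_socket.port(), 0);
}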
@@ -1,227 +1,25 @@
use super::ConfigToml;
use std::{io::Write, os::unix::fs::PermissionsExt, path::PathBuf};
use dyn_clone::DynClone;
use std::path::Path;

/// The data directory for the homeserver.
/// A trait for the data directory.
/// Used to abstract the data directory from the rest of the code.
///
/// This is the directory that will store the homeservers data.
///
#[derive(Debug, Clone)]
pub struct DataDir {
    expanded_path: PathBuf,
}

impl DataDir {
    /// Creates a new data directory.
    /// `path` will be expanded to the home directory if it starts with "~".
    pub fn new(path: PathBuf) -> Self {
        Self {
            expanded_path: Self::expand_home_dir(path),
        }
    }

    /// Returns the full path to the data directory.
    pub fn path(&self) -> &PathBuf {
        &self.expanded_path
    }

    /// Expands the data directory to the home directory if it starts with "~".
    /// Return the full path to the data directory.
    fn expand_home_dir(path: PathBuf) -> PathBuf {
        let path = match path.to_str() {
            Some(path) => path,
            None => {
                // Path not valid utf-8 so we can't expand it.
                return path;
            }
        };

        if path.starts_with("~/") {
            if let Some(home) = dirs::home_dir() {
                let without_home = path.strip_prefix("~/").expect("Invalid ~ prefix");
                let joined = home.join(without_home);
                return joined;
            }
        }
        PathBuf::from(path)
    }

/// To create a real dir and a test dir.
pub trait DataDir: std::fmt::Debug + DynClone + Send + Sync {
    /// Returns the path to the data directory.
    fn path(&self) -> &Path;
    /// Makes sure the data directory exists.
    /// Create the directory if it doesn't exist.
    pub fn ensure_data_dir_exists_and_is_writable(&self) -> anyhow::Result<()> {
        std::fs::create_dir_all(&self.expanded_path)?;

        // Check if we can write to the data directory
        let test_file_path = self
            .expanded_path
            .join("test_write_f2d560932f9b437fa9ef430ba436d611"); // random file name to not conflict with anything
        std::fs::write(test_file_path.clone(), b"test")
            .map_err(|err| anyhow::anyhow!("Failed to write to data directory: {}", err))?;
        std::fs::remove_file(test_file_path)
            .map_err(|err| anyhow::anyhow!("Failed to write to data directory: {}", err))?;
        Ok(())
    }

    /// Returns the config file path in this directory.
    pub fn get_config_file_path(&self) -> PathBuf {
        self.expanded_path.join("config.toml")
    }
    fn ensure_data_dir_exists_and_is_writable(&self) -> anyhow::Result<()>;

    /// Reads the config file from the data directory.
    /// Creates a default config file if it doesn't exist.
    pub fn read_or_create_config_file(&self) -> anyhow::Result<ConfigToml> {
        let config_file_path = self.get_config_file_path();
        if !config_file_path.exists() {
            self.write_default_config_file()?;
        }
        let config = ConfigToml::from_file(config_file_path)?;
        Ok(config)
    }
    fn read_or_create_config_file(&self) -> anyhow::Result<ConfigToml>;

    fn write_default_config_file(&self) -> anyhow::Result<()> {
        let config_string = ConfigToml::default_string();
        let config_file_path = self.get_config_file_path();
        let mut config_file = std::fs::File::create(config_file_path)?;
        config_file.write_all(config_string.as_bytes())?;
        Ok(())
    }

    /// Returns the path to the secret file.
    pub fn get_secret_file_path(&self) -> PathBuf {
        self.expanded_path.join("secret")
    }

    /// Reads the secret file. Creates a new secret file if it doesn't exist.
    pub fn read_or_create_keypair(&self) -> anyhow::Result<pkarr::Keypair> {
        let secret_file_path = self.get_secret_file_path();
        if !secret_file_path.exists() {
            // Create a new secret file
            let keypair = pkarr::Keypair::random();
            let secret = keypair.secret_key();
            let hex_string = hex::encode(secret);
            std::fs::write(secret_file_path.clone(), hex_string)?;
            std::fs::set_permissions(&secret_file_path, std::fs::Permissions::from_mode(0o600))?;
            tracing::info!("Secret file created at {}", secret_file_path.display());
        }
        // Read the secret file
        let secret = std::fs::read(secret_file_path)?;
        let secret_bytes = hex::decode(secret)?;
        let secret_bytes: [u8; 32] = secret_bytes.try_into().map_err(|_| {
            anyhow::anyhow!("Failed to convert secret bytes into array of length 32")
        })?;
        let keypair = pkarr::Keypair::from_secret_key(&secret_bytes);
        Ok(keypair)
    }
    /// Reads the secret file from the data directory.
    /// Creates a new secret file if it doesn't exist.
    fn read_or_create_keypair(&self) -> anyhow::Result<pkarr::Keypair>;
}

impl Default for DataDir {
    fn default() -> Self {
        Self::new(PathBuf::from("~/.pubky"))
    }
}

#[cfg(test)]
mod tests {
    use std::io::Write;

    use super::*;
    use tempfile::TempDir;

    /// Test that the home directory is expanded correctly.
    #[test]
    pub fn test_expand_home_dir() {
        let data_dir = DataDir::new(PathBuf::from("~/.pubky"));
        let homedir = dirs::home_dir().unwrap();
        let expanded_path = homedir.join(".pubky");
        assert_eq!(data_dir.expanded_path, expanded_path);
    }

    /// Test that the data directory is created if it doesn't exist.
    #[test]
    pub fn test_ensure_data_dir_exists_and_is_accessible() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = DataDir::new(test_path.clone());

        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();
        assert!(test_path.exists());
        // temp_dir will be automatically cleaned up when it goes out of scope
    }

    #[test]
    pub fn test_get_default_config_file_path_exists() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = DataDir::new(test_path.clone());
        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();
        let config_file_path = data_dir.get_config_file_path();
        assert!(!config_file_path.exists()); // Should not exist yet

        let mut config_file = std::fs::File::create(config_file_path.clone()).unwrap();
        config_file.write_all(b"test").unwrap();
        assert!(config_file_path.exists()); // Should exist now
        // temp_dir will be automatically cleaned up when it goes out of scope
    }

    #[test]
    pub fn test_read_or_create_config_file() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = DataDir::new(test_path.clone());
        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();
        let _ = data_dir.read_or_create_config_file().unwrap(); // Should create a default config file
        assert!(data_dir.get_config_file_path().exists());

        let _ = data_dir.read_or_create_config_file().unwrap(); // Should read the existing file
        assert!(data_dir.get_config_file_path().exists());
    }

    #[test]
    pub fn test_read_or_create_config_file_dont_override_existing_file() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = DataDir::new(test_path.clone());
        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();

        // Write a broken config file
        let config_file_path = data_dir.get_config_file_path();
        std::fs::write(config_file_path.clone(), b"test").unwrap();
        assert!(config_file_path.exists()); // Should exist now

        // Try to read the config file and fail because config is broken
        let read_result = data_dir.read_or_create_config_file();
        assert!(read_result.is_err());

        // Make sure the broken config file is still there
        let content = std::fs::read_to_string(config_file_path).unwrap();
        assert_eq!(content, "test");
    }

    #[test]
    pub fn test_create_secret_file() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = DataDir::new(test_path.clone());
        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();

        let _ = data_dir.read_or_create_keypair().unwrap();
        assert!(data_dir.get_secret_file_path().exists());
    }

    #[test]
    pub fn test_dont_override_existing_secret_file() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = DataDir::new(test_path.clone());
        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();

        // Create a secret file
        let secret_file_path = data_dir.get_secret_file_path();
        std::fs::write(secret_file_path.clone(), b"test").unwrap();

        let result = data_dir.read_or_create_keypair();
        assert!(result.is_err());
        assert!(data_dir.get_secret_file_path().exists());
        let content = std::fs::read_to_string(secret_file_path).unwrap();
        assert_eq!(content, "test");
    }
}
dyn_clone::clone_trait_object!(DataDir);
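What the new trait buys, sketched as a hypothetical consumer: code that used to take the concrete DataDir struct can now accept any implementation, persistent or mock; the function name is illustrative.

// Sketch only: a hypothetical consumer of the DataDir trait defined above.
fn load_identity(dir: &dyn DataDir) -> anyhow::Result<(ConfigToml, pkarr::Keypair)> {
    dir.ensure_data_dir_exists_and_is_writable()?;
    let config = dir.read_or_create_config_file()?;
    let keypair = dir.read_or_create_keypair()?;
    Ok((config, keypair))
}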
@@ -27,6 +27,12 @@ impl Domain {
    }
}

impl Default for Domain {
    fn default() -> Self {
        Self("localhost".to_string())
    }
}

impl FromStr for Domain {
    type Err = anyhow::Error;

@@ -1,12 +1,12 @@
use serde::{Deserialize, Serialize};
use std::fmt;
use std::fmt::{self, Debug};
use std::result::Result;
use std::str::FromStr;

use super::domain::Domain;

/// A domain and port pair.
#[derive(Debug, Clone, PartialEq)]
#[derive(Clone, PartialEq)]
pub struct DomainPort {
    /// The domain name.
    pub domain: Domain,
@@ -14,6 +14,12 @@ pub struct DomainPort {
    pub port: u16,
}

impl Debug for DomainPort {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}:{}", self.domain, self.port)
    }
}

impl TryFrom<&str> for DomainPort {
    type Error = anyhow::Error;

pubky-homeserver/src/data_directory/mock_data_dir.rs (new file, 65 lines)
@@ -0,0 +1,65 @@
use std::path::Path;

use super::DataDir;

/// Mock data directory for testing.
///
/// It uses a temporary directory to store all data in. The data is removed as soon as the object is dropped.
///
#[derive(Debug, Clone)]
pub struct MockDataDir {
    pub(crate) temp_dir: std::sync::Arc<tempfile::TempDir>,
    /// The configuration for the homeserver.
    pub config_toml: super::ConfigToml,
    /// The keypair for the homeserver.
    pub keypair: pkarr::Keypair,
}

impl MockDataDir {
    /// Create a new DataDirMock with a temporary directory.
    ///
    /// If keypair is not provided, a new one will be generated.
    pub fn new(
        config_toml: super::ConfigToml,
        keypair: Option<pkarr::Keypair>,
    ) -> anyhow::Result<Self> {
        let keypair = keypair.unwrap_or_else(pkarr::Keypair::random);
        Ok(Self {
            temp_dir: std::sync::Arc::new(tempfile::TempDir::new()?),
            config_toml,
            keypair,
        })
    }

    /// Creates a mock data directory with a config and keypair appropriate for testing.
    pub fn test() -> Self {
        let config = super::ConfigToml::test();
        let keypair = pkarr::Keypair::from_secret_key(&[0; 32]);
        Self::new(config, Some(keypair)).expect("failed to create MockDataDir")
    }
}

impl Default for MockDataDir {
    fn default() -> Self {
        Self::test()
    }
}

impl DataDir for MockDataDir {
    fn path(&self) -> &Path {
        self.temp_dir.path()
    }

    fn ensure_data_dir_exists_and_is_writable(&self) -> anyhow::Result<()> {
        Ok(()) // Always ok because this is validated by the tempfile crate.
    }

    fn read_or_create_config_file(&self) -> anyhow::Result<super::ConfigToml> {
        Ok(self.config_toml.clone())
    }

    fn read_or_create_keypair(&self) -> anyhow::Result<pkarr::Keypair> {
        Ok(self.keypair.clone())
    }
}
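A hedged example of how MockDataDir is expected to be used in tests; the assertions only restate behavior visible in the impl above, and the test name is illustrative.

// Sketch only: exercises the mock exactly as defined above.
#[test]
fn mock_data_dir_sketch() {
    let mock = MockDataDir::test(); // deterministic keypair derived from [0u8; 32]
    // The mock hands back its in-memory config instead of touching a config.toml on disk.
    let config = mock.read_or_create_config_file().unwrap();
    assert_eq!(config.general.signup_mode, SignupMode::Open);
    // All data lives in a TempDir that is removed when `mock` is dropped.
    assert!(mock.path().exists());
}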
@@ -2,10 +2,14 @@ mod config_toml;
mod data_dir;
mod domain;
mod domain_port;
mod mock_data_dir;
mod persistent_data_dir;
mod signup_mode;

pub use config_toml::{ConfigReadError, ConfigToml};
pub use data_dir::DataDir;
pub use domain::Domain;
pub use domain_port::DomainPort;
pub use mock_data_dir::MockDataDir;
pub use persistent_data_dir::PersistentDataDir;
pub use signup_mode::SignupMode;

pubky-homeserver/src/data_directory/persistent_data_dir.rs (new file, 233 lines)
@@ -0,0 +1,233 @@
use super::{data_dir::DataDir, ConfigToml};
use std::{
    io::Write,
    os::unix::fs::PermissionsExt,
    path::{Path, PathBuf},
};

/// The data directory for the homeserver.
///
/// This is the directory that will store the homeservers data.
///
#[derive(Debug, Clone)]
pub struct PersistentDataDir {
    expanded_path: PathBuf,
}

impl PersistentDataDir {
    /// Creates a new data directory.
    /// `path` will be expanded to the home directory if it starts with "~".
    pub fn new(path: PathBuf) -> Self {
        Self {
            expanded_path: Self::expand_home_dir(path),
        }
    }

    /// Expands the data directory to the home directory if it starts with "~".
    /// Return the full path to the data directory.
    fn expand_home_dir(path: PathBuf) -> PathBuf {
        let path = match path.to_str() {
            Some(path) => path,
            None => {
                // Path not valid utf-8 so we can't expand it.
                return path;
            }
        };

        if path.starts_with("~/") {
            if let Some(home) = dirs::home_dir() {
                let without_home = path.strip_prefix("~/").expect("Invalid ~ prefix");
                let joined = home.join(without_home);
                return joined;
            }
        }
        PathBuf::from(path)
    }

    /// Returns the config file path in this directory.
    pub fn get_config_file_path(&self) -> PathBuf {
        self.expanded_path.join("config.toml")
    }

    fn write_default_config_file(&self) -> anyhow::Result<()> {
        let config_string = ConfigToml::default_string();
        let config_file_path = self.get_config_file_path();
        let mut config_file = std::fs::File::create(config_file_path)?;
        config_file.write_all(config_string.as_bytes())?;
        Ok(())
    }

    /// Returns the path to the secret file.
    pub fn get_secret_file_path(&self) -> PathBuf {
        self.expanded_path.join("secret")
    }
}

impl Default for PersistentDataDir {
    fn default() -> Self {
        Self::new(PathBuf::from("~/.pubky"))
    }
}

impl DataDir for PersistentDataDir {
    /// Returns the full path to the data directory.
    fn path(&self) -> &Path {
        &self.expanded_path
    }

    /// Makes sure the data directory exists.
    /// Create the directory if it doesn't exist.
    fn ensure_data_dir_exists_and_is_writable(&self) -> anyhow::Result<()> {
        std::fs::create_dir_all(&self.expanded_path)?;

        // Check if we can write to the data directory
        let test_file_path = self
            .expanded_path
            .join("test_write_f2d560932f9b437fa9ef430ba436d611"); // random file name to not conflict with anything
        std::fs::write(test_file_path.clone(), b"test")
            .map_err(|err| anyhow::anyhow!("Failed to write to data directory: {}", err))?;
        std::fs::remove_file(test_file_path)
            .map_err(|err| anyhow::anyhow!("Failed to write to data directory: {}", err))?;
        Ok(())
    }

    /// Reads the config file from the data directory.
    /// Creates a default config file if it doesn't exist.
    fn read_or_create_config_file(&self) -> anyhow::Result<ConfigToml> {
        let config_file_path = self.get_config_file_path();
        if !config_file_path.exists() {
            self.write_default_config_file()?;
        }
        let config = ConfigToml::from_file(config_file_path)?;
        Ok(config)
    }

    /// Reads the secret file. Creates a new secret file if it doesn't exist.
    fn read_or_create_keypair(&self) -> anyhow::Result<pkarr::Keypair> {
        let secret_file_path = self.get_secret_file_path();
        if !secret_file_path.exists() {
            // Create a new secret file
            let keypair = pkarr::Keypair::random();
            let secret = keypair.secret_key();
            let hex_string = hex::encode(secret);
            std::fs::write(secret_file_path.clone(), hex_string)?;
            std::fs::set_permissions(&secret_file_path, std::fs::Permissions::from_mode(0o600))?;
            tracing::info!("Secret file created at {}", secret_file_path.display());
        }
        // Read the secret file
        let secret = std::fs::read(secret_file_path)?;
        let secret_bytes = hex::decode(secret)?;
        let secret_bytes: [u8; 32] = secret_bytes.try_into().map_err(|_| {
            anyhow::anyhow!("Failed to convert secret bytes into array of length 32")
        })?;
        let keypair = pkarr::Keypair::from_secret_key(&secret_bytes);
        Ok(keypair)
    }
}

#[cfg(test)]
mod tests {
    use std::io::Write;

    use super::*;
    use tempfile::TempDir;

    /// Test that the home directory is expanded correctly.
    #[test]
    pub fn test_expand_home_dir() {
        let data_dir = PersistentDataDir::new(PathBuf::from("~/.pubky"));
        let homedir = dirs::home_dir().unwrap();
        let expanded_path = homedir.join(".pubky");
        assert_eq!(data_dir.expanded_path, expanded_path);
    }

    /// Test that the data directory is created if it doesn't exist.
    #[test]
    pub fn test_ensure_data_dir_exists_and_is_accessible() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = PersistentDataDir::new(test_path.clone());

        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();
        assert!(test_path.exists());
        // temp_dir will be automatically cleaned up when it goes out of scope
    }

    #[test]
    pub fn test_get_default_config_file_path_exists() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = PersistentDataDir::new(test_path.clone());
        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();
        let config_file_path = data_dir.get_config_file_path();
        assert!(!config_file_path.exists()); // Should not exist yet

        let mut config_file = std::fs::File::create(config_file_path.clone()).unwrap();
        config_file.write_all(b"test").unwrap();
        assert!(config_file_path.exists()); // Should exist now
        // temp_dir will be automatically cleaned up when it goes out of scope
    }

    #[test]
    pub fn test_read_or_create_config_file() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = PersistentDataDir::new(test_path.clone());
        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();
        let _ = data_dir.read_or_create_config_file().unwrap(); // Should create a default config file
        assert!(data_dir.get_config_file_path().exists());

        let _ = data_dir.read_or_create_config_file().unwrap(); // Should read the existing file
        assert!(data_dir.get_config_file_path().exists());
    }

    #[test]
    pub fn test_read_or_create_config_file_dont_override_existing_file() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = PersistentDataDir::new(test_path.clone());
        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();

        // Write a broken config file
        let config_file_path = data_dir.get_config_file_path();
        std::fs::write(config_file_path.clone(), b"test").unwrap();
        assert!(config_file_path.exists()); // Should exist now

        // Try to read the config file and fail because config is broken
        let read_result = data_dir.read_or_create_config_file();
        assert!(read_result.is_err());

        // Make sure the broken config file is still there
        let content = std::fs::read_to_string(config_file_path).unwrap();
        assert_eq!(content, "test");
    }

    #[test]
    pub fn test_create_secret_file() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = PersistentDataDir::new(test_path.clone());
        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();

        let _ = data_dir.read_or_create_keypair().unwrap();
        assert!(data_dir.get_secret_file_path().exists());
    }

    #[test]
    pub fn test_dont_override_existing_secret_file() {
        let temp_dir = TempDir::new().unwrap();
        let test_path = temp_dir.path().join(".pubky");
        let data_dir = PersistentDataDir::new(test_path.clone());
        data_dir.ensure_data_dir_exists_and_is_writable().unwrap();

        // Create a secret file
        let secret_file_path = data_dir.get_secret_file_path();
        std::fs::write(secret_file_path.clone(), b"test").unwrap();

        let result = data_dir.read_or_create_keypair();
        assert!(result.is_err());
        assert!(data_dir.get_secret_file_path().exists());
        let content = std::fs::read_to_string(secret_file_path).unwrap();
        assert_eq!(content, "test");
    }
}
@@ -1,90 +0,0 @@
//! Http server around the HomeserverCore

use std::{
    net::{SocketAddr, TcpListener},
    sync::Arc,
};

use anyhow::Result;
use axum::Router;
use axum_server::{
    tls_rustls::{RustlsAcceptor, RustlsConfig},
    Handle,
};
use futures_util::TryFutureExt;
use pkarr::Keypair;

use super::IoConfig;

#[derive(Debug)]
pub struct HttpServers {
    /// Handle for the HTTP server
    pub(crate) http_handle: Handle,
    /// Handle for the HTTPS server using Pkarr TLS
    pub(crate) https_handle: Handle,

    http_address: SocketAddr,
    https_address: SocketAddr,
}

impl HttpServers {
    pub async fn run(keypair: &Keypair, config: &IoConfig, router: &Router) -> Result<Self> {
        let http_listener = TcpListener::bind(SocketAddr::from(([0, 0, 0, 0], config.http_port)))?;
        let http_address = http_listener.local_addr()?;

        let http_handle = Handle::new();

        tokio::spawn(
            axum_server::from_tcp(http_listener)
                .handle(http_handle.clone())
                .serve(
                    router
                        .clone()
                        .into_make_service_with_connect_info::<SocketAddr>(),
                )
                .map_err(|error| tracing::error!(?error, "Homeserver http server error")),
        );

        let https_listener =
            TcpListener::bind(SocketAddr::from(([0, 0, 0, 0], config.https_port)))?;
        let https_address = https_listener.local_addr()?;

        let https_handle = Handle::new();

        tokio::spawn(
            axum_server::from_tcp(https_listener)
                .acceptor(RustlsAcceptor::new(RustlsConfig::from_config(Arc::new(
                    keypair.to_rpk_rustls_server_config(),
                ))))
                .handle(https_handle.clone())
                .serve(
                    router
                        .clone()
                        .into_make_service_with_connect_info::<SocketAddr>(),
                )
                .map_err(|error| tracing::error!(?error, "Homeserver https server error")),
        );

        Ok(Self {
            http_handle,
            https_handle,

            http_address,
            https_address,
        })
    }

    pub fn http_address(&self) -> SocketAddr {
        self.http_address
    }

    pub fn https_address(&self) -> SocketAddr {
        self.https_address
    }

    /// Shutdown all HTTP servers.
    pub fn shutdown(&self) {
        self.http_handle.shutdown();
        self.https_handle.shutdown();
    }
}
@@ -1,171 +0,0 @@
//! Pkarr related task

use anyhow::Result;
use pkarr::errors::PublishError;
use pkarr::{dns::rdata::SVCB, Keypair, SignedPacket};

use tokio::sync::Mutex;
use tokio::task::JoinHandle;
use tokio::time::{interval, Duration};

use super::IoConfig;

/// Republishes the homeserver's pkarr packet to the DHT every hour.
#[derive(Debug)]
pub struct HomeserverKeyRepublisher {
    client: pkarr::Client,
    signed_packet: SignedPacket,
    republish_task: Mutex<Option<JoinHandle<()>>>,
}

impl HomeserverKeyRepublisher {
    pub fn new(
        keypair: &Keypair,
        config: &IoConfig,
        https_port: u16,
        http_port: u16,
    ) -> Result<Self> {
        let mut builder = pkarr::Client::builder();

        if let Some(bootstrap) = &config.bootstrap {
            builder.bootstrap(bootstrap);
        }

        if let Some(request_timeout) = config.dht_request_timeout {
            builder.request_timeout(request_timeout);
        }

        let client = builder.build()?;

        let signed_packet = create_signed_packet(keypair, config, https_port, http_port)?;

        Ok(Self {
            client,
            signed_packet,
            republish_task: Mutex::new(None),
        })
    }

    async fn publish_once(
        client: &pkarr::Client,
        signed_packet: &SignedPacket,
    ) -> Result<(), PublishError> {
        let res = client.publish(signed_packet, None).await;
        if let Err(e) = &res {
            tracing::warn!(
                "Failed to publish the homeserver's pkarr packet to the DHT: {}",
                e
            );
        } else {
            tracing::info!("Published the homeserver's pkarr packet to the DHT.");
        }
        res
    }

    /// Start the periodic republish task which will republish the server packet to the DHT every hour.
    ///
    /// # Errors
    /// - Throws an error if the initial publish fails.
    /// - Throws an error if the periodic republish task is already running.
    pub async fn start_periodic_republish(&self) -> anyhow::Result<()> {
        let mut task_guard = self.republish_task.lock().await;

        if task_guard.is_some() {
            return Err(anyhow::anyhow!(
                "Periodic republish task is already running"
            ));
        }

        // Publish once to make sure the packet is published to the DHT before this
        // function returns.
        // Throws an error if the packet is not published to the DHT.
        Self::publish_once(&self.client, &self.signed_packet).await?;

        // Start the periodic republish task.
        let client = self.client.clone();
        let signed_packet = self.signed_packet.clone();
        let handle = tokio::spawn(async move {
            let mut interval = interval(Duration::from_secs(60 * 60)); // 1 hour in seconds
            interval.tick().await; // This ticks immediatly. Wait for first interval before starting the loop.
            loop {
                interval.tick().await;
                let _ = Self::publish_once(&client, &signed_packet).await;
            }
        });

        *task_guard = Some(handle);
        Ok(())
    }

    /// Stop the periodic republish task.
    pub async fn stop_periodic_republish(&self) {
        let mut task_guard = self.republish_task.lock().await;

        if let Some(handle) = task_guard.take() {
            handle.abort();
        }
    }
}

pub fn create_signed_packet(
    keypair: &Keypair,
    config: &IoConfig,
    https_port: u16,
    http_port: u16,
) -> Result<SignedPacket> {
    // TODO: Try to resolve first before publishing.

    let mut signed_packet_builder = SignedPacket::builder();

    let mut svcb = SVCB::new(0, ".".try_into()?);

    // Set the public Ip or localhost
    signed_packet_builder = signed_packet_builder.address(
        ".".try_into()
            .expect(". is valid domain and therefore always succeeds"),
        config
            .public_addr
            .map(|addr| addr.ip())
            .unwrap_or("127.0.0.1".parse().expect("localhost is valid ip")),
        60 * 60,
    );

    // Set the public port or the local https_port
    svcb.set_port(
        config
            .public_addr
            .map(|addr| addr.port())
            .unwrap_or(https_port),
    );

    signed_packet_builder = signed_packet_builder.https(
        ".".try_into()
            .expect(". is valid domain and therefore always succeeds"),
        svcb,
        60 * 60,
    );

    // Set low priority https record for legacy browsers support
    if let Some(ref domain) = config.domain {
        let mut svcb = SVCB::new(10, ".".try_into()?);

        let http_port_be_bytes = http_port.to_be_bytes();
        if domain == "localhost" {
            svcb.set_param(
                pubky_common::constants::reserved_param_keys::HTTP_PORT,
                &http_port_be_bytes,
            )?;
        }

        svcb.target = domain.as_str().try_into()?;

        signed_packet_builder = signed_packet_builder.https(
            ".".try_into()
                .expect(". is valid domain and therefore always succeeds"),
            svcb,
            60 * 60,
        );
    }

    Ok(signed_packet_builder.build(keypair)?)
}
|
||||
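Not part of the commit, but for orientation: a minimal sketch of how a client can check the records that `create_signed_packet` publishes, using only pkarr calls that appear elsewhere in this diff (`no_default_network`, `bootstrap`, `resolve`, `resolve_https_endpoint`). The function name and the `bootstrap` parameter are illustrative assumptions.

```rust
// Minimal sketch (not from the commit): verify the records that
// `create_signed_packet` publishes. Assumes `bootstrap` is a testnet's
// bootstrap list and the homeserver has already published its packet.
async fn check_homeserver_records(
    bootstrap: &[String],
    homeserver: &pkarr::PublicKey,
) -> anyhow::Result<()> {
    let mut builder = pkarr::Client::builder();
    builder.no_default_network(); // stay inside the local testnet
    builder.bootstrap(bootstrap);
    let client = builder.build()?;

    // The signed packet itself (root A record plus the HTTPS/SVCB records).
    let packet = client.resolve(homeserver).await;
    anyhow::ensure!(packet.is_some(), "homeserver packet not found on the DHT");

    // The HTTPS endpoint derived from the priority-0 SVCB record.
    let _endpoint = client
        .resolve_https_endpoint(homeserver.to_string().as_str())
        .await
        .map_err(|e| anyhow::anyhow!("endpoint resolution failed: {e}"))?;
    Ok(())
}
```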
@@ -1,5 +0,0 @@
mod http;
mod key_republisher;
mod server;

pub use server::*;
@@ -1,284 +0,0 @@
use std::{net::SocketAddr, path::PathBuf, time::Duration};

use super::http::HttpServers;
use super::key_republisher::HomeserverKeyRepublisher;
use crate::{data_directory::DataDir, SignupMode};
use anyhow::Result;
use pkarr::{Keypair, PublicKey};
use tracing::info;

use crate::core::{AdminConfig, CoreConfig, HomeserverCore};

pub const DEFAULT_HTTP_PORT: u16 = 6286;
pub const DEFAULT_HTTPS_PORT: u16 = 6287;

#[derive(Debug, Default)]
/// Builder for [Homeserver].
pub struct HomeserverBuilder(Config);

impl HomeserverBuilder {
    /// Set the Homeserver's keypair
    pub fn keypair(&mut self, keypair: Keypair) -> &mut Self {
        self.0.keypair = keypair;

        self
    }

    /// Configure the storage path of the Homeserver
    pub fn storage(&mut self, storage: PathBuf) -> &mut Self {
        self.0.core.storage = storage;

        self
    }

    /// Configure the DHT bootstrapping nodes that this Homeserver is connected to.
    pub fn bootstrap(&mut self, bootstrap: &[String]) -> &mut Self {
        self.0.io.bootstrap = Some(bootstrap.to_vec());

        self
    }

    /// Configure Pkarr relays used by this Homeserver
    pub fn relays(&mut self, _relays: &[url::Url]) -> &mut Self {
        // TODO: make it not a noop if we are going to support relays in homeservers.

        self
    }

    /// Set the public domain of this Homeserver
    pub fn domain(&mut self, domain: &str) -> &mut Self {
        self.0.io.domain = Some(domain.to_string());

        self
    }

    /// Set the signup mode to "token_required". Only to be used on ::test()
    /// homeserver for the specific case of testing signup token flow.
    pub fn close_signups(&mut self) -> &mut Self {
        self.0.admin.signup_mode = SignupMode::TokenRequired;

        self
    }

    /// Set a password to protect admin endpoints
    pub fn admin_password(&mut self, password: String) -> &mut Self {
        self.0.admin.password = Some(password);

        self
    }

    /// Run a Homeserver
    ///
    /// # Safety
    /// Homeserver uses LMDB, [opening][heed::EnvOpenOptions::open] which is marked unsafe,
    /// because of the possible Undefined Behavior (UB) if the lock file is broken.
    pub async unsafe fn run(self) -> Result<Homeserver> {
        Homeserver::run(self.0).await
    }
}

#[derive(Debug)]
/// Homeserver Core + I/O (http server and pkarr publishing).
pub struct Homeserver {
    http_servers: HttpServers,
    keypair: Keypair,
    key_republisher: HomeserverKeyRepublisher,
}

impl Homeserver {
    /// Returns a Homeserver builder.
    pub fn builder() -> HomeserverBuilder {
        HomeserverBuilder::default()
    }

    /// Run the homeserver with configurations from a data directory.
    pub async fn run_with_data_dir(dir_path: PathBuf) -> Result<Self> {
        let data_dir = DataDir::new(dir_path);
        let config = Config::try_from(data_dir)?;
        unsafe { Self::run(config) }.await
    }

    /// Run a Homeserver with configurations suitable for ephemeral tests.
    pub async fn run_test(bootstrap: &[String]) -> Result<Self> {
        let config = Config::test(bootstrap);

        unsafe { Self::run(config) }.await
    }

    /// Run a Homeserver with configurations suitable for ephemeral tests
    /// that require signup tokens.
    pub async fn run_test_with_signup_tokens(bootstrap: &[String]) -> Result<Self> {
        let mut config = Config::test(bootstrap);
        config.admin.signup_mode = SignupMode::TokenRequired;

        unsafe { Self::run(config) }.await
    }

    /// Run a Homeserver
    ///
    /// # Safety
    /// Homeserver uses LMDB, [opening][heed::EnvOpenOptions::open] which is marked unsafe,
    /// because of the possible Undefined Behavior (UB) if the lock file is broken.
    async unsafe fn run(config: Config) -> Result<Self> {
        tracing::debug!(?config, "Running homeserver with configurations");

        let keypair = config.keypair;

        let core = unsafe { HomeserverCore::new(config.core, config.admin)? };

        let http_servers = HttpServers::run(&keypair, &config.io, &core.router).await?;

        let dht_republisher = HomeserverKeyRepublisher::new(
            &keypair,
            &config.io,
            http_servers.https_address().port(),
            http_servers.http_address().port(),
        )?;
        dht_republisher.start_periodic_republish().await?;
        info!(
            "Homeserver listening on http://localhost:{}",
            http_servers.http_address().port()
        );
        info!("Homeserver listening on https://{}", keypair.public_key());

        Ok(Self {
            http_servers,
            keypair,
            key_republisher: dht_republisher,
        })
    }

    // === Getters ===

    /// Returns the public_key of this server.
    pub fn public_key(&self) -> PublicKey {
        self.keypair.public_key()
    }

    /// Returns the `https://<server public key>` url
    pub fn url(&self) -> url::Url {
        url::Url::parse(&format!("https://{}", self.public_key())).expect("valid url")
    }

    // === Public Methods ===

    /// Send a shutdown signal to all open resources
    pub async fn shutdown(&self) {
        self.http_servers.shutdown();
        self.key_republisher.stop_periodic_republish().await;
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IoConfig {
    pub http_port: u16,
    pub https_port: u16,
    pub public_addr: Option<SocketAddr>,
    pub domain: Option<String>,

    /// Bootstrapping DHT nodes.
    ///
    /// Helpful to run the server locally or in testnet.
    pub bootstrap: Option<Vec<String>>,
    pub dht_request_timeout: Option<Duration>,
}

impl Default for IoConfig {
    fn default() -> Self {
        IoConfig {
            https_port: DEFAULT_HTTPS_PORT,
            http_port: DEFAULT_HTTP_PORT,
            public_addr: None,
            domain: None,
            bootstrap: None,
            dht_request_timeout: None,
        }
    }
}

/// Server configuration
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Config {
    /// Server keypair.
    ///
    /// Defaults to a random keypair.
    pub keypair: Keypair,
    pub io: IoConfig,
    pub core: CoreConfig,
    pub admin: AdminConfig,
}

impl Config {
    /// Create test configurations
    pub fn test(bootstrap: &[String]) -> Self {
        let bootstrap = Some(bootstrap.to_vec());

        Self {
            io: IoConfig {
                bootstrap,
                http_port: 0,
                https_port: 0,
                ..Default::default()
            },
            core: CoreConfig::test(),
            admin: AdminConfig::test(),
            ..Default::default()
        }
    }
}

impl Default for Config {
    fn default() -> Self {
        Self {
            keypair: Keypair::random(),
            io: IoConfig::default(),
            core: CoreConfig::default(),
            admin: AdminConfig::default(),
        }
    }
}

impl TryFrom<DataDir> for Config {
    type Error = anyhow::Error;

    fn try_from(dir: DataDir) -> Result<Self, Self::Error> {
        dir.ensure_data_dir_exists_and_is_writable()?;
        let conf = dir.read_or_create_config_file()?;
        let keypair = dir.read_or_create_keypair()?;

        // TODO: Needs refactoring of the Homeserver Config struct. I am not doing
        // it yet because I am concentrating on the config currently.
        let io = IoConfig {
            http_port: conf.drive.icann_listen_socket.port(),
            https_port: conf.drive.pubky_listen_socket.port(),
            domain: conf.drive.icann_domain,
            public_addr: Some(conf.pkdns.public_socket),
            ..Default::default()
        };

        let core = CoreConfig {
            storage: dir.path().join("data/lmdb"),
            user_keys_republisher_interval: Some(Duration::from_secs(
                conf.pkdns.user_keys_republisher_interval.into(),
            )),
            lmdb_backup_interval: if conf.general.lmdb_backup_interval_s == 0 {
                None
            } else {
                Some(Duration::from_secs(conf.general.lmdb_backup_interval_s))
            },
            ..Default::default()
        };

        let admin = AdminConfig {
            signup_mode: conf.general.signup_mode,
            password: Some(conf.admin.admin_password),
        };

        Ok(Config {
            keypair,
            io,
            core,
            admin,
        })
    }
}
pubky-homeserver/src/homeserver_suite/mod.rs (Normal file, 3 lines)
@@ -0,0 +1,3 @@
mod suite;

pub use suite::*;

pubky-homeserver/src/homeserver_suite/suite.rs (Normal file, 88 lines)
@@ -0,0 +1,88 @@
use crate::admin::{AdminServer, AdminServerBuildError};
use crate::core::{HomeserverBuildError, HomeserverCore};
use crate::MockDataDir;
use crate::{app_context::AppContext, data_directory::PersistentDataDir};
use anyhow::Result;
use pkarr::PublicKey;
use std::path::PathBuf;

/// Errors that can occur when building a `HomeserverSuite`.
#[derive(thiserror::Error, Debug)]
pub enum HomeserverSuiteBuildError {
    /// Failed to build the homeserver.
    #[error("Failed to build homeserver: {0}")]
    Homeserver(HomeserverBuildError),
    /// Failed to build the admin server.
    #[error("Failed to build admin server: {0}")]
    Admin(AdminServerBuildError),
}

/// Homeserver with all bells and whistles.
/// Core + Admin server.
///
/// When dropped, the homeserver will stop.
pub struct HomeserverSuite {
    context: AppContext,
    #[allow(dead_code)] // Keep this alive. When dropped, the homeserver will stop.
    core: HomeserverCore,
    #[allow(dead_code)] // Keep this alive. When dropped, the admin server will stop.
    admin_server: AdminServer,
}

impl HomeserverSuite {
    /// Run the homeserver with configurations from a data directory path.
    pub async fn start_with_persistent_data_dir_path(dir_path: PathBuf) -> Result<Self> {
        let data_dir = PersistentDataDir::new(dir_path);
        let context = AppContext::try_from(data_dir)?;
        Self::start(context).await
    }

    /// Run the homeserver with configurations from a data directory.
    pub async fn start_with_persistent_data_dir(dir: PersistentDataDir) -> Result<Self> {
        let context = AppContext::try_from(dir)?;
        Self::start(context).await
    }

    /// Run the homeserver with configurations from a data directory mock.
    pub async fn start_with_mock_data_dir(dir: MockDataDir) -> Result<Self> {
        let context = AppContext::try_from(dir)?;
        Self::start(context).await
    }

    /// Run a Homeserver
    pub async fn start(context: AppContext) -> Result<Self> {
        let core = HomeserverCore::new(context.clone()).await?;
        let admin_server = AdminServer::start(&context).await?;

        Ok(Self {
            context,
            core,
            admin_server,
        })
    }

    /// Get the core of the homeserver suite.
    pub fn core(&self) -> &HomeserverCore {
        &self.core
    }

    /// Get the admin server of the homeserver suite.
    pub fn admin(&self) -> &AdminServer {
        &self.admin_server
    }

    /// Returns the public_key of this server.
    pub fn public_key(&self) -> PublicKey {
        self.context.keypair.public_key()
    }

    /// Returns the `pubky://<server public key>` url
    pub fn pubky_url(&self) -> url::Url {
        url::Url::parse(&format!("pubky://{}", self.public_key())).expect("valid url")
    }

    /// Returns the ICANN `http://` url of this server.
    pub fn icann_http_url(&self) -> url::Url {
        url::Url::parse(&self.core.icann_http_url()).expect("valid url")
    }
}
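As orientation rather than part of the diff, here is a minimal sketch of starting the new suite from a mocked data directory. It only uses constructors shown in this commit (`ConfigToml::test`, `MockDataDir::new`, `HomeserverSuite::start_with_mock_data_dir`); the random keypair is an example value, and in a real test the testnet wires DHT bootstrap nodes into the config, as the testnet code further down does.

```rust
use pkarr::Keypair;
use pubky_homeserver::{ConfigToml, HomeserverSuite, MockDataDir};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Ephemeral test configuration + throwaway keypair (example values only).
    let mock = MockDataDir::new(ConfigToml::test(), Some(Keypair::random()))?;

    // Starting the suite brings up the core and the admin server;
    // dropping it shuts everything down again.
    let suite = HomeserverSuite::start_with_mock_data_dir(mock).await?;

    println!("public key: {}", suite.public_key());
    println!("ICANN HTTP: {}", suite.icann_http_url());
    Ok(())
}
```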
@@ -1,14 +1,26 @@
#![doc = include_str!("../README.md")]
//! Homeserver for Pubky
//!
//! This crate provides a homeserver for Pubky. It is responsible for handling user authentication,
//! authorization, and other core functionalities.
//!
//! This crate is part of the Pubky project.
//!
//! For more information, see the [Pubky project](https://github.com/pubky/pubky).

#![deny(missing_docs)]
#![deny(rustdoc::broken_intra_doc_links)]
#![cfg_attr(any(), deny(clippy::unwrap_used))]

mod admin;
mod app_context;
mod constants;
mod core;
mod data_directory;
mod homeserver;
mod homeserver_suite;
mod persistence;

pub use admin::{AdminServer, AdminServerBuildError};
pub use app_context::{AppContext, AppContextConversionError};
pub use core::{HomeserverBuildError, HomeserverCore};
pub use data_directory::*;
pub use homeserver::Homeserver;
pub use homeserver::HomeserverBuilder;
pub use homeserver_suite::{HomeserverSuite, HomeserverSuiteBuildError};
@@ -2,7 +2,7 @@ use std::path::PathBuf;

use anyhow::Result;
use clap::Parser;
use pubky_homeserver::Homeserver;
use pubky_homeserver::HomeserverSuite;
use tracing_subscriber::EnvFilter;

fn default_config_dir_path() -> PathBuf {
@@ -38,14 +38,30 @@ async fn main() -> Result<()> {
        .init();

    tracing::debug!("Using data dir: {}", args.data_dir.display());
    let server = HomeserverSuite::start_with_persistent_data_dir_path(args.data_dir).await?;

    let server = Homeserver::run_with_data_dir(args.data_dir).await?;
    tracing::info!(
        "Homeserver HTTP listening on {}",
        server.core().icann_http_url()
    );

    tracing::info!(
        "Homeserver Pubky TLS listening on {}",
        server.core().pubky_tls_dns_url(),
    );
    tracing::info!(
        "Homeserver Pubky TLS listening on {}",
        server.core().pubky_tls_ip_url()
    );
    tracing::info!(
        "Admin server listening on http://{}",
        server.admin().listen_socket()
    );

    tracing::info!("Press Ctrl+C to stop the Homeserver");
    tokio::signal::ctrl_c().await?;

    tracing::info!("Shutting down Homeserver");

    server.shutdown().await;

    Ok(())
}
pubky-homeserver/src/persistence/lmdb/db.rs (Normal file, 77 lines)
@@ -0,0 +1,77 @@
use super::tables::{Tables, TABLES_COUNT};
use heed::{Env, EnvOpenOptions};
use std::sync::Arc;
use std::{fs, path::PathBuf};

use super::migrations;

pub const DEFAULT_MAP_SIZE: usize = 10995116277760; // 10TB (not = disk-space used)

#[derive(Debug, Clone)]
pub struct LmDB {
    pub(crate) env: Env,
    pub(crate) tables: Tables,
    pub(crate) buffers_dir: PathBuf,
    pub(crate) max_chunk_size: usize,
    // Only used for testing purposes to keep the testdir alive.
    #[allow(dead_code)]
    test_dir: Option<Arc<tempfile::TempDir>>,
}

impl LmDB {
    /// # Safety
    /// DB uses LMDB, [opening][heed::EnvOpenOptions::open] which is marked unsafe,
    /// because of the possible Undefined Behavior (UB) if the lock file is broken.
    pub unsafe fn open(main_dir: PathBuf) -> anyhow::Result<Self> {
        let buffers_dir = main_dir.join("buffers");

        // Cleanup buffers.
        let _ = fs::remove_dir(&buffers_dir);
        fs::create_dir_all(&buffers_dir)?;

        let env = unsafe {
            EnvOpenOptions::new()
                .max_dbs(TABLES_COUNT)
                .map_size(DEFAULT_MAP_SIZE)
                .open(&main_dir)
        }?;

        let tables = migrations::run(&env)?;

        let db = LmDB {
            env,
            tables,
            buffers_dir,
            max_chunk_size: Self::max_chunk_size(),
            test_dir: None,
        };

        Ok(db)
    }

    // Create an ephemeral database for testing purposes.
    #[cfg(test)]
    pub fn test() -> LmDB {
        // Create a temporary directory for the test.
        let temp_dir = tempfile::tempdir().unwrap();
        let mut lmdb = unsafe { LmDB::open(PathBuf::from(temp_dir.path())).unwrap() };
        lmdb.test_dir = Some(Arc::new(temp_dir)); // Keep the directory alive for the duration of the test. As soon as all LmDB instances are dropped, the directory will be deleted automatically.

        lmdb
    }

    /// Calculate the optimal chunk size:
    /// - <https://lmdb.readthedocs.io/en/release/#storage-efficiency-limits>
    /// - <https://github.com/lmdbjava/benchmarks/blob/master/results/20160710/README.md#test-2-determine-24816-kb-byte-values>
    fn max_chunk_size() -> usize {
        let page_size = page_size::get();

        // - 16 bytes Header per page (LMDB)
        // - Each page has to contain 2 records
        // - 8 bytes per record (LMDB) (empirically, it seems to be 10 not 8)
        // - 12 bytes key:
        //   - timestamp : 8 bytes
        //   - chunk index: 4 bytes
        ((page_size - 16) / 2) - (8 + 2) - 12
    }
}
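To make the chunk-size arithmetic above concrete, here is the same formula evaluated for a 4096-byte page (a common OS page size; the concrete value is an assumption for illustration, not something the commit fixes):

```rust
// Worked example of the max_chunk_size() formula above, assuming a 4 KiB page.
fn main() {
    let page_size: usize = 4096; // typical OS page size (assumption)
    let usable_per_record = (page_size - 16) / 2; // 16-byte page header, 2 records per page -> 2040
    let record_overhead = 8 + 2; // per-record overhead (empirically ~10 bytes)
    let key_size = 12; // 8-byte timestamp + 4-byte chunk index
    let max_chunk = usable_per_record - record_overhead - key_size;
    assert_eq!(max_chunk, 2018); // maximum chunk payload per record, in bytes
    println!("max chunk size: {max_chunk} bytes");
}
```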
@@ -1,6 +1,6 @@
use heed::{Env, RwTxn};

use crate::core::database::tables::{blobs, entries, events, sessions, signup_tokens, users};
use crate::persistence::lmdb::tables::{blobs, entries, events, sessions, signup_tokens, users};

pub fn run(env: &Env, wtxn: &mut RwTxn) -> anyhow::Result<()> {
    let _: users::UsersTable = env.create_database(wtxn, Some(users::USERS_TABLE))?;
@@ -1,9 +1,9 @@
use super::tables::Tables;
use heed::Env;

mod m0;

use super::tables::Tables;

/// Run the migrations.
pub fn run(env: &Env) -> anyhow::Result<Tables> {
    let mut wtxn = env.write_txn()?;
@@ -2,4 +2,4 @@
mod db;
mod migrations;
pub mod tables;
pub use db::DB;
pub use db::LmDB;
@@ -1,6 +1,6 @@
use heed::{types::Bytes, Database, RoTxn};

use crate::core::database::DB;
use super::super::LmDB;

use super::entries::Entry;

@@ -9,7 +9,7 @@ pub type BlobsTable = Database<Bytes, Bytes>;

pub const BLOBS_TABLE: &str = "blobs";

impl DB {
impl LmDB {
    pub fn read_entry_content<'txn>(
        &self,
        rtxn: &'txn RoTxn,
@@ -18,7 +18,9 @@ use pubky_common::{
    timestamp::Timestamp,
};

use crate::core::database::DB;
use crate::constants::{DEFAULT_LIST_LIMIT, DEFAULT_MAX_LIST_LIMIT};

use super::super::LmDB;

use super::events::Event;

@@ -27,7 +29,7 @@ pub type EntriesTable = Database<Str, Bytes>;

pub const ENTRIES_TABLE: &str = "entries";

impl DB {
impl LmDB {
    /// Write an entry by an author at a given path.
    ///
    /// The path has to start with a forward slash `/`
@@ -127,8 +129,8 @@ impl DB {
        let mut results = Vec::new();

        let limit = limit
            .unwrap_or(self.config().default_list_limit)
            .min(self.config().max_list_limit);
            .unwrap_or(DEFAULT_LIST_LIMIT)
            .min(DEFAULT_MAX_LIST_LIMIT);

        // TODO: make this more performant than split and allocations?

@@ -303,7 +305,7 @@ impl Entry {

    pub fn read_content<'txn>(
        &self,
        db: &'txn DB,
        db: &'txn LmDB,
        rtxn: &'txn RoTxn,
    ) -> anyhow::Result<impl Iterator<Item = Result<&'txn [u8], heed::Error>> + 'txn> {
        db.read_entry_content(rtxn, self)
@@ -323,7 +325,7 @@ impl Entry {
}

pub struct EntryWriter<'db> {
    db: &'db DB,
    db: &'db LmDB,
    buffer: File,
    hasher: Hasher,
    buffer_path: PathBuf,
@@ -333,7 +335,7 @@ pub struct EntryWriter<'db> {
}

impl<'db> EntryWriter<'db> {
    pub fn new(db: &'db DB, public_key: &PublicKey, path: &str) -> anyhow::Result<Self> {
    pub fn new(db: &'db LmDB, public_key: &PublicKey, path: &str) -> anyhow::Result<Self> {
        let hasher = Hasher::new();

        let timestamp = Timestamp::now();
@@ -453,11 +455,11 @@ mod tests {
    use bytes::Bytes;
    use pkarr::Keypair;

    use super::DB;
    use super::LmDB;

    #[tokio::test]
    async fn entries() -> anyhow::Result<()> {
        let mut db = DB::test();
        let mut db = LmDB::test();

        let keypair = Keypair::random();
        let public_key = keypair.public_key();
@@ -499,7 +501,7 @@ mod tests {

    #[tokio::test]
    async fn chunked_entry() -> anyhow::Result<()> {
        let mut db = DB::test();
        let mut db = LmDB::test();

        let keypair = Keypair::random();
        let public_key = keypair.public_key();
@@ -10,7 +10,9 @@ use heed::{
use postcard::{from_bytes, to_allocvec};
use serde::{Deserialize, Serialize};

use crate::core::database::DB;
use crate::constants::{DEFAULT_LIST_LIMIT, DEFAULT_MAX_LIST_LIMIT};

use super::super::LmDB;

/// Event [pkarr::Timestamp] base32 => Encoded event.
pub type EventsTable = Database<Str, Bytes>;
@@ -59,7 +61,7 @@ impl Event {
    }
}

impl DB {
impl LmDB {
    /// Returns a list of events formatted as `<OP> <url>`.
    ///
    /// - limit defaults to [crate::config::DEFAULT_LIST_LIMIT] and capped by [crate::config::DEFAULT_MAX_LIST_LIMIT]
@@ -72,8 +74,8 @@ impl DB {
        let txn = self.env.read_txn()?;

        let limit = limit
            .unwrap_or(self.config().default_list_limit)
            .min(self.config().max_list_limit);
            .unwrap_or(DEFAULT_LIST_LIMIT)
            .min(DEFAULT_MAX_LIST_LIMIT);

        let cursor = cursor.unwrap_or("0000000000000".to_string());

@@ -4,14 +4,14 @@ use heed::{
};
use pubky_common::session::Session;

use crate::core::database::DB;
use super::super::LmDB;

/// session secret => Session.
pub type SessionsTable = Database<Str, Bytes>;

pub const SESSIONS_TABLE: &str = "sessions";

impl DB {
impl LmDB {
    pub fn get_session(&self, session_secret: &str) -> anyhow::Result<Option<Session>> {
        let rtxn = self.env.read_txn()?;

@@ -1,4 +1,4 @@
use crate::core::database::DB;
use super::super::LmDB;
use base32::{encode, Alphabet};
use heed::{
    types::{Bytes, Str},
@@ -53,7 +53,7 @@ impl SignupToken {
    }
}

impl DB {
impl LmDB {
    pub fn generate_signup_token(&mut self) -> anyhow::Result<String> {
        let signup_token = SignupToken::random();
        let mut wtxn = self.env.write_txn()?;

pubky-homeserver/src/persistence/mod.rs (Normal file, 1 line)
@@ -0,0 +1 @@
pub mod lmdb;
@@ -12,15 +12,18 @@ categories = ["web-programming", "authentication", "cryptography"]

[dependencies]
anyhow = "1.0.95"
http-relay = "0.2.0"
mainline = "5.2.0"
pkarr-relay = "0.5.7"
pkarr-relay = { workspace = true }
tokio = { version = "1.43.0", features = ["full"] }
tracing-subscriber = "0.3.19"
url = "2.5.4"

pubky = { version = "0.4.2", path = "../pubky-client" }
pubky-common = { version = "0.3.1", path = "../pubky-common" }
pubky-homeserver = { version = "0.1.2", path = "../pubky-homeserver" }
pubky-homeserver = { version = "0.1.2", path = "../pubky-homeserver"}
http-relay = { version = "0.2.0", path = "../http-relay"}
tempfile = "3.19.1"
tracing = "0.1.41"
pkarr = {workspace = true}
mainline = {workspace = true}
@@ -9,21 +9,22 @@ All resources are ephemeral, databases are in the operating system's temporary directories.

### Inline testing

```rust
use pubky_testnet::Testnet;
use pubky_testnet::EphemeralTestnet;

#[tokio::main]
async fn main () {
    // Run a new testnet.
    let testnet = Testnet::run().await.unwrap();

    // Optionally create and run a Homeserver.
    let server = testnet.run_homeserver().await.unwrap();

    // Optionally create and run an HTTP Relay.
    let http_relay = testnet.run_http_relay().await.unwrap();
    // Run a new testnet. This creates a test dht,
    // a homeserver, and a http relay.
    let testnet = EphemeralTestnet::start().await.unwrap();

    // Create a Pubky Client from the testnet.
    let client = testnet.client_builder().build().unwrap();
    let client = testnet.pubky_client_builder().build().unwrap();

    // Use the homeserver
    let homeserver = testnet.homeserver_suite();

    // Use the relay
    let http_relay = testnet.http_relay();
}
```
pubky-testnet/src/ephemeral_testnet.rs (Normal file, 73 lines)
@@ -0,0 +1,73 @@
use http_relay::HttpRelay;

use crate::Testnet;

/// A simple testnet with random ports assigned for all components.
///
/// - A local DHT with bootstrapping nodes.
/// - An http relay.
/// - A homeserver whose address is hardcoded to `8pinxxgqs41n4aididenw5apqp1urfmzdztr8jt4abrkdn435ewo`.
/// - An admin server for the homeserver.
pub struct EphemeralTestnet {
    /// Inner flexible testnet.
    pub flexible_testnet: Testnet,
}

impl EphemeralTestnet {
    /// Run a new simple testnet.
    pub async fn start() -> anyhow::Result<Self> {
        let mut me = Self {
            flexible_testnet: Testnet::new().await?,
        };

        me.flexible_testnet.create_http_relay().await?;
        me.flexible_testnet.create_homeserver_suite().await?;

        Ok(me)
    }

    /// Create a new pubky client builder.
    pub fn pubky_client_builder(&self) -> pubky::ClientBuilder {
        self.flexible_testnet.pubky_client_builder()
    }

    /// Create a new pkarr client builder.
    pub fn pkarr_client_builder(&self) -> pkarr::ClientBuilder {
        self.flexible_testnet.pkarr_client_builder()
    }

    /// Get the homeserver in the testnet.
    pub fn homeserver_suite(&self) -> &pubky_homeserver::HomeserverSuite {
        self.flexible_testnet
            .homeservers
            .first()
            .expect("homeservers should be non-empty")
    }

    /// Get the http relay in the testnet.
    pub fn http_relay(&self) -> &HttpRelay {
        self.flexible_testnet
            .http_relays
            .first()
            .expect("http relays should be non-empty")
    }
}

#[cfg(test)]
mod test {
    use super::*;

    /// Test that two testnets can be run in a row.
    /// This is to prevent the case where the testnet is not cleaned up properly.
    /// For example, if the port is not released after the testnet is stopped.
    #[tokio::test]
    async fn test_two_testnet_in_a_row() {
        {
            let _ = EphemeralTestnet::start().await.unwrap();
        }

        {
            let _ = EphemeralTestnet::start().await.unwrap();
        }
    }
}
@@ -1,3 +1,12 @@
// Actual testnet exposed in the library
mod ephemeral_testnet;
mod static_testnet;
mod testnet;
pub use ephemeral_testnet::EphemeralTestnet;
pub use static_testnet::StaticTestnet;
pub use testnet::Testnet;

// Re-export the core crates
pub use pubky;
pub use pubky_common;
pub use pubky_homeserver;
@@ -1,16 +1,35 @@
use anyhow::Result;
use pubky_testnet::Testnet;
use pubky_testnet::StaticTestnet;

#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt()
        .with_env_filter(
            "pubky_homeserver=debug,http_relay=debug,pkarr_relay=debug,tower_http=debug"
            "pubky_homeserver=debug,http_relay=debug,pkarr_relay=info,tower_http=debug,pubky_testnet=debug"
                .to_string(),
        )
        .init();

    Testnet::run_with_hardcoded_configurations().await?;
    let testnet = StaticTestnet::start().await?;
    tracing::info!("Testnet running");
    tracing::info!(
        "DHT Bootstrap Nodes: {}",
        testnet.bootstrap_nodes().join(", ")
    );
    tracing::info!("Pkarr Relay: {}", testnet.pkarr_relay().local_url());
    tracing::info!("Http Relay: {}", testnet.http_relay().local_url());
    tracing::info!(
        "Homeserver ICANN HTTP: {}",
        testnet.homeserver_suite().icann_http_url()
    );
    tracing::info!(
        "Homeserver Pubky HTTPS: {}",
        testnet.homeserver_suite().pubky_url()
    );
    tracing::info!(
        "Homeserver admin: http://{}",
        testnet.homeserver_suite().admin().listen_socket()
    );

    tokio::signal::ctrl_c().await?;
pubky-testnet/src/static_testnet.rs (Normal file, 172 lines)
@@ -0,0 +1,172 @@
use std::{
    net::{IpAddr, Ipv4Addr, SocketAddr},
    str::FromStr,
};

use crate::Testnet;
use http_relay::HttpRelay;
use pubky_homeserver::{ConfigToml, DomainPort, HomeserverSuite, MockDataDir, SignupMode};

/// A simple testnet with
///
/// - A local DHT with a bootstrap node on port 6881.
/// - A pkarr relay on port 15411.
/// - An http relay on port 15412.
/// - A homeserver whose address is hardcoded to `8pinxxgqs41n4aididenw5apqp1urfmzdztr8jt4abrkdn435ewo`.
/// - An admin server for the homeserver on port 6288.
pub struct StaticTestnet {
    /// Inner flexible testnet.
    pub flexible_testnet: Testnet,
    #[allow(dead_code)]
    fixed_bootstrap_node: Option<pkarr::mainline::Dht>, // Keep alive
    #[allow(dead_code)]
    temp_dirs: Vec<tempfile::TempDir>, // Keep temp dirs alive for the pkarr relay
}

impl StaticTestnet {
    /// Run a new simple testnet.
    pub async fn start() -> anyhow::Result<Self> {
        let testnet = Testnet::new().await?;
        let fixed_boostrap = Self::run_fixed_boostrap_node(&testnet.dht.bootstrap)
            .map_err(|e| anyhow::anyhow!("Failed to run bootstrap node on port 6881: {}", e))?;

        let mut testnet = Self {
            flexible_testnet: testnet,
            fixed_bootstrap_node: fixed_boostrap,
            temp_dirs: vec![],
        };

        testnet
            .run_fixed_pkarr_relays()
            .await
            .map_err(|e| anyhow::anyhow!("Failed to run pkarr relay on port 15411: {}", e))?;
        testnet
            .run_fixed_http_relay()
            .await
            .map_err(|e| anyhow::anyhow!("Failed to run http relay on port 15412: {}", e))?;
        testnet
            .run_fixed_homeserver()
            .await
            .map_err(|e| anyhow::anyhow!("Failed to run homeserver on port 6288: {}", e))?;

        Ok(testnet)
    }

    /// Create a new pubky client builder.
    pub fn pubky_client_builder(&self) -> pubky::ClientBuilder {
        self.flexible_testnet.pubky_client_builder()
    }

    /// Create a new pkarr client builder.
    pub fn pkarr_client_builder(&self) -> pkarr::ClientBuilder {
        self.flexible_testnet.pkarr_client_builder()
    }

    /// Get the homeserver in the testnet.
    pub fn homeserver_suite(&self) -> &pubky_homeserver::HomeserverSuite {
        self.flexible_testnet
            .homeservers
            .first()
            .expect("homeservers should be non-empty")
    }

    /// Get the http relay in the testnet.
    pub fn http_relay(&self) -> &HttpRelay {
        self.flexible_testnet
            .http_relays
            .first()
            .expect("http relays should be non-empty")
    }

    /// Get the pkarr relay in the testnet.
    pub fn pkarr_relay(&self) -> &pkarr_relay::Relay {
        self.flexible_testnet
            .pkarr_relays
            .first()
            .expect("pkarr relays should be non-empty")
    }

    /// Get the bootstrap nodes for the testnet.
    pub fn bootstrap_nodes(&self) -> Vec<String> {
        let mut nodes = vec![];
        if let Some(dht) = &self.fixed_bootstrap_node {
            nodes.push(dht.info().local_addr().to_string());
        }
        nodes.extend(
            self.flexible_testnet
                .dht_bootstrap_nodes()
                .iter()
                .map(|node| node.to_string()),
        );
        nodes
    }

    /// Create a fixed bootstrap node on port 6881 if it is not already running.
    /// If it's already running, return None.
    fn run_fixed_boostrap_node(
        other_bootstrap_nodes: &[String],
    ) -> anyhow::Result<Option<pkarr::mainline::Dht>> {
        if other_bootstrap_nodes
            .iter()
            .any(|node| node.contains("6881"))
        {
            return Ok(None);
        }
        let mut builder = pkarr::mainline::Dht::builder();
        builder
            .port(6881)
            .bootstrap(other_bootstrap_nodes)
            .server_mode();
        let dht = builder.build()?;
        Ok(Some(dht))
    }

    /// Creates a fixed pkarr relay on port 15411 with a temporary storage directory.
    async fn run_fixed_pkarr_relays(&mut self) -> anyhow::Result<()> {
        let temp_dir = tempfile::tempdir()?; // Gets cleaned up automatically when it drops
        let mut builder = pkarr_relay::Relay::builder();
        builder
            .http_port(15411)
            .storage(temp_dir.path().to_path_buf())
            .disable_rate_limiter()
            .pkarr(|pkarr| {
                pkarr.no_default_network();
                pkarr.bootstrap(&self.flexible_testnet.dht.bootstrap)
            });
        let relay = unsafe { builder.run() }.await?;
        self.flexible_testnet.pkarr_relays.push(relay);
        self.temp_dirs.push(temp_dir);
        Ok(())
    }

    /// Creates a fixed http relay on port 15412.
    async fn run_fixed_http_relay(&mut self) -> anyhow::Result<()> {
        let relay = HttpRelay::builder()
            .http_port(15412) // Fixed port 15412
            .run()
            .await?;
        self.flexible_testnet.http_relays.push(relay);
        Ok(())
    }

    /// Creates a fixed homeserver on ports 6286 (ICANN HTTP), 6287 (Pubky TLS) and 6288 (admin).
    async fn run_fixed_homeserver(&mut self) -> anyhow::Result<()> {
        let keypair = pkarr::Keypair::from_secret_key(&[0; 32]);
        let mut config = ConfigToml::test();
        config.pkdns.dht_bootstrap_nodes = Some(
            self.bootstrap_nodes()
                .iter()
                .map(|node| DomainPort::from_str(node).unwrap())
                .collect(),
        );
        config.general.signup_mode = SignupMode::Open;
        config.drive.icann_listen_socket =
            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6286);
        config.drive.pubky_listen_socket =
            SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6287);
        config.admin.listen_socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6288);
        let mock = MockDataDir::new(config, Some(keypair))?;

        let homeserver = HomeserverSuite::start_with_mock_data_dir(mock).await?;
        self.flexible_testnet.homeservers.push(homeserver);
        Ok(())
    }
}
@@ -4,146 +4,315 @@
#![deny(missing_docs)]
#![deny(rustdoc::broken_intra_doc_links)]
#![cfg_attr(any(), deny(clippy::unwrap_used))]
use std::time::Duration;
use std::{str::FromStr, time::Duration};

use anyhow::Result;
use http_relay::HttpRelay;
use pubky::{ClientBuilder, Keypair};
use pubky_common::timestamp::Timestamp;
use pubky_homeserver::Homeserver;
use pubky::Keypair;
use pubky_homeserver::{ConfigToml, DomainPort, HomeserverSuite, MockDataDir};
use url::Url;

/// A local test network for Pubky Core development.
/// Can create a flexible amount of pkarr relays, http relays and homeservers.
///
/// Keeps track of the components and can create new ones.
/// Cleans up all resources when dropped.
pub struct Testnet {
    dht: mainline::Testnet,
    relays: Vec<pkarr_relay::Relay>,
    pub(crate) dht: pkarr::mainline::Testnet,
    pub(crate) pkarr_relays: Vec<pkarr_relay::Relay>,
    pub(crate) http_relays: Vec<HttpRelay>,
    pub(crate) homeservers: Vec<HomeserverSuite>,

    temp_dirs: Vec<tempfile::TempDir>,
}

impl Testnet {
    /// Run a new testnet.
    pub async fn run() -> Result<Self> {
        let dht = mainline::Testnet::new(3)?;

        let mut testnet = Self {
            dht,
            relays: vec![],
        };

        testnet.run_pkarr_relay().await?;

        Ok(testnet)
    }

    /// Create these components with hardcoded configurations:
    ///
    /// 1. A local DHT with bootstrapping nodes: `&["localhost:6881"]`
    /// 3. A Pkarr Relay running on port [15411](pubky_common::constants::testnet_ports::PKARR_RELAY)
    /// 2. A Homeserver whose address is hardcoded to `8pinxxgqs41n4aididenw5apqp1urfmzdztr8jt4abrkdn435ewo`
    /// 4. An HTTP relay running on port [15412](pubky_common::constants::testnet_ports::HTTP_RELAY)
    pub async fn run_with_hardcoded_configurations() -> Result<Self> {
        let dht = mainline::Testnet::new(3)?;

        dht.leak();

        let storage = std::env::temp_dir().join(Timestamp::now().to_string());

        let mut builder = pkarr_relay::Relay::builder();
        builder
            .http_port(15411)
            .storage(storage.clone())
            .disable_rate_limiter()
            .pkarr(|pkarr| {
                pkarr
                    .request_timeout(Duration::from_millis(100))
                    .bootstrap(&dht.bootstrap)
                    .dht(|builder| {
                        if !dht.bootstrap.first().unwrap().contains("6881") {
                            builder.server_mode().port(6881);
                        }

                        builder
                            .bootstrap(&dht.bootstrap)
                            .request_timeout(Duration::from_millis(200))
                    })
            });
        let relay = unsafe { builder.run() }.await?;

        let mut builder = Homeserver::builder();
        builder
            .keypair(Keypair::from_secret_key(&[0; 32]))
            .storage(storage)
            .bootstrap(&dht.bootstrap)
            .relays(&[relay.local_url()])
            .domain("localhost")
            .close_signups()
            .admin_password("admin".to_string());
        unsafe { builder.run().await }?;

        HttpRelay::builder().http_port(15412).run().await?;

    /// Run a new testnet with a local DHT.
    pub async fn new() -> Result<Self> {
        let dht = pkarr::mainline::Testnet::new_async(2).await?;
        let testnet = Self {
            dht,
            relays: vec![relay],
            pkarr_relays: vec![],
            http_relays: vec![],
            homeservers: vec![],
            temp_dirs: vec![],
        };

        Ok(testnet)
    }

    // === Getters ===

    /// Returns a list of DHT bootstrapping nodes.
    pub fn bootstrap(&self) -> &[String] {
        &self.dht.bootstrap
    /// Run the full homeserver suite with core and admin server.
    /// Automatically listens on the default ports.
    /// Automatically uses the configured bootstrap nodes and relays in this Testnet.
    pub async fn create_homeserver_suite(&mut self) -> Result<&HomeserverSuite> {
        let mock_dir =
            MockDataDir::new(ConfigToml::test(), Some(Keypair::from_secret_key(&[0; 32])))?;
        self.create_homeserver_suite_with_mock(mock_dir).await
    }

    /// Returns a list of pkarr relays.
    pub fn relays(&self) -> Box<[Url]> {
        self.relays.iter().map(|r| r.local_url()).collect()
    }

    // === Public Methods ===

    /// Run a Pubky Homeserver
    pub async fn run_homeserver(&self) -> Result<Homeserver> {
        Homeserver::run_test(&self.dht.bootstrap).await
    }

    /// Run a Pubky Homeserver that requires signup tokens
    pub async fn run_homeserver_with_signup_tokens(&self) -> Result<Homeserver> {
        Homeserver::run_test_with_signup_tokens(&self.dht.bootstrap).await
    /// Run the full homeserver suite with core and admin server.
    /// Automatically listens on the configured ports.
    /// Automatically uses the configured bootstrap nodes and relays in this Testnet.
    pub async fn create_homeserver_suite_with_mock(
        &mut self,
        mut mock_dir: MockDataDir,
    ) -> Result<&HomeserverSuite> {
        mock_dir.config_toml.pkdns.dht_bootstrap_nodes = Some(self.dht_bootstrap_nodes());
        if !self.dht_relay_urls().is_empty() {
            mock_dir.config_toml.pkdns.dht_relay_nodes = Some(self.dht_relay_urls().to_vec());
        }
        let homeserver = HomeserverSuite::start_with_mock_data_dir(mock_dir).await?;
        self.homeservers.push(homeserver);
        Ok(self
            .homeservers
            .last()
            .expect("homeservers should be non-empty"))
    }

    /// Run an HTTP Relay
    pub async fn run_http_relay(&self) -> Result<HttpRelay> {
        HttpRelay::builder().run().await
    }

    /// Create a [ClientBuilder] and configure it to use this local test network.
    pub fn client_builder(&self) -> ClientBuilder {
        let bootstrap = self.bootstrap();
        let relays = self.relays();

        let mut builder = pubky::Client::builder();
        builder.pkarr(|builder| {
            builder
                .bootstrap(bootstrap)
                .relays(&relays)
                .expect("testnet relays should be valid urls")
        });

        builder
    pub async fn create_http_relay(&mut self) -> Result<&HttpRelay> {
        let relay = HttpRelay::builder()
            .http_port(0) // Random available port
            .run()
            .await?;
        self.http_relays.push(relay);
        Ok(self
            .http_relays
            .last()
            .expect("http relays should be non-empty"))
    }

    /// Run a new Pkarr relay.
    ///
    /// You can access the list of relays at [Self::relays].
    pub async fn run_pkarr_relay(&mut self) -> Result<Url> {
        let relay = pkarr_relay::Relay::run_test(&self.dht).await?;

    pub async fn create_pkarr_relay(&mut self) -> Result<Url> {
        let dir = tempfile::tempdir()?;
        let mut builder = pkarr_relay::Relay::builder();
        builder
            .disable_rate_limiter()
            .http_port(0)
            .storage(dir.path().to_path_buf())
            .pkarr(|builder| {
                builder.no_default_network();
                builder.bootstrap(&self.dht.bootstrap);
                builder
            });
        let relay = unsafe { builder.run().await? };
        let url = relay.local_url();

        self.relays.push(relay);

        self.pkarr_relays.push(relay);
        self.temp_dirs.push(dir);
        Ok(url)
    }

    // === Getters ===

    /// Returns a list of DHT bootstrapping nodes.
    pub fn dht_bootstrap_nodes(&self) -> Vec<DomainPort> {
        self.dht
            .nodes
            .iter()
            .map(|node| {
                let addr = node.info().local_addr();
                DomainPort::from_str(&format!("{}:{}", addr.ip(), addr.port()))
                    .expect("boostrap nodes from the pkarr dht are always valid domain:port pairs")
            })
            .collect()
    }

    /// Returns a list of pkarr relays.
    pub fn dht_relay_urls(&self) -> Vec<Url> {
        self.pkarr_relays.iter().map(|r| r.local_url()).collect()
    }

    /// Create a [ClientBuilder] and configure it to use this local test network.
    pub fn pubky_client_builder(&self) -> pubky::ClientBuilder {
        let relays = self.dht_relay_urls();

        let mut builder = pubky::Client::builder();
        builder.pkarr(|builder| {
            builder.no_default_network();
            builder.bootstrap(&self.dht.bootstrap);
            if relays.is_empty() {
                builder.no_relays();
            } else {
                builder
                    .relays(&relays)
                    .expect("testnet relays should be valid urls");
            }
            // 100ms timeout for requests. This makes methods like `resolve_most_recent` fast
            // because it doesn't need to wait the default 2s which would slow down the tests.
            builder.request_timeout(Duration::from_millis(2000));
            builder
        });

        builder
    }

    /// Create a [pkarr::ClientBuilder] and configure it to use this local test network.
    pub fn pkarr_client_builder(&self) -> pkarr::ClientBuilder {
        let relays = self.dht_relay_urls();
        let mut builder = pkarr::Client::builder();
        builder.no_default_network(); // Remove DHT bootstrap nodes and relays
        builder.bootstrap(&self.dht.bootstrap);
        if !relays.is_empty() {
            builder
                .relays(&relays)
                .expect("Testnet relays should be valid urls");
        }

        builder
    }
}

#[cfg(test)]
mod test {
    use std::time::Duration;

    use crate::Testnet;
    use pubky::Keypair;

    /// Make sure the components are kept alive even when dropped.
    #[tokio::test]
    async fn test_keep_relays_alive_even_when_dropped() {
        let mut testnet = Testnet::new().await.unwrap();
        {
            let _relay = testnet.create_http_relay().await.unwrap();
        }
        assert_eq!(testnet.http_relays.len(), 1);
    }

    /// Bootstrap node conversion
    #[tokio::test]
    async fn test_boostrap_node_conversion() {
        let testnet = Testnet::new().await.unwrap();
        let nodes = testnet.dht_bootstrap_nodes();
        assert_eq!(nodes.len(), 2);
    }

    /// Test that a user can signup in the testnet.
    /// This is an e2e test to check if everything is correct.
    #[tokio::test]
    async fn test_signup() {
        let mut testnet = Testnet::new().await.unwrap();
        testnet.create_homeserver_suite().await.unwrap();
        let client = testnet.pubky_client_builder().build().unwrap();
        let hs = testnet.homeservers.first().unwrap();
        let keypair = Keypair::random();
        let pubky = keypair.public_key();

        let session = client
            .signup(&keypair, &hs.public_key(), None)
            .await
            .unwrap();
        assert_eq!(session.pubky(), &pubky);
    }

    #[tokio::test]
    async fn test_independent_dhts() {
        let t1 = Testnet::new().await.unwrap();
        let t2 = Testnet::new().await.unwrap();

        assert_ne!(t1.dht.bootstrap, t2.dht.bootstrap);
    }

    /// If everything is linked correctly, the hs_pubky should be resolvable from the pkarr client.
    #[tokio::test]
    async fn test_homeserver_resolvable() {
        let mut testnet = Testnet::new().await.unwrap();
        let hs_pubky = testnet
            .create_homeserver_suite()
            .await
            .unwrap()
            .public_key();

        // Make sure the pkarr packet of the hs is resolvable.
        let pkarr_client = testnet.pkarr_client_builder().build().unwrap();
        let _packet = pkarr_client.resolve(&hs_pubky).await.unwrap();

        // Make sure pkarr can resolve the hs_pubky.
        let pubkey = format!("{}", hs_pubky);
        let _endpoint = pkarr_client
            .resolve_https_endpoint(pubkey.as_str())
            .await
            .unwrap();
    }

    #[tokio::test]
    #[ignore]
    async fn test_spawn_in_parallel() {
        let mut handles = Vec::new();

        for _ in 0..10 {
            let handle = tokio::spawn(async move {
                let mut testnet = match Testnet::new().await {
                    Ok(testnet) => testnet,
                    Err(e) => {
                        panic!("Failed to create testnet: {}", e);
                    }
                };
                match testnet.create_homeserver_suite().await {
                    Ok(hs) => hs,
                    Err(e) => {
                        panic!("Failed to create homeserver suite: {}", e);
                    }
                };
                let client = testnet.pubky_client_builder().build().unwrap();
                let hs = testnet.homeservers.first().unwrap();
                let keypair = Keypair::random();
                let pubky = keypair.public_key();

                let session = client
                    .signup(&keypair, &hs.public_key(), None)
                    .await
                    .unwrap();
                assert_eq!(session.pubky(), &pubky);
                tokio::time::sleep(Duration::from_secs(3)).await;
            });
            handles.push(handle);
        }

        for handle in handles {
            match handle.await {
                Ok(_) => {}
                Err(e) => {
                    panic!("{}", e);
                }
            }
        }
    }

    /// Test that a published packet is resolvable through the relay.
    /// This simulates pkarr clients in a browser.
    /// Made due to https://github.com/pubky/pkarr/issues/140
    #[tokio::test]
    async fn test_pkarr_relay_resolvable() {
        let mut testnet = Testnet::new().await.unwrap();
        testnet.create_pkarr_relay().await.unwrap();

        let keypair = Keypair::random();

        // Publish packet on the DHT without using the relay.
        let client = testnet.pkarr_client_builder().build().unwrap();
        let signed = pkarr::SignedPacket::builder().sign(&keypair).unwrap();
        client.publish(&signed, None).await.unwrap();

        // Resolve packet with a new client to prevent caching.
        // Only use the DHT, no relays.
        let client = testnet.pkarr_client_builder().no_relays().build().unwrap();
        let packet = client.resolve(&keypair.public_key()).await;
        assert!(
            packet.is_some(),
            "Published packet is not available over the DHT."
        );

        // Resolve packet with a new client to prevent caching.
        // Only use the relay, no DHT.
        // This simulates pkarr clients in a browser.
        let client = testnet.pkarr_client_builder().no_dht().build().unwrap();
        let packet = client.resolve(&keypair.public_key()).await;
        assert!(
            packet.is_some(),
            "Published packet is not available over the relay only."
        );
    }
}