Add base project

Implement auth method
This commit is contained in:
coreyphillips
2024-09-11 22:03:27 -04:00
parent 66649117dc
commit b03d04ccc4
192 changed files with 54913 additions and 3 deletions

3388
rust/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

26
rust/Cargo.toml Normal file
View File

@@ -0,0 +1,26 @@
# Crate manifest for the native (Rust) core of the react-native-pubky module.
[package]
name = "react_native_pubky"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
# cdylib for Android (JNA loads "mobile"), staticlib for iOS linking.
[lib]
crate_type = ["cdylib", "staticlib"]
name = "mobile"
[dependencies]
uniffi = { version = "0.25.3", features = [ "cli" ] }
bitcoin = "0.31.1"
bip39 = "2.0.0"
secp256k1 = "0.28.2"
bip32 = "0.5.1"
serde_json = "1.0.114"
hex = "0.4.3"
sha2 = "0.10.8"
serde = { version = "^1.0.209", features = ["derive"] }
# pkarr is pinned to a git branch; pubky crates are vendored as path deps.
pkarr = { git = "https://github.com/Pubky/pkarr", branch = "v3", package = "pkarr", features = ["async", "rand"] }
pubky = { version = "0.1.0", path = "pubky/pubky" }
pubky-common = { version = "0.1.0", path = "pubky/pubky-common" }
pubky_homeserver = { version = "0.1.0", path = "pubky/pubky-homeserver" }
tokio = "1.40.0"
url = "2.5.2"

View File

@@ -0,0 +1,688 @@
// This file was autogenerated by some hot garbage in the `uniffi` crate.
// Trust me, you don't want to mess with it!
@file:Suppress("NAME_SHADOWING")
package uniffi.mobile;
// Common helper code.
//
// Ideally this would live in a separate .kt file where it can be unittested etc
// in isolation, and perhaps even published as a re-useable package.
//
// However, it's important that the details of how this helper code works (e.g. the
// way that different builtin types are passed across the FFI) exactly match what's
// expected by the Rust code on the other side of the interface. In practice right
// now that means coming from the exact same version of `uniffi` that was used to
// compile the Rust component. The easiest way to ensure this is to bundle the Kotlin
// helpers directly inline like we're doing here.
import com.sun.jna.Library
import com.sun.jna.IntegerType
import com.sun.jna.Native
import com.sun.jna.Pointer
import com.sun.jna.Structure
import com.sun.jna.Callback
import com.sun.jna.ptr.*
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.nio.CharBuffer
import java.nio.charset.CodingErrorAction
import java.util.concurrent.ConcurrentHashMap
import kotlin.coroutines.resume
import kotlinx.coroutines.CancellableContinuation
import kotlinx.coroutines.suspendCancellableCoroutine
// This is a helper for safely working with byte buffers returned from the Rust code.
// A rust-owned buffer is represented by its capacity, its current length, and a
// pointer to the underlying data.
// JNA mirror of the C-layout RustBuffer struct: capacity, current length,
// and a pointer to Rust-owned data. Field order must match the Rust side.
@Structure.FieldOrder("capacity", "len", "data")
open class RustBuffer : Structure() {
@JvmField var capacity: Int = 0
@JvmField var len: Int = 0
@JvmField var data: Pointer? = null
class ByValue: RustBuffer(), Structure.ByValue
class ByReference: RustBuffer(), Structure.ByReference
companion object {
// Ask Rust to allocate a buffer of `size` bytes; fails loudly if the
// allocator handed back a null data pointer.
internal fun alloc(size: Int = 0) = rustCall() { status ->
_UniFFILib.INSTANCE.ffi_mobile_rustbuffer_alloc(size, status)
}.also {
if(it.data == null) {
throw RuntimeException("RustBuffer.alloc() returned null data pointer (size=${size})")
}
}
// Build a ByValue view over existing fields without copying the data.
internal fun create(capacity: Int, len: Int, data: Pointer?): RustBuffer.ByValue {
var buf = RustBuffer.ByValue()
buf.capacity = capacity
buf.len = len
buf.data = data
return buf
}
// Return the buffer's memory to Rust; `buf` must not be used afterwards.
internal fun free(buf: RustBuffer.ByValue) = rustCall() { status ->
_UniFFILib.INSTANCE.ffi_mobile_rustbuffer_free(buf, status)
}
}
@Suppress("TooGenericExceptionThrown")
// View the first `len` bytes as a big-endian ByteBuffer; null when `data` is null.
fun asByteBuffer() =
this.data?.getByteBuffer(0, this.len.toLong())?.also {
it.order(ByteOrder.BIG_ENDIAN)
}
}
/**
* The equivalent of the `*mut RustBuffer` type.
* Required for callbacks taking in an out pointer.
*
* Size is the sum of all values in the struct.
*/
class RustBufferByReference : ByReference(16) {
/**
* Set the pointed-to `RustBuffer` to the given value.
*/
fun setValue(value: RustBuffer.ByValue) {
// NOTE: The offsets are as they are in the C-like struct:
// int capacity @0, int len @4, pointer data @8.
val pointer = getPointer()
pointer.setInt(0, value.capacity)
pointer.setInt(4, value.len)
pointer.setPointer(8, value.data)
}
/**
* Get a `RustBuffer.ByValue` from this reference.
*/
fun getValue(): RustBuffer.ByValue {
val pointer = getPointer()
val value = RustBuffer.ByValue()
// writeField keeps JNA's native memory in sync with the Java fields.
value.writeField("capacity", pointer.getInt(0))
value.writeField("len", pointer.getInt(4))
value.writeField("data", pointer.getPointer(8))
return value
}
}
// This is a helper for safely passing byte references into the rust code.
// It's not actually used at the moment, because there aren't many things that you
// can take a direct pointer to in the JVM, and if we're going to copy something
// then we might as well copy it into a `RustBuffer`. But it's here for API
// completeness.
// JNA mirror of the C ForeignBytes struct: a borrowed (JVM-owned) byte span
// described by length + pointer. Field order must match the Rust side.
@Structure.FieldOrder("len", "data")
open class ForeignBytes : Structure() {
@JvmField var len: Int = 0
@JvmField var data: Pointer? = null
class ByValue : ForeignBytes(), Structure.ByValue
}
// The FfiConverter interface handles converter types to and from the FFI
//
// All implementing objects should be public to support external types. When a
// type is external we need to import it's FfiConverter.
public interface FfiConverter<KotlinType, FfiType> {
// Convert an FFI type to a Kotlin type
fun lift(value: FfiType): KotlinType
// Convert a Kotlin type to an FFI type
fun lower(value: KotlinType): FfiType
// Read a Kotlin type from a `ByteBuffer`
fun read(buf: ByteBuffer): KotlinType
// Calculate bytes to allocate when creating a `RustBuffer`
//
// This must return at least as many bytes as the write() function will
// write. It can return more bytes than needed, for example when writing
// Strings we can't know the exact bytes needed until we do the UTF-8
// encoding, so we pessimistically allocate the largest size possible (3
// bytes per codepoint). Allocating extra bytes is not really a big deal
// because the `RustBuffer` is short-lived.
fun allocationSize(value: KotlinType): Int
// Write a Kotlin type to a `ByteBuffer`
fun write(value: KotlinType, buf: ByteBuffer)
// Lower a value into a `RustBuffer`
//
// This method lowers a value into a `RustBuffer` rather than the normal
// FfiType. It's used by the callback interface code. Callback interface
// returns are always serialized into a `RustBuffer` regardless of their
// normal FFI type.
fun lowerIntoRustBuffer(value: KotlinType): RustBuffer.ByValue {
val rbuf = RustBuffer.alloc(allocationSize(value))
try {
// Serialize big-endian into the Rust allocation, then record the
// actual number of bytes written in the buffer's `len` field.
val bbuf = rbuf.data!!.getByteBuffer(0, rbuf.capacity.toLong()).also {
it.order(ByteOrder.BIG_ENDIAN)
}
write(value, bbuf)
rbuf.writeField("len", bbuf.position())
return rbuf
} catch (e: Throwable) {
// On failure we still own the allocation, so free it before rethrowing.
RustBuffer.free(rbuf)
throw e
}
}
// Lift a value from a `RustBuffer`.
//
// This here mostly because of the symmetry with `lowerIntoRustBuffer()`.
// It's currently only used by the `FfiConverterRustBuffer` class below.
fun liftFromRustBuffer(rbuf: RustBuffer.ByValue): KotlinType {
val byteBuf = rbuf.asByteBuffer()!!
try {
val item = read(byteBuf)
// Leftover bytes indicate a serialization mismatch with the Rust side.
if (byteBuf.hasRemaining()) {
throw RuntimeException("junk remaining in buffer after lifting, something is very wrong!!")
}
return item
} finally {
// The buffer is consumed either way; always return it to Rust.
RustBuffer.free(rbuf)
}
}
}
// FfiConverter that uses `RustBuffer` as the FfiType
// Convenience converter for compound types whose FFI representation is a
// serialized RustBuffer rather than a primitive value.
public interface FfiConverterRustBuffer<KotlinType>: FfiConverter<KotlinType, RustBuffer.ByValue> {
override fun lift(value: RustBuffer.ByValue) = liftFromRustBuffer(value)
override fun lower(value: KotlinType) = lowerIntoRustBuffer(value)
}
// A handful of classes and functions to support the generated data structures.
// This would be a good candidate for isolating in its own ffi-support lib.
// Error runtime.
// JNA mirror of the C RustCallStatus struct passed as an out-parameter on
// every FFI call: `code` is the outcome discriminant, `error_buf` carries a
// serialized error payload when the call did not succeed.
@Structure.FieldOrder("code", "error_buf")
internal open class RustCallStatus : Structure() {
// 0 = success, 1 = component error, 2 = Rust panic (see helpers below).
@JvmField var code: Byte = 0
@JvmField var error_buf: RustBuffer.ByValue = RustBuffer.ByValue()
class ByValue: RustCallStatus(), Structure.ByValue
fun isSuccess(): Boolean {
return code == 0.toByte()
}
fun isError(): Boolean {
return code == 1.toByte()
}
fun isPanic(): Boolean {
return code == 2.toByte()
}
}
class InternalException(message: String) : Exception(message)
// Each top-level error class has a companion object implementing this
// interface, used to lift a typed error out of the call status's rust buffer.
// Implementations consume (and are responsible for freeing) `error_buf`.
interface CallStatusErrorHandler<E> {
fun lift(error_buf: RustBuffer.ByValue): E;
}
// Helpers for calling Rust
// In practice we usually need to be synchronized to call this safely, so it doesn't
// synchronize itself
// Call a rust function that returns a Result<>. Pass in the Error class companion that corresponds to the Err
// Invoke `callback` with a fresh RustCallStatus out-parameter, then translate
// any non-success status into a thrown exception via `errorHandler`.
private inline fun <U, E: Exception> rustCallWithError(errorHandler: CallStatusErrorHandler<E>, callback: (RustCallStatus) -> U): U {
var status = RustCallStatus();
val return_value = callback(status)
// Throws on error/panic; falls through on success.
checkCallStatus(errorHandler, status)
return return_value
}
// Check RustCallStatus and throw an error if the call wasn't successful
// Check RustCallStatus and throw an error if the call wasn't successful.
// - CALL_ERROR: lift the component-defined error via `errorHandler`.
// - CALL_PANIC: lift the panic message (if any) into an InternalException.
// - anything else: report the unknown status code.
private fun<E: Exception> checkCallStatus(errorHandler: CallStatusErrorHandler<E>, status: RustCallStatus) {
    if (status.isSuccess()) {
        return
    } else if (status.isError()) {
        throw errorHandler.lift(status.error_buf)
    } else if (status.isPanic()) {
        // when the rust code sees a panic, it tries to construct a rustbuffer
        // with the message. but if that code panics, then it just sends back
        // an empty buffer.
        if (status.error_buf.len > 0) {
            throw InternalException(FfiConverterString.lift(status.error_buf))
        } else {
            throw InternalException("Rust panic")
        }
    } else {
        // FIX: the original used "$status.code", which interpolates the whole
        // `status` object and then appends the literal text ".code". Braces
        // are required to interpolate the member expression.
        throw InternalException("Unknown rust call status: ${status.code}")
    }
}
// CallStatusErrorHandler implementation for times when we don't expect a CALL_ERROR
object NullCallStatusErrorHandler: CallStatusErrorHandler<InternalException> {
override fun lift(error_buf: RustBuffer.ByValue): InternalException {
// A CALL_ERROR here is unexpected; free the Rust-owned buffer first so
// the allocation is not leaked, then report the protocol violation.
RustBuffer.free(error_buf)
return InternalException("Unexpected CALL_ERROR")
}
}
// Call a rust function that returns a plain value
// Invoke a Rust FFI function that is not expected to report a CALL_ERROR;
// any error status surfaces as an InternalException.
private inline fun <U> rustCall(callback: (RustCallStatus) -> U): U =
    rustCallWithError(NullCallStatusErrorHandler, callback)
// IntegerType that matches Rust's `usize` / C's `size_t`
// IntegerType that matches Rust's `usize` / C's `size_t` (4 or 8 bytes,
// depending on the platform; see Native.SIZE_T_SIZE).
public class USize(value: Long = 0) : IntegerType(Native.SIZE_T_SIZE, value, true) {
// This is needed to fill in the gaps of IntegerType's implementation of Number for Kotlin.
override fun toByte() = toInt().toByte()
// Needed until https://youtrack.jetbrains.com/issue/KT-47902 is fixed.
@Deprecated("`toInt().toChar()` is deprecated")
override fun toChar() = toInt().toChar()
override fun toShort() = toInt().toShort()
fun writeToBuffer(buf: ByteBuffer) {
// Make sure we always write usize integers using native byte-order, since they may be
// casted to pointer values
buf.order(ByteOrder.nativeOrder())
try {
when (Native.SIZE_T_SIZE) {
4 -> buf.putInt(toInt())
8 -> buf.putLong(toLong())
else -> throw RuntimeException("Invalid SIZE_T_SIZE: ${Native.SIZE_T_SIZE}")
}
} finally {
// Restore the buffer's big-endian order expected by the other converters.
buf.order(ByteOrder.BIG_ENDIAN)
}
}
companion object {
val size: Int
get() = Native.SIZE_T_SIZE
fun readFromBuffer(buf: ByteBuffer) : USize {
// Make sure we always read usize integers using native byte-order, since they may be
// casted from pointer values
buf.order(ByteOrder.nativeOrder())
try {
return when (Native.SIZE_T_SIZE) {
4 -> USize(buf.getInt().toLong())
8 -> USize(buf.getLong())
else -> throw RuntimeException("Invalid SIZE_T_SIZE: ${Native.SIZE_T_SIZE}")
}
} finally {
// Restore the buffer's big-endian order expected by the other converters.
buf.order(ByteOrder.BIG_ENDIAN)
}
}
}
}
// Map handles to objects
//
// This is used when the Rust code expects an opaque pointer to represent some foreign object.
// Normally we would pass a pointer to the object, but JNA doesn't support getting a pointer from an
// object reference , nor does it support leaking a reference to Rust.
//
// Instead, this class maps USize values to objects so that we can pass a pointer-sized type to
// Rust when it needs an opaque pointer.
//
// TODO: refactor callbacks to use this class
// Maps pointer-sized USize handles to live JVM objects, so Rust can hold an
// opaque "pointer" to a foreign object (JNA cannot leak real object pointers).
internal class UniFfiHandleMap<T: Any> {
    private val storage = ConcurrentHashMap<USize, T>()
    // AtomicInteger keeps handle generation safe on 32-bit systems. If we ever
    // wrap around after ~4 billion inserts, the earliest handles will long
    // since have been removed.
    private val nextHandle = java.util.concurrent.atomic.AtomicInteger(0)

    val size: Int
        get() = storage.size

    // Store `obj` under a freshly generated handle and return that handle.
    fun insert(obj: T): USize {
        val handle = USize(nextHandle.getAndAdd(1).toLong())
        storage[handle] = obj
        return handle
    }

    // Look up the object for `handle`, or null if it was never inserted/already removed.
    fun get(handle: USize): T? = storage[handle]

    // Remove and return the object for `handle`, or null if absent.
    fun remove(handle: USize): T? = storage.remove(handle)
}
// JNA callback signature that Rust invokes to resume a polled future: Rust
// passes back the continuation handle plus a poll result (ready / maybe-ready).
internal interface UniFffiRustFutureContinuationCallbackType : com.sun.jna.Callback {
fun callback(continuationHandle: USize, pollResult: Short);
}
// Contains loading, initialization code,
// and the FFI Function declarations in a com.sun.jna.Library.
// Resolve the native library name for [componentName]: a JVM system property
// of the form `uniffi.component.<name>.libraryOverride` takes precedence over
// the default library name "mobile".
@Synchronized
private fun findLibraryName(componentName: String): String =
    System.getProperty("uniffi.component.$componentName.libraryOverride") ?: "mobile"
// Load the JNA binding interface `Lib` for `componentName`, honoring any
// library-name override supplied via system property (see findLibraryName).
private inline fun <reified Lib : Library> loadIndirect(
componentName: String
): Lib {
return Native.load<Lib>(findLibraryName(componentName), Lib::class.java)
}
// A JNA Library to expose the extern-C FFI definitions.
// This is an implementation detail which will be called internally by the public API.
// JNA declarations for every extern-C symbol exported by the Rust scaffolding.
// Names and signatures are generated and must match the dylib exactly.
internal interface _UniFFILib : Library {
companion object {
// Loaded lazily exactly once; on first load we verify the contract
// version and API checksums and register the async continuation callback.
internal val INSTANCE: _UniFFILib by lazy {
loadIndirect<_UniFFILib>(componentName = "mobile")
.also { lib: _UniFFILib ->
uniffiCheckContractApiVersion(lib)
uniffiCheckApiChecksums(lib)
uniffiRustFutureContinuationCallback.register(lib)
}
}
}
// Component entry points.
fun uniffi_mobile_fn_func_auth(`url`: RustBuffer.ByValue,`secretKey`: RustBuffer.ByValue,
): Pointer
fun uniffi_mobile_fn_func_myexample(_uniffi_out_err: RustCallStatus,
): RustBuffer.ByValue
// RustBuffer allocation / lifecycle management.
fun ffi_mobile_rustbuffer_alloc(`size`: Int,_uniffi_out_err: RustCallStatus,
): RustBuffer.ByValue
fun ffi_mobile_rustbuffer_from_bytes(`bytes`: ForeignBytes.ByValue,_uniffi_out_err: RustCallStatus,
): RustBuffer.ByValue
fun ffi_mobile_rustbuffer_free(`buf`: RustBuffer.ByValue,_uniffi_out_err: RustCallStatus,
): Unit
fun ffi_mobile_rustbuffer_reserve(`buf`: RustBuffer.ByValue,`additional`: Int,_uniffi_out_err: RustCallStatus,
): RustBuffer.ByValue
// Async (Rust future) support: one poll/cancel/free/complete quartet per FFI
// return type, plus the process-wide continuation callback setter.
fun ffi_mobile_rust_future_continuation_callback_set(`callback`: UniFffiRustFutureContinuationCallbackType,
): Unit
fun ffi_mobile_rust_future_poll_u8(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_u8(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_u8(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_u8(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Byte
fun ffi_mobile_rust_future_poll_i8(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_i8(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_i8(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_i8(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Byte
fun ffi_mobile_rust_future_poll_u16(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_u16(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_u16(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_u16(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Short
fun ffi_mobile_rust_future_poll_i16(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_i16(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_i16(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_i16(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Short
fun ffi_mobile_rust_future_poll_u32(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_u32(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_u32(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_u32(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Int
fun ffi_mobile_rust_future_poll_i32(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_i32(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_i32(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_i32(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Int
fun ffi_mobile_rust_future_poll_u64(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_u64(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_u64(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_u64(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Long
fun ffi_mobile_rust_future_poll_i64(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_i64(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_i64(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_i64(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Long
fun ffi_mobile_rust_future_poll_f32(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_f32(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_f32(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_f32(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Float
fun ffi_mobile_rust_future_poll_f64(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_f64(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_f64(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_f64(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Double
fun ffi_mobile_rust_future_poll_pointer(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_pointer(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_pointer(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_pointer(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Pointer
fun ffi_mobile_rust_future_poll_rust_buffer(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_rust_buffer(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_rust_buffer(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_rust_buffer(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): RustBuffer.ByValue
fun ffi_mobile_rust_future_poll_void(`handle`: Pointer,`uniffiCallback`: USize,
): Unit
fun ffi_mobile_rust_future_cancel_void(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_free_void(`handle`: Pointer,
): Unit
fun ffi_mobile_rust_future_complete_void(`handle`: Pointer,_uniffi_out_err: RustCallStatus,
): Unit
// Load-time safety probes: per-function API checksums and the scaffolding
// contract version (see uniffiCheckApiChecksums / uniffiCheckContractApiVersion).
fun uniffi_mobile_checksum_func_auth(
): Short
fun uniffi_mobile_checksum_func_myexample(
): Short
fun ffi_mobile_uniffi_contract_version(
): Int
}
// Verify that these generated bindings (contract version 24) match the
// compiled Rust scaffolding; a mismatch means the dylib and the bindings were
// produced by incompatible uniffi versions.
private fun uniffiCheckContractApiVersion(lib: _UniFFILib) {
    // Version baked into these bindings by the generator.
    val bindingsContractVersion = 24
    // Version reported by the loaded dylib.
    val scaffoldingContractVersion = lib.ffi_mobile_uniffi_contract_version()
    if (scaffoldingContractVersion != bindingsContractVersion) {
        throw RuntimeException("UniFFI contract version mismatch: try cleaning and rebuilding your project")
    }
}
@Suppress("UNUSED_PARAMETER")
// Compare each generated function's signature checksum against the one baked
// into the scaffolding; a mismatch means bindings and Rust were generated from
// different interface definitions.
private fun uniffiCheckApiChecksums(lib: _UniFFILib) {
if (lib.uniffi_mobile_checksum_func_auth() != 55720.toShort()) {
throw RuntimeException("UniFFI API checksum mismatch: try cleaning and rebuilding your project")
}
if (lib.uniffi_mobile_checksum_func_myexample() != 65225.toShort()) {
throw RuntimeException("UniFFI API checksum mismatch: try cleaning and rebuilding your project")
}
}
// Async support
// Async return type handlers
// Poll result from ffi_mobile_rust_future_poll_*: the future is ready to complete.
internal const val UNIFFI_RUST_FUTURE_POLL_READY = 0.toShort()
// Poll result: not ready yet; the continuation callback will fire again later.
internal const val UNIFFI_RUST_FUTURE_POLL_MAYBE_READY = 1.toShort()
// Suspended coroutine continuations, keyed by the handle handed to Rust on poll.
internal val uniffiContinuationHandleMap = UniFfiHandleMap<CancellableContinuation<Short>>()
// FFI type for Rust future continuations
// Single process-wide callback object Rust invokes to resume a suspended poll.
internal object uniffiRustFutureContinuationCallback: UniFffiRustFutureContinuationCallbackType {
override fun callback(continuationHandle: USize, pollResult: Short) {
// Remove-then-resume guarantees each continuation is resumed at most once.
uniffiContinuationHandleMap.remove(continuationHandle)?.resume(pollResult)
}
// Registered exactly once at library-load time (see _UniFFILib.INSTANCE).
internal fun register(lib: _UniFFILib) {
lib.ffi_mobile_rust_future_continuation_callback_set(this)
}
}
// Drive a Rust future to completion from a Kotlin coroutine: poll repeatedly,
// suspending until Rust fires the continuation callback, then lift the
// completed value (or throw via `errorHandler`). The future is always freed.
internal suspend fun<T, F, E: Exception> uniffiRustCallAsync(
rustFuture: Pointer,
pollFunc: (Pointer, USize) -> Unit,
completeFunc: (Pointer, RustCallStatus) -> F,
freeFunc: (Pointer) -> Unit,
liftFunc: (F) -> T,
errorHandler: CallStatusErrorHandler<E>
): T {
try {
do {
// NOTE: Kotlin's do-while condition may reference `pollResult`
// declared inside the loop body — this is valid scoping.
val pollResult = suspendCancellableCoroutine<Short> { continuation ->
pollFunc(
rustFuture,
uniffiContinuationHandleMap.insert(continuation)
)
}
} while (pollResult != UNIFFI_RUST_FUTURE_POLL_READY);
return liftFunc(
rustCallWithError(errorHandler, { status -> completeFunc(rustFuture, status) })
)
} finally {
// Always release the Rust-side future, on success, error, or cancellation.
freeFunc(rustFuture)
}
}
// Public interface members begin here.
public object FfiConverterString: FfiConverter<String, RustBuffer.ByValue> {
// Note: we don't inherit from FfiConverterRustBuffer, because we use a
// special encoding when lowering/lifting. We can use `RustBuffer.len` to
// store our length and avoid writing it out to the buffer.
override fun lift(value: RustBuffer.ByValue): String {
try {
// Copy the UTF-8 bytes out of the Rust allocation, then decode.
val byteArr = ByteArray(value.len)
value.asByteBuffer()!!.get(byteArr)
return byteArr.toString(Charsets.UTF_8)
} finally {
// The Rust-owned buffer is consumed either way.
RustBuffer.free(value)
}
}
override fun read(buf: ByteBuffer): String {
// In-buffer encoding: 4-byte length prefix followed by UTF-8 bytes.
val len = buf.getInt()
val byteArr = ByteArray(len)
buf.get(byteArr)
return byteArr.toString(Charsets.UTF_8)
}
fun toUtf8(value: String): ByteBuffer {
// Make sure we don't have invalid UTF-16, check for lone surrogates.
return Charsets.UTF_8.newEncoder().run {
onMalformedInput(CodingErrorAction.REPORT)
encode(CharBuffer.wrap(value))
}
}
override fun lower(value: String): RustBuffer.ByValue {
val byteBuf = toUtf8(value)
// Ideally we'd pass these bytes to `ffi_bytebuffer_from_bytes`, but doing so would require us
// to copy them into a JNA `Memory`. So we might as well directly copy them into a `RustBuffer`.
val rbuf = RustBuffer.alloc(byteBuf.limit())
rbuf.asByteBuffer()!!.put(byteBuf)
return rbuf
}
// We aren't sure exactly how many bytes our string will be once it's UTF-8
// encoded. Allocate 3 bytes per UTF-16 code unit which will always be
// enough.
override fun allocationSize(value: String): Int {
val sizeForLength = 4
val sizeForString = value.length * 3
return sizeForLength + sizeForString
}
override fun write(value: String, buf: ByteBuffer) {
// Length prefix records the actual encoded size, not the pessimistic estimate.
val byteBuf = toUtf8(value)
buf.putInt(byteBuf.limit())
buf.put(byteBuf)
}
}
// Converter for List<String>: a 4-byte element count followed by each string
// in FfiConverterString's length-prefixed encoding.
public object FfiConverterSequenceString: FfiConverterRustBuffer<List<String>> {
override fun read(buf: ByteBuffer): List<String> {
val len = buf.getInt()
return List<String>(len) {
FfiConverterString.read(buf)
}
}
override fun allocationSize(value: List<String>): Int {
val sizeForLength = 4
val sizeForItems = value.map { FfiConverterString.allocationSize(it) }.sum()
return sizeForLength + sizeForItems
}
override fun write(value: List<String>, buf: ByteBuffer) {
buf.putInt(value.size)
value.forEach {
FfiConverterString.write(it, buf)
}
}
}
/**
 * Async wrapper for the Rust `auth` function: lowers [url] and [secretKey] to
 * Rust strings, polls the returned Rust future to completion, and lifts the
 * result as a list of strings. Uses NullCallStatusErrorHandler, so the Rust
 * side is not expected to return a component error; a CALL_ERROR would
 * surface as an InternalException.
 */
@Suppress("ASSIGNED_BUT_NEVER_ACCESSED_VARIABLE")
suspend fun `auth`(`url`: String, `secretKey`: String) : List<String> {
return uniffiRustCallAsync(
_UniFFILib.INSTANCE.uniffi_mobile_fn_func_auth(FfiConverterString.lower(`url`),FfiConverterString.lower(`secretKey`),),
{ future, continuation -> _UniFFILib.INSTANCE.ffi_mobile_rust_future_poll_rust_buffer(future, continuation) },
{ future, continuation -> _UniFFILib.INSTANCE.ffi_mobile_rust_future_complete_rust_buffer(future, continuation) },
{ future -> _UniFFILib.INSTANCE.ffi_mobile_rust_future_free_rust_buffer(future) },
// lift function
{ FfiConverterSequenceString.lift(it) },
// Error FFI converter
NullCallStatusErrorHandler,
)
}
/**
 * Synchronous wrapper for the Rust `myexample` function; lifts the returned
 * RustBuffer into a list of strings. Errors are not expected (see rustCall).
 */
fun `myexample`(): List<String> {
return FfiConverterSequenceString.lift(
rustCall() { _status ->
_UniFFILib.INSTANCE.uniffi_mobile_fn_func_myexample(_status)
})
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

484
rust/bindings/mobile.swift Normal file
View File

@@ -0,0 +1,484 @@
// This file was autogenerated by some hot garbage in the `uniffi` crate.
// Trust me, you don't want to mess with it!
import Foundation
// Depending on the consumer's build setup, the low-level FFI code
// might be in a separate module, or it might be compiled inline into
// this module. This is a bit of light hackery to work with both.
#if canImport(mobileFFI)
import mobileFFI
#endif
fileprivate extension RustBuffer {
// Allocate a new buffer, copying the contents of a `UInt8` array.
init(bytes: [UInt8]) {
let rbuf = bytes.withUnsafeBufferPointer { ptr in
RustBuffer.from(ptr)
}
self.init(capacity: rbuf.capacity, len: rbuf.len, data: rbuf.data)
}
// Copy `ptr` into a fresh Rust-owned buffer via the FFI allocator.
// `try!` is acceptable: allocation failure here is unrecoverable.
static func from(_ ptr: UnsafeBufferPointer<UInt8>) -> RustBuffer {
try! rustCall { ffi_mobile_rustbuffer_from_bytes(ForeignBytes(bufferPointer: ptr), $0) }
}
// Frees the buffer in place.
// The buffer must not be used after this is called.
func deallocate() {
try! rustCall { ffi_mobile_rustbuffer_free(self, $0) }
}
}
fileprivate extension ForeignBytes {
// Describe a Swift-owned byte span (length + base pointer) for passing
// borrowed bytes across the FFI without copying.
init(bufferPointer: UnsafeBufferPointer<UInt8>) {
self.init(len: Int32(bufferPointer.count), data: bufferPointer.baseAddress)
}
}
// For every type used in the interface, we provide helper methods for conveniently
// lifting and lowering that type from C-compatible data, and for reading and writing
// values of that type in a buffer.
// Helper classes/extensions that don't change.
// Someday, this will be in a library of its own.
fileprivate extension Data {
init(rustBuffer: RustBuffer) {
// TODO: This copies the buffer. Can we read directly from a
// Rust buffer?
self.init(bytes: rustBuffer.data!, count: Int(rustBuffer.len))
}
}
// Define reader functionality. Normally this would be defined in a class or
// struct, but we use standalone functions instead in order to make external
// types work.
//
// With external types, one swift source file needs to be able to call the read
// method on another source file's FfiConverter, but then what visibility
// should Reader have?
// - If Reader is fileprivate, then this means the read() must also
// be fileprivate, which doesn't work with external types.
// - If Reader is internal/public, we'll get compile errors since both source
// files will try define the same type.
//
// Instead, the read() method and these helper functions input a tuple of data
// Construct a reader tuple positioned at the very start of `data`.
fileprivate func createReader(data: Data) -> (data: Data, offset: Data.Index) {
    return (data: data, offset: 0)
}
// Reads an integer at the current offset, in big-endian order, and advances
// the offset on success. Throws if reading the integer would move the
// offset past the end of the buffer.
fileprivate func readInt<T: FixedWidthInteger>(_ reader: inout (data: Data, offset: Data.Index)) throws -> T {
let range = reader.offset..<reader.offset + MemoryLayout<T>.size
guard reader.data.count >= range.upperBound else {
throw UniffiInternalError.bufferOverflow
}
// Fast path for single bytes: no byte-order conversion needed.
if T.self == UInt8.self {
let value = reader.data[reader.offset]
reader.offset += 1
return value as! T
}
// Copy the raw bytes into `value`, then convert from big-endian wire order.
var value: T = 0
let _ = withUnsafeMutableBytes(of: &value, { reader.data.copyBytes(to: $0, from: range)})
reader.offset = range.upperBound
return value.bigEndian
}
// Reads an arbitrary number of bytes, to be used to read
// raw bytes, this is useful when lifting strings
fileprivate func readBytes(_ reader: inout (data: Data, offset: Data.Index), count: Int) throws -> Array<UInt8> {
let range = reader.offset..<(reader.offset+count)
guard reader.data.count >= range.upperBound else {
throw UniffiInternalError.bufferOverflow
}
// Copy `count` raw bytes out of the buffer and advance the offset.
var value = [UInt8](repeating: 0, count: count)
value.withUnsafeMutableBufferPointer({ buffer in
reader.data.copyBytes(to: buffer, from: range)
})
reader.offset = range.upperBound
return value
}
// Reads a big-endian Float (via its UInt32 bit pattern) at the current offset.
fileprivate func readFloat(_ reader: inout (data: Data, offset: Data.Index)) throws -> Float {
    let bits: UInt32 = try readInt(&reader)
    return Float(bitPattern: bits)
}
// Reads a big-endian Double (via its UInt64 bit pattern) at the current offset.
fileprivate func readDouble(_ reader: inout (data: Data, offset: Data.Index)) throws -> Double {
    let bits: UInt64 = try readInt(&reader)
    return Double(bitPattern: bits)
}
// True while at least one unread byte remains in the reader's buffer.
fileprivate func hasRemaining(_ reader: (data: Data, offset: Data.Index)) -> Bool {
    reader.offset < reader.data.count
}
// Define writer functionality. Normally this would be defined in a class or
// struct, but we use standalone functions instead in order to make external
// types work. See the above discussion on Readers for details.
// Start a fresh, empty byte accumulator used by the write* helpers below.
fileprivate func createWriter() -> [UInt8] {
    return [UInt8]()
}
// Append raw bytes to the writer; used when lowering strings and byte arrays.
fileprivate func writeBytes<S>(_ writer: inout [UInt8], _ byteArr: S) where S: Sequence, S.Element == UInt8 {
    for byte in byteArr {
        writer.append(byte)
    }
}
// Writes an integer in big-endian order.
//
// Warning: make sure what you are trying to write
// is in the correct type!
fileprivate func writeInt<T: FixedWidthInteger>(_ writer: inout [UInt8], _ value: T) {
// Convert to big-endian wire order, then append the raw representation bytes.
var value = value.bigEndian
withUnsafeBytes(of: &value) { writer.append(contentsOf: $0) }
}
// Writes a Float as its big-endian UInt32 bit pattern.
fileprivate func writeFloat(_ writer: inout [UInt8], _ value: Float) {
    let bits: UInt32 = value.bitPattern
    writeInt(&writer, bits)
}
// Writes a Double as its big-endian UInt64 bit pattern.
fileprivate func writeDouble(_ writer: inout [UInt8], _ value: Double) {
    let bits: UInt64 = value.bitPattern
    writeInt(&writer, bits)
}
// Protocol for types that transfer other types across the FFI. This is
// analogous to the Rust trait of the same name.
fileprivate protocol FfiConverter {
associatedtype FfiType
associatedtype SwiftType
// lift/lower convert between the C-compatible FFI value and the Swift value.
static func lift(_ value: FfiType) throws -> SwiftType
static func lower(_ value: SwiftType) -> FfiType
// read/write (de)serialize the Swift value inside a byte buffer.
static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType
static func write(_ value: SwiftType, into buf: inout [UInt8])
}
// Types conforming to `Primitive` pass themselves directly over the FFI:
// FfiType == SwiftType, so lift/lower are the identity.
fileprivate protocol FfiConverterPrimitive: FfiConverter where FfiType == SwiftType { }
extension FfiConverterPrimitive {
public static func lift(_ value: FfiType) throws -> SwiftType {
return value
}
public static func lower(_ value: SwiftType) -> FfiType {
return value
}
}
// Types conforming to `FfiConverterRustBuffer` lift and lower into a `RustBuffer`.
// Used for complex types where it's hard to write a custom lift/lower.
fileprivate protocol FfiConverterRustBuffer: FfiConverter where FfiType == RustBuffer {}
extension FfiConverterRustBuffer {
public static func lift(_ buf: RustBuffer) throws -> SwiftType {
var reader = createReader(data: Data(rustBuffer: buf))
let value = try read(from: &reader)
// Leftover bytes indicate a serialization mismatch with the Rust side.
if hasRemaining(reader) {
throw UniffiInternalError.incompleteData
}
// The Rust-owned buffer has been fully consumed; return it to Rust.
buf.deallocate()
return value
}
public static func lower(_ value: SwiftType) -> RustBuffer {
var writer = createWriter()
write(value, into: &writer)
return RustBuffer(bytes: writer)
}
}
// An error type for FFI errors. These errors occur at the UniFFI level, not
// the library level.
fileprivate enum UniffiInternalError: LocalizedError {
case bufferOverflow
case incompleteData
case unexpectedOptionalTag
case unexpectedEnumCase
case unexpectedNullPointer
case unexpectedRustCallStatusCode
case unexpectedRustCallError
case unexpectedStaleHandle
case rustPanic(_ message: String)
public var errorDescription: String? {
switch self {
case .bufferOverflow: return "Reading the requested value would read past the end of the buffer"
case .incompleteData: return "The buffer still has data after lifting its containing value"
case .unexpectedOptionalTag: return "Unexpected optional tag; should be 0 or 1"
case .unexpectedEnumCase: return "Raw enum value doesn't match any cases"
case .unexpectedNullPointer: return "Raw pointer value was null"
case .unexpectedRustCallStatusCode: return "Unexpected RustCallStatus code"
case .unexpectedRustCallError: return "CALL_ERROR but no errorClass specified"
case .unexpectedStaleHandle: return "The object in the handle map has been dropped already"
case let .rustPanic(message): return message
}
}
}
// Result codes the Rust scaffolding writes into `RustCallStatus.code`.
fileprivate let CALL_SUCCESS: Int8 = 0    // call completed normally
fileprivate let CALL_ERROR: Int8 = 1      // expected error; payload in errorBuf
fileprivate let CALL_PANIC: Int8 = 2      // Rust code panicked
fileprivate let CALL_CANCELLED: Int8 = 3  // async call was cancelled
fileprivate extension RustCallStatus {
    /// Create a status pre-set to success with an empty error buffer,
    /// ready to be passed by reference into a scaffolding call.
    init() {
        let emptyErrorBuf = RustBuffer.init(capacity: 0, len: 0, data: nil)
        self.init(code: CALL_SUCCESS, errorBuf: emptyErrorBuf)
    }
}
/// Invoke a scaffolding function that may panic but has no declared error
/// type (so no error handler is installed).
private func rustCall<T>(_ callback: (UnsafeMutablePointer<RustCallStatus>) -> T) throws -> T {
    return try makeRustCall(callback, errorHandler: nil)
}
/// Invoke a scaffolding function whose structured errors are lifted into a
/// Swift `Error` by `errorHandler`.
private func rustCallWithError<T>(
    _ errorHandler: @escaping (RustBuffer) throws -> Error,
    _ callback: (UnsafeMutablePointer<RustCallStatus>) -> T) throws -> T {
    return try makeRustCall(callback, errorHandler: errorHandler)
}
/// Shared implementation behind all synchronous Rust calls: performs the
/// call with a fresh `RustCallStatus`, then translates any non-success
/// status into a thrown Swift error.
private func makeRustCall<T>(
    _ callback: (UnsafeMutablePointer<RustCallStatus>) -> T,
    errorHandler: ((RustBuffer) throws -> Error)?
) throws -> T {
    // Trigger the version/checksum handshake here; the async future path
    // calls it separately since future creation bypasses this helper.
    uniffiEnsureInitialized()
    var status = RustCallStatus.init()
    let result = callback(&status)
    try uniffiCheckCallStatus(callStatus: status, errorHandler: errorHandler)
    return result
}
/// Translate a `RustCallStatus` produced by the scaffolding into Swift
/// control flow: return on success, otherwise throw the matching error.
private func uniffiCheckCallStatus(
    callStatus: RustCallStatus,
    errorHandler: ((RustBuffer) throws -> Error)?
) throws {
    switch callStatus.code {
    case CALL_SUCCESS:
        return

    case CALL_ERROR:
        guard let errorHandler = errorHandler else {
            // Rust reported an error, but this call declared no error type.
            callStatus.errorBuf.deallocate()
            throw UniffiInternalError.unexpectedRustCallError
        }
        throw try errorHandler(callStatus.errorBuf)

    case CALL_PANIC:
        // When the rust code sees a panic, it tries to construct a RustBuffer
        // with the message. But if that code panics, then it just sends back
        // an empty buffer.
        if callStatus.errorBuf.len > 0 {
            throw UniffiInternalError.rustPanic(try FfiConverterString.lift(callStatus.errorBuf))
        }
        callStatus.errorBuf.deallocate()
        throw UniffiInternalError.rustPanic("Rust panic")

    case CALL_CANCELLED:
        throw CancellationError()

    default:
        throw UniffiInternalError.unexpectedRustCallStatusCode
    }
}
// Public interface members begin here.
// Converts between Swift `String` and a `RustBuffer` of UTF-8 bytes.
fileprivate struct FfiConverterString: FfiConverter {
    typealias SwiftType = String
    typealias FfiType = RustBuffer
    // Take ownership of a RustBuffer of UTF-8 bytes and produce a String.
    // The buffer is always deallocated via `defer`, even if decoding traps.
    public static func lift(_ value: RustBuffer) throws -> String {
        defer {
            value.deallocate()
        }
        // A nil data pointer represents the empty string.
        if value.data == nil {
            return String()
        }
        let bytes = UnsafeBufferPointer<UInt8>(start: value.data!, count: Int(value.len))
        // Force-unwrap assumes the bytes came from a Rust `String`, which is
        // always valid UTF-8 — NOTE(review): this traps if that ever breaks.
        return String(bytes: bytes, encoding: String.Encoding.utf8)!
    }
    // Copy the String's UTF-8 bytes into a RustBuffer for Rust to consume.
    public static func lower(_ value: String) -> RustBuffer {
        return value.utf8CString.withUnsafeBufferPointer { ptr in
            // The swift string gives us int8_t, we want uint8_t.
            ptr.withMemoryRebound(to: UInt8.self) { ptr in
                // The swift string gives us a trailing null byte, we don't want it.
                let buf = UnsafeBufferPointer(rebasing: ptr.prefix(upTo: ptr.count - 1))
                return RustBuffer.from(buf)
            }
        }
    }
    // Read a length-prefixed (Int32 byte count) UTF-8 string from the buffer.
    public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> String {
        let len: Int32 = try readInt(&buf)
        return String(bytes: try readBytes(&buf, count: Int(len)), encoding: String.Encoding.utf8)!
    }
    // Write the string as an Int32 byte count followed by its UTF-8 bytes.
    public static func write(_ value: String, into buf: inout [UInt8]) {
        let len = Int32(value.utf8.count)
        writeInt(&buf, len)
        writeBytes(&buf, value.utf8)
    }
}
// Converts `[String]` to/from a length-prefixed sequence inside a RustBuffer.
fileprivate struct FfiConverterSequenceString: FfiConverterRustBuffer {
    typealias SwiftType = [String]

    /// Write the element count (Int32) followed by each string in order.
    public static func write(_ value: [String], into buf: inout [UInt8]) {
        writeInt(&buf, Int32(value.count))
        value.forEach { element in
            FfiConverterString.write(element, into: &buf)
        }
    }

    /// Read an Int32 element count, then that many strings.
    public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> [String] {
        let count: Int32 = try readInt(&buf)
        var result = [String]()
        result.reserveCapacity(Int(count))
        for _ in 0 ..< count {
            result.append(try FfiConverterString.read(from: &buf))
        }
        return result
    }
}
// Poll states reported by Rust futures: either the future has finished, or
// it should be polled again after the continuation fires.
private let UNIFFI_RUST_FUTURE_POLL_READY: Int8 = 0
private let UNIFFI_RUST_FUTURE_POLL_MAYBE_READY: Int8 = 1
// Drive a Rust future to completion from Swift's async world.
//
// The pieces:
//   - `rustFutureFunc` creates the future handle on the Rust side.
//   - `pollFunc` polls it, passing an opaque continuation pointer that the
//     Rust side invokes when progress is possible.
//   - `completeFunc` extracts the result once polling reports READY.
//   - `freeFunc` releases the handle (always runs, via `defer`).
//   - `liftFunc`/`errorHandler` convert the FFI result or error into Swift.
fileprivate func uniffiRustCallAsync<F, T>(
    rustFutureFunc: () -> UnsafeMutableRawPointer,
    pollFunc: (UnsafeMutableRawPointer, UnsafeMutableRawPointer) -> (),
    completeFunc: (UnsafeMutableRawPointer, UnsafeMutablePointer<RustCallStatus>) -> F,
    freeFunc: (UnsafeMutableRawPointer) -> (),
    liftFunc: (F) throws -> T,
    errorHandler: ((RustBuffer) throws -> Error)?
) async throws -> T {
    // Make sure to call uniffiEnsureInitialized() since future creation doesn't have a
    // RustCallStatus param, so doesn't use makeRustCall()
    uniffiEnsureInitialized()
    let rustFuture = rustFutureFunc()
    defer {
        freeFunc(rustFuture)
    }
    var pollResult: Int8;
    repeat {
        // Suspend until Rust invokes the continuation with a poll state.
        pollResult = await withUnsafeContinuation {
            pollFunc(rustFuture, ContinuationHolder($0).toOpaque())
        }
    } while pollResult != UNIFFI_RUST_FUTURE_POLL_READY
    // Future is ready: complete it through the normal call-status checking
    // path, then lift the raw FFI value into the Swift return type.
    return try liftFunc(makeRustCall(
        { completeFunc(rustFuture, $0) },
        errorHandler: errorHandler
    ))
}
// Callback handler for async calls. Rust invokes this when a future makes
// progress; it recovers the held continuation from the opaque pointer and
// resumes the suspended Swift task with the poll state.
fileprivate func uniffiFutureContinuationCallback(ptr: UnsafeMutableRawPointer, pollResult: Int8) {
    ContinuationHolder.fromOpaque(ptr).resume(pollResult)
}
// Wraps UnsafeContinuation in a class so that we can use reference counting when passing it across
// the FFI
fileprivate class ContinuationHolder {
    let continuation: UnsafeContinuation<Int8, Never>
    init(_ continuation: UnsafeContinuation<Int8, Never>) {
        self.continuation = continuation
    }
    // Resume the suspended Swift task with the poll state reported by Rust.
    func resume(_ pollResult: Int8) {
        self.continuation.resume(returning: pollResult)
    }
    // Retain `self` and hand out a raw pointer for Rust to carry. Each call
    // must be balanced by exactly one `fromOpaque`, which consumes the
    // retain — otherwise the holder leaks (or is over-released).
    func toOpaque() -> UnsafeMutableRawPointer {
        return Unmanaged<ContinuationHolder>.passRetained(self).toOpaque()
    }
    // Recover the holder from the raw pointer, consuming the retain taken
    // by `toOpaque`.
    static func fromOpaque(_ ptr: UnsafeRawPointer) -> ContinuationHolder {
        return Unmanaged<ContinuationHolder>.fromOpaque(ptr).takeRetainedValue()
    }
}
// Register the Swift-side continuation callback with the Rust scaffolding,
// so Rust futures can wake suspended Swift tasks. Called during the
// initialization handshake.
fileprivate func uniffiInitContinuationCallback() {
    ffi_mobile_rust_future_continuation_callback_set(uniffiFutureContinuationCallback)
}
/// Perform the auth flow for `url` using `secretKey` via the Rust `auth`
/// function, returning whatever strings the Rust side produced.
///
/// NOTE(review): the call is wrapped in `try!`, so any error (or panic)
/// raised on the Rust side aborts the process instead of throwing —
/// confirm this is acceptable for an auth path fed external input.
/// `secretKey` is sensitive material; it is lowered into an FFI buffer
/// and consumed by Rust.
public func auth(url: String, secretKey: String) async -> [String] {
    return try! await uniffiRustCallAsync(
        rustFutureFunc: {
            uniffi_mobile_fn_func_auth(
                FfiConverterString.lower(url),
                FfiConverterString.lower(secretKey)
            )
        },
        pollFunc: ffi_mobile_rust_future_poll_rust_buffer,
        completeFunc: ffi_mobile_rust_future_complete_rust_buffer,
        freeFunc: ffi_mobile_rust_future_free_rust_buffer,
        liftFunc: FfiConverterSequenceString.lift,
        errorHandler: nil
    )
}
/// Synchronous example call into Rust; returns the strings the Rust side
/// produced. Any Rust panic aborts the process via `try!`.
public func myexample() -> [String] {
    let resultBuffer = try! rustCall { status in
        uniffi_mobile_fn_func_myexample(status)
    }
    return try! FfiConverterSequenceString.lift(resultBuffer)
}
// Outcome of the one-time handshake with the Rust dylib.
private enum InitializationResult {
    case ok
    case contractVersionMismatch  // bindings built against a different uniffi contract
    case apiChecksumMismatch      // an exported function's signature changed
}
// Use a global variable to perform the versioning checks. Swift initializes
// top-level globals lazily and exactly once (thread-safe), so the handshake
// below runs a single time per process.
//
// NOTE: this was previously declared as a *computed* property, which re-ran
// every check — and re-registered the continuation callback — on each call
// to `uniffiEnsureInitialized()`, contradicting the comment above. A stored
// global with a closure initializer restores run-once semantics.
private var initializationResult: InitializationResult = {
    // Get the bindings contract version from our ComponentInterface
    let bindings_contract_version = 24
    // Get the scaffolding contract version by calling into the dylib
    let scaffolding_contract_version = ffi_mobile_uniffi_contract_version()
    if bindings_contract_version != scaffolding_contract_version {
        return InitializationResult.contractVersionMismatch
    }
    // Verify each exported function's signature checksum against the dylib.
    if (uniffi_mobile_checksum_func_auth() != 55720) {
        return InitializationResult.apiChecksumMismatch
    }
    if (uniffi_mobile_checksum_func_myexample() != 65225) {
        return InitializationResult.apiChecksumMismatch
    }
    // Register the Swift continuation callback used by async calls.
    uniffiInitContinuationCallback()
    return InitializationResult.ok
}()
/// Abort the process with a diagnostic if the version/checksum handshake
/// with the Rust dylib failed; otherwise do nothing.
private func uniffiEnsureInitialized() {
    switch initializationResult {
    case .ok:
        return
    case .contractVersionMismatch:
        fatalError("UniFFI contract version mismatch: try cleaning and rebuilding your project")
    case .apiChecksumMismatch:
        fatalError("UniFFI API checksum mismatch: try cleaning and rebuilding your project")
    }
}

194
rust/bindings/mobileFFI.h Normal file
View File

@@ -0,0 +1,194 @@
// This file was autogenerated by some hot garbage in the `uniffi` crate.
// Trust me, you don't want to mess with it!
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
// The following structs are used to implement the lowest level
// of the FFI, and thus useful to multiple uniffied crates.
// We ensure they are declared exactly once, with a header guard, UNIFFI_SHARED_H.
#ifdef UNIFFI_SHARED_H
// We also try to prevent mixing versions of shared uniffi header structs.
// If you add anything to the #else block, you must increment the version suffix in UNIFFI_SHARED_HEADER_V4
#ifndef UNIFFI_SHARED_HEADER_V4
#error Combining helper code from multiple versions of uniffi is not supported
#endif // ndef UNIFFI_SHARED_HEADER_V4
#else
#define UNIFFI_SHARED_H
#define UNIFFI_SHARED_HEADER_V4
// ⚠️ Attention: If you change this #else block (ending in `#endif // def UNIFFI_SHARED_H`) you *must* ⚠️
// ⚠️ increment the version suffix in all instances of UNIFFI_SHARED_HEADER_V4 in this file. ⚠️
typedef struct RustBuffer
{
int32_t capacity;
int32_t len;
uint8_t *_Nullable data;
} RustBuffer;
typedef int32_t (*ForeignCallback)(uint64_t, int32_t, const uint8_t *_Nonnull, int32_t, RustBuffer *_Nonnull);
// Task defined in Rust that Swift executes
typedef void (*UniFfiRustTaskCallback)(const void * _Nullable, int8_t);
// Callback to execute Rust tasks using a Swift Task
//
// Args:
// executor: ForeignExecutor lowered into a size_t value
// delay: Delay in MS
// task: UniFfiRustTaskCallback to call
// task_data: data to pass the task callback
typedef int8_t (*UniFfiForeignExecutorCallback)(size_t, uint32_t, UniFfiRustTaskCallback _Nullable, const void * _Nullable);
typedef struct ForeignBytes
{
int32_t len;
const uint8_t *_Nullable data;
} ForeignBytes;
// Error definitions
typedef struct RustCallStatus {
int8_t code;
RustBuffer errorBuf;
} RustCallStatus;
// ⚠️ Attention: If you change this #else block (ending in `#endif // def UNIFFI_SHARED_H`) you *must* ⚠️
// ⚠️ increment the version suffix in all instances of UNIFFI_SHARED_HEADER_V4 in this file. ⚠️
#endif // def UNIFFI_SHARED_H
// Continuation callback for UniFFI Futures
typedef void (*UniFfiRustFutureContinuation)(void * _Nonnull, int8_t);
// Scaffolding functions
void* _Nonnull uniffi_mobile_fn_func_auth(RustBuffer url, RustBuffer secret_key
);
RustBuffer uniffi_mobile_fn_func_myexample(RustCallStatus *_Nonnull out_status
);
RustBuffer ffi_mobile_rustbuffer_alloc(int32_t size, RustCallStatus *_Nonnull out_status
);
RustBuffer ffi_mobile_rustbuffer_from_bytes(ForeignBytes bytes, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rustbuffer_free(RustBuffer buf, RustCallStatus *_Nonnull out_status
);
RustBuffer ffi_mobile_rustbuffer_reserve(RustBuffer buf, int32_t additional, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_continuation_callback_set(UniFfiRustFutureContinuation _Nonnull callback
);
void ffi_mobile_rust_future_poll_u8(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u8(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u8(void* _Nonnull handle
);
uint8_t ffi_mobile_rust_future_complete_u8(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i8(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i8(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i8(void* _Nonnull handle
);
int8_t ffi_mobile_rust_future_complete_i8(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_u16(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u16(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u16(void* _Nonnull handle
);
uint16_t ffi_mobile_rust_future_complete_u16(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i16(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i16(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i16(void* _Nonnull handle
);
int16_t ffi_mobile_rust_future_complete_i16(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_u32(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u32(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u32(void* _Nonnull handle
);
uint32_t ffi_mobile_rust_future_complete_u32(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i32(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i32(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i32(void* _Nonnull handle
);
int32_t ffi_mobile_rust_future_complete_i32(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_u64(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u64(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u64(void* _Nonnull handle
);
uint64_t ffi_mobile_rust_future_complete_u64(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i64(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i64(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i64(void* _Nonnull handle
);
int64_t ffi_mobile_rust_future_complete_i64(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_f32(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_f32(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_f32(void* _Nonnull handle
);
float ffi_mobile_rust_future_complete_f32(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_f64(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_f64(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_f64(void* _Nonnull handle
);
double ffi_mobile_rust_future_complete_f64(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_pointer(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_pointer(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_pointer(void* _Nonnull handle
);
void*_Nonnull ffi_mobile_rust_future_complete_pointer(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_rust_buffer(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_rust_buffer(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_rust_buffer(void* _Nonnull handle
);
RustBuffer ffi_mobile_rust_future_complete_rust_buffer(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_void(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_void(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_void(void* _Nonnull handle
);
void ffi_mobile_rust_future_complete_void(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
uint16_t uniffi_mobile_checksum_func_auth(void
);
uint16_t uniffi_mobile_checksum_func_myexample(void
);
uint32_t ffi_mobile_uniffi_contract_version(void
);

View File

@@ -0,0 +1,6 @@
// This file was autogenerated by some hot garbage in the `uniffi` crate.
// Trust me, you don't want to mess with it!
module mobileFFI {
header "mobileFFI.h"
export *
}

View File

@@ -0,0 +1,47 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>AvailableLibraries</key>
<array>
<dict>
<key>BinaryPath</key>
<string>libmobile.a</string>
<key>HeadersPath</key>
<string>Headers</string>
<key>LibraryIdentifier</key>
<string>ios-arm64</string>
<key>LibraryPath</key>
<string>libmobile.a</string>
<key>SupportedArchitectures</key>
<array>
<string>arm64</string>
</array>
<key>SupportedPlatform</key>
<string>ios</string>
</dict>
<dict>
<key>BinaryPath</key>
<string>libmobile.a</string>
<key>HeadersPath</key>
<string>Headers</string>
<key>LibraryIdentifier</key>
<string>ios-arm64-simulator</string>
<key>LibraryPath</key>
<string>libmobile.a</string>
<key>SupportedArchitectures</key>
<array>
<string>arm64</string>
</array>
<key>SupportedPlatform</key>
<string>ios</string>
<key>SupportedPlatformVariant</key>
<string>simulator</string>
</dict>
</array>
<key>CFBundlePackageType</key>
<string>XFWK</string>
<key>XCFrameworkFormatVersion</key>
<string>1.0</string>
</dict>
</plist>

View File

@@ -0,0 +1,194 @@
// This file was autogenerated by some hot garbage in the `uniffi` crate.
// Trust me, you don't want to mess with it!
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
// The following structs are used to implement the lowest level
// of the FFI, and thus useful to multiple uniffied crates.
// We ensure they are declared exactly once, with a header guard, UNIFFI_SHARED_H.
#ifdef UNIFFI_SHARED_H
// We also try to prevent mixing versions of shared uniffi header structs.
// If you add anything to the #else block, you must increment the version suffix in UNIFFI_SHARED_HEADER_V4
#ifndef UNIFFI_SHARED_HEADER_V4
#error Combining helper code from multiple versions of uniffi is not supported
#endif // ndef UNIFFI_SHARED_HEADER_V4
#else
#define UNIFFI_SHARED_H
#define UNIFFI_SHARED_HEADER_V4
// ⚠️ Attention: If you change this #else block (ending in `#endif // def UNIFFI_SHARED_H`) you *must* ⚠️
// ⚠️ increment the version suffix in all instances of UNIFFI_SHARED_HEADER_V4 in this file. ⚠️
typedef struct RustBuffer
{
int32_t capacity;
int32_t len;
uint8_t *_Nullable data;
} RustBuffer;
typedef int32_t (*ForeignCallback)(uint64_t, int32_t, const uint8_t *_Nonnull, int32_t, RustBuffer *_Nonnull);
// Task defined in Rust that Swift executes
typedef void (*UniFfiRustTaskCallback)(const void * _Nullable, int8_t);
// Callback to execute Rust tasks using a Swift Task
//
// Args:
// executor: ForeignExecutor lowered into a size_t value
// delay: Delay in MS
// task: UniFfiRustTaskCallback to call
// task_data: data to pass the task callback
typedef int8_t (*UniFfiForeignExecutorCallback)(size_t, uint32_t, UniFfiRustTaskCallback _Nullable, const void * _Nullable);
typedef struct ForeignBytes
{
int32_t len;
const uint8_t *_Nullable data;
} ForeignBytes;
// Error definitions
typedef struct RustCallStatus {
int8_t code;
RustBuffer errorBuf;
} RustCallStatus;
// ⚠️ Attention: If you change this #else block (ending in `#endif // def UNIFFI_SHARED_H`) you *must* ⚠️
// ⚠️ increment the version suffix in all instances of UNIFFI_SHARED_HEADER_V4 in this file. ⚠️
#endif // def UNIFFI_SHARED_H
// Continuation callback for UniFFI Futures
typedef void (*UniFfiRustFutureContinuation)(void * _Nonnull, int8_t);
// Scaffolding functions
void* _Nonnull uniffi_mobile_fn_func_auth(RustBuffer url, RustBuffer secret_key
);
RustBuffer uniffi_mobile_fn_func_myexample(RustCallStatus *_Nonnull out_status
);
RustBuffer ffi_mobile_rustbuffer_alloc(int32_t size, RustCallStatus *_Nonnull out_status
);
RustBuffer ffi_mobile_rustbuffer_from_bytes(ForeignBytes bytes, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rustbuffer_free(RustBuffer buf, RustCallStatus *_Nonnull out_status
);
RustBuffer ffi_mobile_rustbuffer_reserve(RustBuffer buf, int32_t additional, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_continuation_callback_set(UniFfiRustFutureContinuation _Nonnull callback
);
void ffi_mobile_rust_future_poll_u8(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u8(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u8(void* _Nonnull handle
);
uint8_t ffi_mobile_rust_future_complete_u8(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i8(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i8(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i8(void* _Nonnull handle
);
int8_t ffi_mobile_rust_future_complete_i8(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_u16(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u16(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u16(void* _Nonnull handle
);
uint16_t ffi_mobile_rust_future_complete_u16(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i16(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i16(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i16(void* _Nonnull handle
);
int16_t ffi_mobile_rust_future_complete_i16(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_u32(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u32(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u32(void* _Nonnull handle
);
uint32_t ffi_mobile_rust_future_complete_u32(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i32(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i32(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i32(void* _Nonnull handle
);
int32_t ffi_mobile_rust_future_complete_i32(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_u64(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u64(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u64(void* _Nonnull handle
);
uint64_t ffi_mobile_rust_future_complete_u64(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i64(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i64(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i64(void* _Nonnull handle
);
int64_t ffi_mobile_rust_future_complete_i64(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_f32(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_f32(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_f32(void* _Nonnull handle
);
float ffi_mobile_rust_future_complete_f32(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_f64(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_f64(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_f64(void* _Nonnull handle
);
double ffi_mobile_rust_future_complete_f64(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_pointer(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_pointer(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_pointer(void* _Nonnull handle
);
void*_Nonnull ffi_mobile_rust_future_complete_pointer(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_rust_buffer(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_rust_buffer(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_rust_buffer(void* _Nonnull handle
);
RustBuffer ffi_mobile_rust_future_complete_rust_buffer(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_void(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_void(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_void(void* _Nonnull handle
);
void ffi_mobile_rust_future_complete_void(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
uint16_t uniffi_mobile_checksum_func_auth(void
);
uint16_t uniffi_mobile_checksum_func_myexample(void
);
uint32_t ffi_mobile_uniffi_contract_version(void
);

View File

@@ -0,0 +1,6 @@
// This file was autogenerated by some hot garbage in the `uniffi` crate.
// Trust me, you don't want to mess with it!
module mobileFFI {
header "mobileFFI.h"
export *
}

View File

@@ -0,0 +1,194 @@
// This file was autogenerated by some hot garbage in the `uniffi` crate.
// Trust me, you don't want to mess with it!
#pragma once
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
// The following structs are used to implement the lowest level
// of the FFI, and thus useful to multiple uniffied crates.
// We ensure they are declared exactly once, with a header guard, UNIFFI_SHARED_H.
#ifdef UNIFFI_SHARED_H
// We also try to prevent mixing versions of shared uniffi header structs.
// If you add anything to the #else block, you must increment the version suffix in UNIFFI_SHARED_HEADER_V4
#ifndef UNIFFI_SHARED_HEADER_V4
#error Combining helper code from multiple versions of uniffi is not supported
#endif // ndef UNIFFI_SHARED_HEADER_V4
#else
#define UNIFFI_SHARED_H
#define UNIFFI_SHARED_HEADER_V4
// ⚠️ Attention: If you change this #else block (ending in `#endif // def UNIFFI_SHARED_H`) you *must* ⚠️
// ⚠️ increment the version suffix in all instances of UNIFFI_SHARED_HEADER_V4 in this file. ⚠️
typedef struct RustBuffer
{
int32_t capacity;
int32_t len;
uint8_t *_Nullable data;
} RustBuffer;
typedef int32_t (*ForeignCallback)(uint64_t, int32_t, const uint8_t *_Nonnull, int32_t, RustBuffer *_Nonnull);
// Task defined in Rust that Swift executes
typedef void (*UniFfiRustTaskCallback)(const void * _Nullable, int8_t);
// Callback to execute Rust tasks using a Swift Task
//
// Args:
// executor: ForeignExecutor lowered into a size_t value
// delay: Delay in MS
// task: UniFfiRustTaskCallback to call
// task_data: data to pass the task callback
typedef int8_t (*UniFfiForeignExecutorCallback)(size_t, uint32_t, UniFfiRustTaskCallback _Nullable, const void * _Nullable);
typedef struct ForeignBytes
{
int32_t len;
const uint8_t *_Nullable data;
} ForeignBytes;
// Error definitions
typedef struct RustCallStatus {
int8_t code;
RustBuffer errorBuf;
} RustCallStatus;
// ⚠️ Attention: If you change this #else block (ending in `#endif // def UNIFFI_SHARED_H`) you *must* ⚠️
// ⚠️ increment the version suffix in all instances of UNIFFI_SHARED_HEADER_V4 in this file. ⚠️
#endif // def UNIFFI_SHARED_H
// Continuation callback for UniFFI Futures
typedef void (*UniFfiRustFutureContinuation)(void * _Nonnull, int8_t);
// Scaffolding functions
void* _Nonnull uniffi_mobile_fn_func_auth(RustBuffer url, RustBuffer secret_key
);
RustBuffer uniffi_mobile_fn_func_myexample(RustCallStatus *_Nonnull out_status
);
RustBuffer ffi_mobile_rustbuffer_alloc(int32_t size, RustCallStatus *_Nonnull out_status
);
RustBuffer ffi_mobile_rustbuffer_from_bytes(ForeignBytes bytes, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rustbuffer_free(RustBuffer buf, RustCallStatus *_Nonnull out_status
);
RustBuffer ffi_mobile_rustbuffer_reserve(RustBuffer buf, int32_t additional, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_continuation_callback_set(UniFfiRustFutureContinuation _Nonnull callback
);
void ffi_mobile_rust_future_poll_u8(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u8(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u8(void* _Nonnull handle
);
uint8_t ffi_mobile_rust_future_complete_u8(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i8(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i8(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i8(void* _Nonnull handle
);
int8_t ffi_mobile_rust_future_complete_i8(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_u16(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u16(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u16(void* _Nonnull handle
);
uint16_t ffi_mobile_rust_future_complete_u16(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i16(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i16(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i16(void* _Nonnull handle
);
int16_t ffi_mobile_rust_future_complete_i16(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_u32(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u32(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u32(void* _Nonnull handle
);
uint32_t ffi_mobile_rust_future_complete_u32(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i32(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i32(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i32(void* _Nonnull handle
);
int32_t ffi_mobile_rust_future_complete_i32(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_u64(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_u64(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_u64(void* _Nonnull handle
);
uint64_t ffi_mobile_rust_future_complete_u64(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_i64(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_i64(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_i64(void* _Nonnull handle
);
int64_t ffi_mobile_rust_future_complete_i64(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_f32(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_f32(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_f32(void* _Nonnull handle
);
float ffi_mobile_rust_future_complete_f32(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_f64(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_f64(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_f64(void* _Nonnull handle
);
double ffi_mobile_rust_future_complete_f64(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_pointer(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_pointer(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_pointer(void* _Nonnull handle
);
void*_Nonnull ffi_mobile_rust_future_complete_pointer(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_rust_buffer(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_rust_buffer(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_rust_buffer(void* _Nonnull handle
);
RustBuffer ffi_mobile_rust_future_complete_rust_buffer(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
void ffi_mobile_rust_future_poll_void(void* _Nonnull handle, void* _Nonnull uniffi_callback
);
void ffi_mobile_rust_future_cancel_void(void* _Nonnull handle
);
void ffi_mobile_rust_future_free_void(void* _Nonnull handle
);
void ffi_mobile_rust_future_complete_void(void* _Nonnull handle, RustCallStatus *_Nonnull out_status
);
uint16_t uniffi_mobile_checksum_func_auth(void
);
uint16_t uniffi_mobile_checksum_func_myexample(void
);
uint32_t ffi_mobile_uniffi_contract_version(void
);

View File

@@ -0,0 +1,6 @@
// This file was autogenerated by some hot garbage in the `uniffi` crate.
// Trust me, you don't want to mess with it!
module mobileFFI {
header "mobileFFI.h"
export *
}

Binary file not shown.

2811
rust/pubky/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

18
rust/pubky/Cargo.toml Normal file
View File

@@ -0,0 +1,18 @@
[workspace]
members = [
"pubky",
"pubky-*",
"examples/authz/authenticator"
]
# See: https://github.com/rust-lang/rust/issues/90148#issuecomment-949194352
resolver = "2"
[workspace.dependencies]
pkarr = { git = "https://github.com/Pubky/pkarr", branch = "v3", package = "pkarr", features = ["async"] }
serde = { version = "^1.0.209", features = ["derive"] }
[profile.release]
lto = true
opt-level = 'z'

21
rust/pubky/LICENSE Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2023
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

8
rust/pubky/README.md Normal file
View File

@@ -0,0 +1,8 @@
# Pubky
> The Web, long centralized, must decentralize; Long decentralized, must centralize.
> [!WARNING]
> Pubky is still under heavy development and should be considered alpha software.
>
> Features might be added, removed, or changed. Data might be lost.

View File

@@ -0,0 +1,24 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?

View File

@@ -0,0 +1,26 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/pubky.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Pubky Auth Demo</title>
<link rel="stylesheet" href="./src/index.css" />
<script type="module">
import "@synonymdev/pubky"
</script>
<script type="module" src="/src/pubky-auth-widget.js"></script>
</head>
<body>
<pubky-auth-widget
relay="https://demo.httprelay.io/link/"
caps="/pub/pubky.app/:rw,/pub/example.com/nested:rw"
>
</pubky-auth-widget>
<main>
<h1>Third Party app!</h1>
<p>this is a demo for using Pubky Auth in an unhosted (no backend) app.</p>
</main>
</body>
</html>

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,20 @@
{
"name": "pubky-auth-3rd-party",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"start": "npm run dev",
"dev": "vite --host --open",
"build": "vite build",
"preview": "vite preview"
},
"dependencies": {
"@synonymdev/pubky": "file:../../../pubky/pkg",
"lit": "^3.2.0",
"qrcode": "^1.5.4"
},
"devDependencies": {
"vite": "^5.4.2"
}
}

View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" version="1.2" viewBox="0 0 1511 1511" width="1511" height="1511"><style>.a{fill:#fff}</style><path d="m269 0h973c148.6 0 269 120.4 269 269v973c0 148.6-120.4 269-269 269h-973c-148.6 0-269-120.4-269-269v-973c0-148.6 120.4-269 269-269z"/><path fill-rule="evenodd" class="a" d="m630.1 1064.3l14.9-59.6c50.5-27.7 90.8-71.7 113.7-124.8-47.3 51.2-115.2 83.3-190.5 83.3-51.9 0-100.1-15.1-140.4-41.2l-39.8 142.3c0 0-199.3 0-200 0l162.4-619.3h210.5l-0.1 0.1q3.7-0.1 7.4-0.1c77.6 0 147.2 34 194.7 88l22-88h201.9l-46.9 180.8 183.7-180.8h248.8l-322.8 332.6 223.9 286.7h-290.8l-116.6-154.6-40.3 154.6c0 0-195 0-195.7 0z"/></svg>

After

Width:  |  Height:  |  Size: 655 B

View File

@@ -0,0 +1,48 @@
:root {
font-family: system-ui, -apple-system, BlinkMacSystemFont, 'Segoe UI',
Roboto, Oxygen, Ubuntu, Cantarell, 'Open Sans', 'Helvetica Neue', sans-serif;
color: white;
background: radial-gradient(
circle,
transparent 20%,
#151718 20%,
#151718 80%,
transparent 80%,
transparent
),
radial-gradient(
circle,
transparent 20%,
#151718 20%,
#151718 80%,
transparent 80%,
transparent
)
25px 25px,
linear-gradient(#202020 1px, transparent 2px) 0 -1px,
linear-gradient(90deg, #202020 1px, #151718 2px) -1px 0;
background-size: 50px 50px, 50px 50px, 25px 25px, 25px 25px;
}
body {
margin: 0;
display: flex;
place-items: center;
min-width: 20rem;
min-height: 100vh;
font-family: var(--font-family);
}
h1 {
font-weight: bold;
font-size: 3.2rem;
line-height: 1.1;
}
main {
max-width: 80rem;
margin: 0 auto;
padding: 2rem;
text-align: center;
}

View File

@@ -0,0 +1,336 @@
import { LitElement, css, html } from 'lit'
import { createRef, ref } from 'lit/directives/ref.js';
import QRCode from 'qrcode'
const DEFAULT_HTTP_RELAY = "https://demo.httprelay.io/link"
/**
*/
/**
 * `<pubky-auth-widget>`: a floating widget that requests a Pubky Auth URL,
 * renders it as a QR code and copyable text, and reports the resulting
 * session once the user's authenticator approves the request.
 *
 * Requires `window.pubky` (from `@synonymdev/pubky`) to be loaded first.
 */
export class PubkyAuthWidget extends LitElement {
  static get properties() {
    return {
      /**
       * Relay endpoint for the widget to receive Pubky AuthTokens
       *
       * Internally, a random channel ID will be generated and a
       * GET request made for `${relay url}/${channelID}`
       *
       * If no relay is passed, the widget will use a default relay:
       * https://demo.httprelay.io/link
       */
      relay: { type: String },
      /**
       * Capabilities requested for this application, encoded as a string.
       */
      caps: { type: String },
      /**
       * Widget's state (open or closed)
       */
      open: { type: Boolean },
      /**
       * Show "copied to clipboard" note
       */
      showCopied: { type: Boolean },
    }
  }

  canvasRef = createRef();

  constructor() {
    // Fail fast (before any Lit setup) if the pubky bundle isn't loaded;
    // no `this` access happens before super(), so this is legal.
    if (!window.pubky) {
      throw new Error("window.pubky is unavailable, make sure to import `@synonymdev/pubky` before this web component.")
    }
    super()

    this.open = false;

    // TODO: allow using mainnet
    /** @type {import("@synonymdev/pubky").PubkyClient} */
    this.pubkyClient = window.pubky.PubkyClient.testnet();
  }

  connectedCallback() {
    super.connectedCallback()

    // Kick off the auth request as soon as the element is attached;
    // the returned promise resolves when the authenticator approves.
    let [url, promise] = this.pubkyClient.authRequest(this.relay || DEFAULT_HTTP_RELAY, this.caps);

    promise.then(session => {
      console.log({ id: session.pubky().z32(), capabilities: session.capabilities() })
      alert(`Successfully signed in to ${session.pubky().z32()} with capabilities: ${session.capabilities().join(",")}`)
    }).catch(e => {
      console.error(e)
    })

    // let keypair = pubky.Keypair.random();
    // const Homeserver = pubky.PublicKey.from('8pinxxgqs41n4aididenw5apqp1urfmzdztr8jt4abrkdn435ewo')
    // this.pubkyClient.signup(keypair, Homeserver).then(() => {
    //   this.pubkyClient.sendAuthToken(keypair, url)
    // })

    this.authUrl = url
  }

  // NOTE: the original file defined `render()` twice with identical bodies;
  // the second silently shadowed the first. Only one copy is kept.
  render() {
    return html`
      <div
        id="widget"
        class=${this.open ? "open" : ""}
      >
        <button class="header" @click=${this._switchOpen}>
          <svg id="pubky-icon" version="1.2" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 1511 1511"><path fill-rule="evenodd" d="m636.3 1066.7 14.9-59.7c50.5-27.7 90.8-71.7 113.7-124.9-47.3 51.3-115.2 83.4-190.6 83.4-51.9 0-100.1-15.1-140.5-41.2L394 1066.7H193.9L356.4 447H567l-.1.1q3.7-.1 7.4-.1c77.7 0 147.3 34 194.8 88l22-88h202.1l-47 180.9L1130 447h249l-323 332.8 224 286.9H989L872.4 912l-40.3 154.7H636.3z" style="fill:#fff"/></svg>
          <span class="text">
            Pubky Auth
          </span>
        </button>
        <div class="line"></div>
        <div id="widget-content">
          <p>Scan or copy Pubky auth URL</p>
          <div class="card">
            <canvas id="qr" ${ref(this._setQr)}></canvas>
          </div>
          <button class="card url" @click=${this._copyToClipboard}>
            <div class="copied ${this.showCopied ? "show" : ""}">Copied to Clipboard</div>
            <p>${this.authUrl}</p>
            <svg width="14" height="16" viewBox="0 0 14 16" fill="none" xmlns="http://www.w3.org/2000/svg"><rect width="10" height="12" rx="2" fill="white"></rect><rect x="3" y="3" width="10" height="12" rx="2" fill="white" stroke="#3B3B3B"></rect></svg>
          </button>
        </div>
      </div>
    `
  }

  /** Draw the auth URL into the QR canvas. */
  _setQr(canvas) {
    QRCode.toCanvas(canvas, this.authUrl, {
      margin: 2,
      scale: 8,
      color: {
        light: '#fff',
        dark: '#000',
      },
    });
  }

  /** Toggle between the collapsed icon and the full widget. */
  _switchOpen() {
    this.open = !this.open
  }

  /** Copy the auth URL and flash the "copied" note for one second. */
  async _copyToClipboard() {
    try {
      await navigator.clipboard.writeText(this.authUrl);

      this.showCopied = true;
      setTimeout(() => { this.showCopied = false }, 1000)
    } catch (error) {
      console.error('Failed to copy text: ', error);
    }
  }

  static get styles() {
    return css`
      * {
        box-sizing: border-box;
      }
      :host {
        --full-width: 22rem;
        --full-height: 31rem;
        --header-height: 3rem;
        --closed-width: 3rem;
      }
      a {
        text-decoration: none;
      }
      button {
        padding: 0;
        background: none;
        border: none;
        color: inherit;
        cursor: pointer;
      }
      p {
        margin: 0;
      }
      /** End reset */

      /* NOTE: removed leftover debug rule "background-color:red;" — it was
         dead code, reset by the "background:" shorthand below. */
      #widget {
        color: white;
        position: fixed;
        top: 1rem;
        right: 1rem;
        z-index: 99999;
        overflow: hidden;
        background: rgba(43, 43, 43, .7372549019607844);
        border: 1px solid #3c3c3c;
        box-shadow: 0 10px 34px -10px rgba(236, 243, 222, .05);
        border-radius: 8px;
        -webkit-backdrop-filter: blur(8px);
        backdrop-filter: blur(8px);
        width: var(--closed-width);
        height: var(--header-height);
        will-change: height,width;
        transition-property: height, width;
        transition-duration: 80ms;
        transition-timing-function: ease-in;
      }
      #widget.open{
        width: var(--full-width);
        height: var(--full-height);
      }
      .header {
        height: var(--header-height);
        display: flex;
        justify-content: center;
        align-items: center;
      }
      #widget
      .header .text {
        display: none;
        font-weight: bold;
      }
      #widget.open
      .header .text {
        display: block
      }
      #widget.open
      .header {
        width: var(--full-width);
        justify-content: center;
      }
      #pubky-icon {
        height: 100%;
        width: 100%;
      }
      #widget.open
      #pubky-icon {
        width: var(--header-height);
        height: 74%;
      }
      #widget-content{
        width: var(--full-width);
        padding: 0 1rem
      }
      #widget p {
        font-size: .87rem;
        line-height: 1rem;
        text-align: center;
        color: #fff;
        opacity: .5;
        /* Fix flash wrap in open animation */
        text-wrap: nowrap;
      }
      #qr {
        width: 18em !important;
        height: 18em !important;
      }
      .card {
        position: relative;
        background: #3b3b3b;
        border-radius: 5px;
        padding: 1rem;
        margin-top: 1rem;
        display: flex;
        justify-content: center;
        align-items: center;
      }
      .card.url {
        padding: .625rem;
        justify-content: space-between;
        max-width:100%;
      }
      .url p {
        display: flex;
        align-items: center;
        line-height: 1!important;
        width: 93%;
        overflow: hidden;
        text-overflow: ellipsis;
        text-wrap: nowrap;
      }
      .line {
        height: 1px;
        background-color: #3b3b3b;
        flex: 1 1;
        margin-bottom: 1rem;
      }
      .copied {
        will-change: opacity;
        transition-property: opacity;
        transition-duration: 80ms;
        transition-timing-function: ease-in;
        opacity: 0;
        position: absolute;
        right: 0;
        top: -1.6rem;
        font-size: 0.9em;
        background: rgb(43 43 43 / 98%);
        padding: .5rem;
        border-radius: .3rem;
        color: #ddd;
      }
      .copied.show {
        opacity:1
      }
    `
  }
}
window.customElements.define('pubky-auth-widget', PubkyAuthWidget)

View File

@@ -0,0 +1,29 @@
# Pubky Auth Example
This example shows 3rd party authorization in Pubky.
It consists of 2 parts:
1. [3rd party app](./3rd-party-app): A web component showing how to implement a Pubky Auth widget.
2. [Authenticator CLI](./authenticator): A CLI showing the authenticator (key chain) asking user for consent and generating the AuthToken.
## Usage
First you need to be running a local testnet Homeserver, in the root of this repo run
```bash
cargo run --bin pubky_homeserver -- --testnet
```
Run the frontend of the 3rd party app
```bash
cd ./3rd-party-app
npm start
```
Copy the Pubky Auth URL from the frontend.
Finally run the CLI to paste the Pubky Auth in.
You should see the frontend reacting by showing the success of authorization and session details.

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,14 @@
[package]
name = "authenticator"
version = "0.1.0"
edition = "2021"
[dependencies]
anyhow = "1.0.86"
base64 = "0.22.1"
clap = { version = "4.5.16", features = ["derive"] }
pubky = { version = "0.1.0", path = "../../../pubky" }
pubky-common = { version = "0.1.0", path = "../../../pubky-common" }
rpassword = "7.3.1"
tokio = { version = "1.40.0", features = ["macros", "rt-multi-thread"] }
url = "2.5.2"

View File

@@ -0,0 +1,80 @@
use anyhow::Result;
use clap::Parser;
use pubky::PubkyClient;
use std::path::PathBuf;
use url::Url;
use pubky_common::{capabilities::Capability, crypto::PublicKey};
/// local testnet HOMESERVER
const HOMESERVER: &str = "8pinxxgqs41n4aididenw5apqp1urfmzdztr8jt4abrkdn435ewo";
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Cli {
/// Path to a recovery_file of the Pubky you want to sign in with
recovery_file: PathBuf,
/// Pubky Auth url
url: Url,
}
#[tokio::main]
/// Authenticator CLI: decrypts a recovery file into a keypair, makes sure the
/// pubky has an account on the local testnet homeserver, and sends an
/// AuthToken to the 3rd-party app via the URL's relay.
async fn main() -> Result<()> {
    let cli = Cli::parse();

    let recovery_file = std::fs::read(&cli.recovery_file)?;
    println!("\nSuccessfully opened recovery file");
    let url = cli.url;

    // Extract requested capabilities from the `caps` query parameter:
    // a comma-separated list of `<scope>:<actions>`; unparsable entries
    // are silently dropped, and a missing parameter yields an empty list.
    let caps = url
        .query_pairs()
        .filter_map(|(key, value)| {
            if key == "caps" {
                return Some(
                    value
                        .split(',')
                        .filter_map(|cap| Capability::try_from(cap).ok())
                        .collect::<Vec<_>>(),
                );
            };
            None
        })
        .next()
        .unwrap_or_default();

    if !caps.is_empty() {
        println!("\nRequired Capabilities:");
    }
    for cap in &caps {
        println!(" {} : {:?}", cap.scope, cap.actions);
    }

    // === Consent form ===
    println!("\nEnter your recovery_file's passphrase to confirm:");
    let passphrase = rpassword::read_password()?;

    let keypair = pubky_common::recovery_file::decrypt_recovery_file(&recovery_file, &passphrase)?;
    println!("Successfully decrypted recovery file...");
    println!("PublicKey: {}", keypair.public_key());

    let client = PubkyClient::testnet();

    // For the purposes of this demo, we need to make sure
    // the user has an account on the local homeserver.
    if client.signin(&keypair).await.is_err() {
        client
            .signup(&keypair, &PublicKey::try_from(HOMESERVER).unwrap())
            .await?;
    };

    println!("Sending AuthToken to the 3rd party app...");
    client.send_auth_token(&keypair, url).await?;

    Ok(())
}

View File

@@ -0,0 +1,33 @@
[package]
name = "pubky-common"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
base32 = "0.5.0"
blake3 = "1.5.1"
ed25519-dalek = "2.1.1"
once_cell = "1.19.0"
pkarr = { workspace = true }
rand = "0.8.5"
thiserror = "1.0.60"
postcard = { version = "1.0.8", features = ["alloc"] }
crypto_secretbox = { version = "0.1.1", features = ["std"] }
argon2 = { version = "0.5.3", features = ["std"] }
serde = { workspace = true, optional = true }
[target.'cfg(target_arch = "wasm32")'.dependencies]
js-sys = "0.3.69"
[dev-dependencies]
postcard = "1.0.8"
[features]
serde = ["dep:serde", "ed25519-dalek/serde", "pkarr/serde"]
full = ['serde']
default = ['full']

View File

@@ -0,0 +1,279 @@
//! Client-server Authentication using signed timesteps
use std::sync::{Arc, Mutex};
use serde::{Deserialize, Serialize};
use crate::{
capabilities::{Capabilities, Capability},
crypto::{Keypair, PublicKey, Signature},
namespaces::PUBKY_AUTH,
timestamp::Timestamp,
};
// 30 seconds
const TIME_INTERVAL: u64 = 30 * 1_000_000;
const CURRENT_VERSION: u8 = 0;
// 45 seconds in the past or the future
const TIMESTAMP_WINDOW: i64 = 45 * 1_000_000;
#[derive(Debug, PartialEq, Serialize, Deserialize)]
/// A signed, time-limited token granting [Capabilities] over a pubky's
/// resources on its homeserver.
pub struct AuthToken {
    /// Signature over the token.
    signature: Signature,
    /// A namespace to ensure this signature can't be used for any
    /// other purposes that share the same message structure by accident.
    namespace: [u8; 10],
    /// Version of the [AuthToken], in case we need to upgrade it to support unforeseen use cases.
    ///
    /// Version 0:
    /// - Signer is implicitly the same as the root keypair for
    ///   the [AuthToken::pubky], without any delegation.
    /// - Capabilities are only meant for resources on the homeserver.
    version: u8,
    /// Timestamp at signing time; used for the replay/expiry window in `verify`.
    timestamp: Timestamp,
    /// The [PublicKey] of the owner of the resources being accessed by this token.
    pubky: PublicKey,
    // Variable length capabilities (kept last so they can omit a length prefix).
    capabilities: Capabilities,
}
impl AuthToken {
    /// Sign a new version-0 token over the given capabilities with `keypair`.
    ///
    /// The signature covers the serialized token minus the signature field
    /// itself (see [AuthToken::signable]).
    pub fn sign(keypair: &Keypair, capabilities: impl Into<Capabilities>) -> Self {
        let timestamp = Timestamp::now();

        // Placeholder all-zero signature; replaced after serialization below.
        let mut token = Self {
            signature: Signature::from_bytes(&[0; 64]),
            namespace: *PUBKY_AUTH,
            version: 0,
            timestamp,
            pubky: keypair.public_key(),
            capabilities: capabilities.into(),
        };

        let serialized = token.serialize();

        token.signature = keypair.sign(&serialized[65..]);

        token
    }

    /// The capabilities this token grants.
    pub fn capabilities(&self) -> &[Capability] {
        &self.capabilities.0
    }

    /// Deserialize and validate a token: version, timestamp window, signature.
    ///
    /// NOTE(review): indexes fixed offsets (`bytes[75]`, and `75..115` /
    /// `65..` in `id`/`signable`) without a length check, so short input
    /// panics — confirm callers guard the input length.
    pub fn verify(bytes: &[u8]) -> Result<Self, Error> {
        // NOTE(review): this reads `bytes[75]` as the version field, yet the
        // `v0_id_signable` test pins bytes[75..] as timestamp+pubky. For
        // current-era timestamps the leading byte is 0, which would mask an
        // off-by-one here — confirm the intended postcard layout.
        if bytes[75] > CURRENT_VERSION {
            return Err(Error::UnknownVersion);
        }

        let token = AuthToken::deserialize(bytes)?;

        match token.version {
            0 => {
                let now = Timestamp::now();

                // Check timestamp: reject tokens outside +/- TIMESTAMP_WINDOW.
                let diff = token.timestamp.difference(&now);
                if diff > TIMESTAMP_WINDOW {
                    return Err(Error::TooFarInTheFuture);
                }
                if diff < -TIMESTAMP_WINDOW {
                    return Err(Error::Expired);
                }

                token
                    .pubky
                    .verify(AuthToken::signable(token.version, bytes), &token.signature)
                    .map_err(|_| Error::InvalidSignature)?;

                Ok(token)
            }
            _ => unreachable!(),
        }
    }

    /// Serialize with postcard. Infallible for this struct.
    pub fn serialize(&self) -> Vec<u8> {
        postcard::to_allocvec(self).unwrap()
    }

    /// Deserialize with postcard, mapping failures to [Error::Postcard].
    pub fn deserialize(bytes: &[u8]) -> Result<Self, Error> {
        Ok(postcard::from_bytes(bytes)?)
    }

    /// The public key of the resource owner this token grants access to.
    pub fn pubky(&self) -> &PublicKey {
        &self.pubky
    }

    /// A unique ID for this [AuthToken], which is a concatenation of
    /// [AuthToken::pubky] and [AuthToken::timestamp].
    ///
    /// Assuming that [AuthToken::timestamp] is unique for every [AuthToken::pubky].
    fn id(version: u8, bytes: &[u8]) -> Box<[u8]> {
        match version {
            // bytes[75..115]: 8-byte timestamp followed by 32-byte public key
            // (pinned by the `v0_id_signable` test).
            0 => bytes[75..115].into(),
            _ => unreachable!(),
        }
    }

    /// The portion of the serialized token covered by the signature:
    /// everything after the signature field.
    fn signable(version: u8, bytes: &[u8]) -> &[u8] {
        match version {
            0 => bytes[65..].into(),
            _ => unreachable!(),
        }
    }
}
#[derive(Debug, Clone, Default)]
/// Keeps track of used AuthTokens until they expire, to prevent replay.
pub struct AuthVerifier {
    // Sorted list of seen token IDs (timestamp bytes then pubky bytes),
    // shared across clones of this verifier.
    seen: Arc<Mutex<Vec<Box<[u8]>>>>,
}

impl AuthVerifier {
    /// Verify a serialized [AuthToken] (signature + timestamp window), then
    /// reject it if this verifier has already seen its ID.
    pub fn verify(&self, bytes: &[u8]) -> Result<AuthToken, Error> {
        self.gc();

        let token = AuthToken::verify(bytes)?;

        let mut seen = self.seen.lock().unwrap();

        let id = AuthToken::id(token.version, bytes);

        // `seen` is kept sorted, so one binary search both detects replays
        // and gives the insertion point for new IDs.
        match seen.binary_search_by(|element| element.cmp(&id)) {
            Ok(_) => Err(Error::AlreadyUsed),
            Err(index) => {
                seen.insert(index, id);
                Ok(token)
            }
        }
    }

    // === Private Methods ===

    /// Remove all tokens older than two time intervals in the past.
    fn gc(&self) {
        // IDs start with the big-endian timestamp, so comparing the first 8
        // bytes locates the expiry cutoff.
        //
        // NOTE(review): `threshold` is `now / TIME_INTERVAL - 2`, but
        // `element[0..8]` looks like a raw (undivided) timestamp — confirm
        // the units match, otherwise this gc may never drop anything.
        let threshold = ((Timestamp::now().into_inner() / TIME_INTERVAL) - 2).to_be_bytes();

        let mut inner = self.seen.lock().unwrap();

        match inner.binary_search_by(|element| element[0..8].cmp(&threshold)) {
            Ok(index) | Err(index) => {
                inner.drain(0..index);
            }
        }
    }
}
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
/// Reasons an [AuthToken] fails verification.
pub enum Error {
    #[error("Unknown version")]
    UnknownVersion,
    #[error("AuthToken has a timestamp that is more than 45 seconds in the future")]
    TooFarInTheFuture,
    #[error("AuthToken has a timestamp that is more than 45 seconds in the past")]
    Expired,
    #[error("Invalid Signature")]
    InvalidSignature,
    /// Malformed postcard payload.
    #[error(transparent)]
    Postcard(#[from] postcard::Error),
    #[error("AuthToken already used")]
    AlreadyUsed,
}
#[cfg(test)]
mod tests {
    use crate::{
        auth::TIMESTAMP_WINDOW, capabilities::Capability, crypto::Keypair, timestamp::Timestamp,
    };

    use super::*;

    /// Pins the version-0 byte offsets used by `AuthToken::id` (timestamp ++
    /// pubky at 75..115) and `AuthToken::signable` (everything from 65).
    #[test]
    fn v0_id_signable() {
        let signer = Keypair::random();
        let capabilities = vec![Capability::root()];

        let token = AuthToken::sign(&signer, capabilities.clone());

        let serialized = &token.serialize();

        let mut id = vec![];
        id.extend_from_slice(&token.timestamp.to_bytes());
        id.extend_from_slice(signer.public_key().as_bytes());

        assert_eq!(AuthToken::id(token.version, serialized), id.into());

        assert_eq!(
            AuthToken::signable(token.version, serialized),
            &serialized[65..]
        )
    }

    /// Round-trip: a freshly signed token verifies and keeps its capabilities.
    #[test]
    fn sign_verify() {
        let signer = Keypair::random();
        let capabilities = vec![Capability::root()];

        let verifier = AuthVerifier::default();

        let token = AuthToken::sign(&signer, capabilities.clone());
        let serialized = &token.serialize();

        verifier.verify(serialized).unwrap();

        assert_eq!(token.capabilities, capabilities.into());
    }

    /// A token timestamped a full window in the past is rejected as expired.
    #[test]
    fn expired() {
        let signer = Keypair::random();
        let capabilities = Capabilities(vec![Capability::root()]);

        let verifier = AuthVerifier::default();

        let timestamp = (&Timestamp::now()) - (TIMESTAMP_WINDOW as u64);

        // Build a token manually so we can backdate its timestamp.
        let mut signable = vec![];
        signable.extend_from_slice(signer.public_key().as_bytes());
        signable.extend_from_slice(&postcard::to_allocvec(&capabilities).unwrap());

        let signature = signer.sign(&signable);

        let token = AuthToken {
            signature,
            namespace: *PUBKY_AUTH,
            version: 0,
            timestamp,
            pubky: signer.public_key(),
            capabilities,
        };

        let serialized = token.serialize();

        let result = verifier.verify(&serialized);

        assert_eq!(result, Err(Error::Expired));
    }

    /// The same serialized token cannot be verified twice (replay protection).
    #[test]
    fn already_used() {
        let signer = Keypair::random();
        let capabilities = vec![Capability::root()];

        let verifier = AuthVerifier::default();

        let token = AuthToken::sign(&signer, capabilities.clone());
        let serialized = &token.serialize();

        verifier.verify(serialized).unwrap();

        assert_eq!(token.capabilities, capabilities.into());

        assert_eq!(verifier.verify(serialized), Err(Error::AlreadyUsed));
    }
}

View File

@@ -0,0 +1,237 @@
use std::fmt::Display;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, PartialEq, Eq)]
/// A scope (path starting with `/`) and the [Action]s permitted on it.
pub struct Capability {
    /// Path this capability applies to; must start with `/` when parsed.
    pub scope: String,
    /// Actions permitted on the scope.
    pub actions: Vec<Action>,
}
impl Capability {
    /// Create a root [Capability] at the `/` path with all the available [Action]s
    pub fn root() -> Self {
        Capability {
            scope: "/".to_string(),
            actions: vec![Action::Read, Action::Write],
        }
    }
}
#[derive(Debug, Clone, PartialEq, Eq)]
/// A single permission that a [Capability] grants on its scope.
pub enum Action {
    /// Can read the scope at the specified path (GET requests).
    Read,
    /// Can write to the scope at the specified path (PUT/POST/DELETE requests).
    Write,
    /// Unknown ability
    Unknown(char),
}
impl From<&Action> for char {
fn from(value: &Action) -> Self {
match value {
Action::Read => 'r',
Action::Write => 'w',
Action::Unknown(char) => char.to_owned(),
}
}
}
impl TryFrom<char> for Action {
type Error = Error;
fn try_from(value: char) -> Result<Self, Error> {
match value {
'r' => Ok(Self::Read),
'w' => Ok(Self::Write),
_ => Err(Error::InvalidAction),
}
}
}
impl Display for Capability {
    /// Render as `<scope>:<actions>`, e.g. `/pub/pubky.app/:rw`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut encoded_actions = String::with_capacity(self.actions.len());
        for action in &self.actions {
            encoded_actions.push(char::from(action));
        }

        write!(f, "{}:{}", self.scope, encoded_actions)
    }
}
impl TryFrom<String> for Capability {
type Error = Error;
fn try_from(value: String) -> Result<Self, Error> {
value.as_str().try_into()
}
}
impl TryFrom<&str> for Capability {
    type Error = Error;

    /// Parse `<scope>:<actions>`, e.g. `/pub/pubky.app/:rw`.
    ///
    /// Exactly one `:` is allowed, so scopes may not contain `:` themselves.
    fn try_from(value: &str) -> Result<Self, Error> {
        if value.matches(':').count() != 1 {
            return Err(Error::InvalidFormat);
        }

        if !value.starts_with('/') {
            return Err(Error::InvalidScope);
        }

        // Everything after the single `:` is the actions string.
        let actions_str = value.rsplit(':').next().unwrap_or("");

        let mut actions = Vec::new();

        // Insert actions in sorted order, skipping duplicates, so the
        // canonical rendering is deterministic.
        for char in actions_str.chars() {
            let ability = Action::try_from(char)?;

            match actions.binary_search_by(|element| char::from(element).cmp(&char)) {
                Ok(_) => {}
                Err(index) => {
                    actions.insert(index, ability);
                }
            }
        }

        // Strip the trailing `:<actions>` to recover the scope.
        let scope = value[0..value.len() - actions_str.len() - 1].to_string();

        Ok(Capability { scope, actions })
    }
}
}
impl Serialize for Capability {
    /// Serialize as the canonical `<scope>:<actions>` string.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let string = self.to_string();
        string.serialize(serializer)
    }
}

impl<'de> Deserialize<'de> for Capability {
    /// Deserialize from the canonical string form; malformed input becomes
    /// a custom serde error.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let string: String = Deserialize::deserialize(deserializer)?;

        string.try_into().map_err(serde::de::Error::custom)
    }
}
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
/// Parsing errors for [Capability] and [Capabilities].
pub enum Error {
    #[error("Capability: Invalid scope: does not start with `/`")]
    InvalidScope,
    #[error("Capability: Invalid format should be <scope>:<abilities>")]
    InvalidFormat,
    #[error("Capability: Invalid Action")]
    InvalidAction,
    #[error("Capabilities: Invalid capabilities format")]
    InvalidCapabilities,
}
#[derive(Clone, Default, Debug, PartialEq, Eq)]
/// A wrapper around `Vec<Capability>` to enable serialization without
/// a varint. Useful when [Capabilities] are at the end of a struct.
pub struct Capabilities(pub Vec<Capability>);

impl Capabilities {
    /// Returns true if the set contains an exactly matching capability.
    pub fn contains(&self, capability: &Capability) -> bool {
        self.0.contains(capability)
    }
}
impl From<Vec<Capability>> for Capabilities {
    /// Wrap a list of capabilities without validation.
    fn from(value: Vec<Capability>) -> Self {
        Self(value)
    }
}

impl From<Capabilities> for Vec<Capability> {
    /// Unwrap into the inner list.
    fn from(value: Capabilities) -> Self {
        value.0
    }
}
impl TryFrom<&str> for Capabilities {
    type Error = Error;

    /// Parse a comma-separated list of capability strings.
    ///
    /// Entries that fail to parse are silently skipped, so this never
    /// actually returns an error.
    fn try_from(value: &str) -> Result<Self, Self::Error> {
        let parsed = value
            .split(',')
            .filter_map(|entry| Capability::try_from(entry).ok())
            .collect();

        Ok(Capabilities(parsed))
    }
}
impl Display for Capabilities {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let string = self
.0
.iter()
.map(|c| c.to_string())
.collect::<Vec<_>>()
.join(",");
write!(f, "{}", string)
}
}
impl Serialize for Capabilities {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.to_string().serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Capabilities {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let string: String = Deserialize::deserialize(deserializer)?;
let mut caps = vec![];
for s in string.split(',') {
if let Ok(cap) = Capability::try_from(s) {
caps.push(cap);
};
}
Ok(Capabilities(caps))
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Round-trip a read/write capability through its string form.
    #[test]
    fn pubky_caps() {
        let cap = Capability {
            scope: "/pub/pubky.app/".to_string(),
            actions: vec![Action::Read, Action::Write],
        };

        // Read and write within directory `/pub/pubky.app/`.
        let expected_string = "/pub/pubky.app/:rw";

        assert_eq!(cap.to_string(), expected_string);

        assert_eq!(Capability::try_from(expected_string), Ok(cap))
    }
}

View File

@@ -0,0 +1,71 @@
use crypto_secretbox::{
aead::{Aead, AeadCore, KeyInit, OsRng},
XSalsa20Poly1305,
};
use rand::prelude::Rng;
pub use pkarr::{Keypair, PublicKey};
pub use ed25519_dalek::Signature;
pub type Hash = blake3::Hash;
pub use blake3::hash;
pub use blake3::Hasher;
/// Generate a [Hash] from 32 random bytes of the thread-local RNG.
pub fn random_hash() -> Hash {
    Hash::from_bytes(rand::thread_rng().gen())
}
/// Generate `N` random bytes from the thread-local RNG.
pub fn random_bytes<const N: usize>() -> [u8; N] {
    let mut rng = rand::thread_rng();

    let mut bytes = [0u8; N];
    for byte in bytes.iter_mut() {
        *byte = rng.gen();
    }

    bytes
}
/// Encrypt `plain_text` with XSalsa20Poly1305 under `encryption_key`.
///
/// Returns the 24-byte random nonce followed by the ciphertext, so the
/// output can be fed straight into [decrypt].
pub fn encrypt(plain_text: &[u8], encryption_key: &[u8; 32]) -> Result<Vec<u8>, Error> {
    let cipher = XSalsa20Poly1305::new(encryption_key.into());
    let nonce = XSalsa20Poly1305::generate_nonce(&mut OsRng); // unique per message
    let ciphertext = cipher.encrypt(&nonce, plain_text)?;

    // Prefix the ciphertext with the nonce so `decrypt` can recover it.
    let mut out: Vec<u8> = Vec::with_capacity(nonce.len() + ciphertext.len());
    out.extend_from_slice(nonce.as_slice());
    out.extend_from_slice(&ciphertext);

    Ok(out)
}
/// Decrypt bytes produced by [encrypt]: a 24-byte nonce followed by
/// the ciphertext.
///
/// NOTE(review): panics if `bytes` is shorter than 24 bytes — confirm
/// callers validate input length first.
pub fn decrypt(bytes: &[u8], encryption_key: &[u8; 32]) -> Result<Vec<u8>, Error> {
    let cipher = XSalsa20Poly1305::new(encryption_key.into());

    Ok(cipher.decrypt(bytes[..24].into(), &bytes[24..])?)
}
#[derive(thiserror::Error, Debug)]
/// Errors from [encrypt] / [decrypt].
pub enum Error {
    /// Failure from the underlying AEAD (e.g. authentication tag mismatch).
    #[error(transparent)]
    SecretBox(#[from] crypto_secretbox::Error),
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Round-trip: decrypt(encrypt(x)) == x under a fixed key.
    #[test]
    fn encrypt_decrypt() {
        let plain_text = "Plain text!";
        let encryption_key = [0; 32];

        let encrypted = encrypt(plain_text.as_bytes(), &encryption_key).unwrap();
        let decrypted = decrypt(&encrypted, &encryption_key).unwrap();

        assert_eq!(decrypted, plain_text.as_bytes())
    }
}

View File

@@ -0,0 +1,7 @@
/// Client-server authentication tokens and replay-safe verification.
pub mod auth;
/// Capability (scope + actions) types and their string encoding.
pub mod capabilities;
/// Keypairs, hashing, and secret-box encryption helpers.
pub mod crypto;
/// Byte namespaces used to domain-separate signatures.
pub mod namespaces;
/// Passphrase-encrypted recovery files for secret keys.
pub mod recovery_file;
/// Homeserver session metadata.
pub mod session;
/// Timestamp type used by tokens and sessions.
pub mod timestamp;

View File

@@ -0,0 +1 @@
/// Namespace embedded in [crate::auth::AuthToken] to domain-separate its
/// signatures from other messages with the same structure.
pub const PUBKY_AUTH: &[u8; 10] = b"PUBKY:AUTH";

View File

@@ -0,0 +1,102 @@
use argon2::Argon2;
use pkarr::Keypair;
use crate::crypto::{decrypt, encrypt};
static SPEC_NAME: &str = "recovery";
static SPEC_LINE: &str = "pubky.org/recovery";
pub fn decrypt_recovery_file(recovery_file: &[u8], passphrase: &str) -> Result<Keypair, Error> {
let encryption_key = recovery_file_encryption_key_from_passphrase(passphrase)?;
let newline_index = recovery_file
.iter()
.position(|&r| r == 10)
.ok_or(())
.map_err(|_| Error::RecoveryFileMissingSpecLine)?;
let spec_line = &recovery_file[..newline_index];
if !(spec_line.starts_with(SPEC_LINE.as_bytes())
|| spec_line.starts_with(b"pkarr.org/recovery"))
{
return Err(Error::RecoveryFileVersionNotSupported);
}
let encrypted = &recovery_file[newline_index + 1..];
if encrypted.is_empty() {
return Err(Error::RecoverFileMissingEncryptedSecretKey);
};
let decrypted = decrypt(encrypted, &encryption_key)?;
let length = decrypted.len();
let secret_key: [u8; 32] = decrypted
.try_into()
.map_err(|_| Error::RecoverFileInvalidSecretKeyLength(length))?;
Ok(Keypair::from_secret_key(&secret_key))
}
/// Build a recovery file for `keypair`: the spec line, a newline, then the
/// passphrase-encrypted secret key.
pub fn create_recovery_file(keypair: &Keypair, passphrase: &str) -> Result<Vec<u8>, Error> {
    let encryption_key = recovery_file_encryption_key_from_passphrase(passphrase)?;
    let encrypted_secret_key = encrypt(&keypair.secret_key(), &encryption_key)?;

    let mut file = Vec::with_capacity(SPEC_LINE.len() + 1 + encrypted_secret_key.len());
    file.extend_from_slice(SPEC_LINE.as_bytes());
    file.extend_from_slice(b"\n");
    file.extend_from_slice(&encrypted_secret_key);

    Ok(file)
}
/// Derive a 32-byte encryption key from `passphrase` with Argon2 (default
/// parameters), using the constant `SPEC_NAME` ("recovery") as the salt.
fn recovery_file_encryption_key_from_passphrase(passphrase: &str) -> Result<[u8; 32], Error> {
    let argon2id = Argon2::default();

    let mut out = [0; 32];

    argon2id.hash_password_into(passphrase.as_bytes(), SPEC_NAME.as_bytes(), &mut out)?;

    Ok(out)
}
#[derive(thiserror::Error, Debug)]
pub enum Error {
// === Recovery file ==
#[error("Recovery file should start with a spec line, followed by a new line character")]
RecoveryFileMissingSpecLine,
#[error("Recovery file should start with a spec line, followed by a new line character")]
RecoveryFileVersionNotSupported,
#[error("Recovery file should contain an encrypted secret key after the new line character")]
RecoverFileMissingEncryptedSecretKey,
#[error("Recovery file encrypted secret key should be 32 bytes, got {0}")]
RecoverFileInvalidSecretKeyLength(usize),
#[error(transparent)]
Argon(#[from] argon2::Error),
#[error(transparent)]
Crypto(#[from] crate::crypto::Error),
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Round-trip: a keypair survives create + decrypt with the same passphrase.
    #[test]
    fn encrypt_decrypt_recovery_file() {
        let passphrase = "very secure password";

        let keypair = Keypair::random();

        let recovery_file = create_recovery_file(&keypair, passphrase).unwrap();
        let recovered = decrypt_recovery_file(&recovery_file, passphrase).unwrap();

        assert_eq!(recovered.public_key(), keypair.public_key());
    }
}

View File

@@ -0,0 +1,126 @@
use pkarr::PublicKey;
use postcard::{from_bytes, to_allocvec};
use serde::{Deserialize, Serialize};
extern crate alloc;
use alloc::vec::Vec;
use crate::{auth::AuthToken, capabilities::Capability, timestamp::Timestamp};
// TODO: add IP address?
// TODO: use https://crates.io/crates/user-agent-parser to parse the session
// and get more information from the user-agent.
/// A user's authenticated session on the homeserver.
///
/// Postcard-encoded; `version` is the first field so it acts as the
/// leading format-version byte checked by [Session::deserialize].
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct Session {
    // Encoding version; currently always 0.
    version: usize,
    // The user this session belongs to.
    pubky: PublicKey,
    // Creation time from `Timestamp::now()` (see the timestamp module).
    created_at: u64,
    /// User specified name, defaults to the user-agent.
    name: String,
    user_agent: String,
    // Capabilities granted by the auth token at sign-in.
    capabilities: Vec<Capability>,
}
impl Session {
    /// Create a session for a verified [AuthToken].
    ///
    /// Both `name` and `user_agent` default to the given user agent string,
    /// or to the empty string when `user_agent` is `None`.
    pub fn new(token: &AuthToken, user_agent: Option<String>) -> Self {
        Self {
            version: 0,
            pubky: token.pubky().to_owned(),
            created_at: Timestamp::now().into_inner(),
            capabilities: token.capabilities().to_vec(),
            user_agent: user_agent.as_deref().unwrap_or("").to_string(),
            name: user_agent.as_deref().unwrap_or("").to_string(),
        }
    }
    // === Getters ===
    /// The public key of the user this session belongs to.
    pub fn pubky(&self) -> &PublicKey {
        &self.pubky
    }
    /// Capabilities granted to this session.
    pub fn capabilities(&self) -> &Vec<Capability> {
        &self.capabilities
    }
    // === Setters ===
    /// Update the user agent; also backfills `name` while it is still empty.
    pub fn set_user_agent(&mut self, user_agent: String) -> &mut Self {
        self.user_agent = user_agent;
        if self.name.is_empty() {
            self.name.clone_from(&self.user_agent)
        }
        self
    }
    pub fn set_capabilities(&mut self, capabilities: Vec<Capability>) -> &mut Self {
        self.capabilities = capabilities;
        self
    }
    // === Public Methods ===
    /// Postcard-encode this session (version byte first).
    pub fn serialize(&self) -> Vec<u8> {
        to_allocvec(self).expect("Session::serialize")
    }
    /// Decode a postcard-encoded session, rejecting unknown versions.
    ///
    /// Fix: previously indexed `bytes[0]` directly, which panicked on an
    /// empty slice. Empty input now falls through to postcard, which
    /// reports a deserialization error instead.
    pub fn deserialize(bytes: &[u8]) -> Result<Self> {
        if bytes.first().map_or(false, |&version| version > 0) {
            return Err(Error::UnknownVersion);
        }
        Ok(from_bytes(bytes)?)
    }
    // TODO: add `can_read()`, `can_write()` and `is_root()` methods
}
/// Result alias for session (de)serialization.
pub type Result<T> = core::result::Result<T, Error>;
/// Errors raised while decoding a [Session].
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// The leading version byte was greater than the highest supported (0).
    #[error("Unknown version")]
    UnknownVersion,
    /// Underlying postcard encode/decode failure.
    #[error(transparent)]
    Postcard(#[from] postcard::Error),
}
#[cfg(test)]
mod tests {
use crate::crypto::Keypair;
use super::*;
#[test]
fn serialize() {
let keypair = Keypair::from_secret_key(&[0; 32]);
let pubky = keypair.public_key();
let session = Session {
user_agent: "foo".to_string(),
capabilities: vec![Capability::root()],
created_at: 0,
pubky,
version: 0,
name: "".to_string(),
};
let serialized = session.serialize();
assert_eq!(
serialized,
[
0, 59, 106, 39, 188, 206, 182, 164, 45, 98, 163, 168, 208, 42, 111, 13, 115, 101,
50, 21, 119, 29, 226, 67, 166, 58, 192, 72, 161, 139, 89, 218, 41, 0, 0, 3, 102,
111, 111, 1, 4, 47, 58, 114, 119
]
);
let deseiralized = Session::deserialize(&serialized).unwrap();
assert_eq!(deseiralized, session)
}
}

View File

@@ -0,0 +1,280 @@
//! Monotonic unix timestamp in microseconds
use serde::{Deserialize, Serialize};
use std::fmt::Display;
use std::{
ops::{Add, Sub},
sync::Mutex,
};
use once_cell::sync::Lazy;
use rand::Rng;
#[cfg(not(target_arch = "wasm32"))]
use std::time::SystemTime;
/// ~4% chance of none of 10 clocks have matching id.
const CLOCK_MASK: u64 = (1 << 8) - 1;
const TIME_MASK: u64 = !0 >> 8;
/// Stateful generator of monotonically increasing [Timestamp]s.
///
/// The low byte of every produced timestamp is a random per-factory
/// `clock_id`; the remaining high bits carry the (monotonic) time.
pub struct TimestampFactory {
    // Random 8-bit id, OR-ed into the low byte of each timestamp.
    clock_id: u64,
    // High bits (TIME_MASK-masked) of the last timestamp handed out.
    last_time: u64,
}
impl TimestampFactory {
    pub fn new() -> Self {
        Self {
            clock_id: rand::thread_rng().gen::<u64>() & CLOCK_MASK,
            last_time: system_time() & TIME_MASK,
        }
    }
    pub fn now(&mut self) -> Timestamp {
        // Ensure monotonicity: take the wall clock, but never go backwards.
        // If the clock stalled or rewound, step forward by one unit beyond
        // the low (clock id) byte instead.
        self.last_time = (system_time() & TIME_MASK).max(self.last_time + CLOCK_MASK + 1);
        // Add clock_id to the end of the timestamp
        Timestamp(self.last_time | self.clock_id)
    }
}
impl Default for TimestampFactory {
    fn default() -> Self {
        Self::new()
    }
}
static DEFAULT_FACTORY: Lazy<Mutex<TimestampFactory>> =
Lazy::new(|| Mutex::new(TimestampFactory::default()));
/// Monotonic timestamp since [SystemTime::UNIX_EPOCH] in microseconds as u64.
///
/// The purpose of this timestamp is to be unique per "user", not globally;
/// it achieves this by:
/// 1. Overriding the last byte with a random `clock_id`, reducing the probability
///    of two matching timestamps across multiple machines/threads.
/// 2. Guaranteeing that the remaining 7 bytes are ever increasing (monotonic) within
///    the same thread regardless of the wall clock value
///
/// This timestamp is also serialized as BE bytes to remain sortable.
/// If a `utf-8` encoding is necessary, it is encoded as [base32::Alphabet::Crockford]
/// to act as a sortable Id.
///
/// U64 of microseconds is valid for the next 500 thousand years!
#[derive(Debug, Clone, PartialEq, PartialOrd, Hash, Eq, Ord)]
pub struct Timestamp(u64);
impl Timestamp {
    /// A fresh monotonic timestamp from the process-wide default factory.
    pub fn now() -> Self {
        DEFAULT_FACTORY.lock().unwrap().now()
    }
    /// Return big endian bytes
    pub fn to_bytes(&self) -> [u8; 8] {
        self.0.to_be_bytes()
    }
    /// Signed difference (`self - rhs`) of the raw values; values beyond
    /// i64 range wrap due to the `as` casts.
    pub fn difference(&self, rhs: &Timestamp) -> i64 {
        (self.0 as i64) - (rhs.0 as i64)
    }
    /// The raw u64 (time bits with the clock id in the low byte).
    pub fn into_inner(&self) -> u64 {
        self.0
    }
}
impl Default for Timestamp {
fn default() -> Self {
Timestamp::now()
}
}
impl Display for Timestamp {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let bytes: [u8; 8] = self.into();
f.write_str(&base32::encode(base32::Alphabet::Crockford, &bytes))
}
}
impl TryFrom<String> for Timestamp {
    type Error = TimestampError;
    /// Parse a Crockford base32 string (as produced by `Display`) back
    /// into a timestamp.
    fn try_from(value: String) -> Result<Self, Self::Error> {
        match base32::decode(base32::Alphabet::Crockford, &value) {
            Some(vec) => {
                // Decoded payload must be exactly 8 big-endian bytes.
                let bytes: [u8; 8] = vec
                    .try_into()
                    .map_err(|_| TimestampError::InvalidEncoding)?;
                Ok(bytes.into())
            }
            None => Err(TimestampError::InvalidEncoding),
        }
    }
}
impl TryFrom<&[u8]> for Timestamp {
    type Error = TimestampError;
    /// Build a timestamp from exactly 8 big-endian bytes.
    fn try_from(bytes: &[u8]) -> Result<Self, Self::Error> {
        let bytes: [u8; 8] = bytes
            .try_into()
            .map_err(|_| TimestampError::InvalidBytesLength(bytes.len()))?;
        Ok(bytes.into())
    }
}
impl From<&Timestamp> for [u8; 8] {
fn from(timestamp: &Timestamp) -> Self {
timestamp.0.to_be_bytes()
}
}
impl From<[u8; 8]> for Timestamp {
fn from(bytes: [u8; 8]) -> Self {
Self(u64::from_be_bytes(bytes))
}
}
// === U64 conversion ===
impl From<Timestamp> for u64 {
fn from(value: Timestamp) -> Self {
value.into_inner()
}
}
impl Add<u64> for &Timestamp {
type Output = Timestamp;
fn add(self, rhs: u64) -> Self::Output {
Timestamp(self.0 + rhs)
}
}
impl Sub<u64> for &Timestamp {
type Output = Timestamp;
fn sub(self, rhs: u64) -> Self::Output {
Timestamp(self.0 - rhs)
}
}
impl Serialize for Timestamp {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let bytes = self.to_bytes();
bytes.serialize(serializer)
}
}
impl<'de> Deserialize<'de> for Timestamp {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let bytes: [u8; 8] = Deserialize::deserialize(deserializer)?;
Ok(Timestamp(u64::from_be_bytes(bytes)))
}
}
#[cfg(not(target_arch = "wasm32"))]
/// Return the number of microseconds since [SystemTime::UNIX_EPOCH]
fn system_time() -> u64 {
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("time drift")
.as_micros() as u64
}
#[cfg(target_arch = "wasm32")]
/// Return the number of microseconds since [SystemTime::UNIX_EPOCH]
pub fn system_time() -> u64 {
// Won't be an issue for more than 5000 years!
(js_sys::Date::now() as u64 )
// Turn miliseconds to microseconds
* 1000
}
#[derive(thiserror::Error, Debug)]
pub enum TimestampError {
#[error("Invalid bytes length, Timestamp should be encoded as 8 bytes, got {0}")]
InvalidBytesLength(usize),
#[error("Invalid timestamp encoding")]
InvalidEncoding,
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use super::*;
#[test]
fn monotonic() {
const COUNT: usize = 100;
let mut set = HashSet::with_capacity(COUNT);
let mut vec = Vec::with_capacity(COUNT);
for _ in 0..COUNT {
let timestamp = Timestamp::now();
set.insert(timestamp.clone());
vec.push(timestamp);
}
let mut ordered = vec.clone();
ordered.sort();
assert_eq!(set.len(), COUNT, "unique");
assert_eq!(ordered, vec, "ordered");
}
#[test]
fn strings() {
const COUNT: usize = 100;
let mut set = HashSet::with_capacity(COUNT);
let mut vec = Vec::with_capacity(COUNT);
for _ in 0..COUNT {
let string = Timestamp::now().to_string();
set.insert(string.clone());
vec.push(string)
}
let mut ordered = vec.clone();
ordered.sort();
assert_eq!(set.len(), COUNT, "unique");
assert_eq!(ordered, vec, "ordered");
}
#[test]
fn to_from_string() {
let timestamp = Timestamp::now();
let string = timestamp.to_string();
let decoded: Timestamp = string.try_into().unwrap();
assert_eq!(decoded, timestamp)
}
#[test]
fn serde() {
let timestamp = Timestamp::now();
let serialized = postcard::to_allocvec(&timestamp).unwrap();
assert_eq!(serialized, timestamp.to_bytes());
let deserialized: Timestamp = postcard::from_bytes(&serialized).unwrap();
assert_eq!(deserialized, timestamp);
}
}

View File

@@ -0,0 +1,27 @@
[package]
name = "pubky_homeserver"
version = "0.1.0"
edition = "2021"
[dependencies]
anyhow = "1.0.82"
axum = { version = "0.7.5", features = ["macros"] }
axum-extra = { version = "0.9.3", features = ["typed-header", "async-read-body"] }
base32 = "0.5.1"
bytes = "^1.7.1"
clap = { version = "4.5.11", features = ["derive"] }
dirs-next = "2.0.0"
flume = "0.11.0"
futures-util = "0.3.30"
heed = "0.20.3"
hex = "0.4.3"
pkarr = { workspace = true }
postcard = { version = "1.0.8", features = ["alloc"] }
pubky-common = { version = "0.1.0", path = "../pubky-common" }
serde = { workspace = true }
tokio = { version = "1.37.0", features = ["full"] }
toml = "0.8.19"
tower-cookies = "0.10.0"
tower-http = { version = "0.5.2", features = ["cors", "trace"] }
tracing = "0.1.40"
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }

View File

@@ -0,0 +1,23 @@
# Pubky Homeserver
## Usage
Use `cargo run`
```bash
cargo run -- --config=./src/config.toml
```
Or build it first, then run the binary from the target directory.
Build
```bash
cargo build --release
```
Run with an optional config file
```bash
../target/release/pubky-homeserver --config=./src/config.toml
```

View File

@@ -0,0 +1,173 @@
//! Configuration for the server
use anyhow::{anyhow, Context, Result};
use pkarr::Keypair;
use serde::{Deserialize, Deserializer, Serialize};
use std::{
fmt::Debug,
path::{Path, PathBuf},
time::Duration,
};
use tracing::info;
use pubky_common::timestamp::Timestamp;
const DEFAULT_HOMESERVER_PORT: u16 = 6287;
const DEFAULT_STORAGE_DIR: &str = "pubky";
/// Server configuration
#[derive(Serialize, Deserialize, Clone)]
pub struct Config {
testnet: bool,
port: Option<u16>,
bootstrap: Option<Vec<String>>,
domain: String,
/// Path to the storage directory
///
/// Defaults to a directory in the OS data directory
storage: Option<PathBuf>,
#[serde(deserialize_with = "secret_key_deserialize")]
secret_key: Option<[u8; 32]>,
dht_request_timeout: Option<Duration>,
}
impl Config {
    /// Load the config from a TOML file at `path`.
    ///
    /// When the file sets `testnet = true`, the bootstrap nodes are replaced
    /// by freshly spawned local testnet nodes; all other fields are kept.
    pub async fn load(path: impl AsRef<Path>) -> Result<Config> {
        let s = tokio::fs::read_to_string(path.as_ref())
            .await
            .with_context(|| format!("failed to read {}", path.as_ref().to_string_lossy()))?;
        let config: Config = toml::from_str(&s)?;
        if config.testnet {
            let testnet_config = Config::testnet();
            return Ok(Config {
                bootstrap: testnet_config.bootstrap,
                ..config
            });
        }
        Ok(config)
    }
    /// Testnet configurations
    pub fn testnet() -> Self {
        let testnet = pkarr::mainline::Testnet::new(10);
        info!(?testnet.bootstrap, "Testnet bootstrap nodes");
        let bootstrap = Some(testnet.bootstrap.to_owned());
        // Unique (timestamped) temp directory so parallel runs don't collide.
        let storage = Some(
            std::env::temp_dir()
                .join(Timestamp::now().to_string())
                .join(DEFAULT_STORAGE_DIR),
        );
        Self {
            bootstrap,
            storage,
            port: Some(15411),
            dht_request_timeout: Some(Duration::from_millis(10)),
            ..Default::default()
        }
    }
    /// Test configurations
    pub fn test(testnet: &pkarr::mainline::Testnet) -> Self {
        let bootstrap = Some(testnet.bootstrap.to_owned());
        let storage = Some(
            std::env::temp_dir()
                .join(Timestamp::now().to_string())
                .join(DEFAULT_STORAGE_DIR),
        );
        Self {
            bootstrap,
            storage,
            ..Default::default()
        }
    }
    /// Port to listen on; defaults to [DEFAULT_HOMESERVER_PORT].
    pub fn port(&self) -> u16 {
        self.port.unwrap_or(DEFAULT_HOMESERVER_PORT)
    }
    /// Configured DHT bootstrap nodes, if any.
    pub fn bootstrap(&self) -> Option<Vec<String>> {
        self.bootstrap.to_owned()
    }
    /// Misspelled alias of [Config::bootstrap], kept for backward compatibility.
    #[deprecated(note = "use `bootstrap` instead")]
    pub fn bootstsrap(&self) -> Option<Vec<String>> {
        self.bootstrap()
    }
    pub fn domain(&self) -> &str {
        &self.domain
    }
    /// Get the path to the storage directory
    pub fn storage(&self) -> Result<PathBuf> {
        let dir = if let Some(storage) = &self.storage {
            PathBuf::from(storage)
        } else {
            let path = dirs_next::data_dir().ok_or_else(|| {
                anyhow!("operating environment provides no directory for application data")
            })?;
            path.join(DEFAULT_STORAGE_DIR)
        };
        Ok(dir.join("homeserver"))
    }
    /// Server keypair, derived from the configured secret key
    /// (an all-zeros key when unset).
    pub fn keypair(&self) -> Keypair {
        Keypair::from_secret_key(&self.secret_key.unwrap_or_default())
    }
    pub(crate) fn dht_request_timeout(&self) -> Option<Duration> {
        self.dht_request_timeout
    }
}
impl Default for Config {
fn default() -> Self {
Self {
testnet: false,
port: Some(0),
bootstrap: None,
domain: "localhost".to_string(),
storage: None,
secret_key: None,
dht_request_timeout: None,
}
}
}
fn secret_key_deserialize<'de, D>(deserializer: D) -> Result<Option<[u8; 32]>, D::Error>
where
D: Deserializer<'de>,
{
let opt: Option<String> = Option::deserialize(deserializer)?;
match opt {
Some(s) => {
let bytes = hex::decode(s).map_err(serde::de::Error::custom)?;
if bytes.len() != 32 {
return Err(serde::de::Error::custom("Expected a 32-byte array"));
}
let mut arr = [0u8; 32];
arr.copy_from_slice(&bytes);
Ok(Some(arr))
}
None => Ok(None),
}
}
impl Debug for Config {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_map()
.entry(&"testnet", &self.testnet)
.entry(&"port", &self.port())
.entry(&"storage", &self.storage())
.entry(&"public_key", &self.keypair().public_key())
.finish()
}
}

View File

@@ -0,0 +1,10 @@
# Use testnet network (local DHT) for testing.
testnet = true
# Secret key (in hex) to generate the Homeserver's Keypair
secret_key = "0000000000000000000000000000000000000000000000000000000000000000"
# Domain to be published in Pkarr records for this server to be accessible by.
domain = "localhost"
# Port for the Homeserver to listen on.
port = 6287
# Storage directory. Defaults to <System's Data Directory>.
# storage = ""

View File

@@ -0,0 +1,73 @@
use std::fs;
use std::path::Path;
use heed::{Env, EnvOpenOptions};
mod migrations;
pub mod tables;
use tables::{Tables, TABLES_COUNT};
pub const MAX_LIST_LIMIT: u16 = 100;
/// Database handle: the heed [Env] plus all opened tables.
#[derive(Debug, Clone)]
pub struct DB {
    pub(crate) env: Env,
    pub(crate) tables: Tables,
}
impl DB {
    /// Open (creating if needed) the database at `storage`, running
    /// migrations before handing back the table handles.
    pub fn open(storage: &Path) -> anyhow::Result<Self> {
        // NOTE(review): this unwrap turns an unwritable storage dir into a
        // panic rather than an Err — consider propagating with `?`.
        fs::create_dir_all(storage).unwrap();
        let env = unsafe { EnvOpenOptions::new().max_dbs(TABLES_COUNT).open(storage) }?;
        let tables = migrations::run(&env)?;
        let db = DB { env, tables };
        Ok(db)
    }
}
#[cfg(test)]
mod tests {
use bytes::Bytes;
use pkarr::Keypair;
use pubky_common::timestamp::Timestamp;
use super::DB;
#[tokio::test]
async fn entries() {
let storage = std::env::temp_dir()
.join(Timestamp::now().to_string())
.join("pubky");
let db = DB::open(&storage).unwrap();
let keypair = Keypair::random();
let path = "/pub/foo.txt";
let (tx, rx) = flume::bounded::<Bytes>(0);
let mut cloned = db.clone();
let cloned_keypair = keypair.clone();
let done = tokio::task::spawn_blocking(move || {
cloned
.put_entry(&cloned_keypair.public_key(), path, rx)
.unwrap();
});
tx.send(vec![1, 2, 3, 4, 5].into()).unwrap();
drop(tx);
done.await.unwrap();
let blob = db.get_blob(&keypair.public_key(), path).unwrap().unwrap();
assert_eq!(blob, Bytes::from(vec![1, 2, 3, 4, 5]));
}
}

View File

@@ -0,0 +1,17 @@
use heed::Env;
mod m0;
use super::tables::Tables;
pub fn run(env: &Env) -> anyhow::Result<Tables> {
let mut wtxn = env.write_txn()?;
m0::run(env, &mut wtxn)?;
let tables = Tables::new(env, &mut wtxn)?;
wtxn.commit()?;
Ok(tables)
}

View File

@@ -0,0 +1,17 @@
use heed::{Env, RwTxn};
use crate::database::tables::{blobs, entries, events, sessions, users};
pub fn run(env: &Env, wtxn: &mut RwTxn) -> anyhow::Result<()> {
let _: users::UsersTable = env.create_database(wtxn, Some(users::USERS_TABLE))?;
let _: sessions::SessionsTable = env.create_database(wtxn, Some(sessions::SESSIONS_TABLE))?;
let _: blobs::BlobsTable = env.create_database(wtxn, Some(blobs::BLOBS_TABLE))?;
let _: entries::EntriesTable = env.create_database(wtxn, Some(entries::ENTRIES_TABLE))?;
let _: events::EventsTable = env.create_database(wtxn, Some(events::EVENTS_TABLE))?;
Ok(())
}

View File

@@ -0,0 +1,49 @@
pub mod blobs;
pub mod entries;
pub mod events;
pub mod sessions;
pub mod users;
use heed::{Env, RwTxn};
use blobs::{BlobsTable, BLOBS_TABLE};
use entries::{EntriesTable, ENTRIES_TABLE};
use self::{
events::{EventsTable, EVENTS_TABLE},
sessions::{SessionsTable, SESSIONS_TABLE},
users::{UsersTable, USERS_TABLE},
};
pub const TABLES_COUNT: u32 = 5;
#[derive(Debug, Clone)]
pub struct Tables {
pub users: UsersTable,
pub sessions: SessionsTable,
pub blobs: BlobsTable,
pub entries: EntriesTable,
pub events: EventsTable,
}
impl Tables {
pub fn new(env: &Env, wtxn: &mut RwTxn) -> anyhow::Result<Self> {
Ok(Self {
users: env
.open_database(wtxn, Some(USERS_TABLE))?
.expect("Users table already created"),
sessions: env
.open_database(wtxn, Some(SESSIONS_TABLE))?
.expect("Sessions table already created"),
blobs: env
.open_database(wtxn, Some(BLOBS_TABLE))?
.expect("Blobs table already created"),
entries: env
.open_database(wtxn, Some(ENTRIES_TABLE))?
.expect("Entries table already created"),
events: env
.open_database(wtxn, Some(EVENTS_TABLE))?
.expect("Events table already created"),
})
}
}

View File

@@ -0,0 +1,38 @@
use heed::{types::Bytes, Database};
use pkarr::PublicKey;
use crate::database::DB;
use super::entries::Entry;
/// hash of the blob => bytes.
pub type BlobsTable = Database<Bytes, Bytes>;
pub const BLOBS_TABLE: &str = "blobs";
impl DB {
pub fn get_blob(
&self,
public_key: &PublicKey,
path: &str,
) -> anyhow::Result<Option<bytes::Bytes>> {
let rtxn = self.env.read_txn()?;
let key = format!("{public_key}/{path}");
let result = if let Some(bytes) = self.tables.entries.get(&rtxn, &key)? {
let entry = Entry::deserialize(bytes)?;
self.tables
.blobs
.get(&rtxn, entry.content_hash())?
.map(|blob| bytes::Bytes::from(blob.to_vec()))
} else {
None
};
rtxn.commit()?;
Ok(result)
}
}

View File

@@ -0,0 +1,274 @@
use pkarr::PublicKey;
use postcard::{from_bytes, to_allocvec};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use heed::{
types::{Bytes, Str},
Database, RoTxn,
};
use pubky_common::{
crypto::{Hash, Hasher},
timestamp::Timestamp,
};
use crate::database::{DB, MAX_LIST_LIMIT};
use super::events::Event;
/// full_path(pubky/*path) => Entry.
pub type EntriesTable = Database<Str, Bytes>;
pub const ENTRIES_TABLE: &str = "entries";
impl DB {
pub fn put_entry(
&mut self,
public_key: &PublicKey,
path: &str,
rx: flume::Receiver<bytes::Bytes>,
) -> anyhow::Result<()> {
let mut wtxn = self.env.write_txn()?;
let mut hasher = Hasher::new();
let mut bytes = vec![];
let mut length = 0;
while let Ok(chunk) = rx.recv() {
hasher.update(&chunk);
bytes.extend_from_slice(&chunk);
length += chunk.len();
}
let hash = hasher.finalize();
self.tables.blobs.put(&mut wtxn, hash.as_bytes(), &bytes)?;
let mut entry = Entry::new();
entry.set_content_hash(hash);
entry.set_content_length(length);
let key = format!("{public_key}/{path}");
self.tables
.entries
.put(&mut wtxn, &key, &entry.serialize())?;
if path.starts_with("pub/") {
let url = format!("pubky://{key}");
let event = Event::put(&url);
let value = event.serialize();
let key = entry.timestamp.to_string();
self.tables.events.put(&mut wtxn, &key, &value)?;
// TODO: delete older events.
// TODO: move to events.rs
}
wtxn.commit()?;
Ok(())
}
pub fn delete_entry(&mut self, public_key: &PublicKey, path: &str) -> anyhow::Result<bool> {
let mut wtxn = self.env.write_txn()?;
let key = format!("{public_key}/{path}");
let deleted = if let Some(bytes) = self.tables.entries.get(&wtxn, &key)? {
let entry = Entry::deserialize(bytes)?;
// TODO: reference counting of blobs
let deleted_blobs = self.tables.blobs.delete(&mut wtxn, entry.content_hash())?;
let deleted_entry = self.tables.entries.delete(&mut wtxn, &key)?;
// create DELETE event
if path.starts_with("pub/") {
let url = format!("pubky://{key}");
let event = Event::delete(&url);
let value = event.serialize();
let key = Timestamp::now().to_string();
self.tables.events.put(&mut wtxn, &key, &value)?;
// TODO: delete older events.
// TODO: move to events.rs
}
deleted_entry & deleted_blobs
} else {
false
};
wtxn.commit()?;
Ok(deleted)
}
pub fn contains_directory(&self, txn: &RoTxn, path: &str) -> anyhow::Result<bool> {
Ok(self.tables.entries.get_greater_than(txn, path)?.is_some())
}
/// Return a list of pubky urls.
///
/// - limit defaults to and capped by [MAX_LIST_LIMIT]
pub fn list(
&self,
txn: &RoTxn,
path: &str,
reverse: bool,
limit: Option<u16>,
cursor: Option<String>,
shallow: bool,
) -> anyhow::Result<Vec<String>> {
// Vector to store results
let mut results = Vec::new();
let limit = limit.unwrap_or(MAX_LIST_LIMIT).min(MAX_LIST_LIMIT);
// TODO: make this more performant than split and allocations?
let mut threshold = cursor
.map(|cursor| {
// Removing leading forward slashes
let mut file_or_directory = cursor.trim_start_matches('/');
if cursor.starts_with("pubky://") {
file_or_directory = cursor.split(path).last().expect("should not be reachable")
};
next_threshold(
path,
file_or_directory,
file_or_directory.ends_with('/'),
reverse,
shallow,
)
})
.unwrap_or(next_threshold(path, "", false, reverse, shallow));
for _ in 0..limit {
if let Some((key, _)) = if reverse {
self.tables.entries.get_lower_than(txn, &threshold)?
} else {
self.tables.entries.get_greater_than(txn, &threshold)?
} {
if !key.starts_with(path) {
break;
}
if shallow {
let mut split = key[path.len()..].split('/');
let file_or_directory = split.next().expect("should not be reachable");
let is_directory = split.next().is_some();
threshold =
next_threshold(path, file_or_directory, is_directory, reverse, shallow);
results.push(format!(
"pubky://{path}{file_or_directory}{}",
if is_directory { "/" } else { "" }
));
} else {
threshold = key.to_string();
results.push(format!("pubky://{}", key))
}
};
}
Ok(results)
}
}
/// Calculate the next threshold
#[instrument]
fn next_threshold(
path: &str,
file_or_directory: &str,
is_directory: bool,
reverse: bool,
shallow: bool,
) -> String {
format!(
"{path}{file_or_directory}{}",
if file_or_directory.is_empty() {
// No file_or_directory, early return
if reverse {
// `path/to/dir/\x7f` to catch all paths than `path/to/dir/`
"\x7f"
} else {
""
}
} else if shallow & is_directory {
if reverse {
// threshold = `path/to/dir\x2e`, since `\x2e` is lower than `/`
"\x2e"
} else {
//threshold = `path/to/dir\x7f`, since `\x7f` is greater than `/`
"\x7f"
}
} else {
""
}
)
}
#[derive(Clone, Default, Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct Entry {
/// Encoding version
version: usize,
/// Modified at
timestamp: Timestamp,
content_hash: [u8; 32],
content_length: usize,
content_type: String,
// user_metadata: ?
}
// TODO: get headers like Etag
impl Entry {
    pub fn new() -> Self {
        Default::default()
    }
    // === Setters ===
    /// Record the content hash of the entry's blob.
    pub fn set_content_hash(&mut self, content_hash: Hash) -> &mut Self {
        content_hash.as_bytes().clone_into(&mut self.content_hash);
        self
    }
    /// Record the blob length in bytes.
    pub fn set_content_length(&mut self, content_length: usize) -> &mut Self {
        self.content_length = content_length;
        self
    }
    // === Getters ===
    /// Hash of the blob this entry points at (key into the blobs table).
    pub fn content_hash(&self) -> &[u8; 32] {
        &self.content_hash
    }
    // === Public Method ===
    /// Postcard-encode this entry (`version` field first).
    pub fn serialize(&self) -> Vec<u8> {
        // Fix: expect message previously read "Session::serialize" (copy-paste).
        to_allocvec(self).expect("Entry::serialize")
    }
    /// Decode a postcard-encoded entry.
    ///
    /// Fix: previously indexed `bytes[0]` directly, panicking on empty
    /// input; empty slices now fall through to postcard's error. Versions
    /// above 0 still panic, as before.
    pub fn deserialize(bytes: &[u8]) -> core::result::Result<Self, postcard::Error> {
        if bytes.first().map_or(false, |&version| version > 0) {
            panic!("Unknown Entry version");
        }
        from_bytes(bytes)
    }
}

View File

@@ -0,0 +1,58 @@
//! Server events (Put and Delete entries)
//!
//! Useful as a realtime sync with Indexers until
//! we implement more self-authenticated merkle data.
use heed::{
types::{Bytes, Str},
Database,
};
use postcard::{from_bytes, to_allocvec};
use serde::{Deserialize, Serialize};
/// Event [Timestamp] base32 => Encoded event.
pub type EventsTable = Database<Str, Bytes>;
pub const EVENTS_TABLE: &str = "events";
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
pub enum Event {
Put(String),
Delete(String),
}
impl Event {
    /// A PUT event for `url`.
    pub fn put(url: &str) -> Self {
        Self::Put(url.to_string())
    }
    /// A DELETE event for `url`.
    pub fn delete(url: &str) -> Self {
        Self::Delete(url.to_string())
    }
    /// Postcard-encode this event (enum discriminant first).
    pub fn serialize(&self) -> Vec<u8> {
        // Fix: expect message previously read "Session::serialize" (copy-paste).
        to_allocvec(self).expect("Event::serialize")
    }
    /// Decode a postcard-encoded event.
    ///
    /// Fix: previously indexed `bytes[0]` directly, panicking on empty
    /// input; empty slices now fall through to postcard's error.
    /// Discriminants above 1 (Put/Delete) still panic, as before.
    pub fn deserialize(bytes: &[u8]) -> core::result::Result<Self, postcard::Error> {
        if bytes.first().map_or(false, |&discriminant| discriminant > 1) {
            panic!("Unknown Event version");
        }
        from_bytes(bytes)
    }
    /// The pubky URL this event refers to.
    pub fn url(&self) -> &str {
        match self {
            Event::Put(url) => url,
            Event::Delete(url) => url,
        }
    }
    /// Short operation tag: "PUT" or "DEL".
    pub fn operation(&self) -> &str {
        match self {
            Event::Put(_) => "PUT",
            Event::Delete(_) => "DEL",
        }
    }
}

View File

@@ -0,0 +1,51 @@
use heed::{
types::{Bytes, Str},
Database,
};
use pkarr::PublicKey;
use pubky_common::session::Session;
use tower_cookies::Cookies;
use crate::database::DB;
/// session secret => Session.
pub type SessionsTable = Database<Str, Bytes>;
pub const SESSIONS_TABLE: &str = "sessions";
impl DB {
pub fn get_session(
&mut self,
cookies: Cookies,
public_key: &PublicKey,
) -> anyhow::Result<Option<Session>> {
if let Some(bytes) = self.get_session_bytes(cookies, public_key)? {
return Ok(Some(Session::deserialize(&bytes)?));
};
Ok(None)
}
pub fn get_session_bytes(
&mut self,
cookies: Cookies,
public_key: &PublicKey,
) -> anyhow::Result<Option<Vec<u8>>> {
if let Some(cookie) = cookies.get(&public_key.to_string()) {
let rtxn = self.env.read_txn()?;
let sessions: SessionsTable = self
.env
.open_database(&rtxn, Some(SESSIONS_TABLE))?
.expect("Session table already created");
let session = sessions.get(&rtxn, cookie.value())?.map(|s| s.to_vec());
rtxn.commit()?;
return Ok(session);
};
Ok(None)
}
}

View File

@@ -0,0 +1,58 @@
use std::borrow::Cow;
use postcard::{from_bytes, to_allocvec};
use serde::{Deserialize, Serialize};
use heed::{BoxedError, BytesDecode, BytesEncode, Database};
use pkarr::PublicKey;
extern crate alloc;
/// PublicKey => User.
pub type UsersTable = Database<PublicKeyCodec, User>;
pub const USERS_TABLE: &str = "users";
// TODO: add more administration metadata like quota, invitation links, etc.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct User {
pub created_at: u64,
}
impl<'a> BytesEncode<'a> for User {
    type EItem = Self;
    /// Postcard-encode a user row.
    fn bytes_encode(user: &Self::EItem) -> Result<Cow<[u8]>, BoxedError> {
        // Fix: propagate serialization failures as BoxedError instead of
        // unwrapping (which would panic inside heed).
        let vec = to_allocvec(user)?;
        Ok(Cow::Owned(vec))
    }
}
impl<'a> BytesDecode<'a> for User {
    type DItem = Self;
    /// Postcard-decode a user row.
    fn bytes_decode(bytes: &'a [u8]) -> Result<Self::DItem, BoxedError> {
        // Fix: a corrupt row now surfaces as a decode error rather than
        // panicking the server via unwrap.
        Ok(from_bytes(bytes)?)
    }
}
pub struct PublicKeyCodec {}
impl<'a> BytesEncode<'a> for PublicKeyCodec {
type EItem = PublicKey;
fn bytes_encode(pubky: &Self::EItem) -> Result<Cow<[u8]>, BoxedError> {
Ok(Cow::Borrowed(pubky.as_bytes()))
}
}
impl<'a> BytesDecode<'a> for PublicKeyCodec {
type DItem = PublicKey;
fn bytes_decode(bytes: &'a [u8]) -> Result<Self::DItem, BoxedError> {
Ok(PublicKey::try_from(bytes)?)
}
}

View File

@@ -0,0 +1,121 @@
//! Server error
use axum::{
extract::rejection::{ExtensionRejection, PathRejection, QueryRejection},
http::StatusCode,
response::IntoResponse,
};
pub type Result<T, E = Error> = core::result::Result<T, E>;
#[derive(Debug, Clone)]
pub struct Error {
// #[serde(with = "serde_status_code")]
status: StatusCode,
detail: Option<String>,
}
impl Default for Error {
fn default() -> Self {
Self {
status: StatusCode::INTERNAL_SERVER_ERROR,
detail: None,
}
}
}
impl Error {
pub fn with_status(status: StatusCode) -> Error {
Self {
status,
detail: None,
}
}
/// Create a new [`Error`].
pub fn new(status_code: StatusCode, message: Option<impl ToString>) -> Error {
Self {
status: status_code,
detail: message.map(|m| m.to_string()),
}
}
}
impl IntoResponse for Error {
fn into_response(self) -> axum::response::Response {
match self.detail {
Some(detail) => (self.status, detail).into_response(),
_ => (self.status,).into_response(),
}
}
}
impl From<QueryRejection> for Error {
fn from(error: QueryRejection) -> Self {
Self::new(StatusCode::BAD_REQUEST, error.into())
}
}
impl From<ExtensionRejection> for Error {
fn from(error: ExtensionRejection) -> Self {
Self::new(StatusCode::BAD_REQUEST, error.into())
}
}
impl From<PathRejection> for Error {
fn from(error: PathRejection) -> Self {
Self::new(StatusCode::BAD_REQUEST, error.into())
}
}
// === Pubky specific errors ===
impl From<pubky_common::auth::Error> for Error {
fn from(error: pubky_common::auth::Error) -> Self {
Self::new(StatusCode::BAD_REQUEST, Some(error))
}
}
impl From<pkarr::Error> for Error {
fn from(error: pkarr::Error) -> Self {
Self::new(StatusCode::BAD_REQUEST, Some(error))
}
}
// === INTERNAL_SERVER_ERROR ===
impl From<std::io::Error> for Error {
fn from(error: std::io::Error) -> Self {
Self::new(StatusCode::INTERNAL_SERVER_ERROR, error.into())
}
}
impl From<heed::Error> for Error {
fn from(error: heed::Error) -> Self {
Self::new(StatusCode::INTERNAL_SERVER_ERROR, error.into())
}
}
impl From<anyhow::Error> for Error {
fn from(error: anyhow::Error) -> Self {
Self::new(StatusCode::INTERNAL_SERVER_ERROR, error.into())
}
}
impl From<postcard::Error> for Error {
fn from(error: postcard::Error) -> Self {
Self::new(StatusCode::INTERNAL_SERVER_ERROR, error.into())
}
}
impl From<axum::Error> for Error {
fn from(error: axum::Error) -> Self {
Self::new(StatusCode::INTERNAL_SERVER_ERROR, error.into())
}
}
impl<T> From<flume::SendError<T>> for Error {
fn from(error: flume::SendError<T>) -> Self {
Self::new(StatusCode::INTERNAL_SERVER_ERROR, error.into())
}
}

View File

@@ -0,0 +1,76 @@
use std::collections::HashMap;
use axum::{
async_trait,
extract::{FromRequestParts, Path},
http::{request::Parts, StatusCode},
response::{IntoResponse, Response},
RequestPartsExt,
};
use pkarr::PublicKey;
use crate::error::{Error, Result};
/// Axum extractor for the `:pubky` path parameter, parsed into a [PublicKey].
#[derive(Debug)]
pub struct Pubky(PublicKey);
impl Pubky {
    pub fn public_key(&self) -> &PublicKey {
        &self.0
    }
}
#[async_trait]
impl<S> FromRequestParts<S> for Pubky
where
    S: Send + Sync,
{
    type Rejection = Response;
    async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
        let params: Path<HashMap<String, String>> =
            parts.extract().await.map_err(IntoResponse::into_response)?;
        // Missing `:pubky` segment yields a 404 rejection.
        let pubky_id = params
            .get("pubky")
            .ok_or_else(|| (StatusCode::NOT_FOUND, "pubky param missing").into_response())?;
        // NOTE(review): `map_err(Error::try_from)` wraps the pkarr error in a
        // nested Result; `map_err(Error::from)` looks like the intent — confirm.
        let public_key = PublicKey::try_from(pubky_id.to_string())
            .map_err(Error::try_from)
            .map_err(IntoResponse::into_response)?;
        // TODO: return 404 if the user doesn't exist, but exclude signups.
        Ok(Pubky(public_key))
    }
}
pub struct EntryPath(pub(crate) String);
impl EntryPath {
pub fn as_str(&self) -> &str {
self.0.as_str()
}
}
#[async_trait]
impl<S> FromRequestParts<S> for EntryPath
where
S: Send + Sync,
{
type Rejection = Response;
async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result<Self, Self::Rejection> {
let params: Path<HashMap<String, String>> =
parts.extract().await.map_err(IntoResponse::into_response)?;
// TODO: enforce path limits like no trailing '/'
let path = params
.get("path")
.ok_or_else(|| (StatusCode::NOT_FOUND, "entry path missing").into_response())?;
Ok(EntryPath(path.to_string()))
}
}

View File

@@ -0,0 +1,9 @@
pub mod config;
mod database;
mod error;
mod extractors;
mod pkarr;
mod routes;
mod server;
pub use server::Homeserver;

View File

@@ -0,0 +1,46 @@
use std::path::PathBuf;
use anyhow::Result;
use pubky_homeserver::{config::Config, Homeserver};
use clap::Parser;
#[derive(Parser, Debug)]
struct Cli {
/// [tracing_subscriber::EnvFilter]
#[clap(short, long)]
tracing_env_filter: Option<String>,
/// Run Homeserver in a local testnet
#[clap(long)]
testnet: bool,
/// Optional Path to config file.
#[clap(short, long)]
config: Option<PathBuf>,
}
#[tokio::main]
async fn main() -> Result<()> {
    let args = Cli::parse();
    // Default filter shows homeserver debug logs plus HTTP-layer tracing.
    tracing_subscriber::fmt()
        .with_env_filter(
            args.tracing_env_filter
                .unwrap_or("pubky_homeserver=debug,tower_http=debug".to_string()),
        )
        .init();
    // Config precedence: --testnet beats --config; otherwise defaults.
    let server = Homeserver::start(if args.testnet {
        Config::testnet()
    } else if let Some(config_path) = args.config {
        Config::load(config_path).await?
    } else {
        Config::default()
    })
    .await?;
    // Blocks until the server shuts down.
    server.run_until_done().await?;
    Ok(())
}

View File

@@ -0,0 +1,46 @@
//! Pkarr related task
use pkarr::{
dns::{rdata::SVCB, Packet},
Keypair, PkarrClientAsync, SignedPacket,
};
/// Publish this homeserver's pkarr packet: a single SVCB answer at `@`
/// pointing at `domain`, signed with `keypair`, with a 1-hour TTL.
pub async fn publish_server_packet(
    pkarr_client: PkarrClientAsync,
    keypair: &Keypair,
    domain: &str,
    port: u16,
) -> anyhow::Result<()> {
    // TODO: Try to resolve first before publishing.
    let mut packet = Packet::new_reply(0);
    let mut svcb = SVCB::new(0, domain.try_into()?);

    // Publishing port only for localhost domain,
    // assuming any other domain will point to a reverse proxy
    // at the conventional ports.
    if domain == "localhost" {
        svcb.priority = 1;
        svcb.set_port(port);

        // TODO: Add more parameters like the signer key!
        // svcb.set_param(key, value)
    };

    // TODO: announce A/AAAA records as well for Noise connections?
    // Or maybe Iroh's magic socket

    packet.answers.push(pkarr::dns::ResourceRecord::new(
        "@".try_into().unwrap(),
        pkarr::dns::CLASS::IN,
        60 * 60,
        pkarr::dns::rdata::RData::SVCB(svcb),
    ));

    // Sign with the homeserver keypair, then publish via the pkarr client.
    let signed_packet = SignedPacket::from_packet(keypair, &packet)?;
    pkarr_client.publish(&signed_packet).await?;

    Ok(())
}

View File

@@ -0,0 +1,43 @@
use axum::{
extract::DefaultBodyLimit,
routing::{delete, get, post, put},
Router,
};
use tower_cookies::CookieManagerLayer;
use tower_http::{cors::CorsLayer, trace::TraceLayer};
use crate::server::AppState;
use self::pkarr::pkarr_router;
mod auth;
mod feed;
mod pkarr;
mod public;
mod root;
/// Build the core application router: root, auth/session endpoints,
/// per-user public storage, and the events feed, all sharing `AppState`.
fn base(state: AppState) -> Router {
    Router::new()
        .route("/", get(root::handler))
        .route("/signup", post(auth::signup))
        .route("/session", post(auth::signin))
        // Per-user session inspection and sign-out.
        .route("/:pubky/session", get(auth::session))
        .route("/:pubky/session", delete(auth::signout))
        // Public storage: write / read / delete entries under a user's tree.
        .route("/:pubky/*path", put(public::put))
        .route("/:pubky/*path", get(public::get))
        .route("/:pubky/*path", delete(public::delete))
        .route("/events/", get(feed::feed))
        // Cookie layer must wrap the routes so handlers can read/write cookies.
        .layer(CookieManagerLayer::new())
        // TODO: revisit if we enable streaming big payloads
        // TODO: maybe add to a separate router (drive router?).
        .layer(DefaultBodyLimit::max(16 * 1024))
        .with_state(state)
}
/// Assemble the full application: the base router plus the embedded pkarr
/// relay, permissive CORS, and HTTP tracing.
pub fn create_app(state: AppState) -> Router {
    let router = base(state.clone());

    // TODO: Only enable this for test environments?
    router
        .nest("/pkarr", pkarr_router(state))
        .layer(CorsLayer::very_permissive())
        .layer(TraceLayer::new_for_http())
}

View File

@@ -0,0 +1,138 @@
use axum::{
debug_handler,
extract::State,
http::{uri::Scheme, StatusCode, Uri},
response::IntoResponse,
};
use axum_extra::{headers::UserAgent, TypedHeader};
use bytes::Bytes;
use tower_cookies::{cookie::SameSite, Cookie, Cookies};
use pubky_common::{crypto::random_bytes, session::Session, timestamp::Timestamp};
use crate::{
database::tables::{
sessions::{SessionsTable, SESSIONS_TABLE},
users::User,
},
error::{Error, Result},
extractors::Pubky,
server::AppState,
};
/// `POST /signup`: create an account on this homeserver.
///
/// Currently delegates entirely to [signin]: verifies the auth token in the
/// body, creates the user record if missing, and starts a session.
#[debug_handler]
pub async fn signup(
    State(state): State<AppState>,
    user_agent: Option<TypedHeader<UserAgent>>,
    cookies: Cookies,
    uri: Uri,
    body: Bytes,
) -> Result<impl IntoResponse> {
    // TODO: Verify invitation link.
    // TODO: add errors in case of already existing user.
    signin(State(state), user_agent, cookies, uri, body).await
}
/// `GET /:pubky/session`: return the stored (serialized) session for this
/// pubky, or 404 when the cookie is absent or unknown to the sessions table.
pub async fn session(
    State(state): State<AppState>,
    cookies: Cookies,
    pubky: Pubky,
) -> Result<impl IntoResponse> {
    // The session cookie is named after the user's public key (see signin).
    if let Some(cookie) = cookies.get(&pubky.public_key().to_string()) {
        let rtxn = state.db.env.read_txn()?;

        let sessions: SessionsTable = state
            .db
            .env
            .open_database(&rtxn, Some(SESSIONS_TABLE))?
            .expect("Session table already created");

        // The cookie value is the session secret keying the sessions table.
        if let Some(session) = sessions.get(&rtxn, cookie.value())? {
            let session = session.to_owned();
            rtxn.commit()?;

            // TODO: add content-type
            return Ok(session);
        };

        rtxn.commit()?;
    };

    Err(Error::with_status(StatusCode::NOT_FOUND))
}
/// `DELETE /:pubky/session`: delete the caller's session record.
///
/// Returns 401 when no session cookie for this pubky is present; the delete
/// itself is best-effort (a missing record is ignored).
pub async fn signout(
    State(state): State<AppState>,
    cookies: Cookies,
    pubky: Pubky,
) -> Result<impl IntoResponse> {
    if let Some(cookie) = cookies.get(&pubky.public_key().to_string()) {
        let mut wtxn = state.db.env.write_txn()?;

        let sessions: SessionsTable = state
            .db
            .env
            .open_database(&wtxn, Some(SESSIONS_TABLE))?
            .expect("Session table already created");

        // Ignore the result: signing out an already-deleted session is fine.
        let _ = sessions.delete(&mut wtxn, cookie.value());

        wtxn.commit()?;

        return Ok(());
    };

    Err(Error::with_status(StatusCode::UNAUTHORIZED))
}
/// `POST /session`: verify a signed auth token from the request body,
/// upsert the user record, create a session, and set a session cookie
/// named after the user's public key.
pub async fn signin(
    State(state): State<AppState>,
    user_agent: Option<TypedHeader<UserAgent>>,
    cookies: Cookies,
    uri: Uri,
    body: Bytes,
) -> Result<impl IntoResponse> {
    // The body is a signed auth token; verification yields the signer.
    let token = state.verifier.verify(&body)?;

    let public_key = token.pubky();

    let mut wtxn = state.db.env.write_txn()?;

    let users = state.db.tables.users;
    if let Some(existing) = users.get(&wtxn, public_key)? {
        // NOTE(review): re-putting the unchanged record is a no-op write —
        // presumably a placeholder for updating e.g. a last-seen field; confirm.
        users.put(&mut wtxn, public_key, &existing)?;
    } else {
        users.put(
            &mut wtxn,
            public_key,
            &User {
                created_at: Timestamp::now().into_inner(),
            },
        )?;
    }

    // Random 16-byte secret, Crockford base32-encoded; used both as the
    // sessions-table key and as the cookie value.
    let session_secret = base32::encode(base32::Alphabet::Crockford, &random_bytes::<16>());

    let session = Session::new(&token, user_agent.map(|ua| ua.to_string())).serialize();

    state
        .db
        .tables
        .sessions
        .put(&mut wtxn, &session_secret, &session)?;

    // Cookie name is the public key, so multiple pubkys can hold sessions
    // against the same homeserver host simultaneously.
    let mut cookie = Cookie::new(public_key.to_string(), session_secret);
    cookie.set_path("/");
    if *uri.scheme().unwrap_or(&Scheme::HTTP) == Scheme::HTTPS {
        cookie.set_secure(true);
        cookie.set_same_site(SameSite::None);
    }
    cookie.set_http_only(true);
    cookies.add(cookie);

    wtxn.commit()?;

    Ok(session)
}

View File

@@ -0,0 +1,71 @@
use std::collections::HashMap;
use axum::{
body::Body,
extract::{Query, State},
http::{header, Response, StatusCode},
response::IntoResponse,
};
use crate::{
database::{tables::events::Event, MAX_LIST_LIMIT},
error::Result,
server::AppState,
};
/// `GET /events/`: plain-text feed of storage events for pagination.
///
/// Query params:
/// - `limit`:  max number of event lines, clamped to `MAX_LIST_LIMIT`.
/// - `cursor`: only events strictly after this cursor are returned; values
///   shorter than 13 characters are reset to the all-zero cursor.
///
/// Each line is `<operation> <url>`; when any events were returned a final
/// `cursor: <next>` line is appended so clients can resume.
pub async fn feed(
    State(state): State<AppState>,
    Query(params): Query<HashMap<String, String>>,
) -> Result<impl IntoResponse> {
    let txn = state.db.env.read_txn()?;

    let limit = params
        .get("limit")
        .and_then(|l| l.parse::<u16>().ok())
        .unwrap_or(MAX_LIST_LIMIT)
        .min(MAX_LIST_LIMIT);

    let mut cursor = params
        .get("cursor")
        .map(|c| c.as_str())
        .unwrap_or("0000000000000");

    // Guard against bad cursor
    if cursor.len() < 13 {
        cursor = "0000000000000"
    }

    let mut result: Vec<String> = vec![];
    let mut next_cursor = cursor.to_string();

    // Walk forward one event at a time, advancing the cursor on each hit.
    for _ in 0..limit {
        match state
            .db
            .tables
            .events
            .get_greater_than(&txn, &next_cursor)?
        {
            Some((timestamp, event_bytes)) => {
                let event = Event::deserialize(event_bytes)?;

                let line = format!("{} {}", event.operation(), event.url());
                next_cursor = timestamp.to_string();

                result.push(line);
            }
            None => break,
        };
    }

    if !result.is_empty() {
        result.push(format!("cursor: {next_cursor}"))
    }

    txn.commit()?;

    Ok(Response::builder()
        .status(StatusCode::OK)
        .header(header::CONTENT_TYPE, "text/plain")
        .body(Body::from(result.join("\n")))
        .unwrap())
}

View File

@@ -0,0 +1,58 @@
use axum::{
body::{Body, Bytes},
extract::State,
http::StatusCode,
response::IntoResponse,
routing::{get, put},
Router,
};
use futures_util::stream::StreamExt;
use pkarr::SignedPacket;
use crate::{
error::{Error, Result},
extractors::Pubky,
server::AppState,
};
/// Pkarr relay, helpful for testing.
///
/// For real production, you should use a [production ready
/// relay](https://github.com/pubky/pkarr/server).
pub fn pkarr_router(state: AppState) -> Router {
    // Both methods target the same path, so chain them on one MethodRouter.
    Router::new()
        .route("/:pubky", put(pkarr_put).get(pkarr_get))
        .with_state(state)
}
/// `PUT /pkarr/:pubky`: collect the relay payload from the body, build a
/// [SignedPacket] for the pubky in the path, and publish it.
pub async fn pkarr_put(
    State(state): State<AppState>,
    pubky: Pubky,
    body: Body,
) -> Result<impl IntoResponse> {
    // 1104 is presumably the maximum relay payload size — capacity only;
    // larger bodies would still grow the Vec. TODO confirm the limit is
    // enforced by `from_relay_payload`.
    let mut bytes = Vec::with_capacity(1104);

    let mut stream = body.into_data_stream();
    while let Some(chunk) = stream.next().await {
        bytes.extend_from_slice(&chunk?)
    }

    let public_key = pubky.public_key().to_owned();

    // Rebuild the signed packet from the payload, bound to this public key.
    let signed_packet = SignedPacket::from_relay_payload(&public_key, &Bytes::from(bytes))?;

    state.pkarr_client.publish(&signed_packet).await?;

    Ok(())
}
pub async fn pkarr_get(State(state): State<AppState>, pubky: Pubky) -> Result<impl IntoResponse> {
if let Some(signed_packet) = state.pkarr_client.resolve(pubky.public_key()).await? {
return Ok(signed_packet.to_relay_payload());
}
Err(Error::with_status(StatusCode::NOT_FOUND))
}

View File

@@ -0,0 +1,176 @@
use std::collections::HashMap;
use axum::{
body::{Body, Bytes},
extract::{Query, State},
http::{header, Response, StatusCode},
response::IntoResponse,
};
use futures_util::stream::StreamExt;
use pkarr::PublicKey;
use tower_cookies::Cookies;
use crate::{
error::{Error, Result},
extractors::{EntryPath, Pubky},
server::AppState,
};
/// `PUT /:pubky/*path`: write a public entry for an authorized user.
///
/// The request body is streamed chunk-by-chunk over a bounded channel into
/// a blocking task that persists the entry.
pub async fn put(
    State(mut state): State<AppState>,
    pubky: Pubky,
    path: EntryPath,
    cookies: Cookies,
    body: Body,
) -> Result<impl IntoResponse> {
    let public_key = pubky.public_key().clone();
    let path = path.as_str();

    // Reject non-`pub/` paths before doing any session work.
    verify(path)?;
    authorize(&mut state, cookies, &public_key, path)?;

    let mut stream = body.into_data_stream();

    // bounded(1) applies backpressure between the async stream and the
    // blocking writer task.
    let (tx, rx) = flume::bounded::<Bytes>(1);

    let path = path.to_string();

    // TODO: refactor Database to clean up this scope.
    let done = tokio::task::spawn_blocking(move || -> Result<()> {
        // TODO: this is a blocking operation, which is ok for small
        // payloads (we have 16 kb limit for now) but later we need
        // to stream this to filesystem, and keep track of any failed
        // writes to GC these files later.
        state.db.put_entry(&public_key, &path, rx)?;

        Ok(())
    });

    while let Some(next) = stream.next().await {
        let chunk = next?;

        tx.send(chunk)?;
    }

    // Dropping the sender closes the channel so the writer task can finish.
    drop(tx);

    done.await.expect("join error")?;

    // TODO: return relevant headers, like Etag?
    Ok(())
}
/// `GET /:pubky/*path`: read a public entry, or list a directory when the
/// path ends with `/`.
///
/// Listing supports `reverse`, `limit`, `cursor`, and `shallow` query
/// params and returns one URL per line as plain text.
pub async fn get(
    State(state): State<AppState>,
    pubky: Pubky,
    path: EntryPath,
    Query(params): Query<HashMap<String, String>>,
) -> Result<impl IntoResponse> {
    verify(path.as_str())?;

    let public_key = pubky.public_key();
    let path = path.as_str();

    // A trailing slash means "list this directory" rather than "read a file".
    if path.ends_with('/') {
        let txn = state.db.env.read_txn()?;

        // Directory keys are namespaced by the owner's public key.
        let path = format!("{public_key}/{path}");

        if !state.db.contains_directory(&txn, &path)? {
            return Err(Error::new(
                StatusCode::NOT_FOUND,
                "Directory Not Found".into(),
            ));
        }

        // Handle listing
        let vec = state.db.list(
            &txn,
            &path,
            params.contains_key("reverse"),
            params.get("limit").and_then(|l| l.parse::<u16>().ok()),
            params.get("cursor").map(|cursor| cursor.into()),
            params.contains_key("shallow"),
        )?;

        return Ok(Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "text/plain")
            .body(Body::from(vec.join("\n")))
            .unwrap());
    }

    // TODO: Enable streaming
    match state.db.get_blob(public_key, path) {
        Err(error) => Err(error)?,
        Ok(Some(bytes)) => Ok(Response::builder().body(Body::from(bytes)).unwrap()),
        Ok(None) => Err(Error::new(StatusCode::NOT_FOUND, "File Not Found".into())),
    }
}
/// `DELETE /:pubky/*path`: remove a public entry for an authorized user.
///
/// Validates the path before authorizing, matching the order used by `put`
/// (previously authorization ran first, so an invalid path yielded 401
/// instead of 403 for unauthenticated callers, and cost a session lookup).
pub async fn delete(
    State(mut state): State<AppState>,
    pubky: Pubky,
    path: EntryPath,
    cookies: Cookies,
) -> Result<impl IntoResponse> {
    let public_key = pubky.public_key().clone();
    let path = path.as_str();

    // Path check first, then session check — consistent with `put`.
    verify(path)?;
    authorize(&mut state, cookies, &public_key, path)?;

    let deleted = state.db.delete_entry(&public_key, path)?;

    if !deleted {
        // TODO: if the path ends with `/` return a `CONFLICT` error?
        return Err(Error::with_status(StatusCode::NOT_FOUND));
    }

    // TODO: return relevant headers, like Etag?
    Ok(())
}
/// Authorize write (PUT or DELETE) for Public paths.
///
/// 401 when no valid session cookie exists for `public_key`; 403 when a
/// session exists but lacks a matching Write capability.
fn authorize(
    state: &mut AppState,
    cookies: Cookies,
    public_key: &PublicKey,
    path: &str,
) -> Result<()> {
    // TODO: can we move this logic to the extractor or a layer
    // to perform this validation?
    let session = state
        .db
        .get_session(cookies, public_key)?
        .ok_or(Error::with_status(StatusCode::UNAUTHORIZED))?;

    // The session must belong to this pubky and hold a Write capability
    // whose scope is a prefix of the requested path.
    // NOTE(review): `&cap.scope[1..]` assumes every scope begins with '/'
    // (e.g. "/pub/...") — confirm scopes are validated on creation,
    // otherwise an empty scope would panic here.
    if session.pubky() == public_key
        && session.capabilities().iter().any(|cap| {
            path.starts_with(&cap.scope[1..])
                && cap
                    .actions
                    .contains(&pubky_common::capabilities::Action::Write)
        })
    {
        return Ok(());
    }

    Err(Error::with_status(StatusCode::FORBIDDEN))
}
fn verify(path: &str) -> Result<()> {
if !path.starts_with("pub/") {
return Err(Error::new(
StatusCode::FORBIDDEN,
"Writing to directories other than '/pub/' is forbidden".into(),
));
}
// TODO: should we forbid paths ending with `/`?
Ok(())
}

View File

@@ -0,0 +1,5 @@
use axum::response::IntoResponse;
/// `GET /`: simple identification banner for the homeserver root.
pub async fn handler() -> Result<impl IntoResponse, String> {
    // Fixed grammar in the user-facing banner ("This a" -> "This is a").
    Ok("This is a Pubky homeserver.".to_string())
}

View File

@@ -0,0 +1,161 @@
use std::{future::IntoFuture, net::SocketAddr};
use anyhow::{Error, Result};
use pubky_common::auth::AuthVerifier;
use tokio::{net::TcpListener, signal, task::JoinSet};
use tracing::{debug, info, warn};
use pkarr::{
mainline::dht::{DhtSettings, Testnet},
PkarrClient, PkarrClientAsync, PublicKey, Settings,
};
use crate::{config::Config, database::DB, pkarr::publish_server_packet};
/// A running homeserver: owns its config, the bound HTTP port, and the
/// set of background tasks (currently just the axum server future).
#[derive(Debug)]
pub struct Homeserver {
    // Actual bound port; may differ from the configured one when 0 was
    // requested (OS-assigned).
    port: u16,
    config: Config,
    tasks: JoinSet<std::io::Result<()>>,
}
/// Shared state cloned into every request handler.
#[derive(Clone, Debug)]
pub(crate) struct AppState {
    // Verifies signed auth tokens on signup/signin.
    pub verifier: AuthVerifier,
    pub db: DB,
    pub pkarr_client: PkarrClientAsync,
}
impl Homeserver {
    /// Start a homeserver: open the database, build the HTTP router, bind a
    /// TCP listener, spawn the server task, and publish the server's pkarr
    /// packet so clients can discover it by public key.
    pub async fn start(config: Config) -> Result<Self> {
        debug!(?config);

        let keypair = config.keypair();

        let db = DB::open(&config.storage()?)?;

        let pkarr_client = PkarrClient::new(Settings {
            dht: DhtSettings {
                bootstrap: config.bootstsrap(),
                request_timeout: config.dht_request_timeout(),
                ..Default::default()
            },
            ..Default::default()
        })?
        .as_async();

        let state = AppState {
            verifier: AuthVerifier::default(),
            db,
            pkarr_client: pkarr_client.clone(),
        };

        let app = crate::routes::create_app(state);

        let mut tasks = JoinSet::new();

        // Bind on all interfaces; when the configured port is 0 the OS picks
        // one, so read the effective port back from the listener.
        let listener = TcpListener::bind(SocketAddr::from(([0, 0, 0, 0], config.port()))).await?;
        let port = listener.local_addr()?.port();

        // Spawn http server task.
        // (Removed a redundant `let app = app.clone();` that shadowed a value
        // never used again.)
        tasks.spawn(
            axum::serve(
                listener,
                app.into_make_service_with_connect_info::<SocketAddr>(),
            )
            .with_graceful_shutdown(shutdown_signal())
            .into_future(),
        );

        info!("Homeserver listening on http://localhost:{port}");

        publish_server_packet(pkarr_client, &keypair, config.domain(), port).await?;

        info!("Homeserver listening on pubky://{}", keypair.public_key());

        Ok(Self {
            tasks,
            config,
            port,
        })
    }

    /// Test version of [Homeserver::start], using mainline Testnet, and a temporary storage.
    pub async fn start_test(testnet: &Testnet) -> Result<Self> {
        info!("Running testnet..");

        Homeserver::start(Config::test(testnet)).await
    }

    // === Getters ===

    /// The TCP port the server is actually listening on.
    pub fn port(&self) -> u16 {
        self.port
    }

    /// The homeserver's public key, derived from the configured keypair.
    pub fn public_key(&self) -> PublicKey {
        self.config.keypair().public_key()
    }

    // === Public Methods ===

    /// Shutdown the server and wait for all tasks to complete.
    pub async fn shutdown(mut self) -> Result<()> {
        self.tasks.abort_all();
        self.run_until_done().await?;

        Ok(())
    }

    /// Wait for all tasks to complete.
    ///
    /// Runs forever unless tasks fail.
    pub async fn run_until_done(mut self) -> Result<()> {
        let mut final_res: Result<()> = Ok(());

        while let Some(res) = self.tasks.join_next().await {
            match res {
                Ok(Ok(())) => {}
                // Cancellation (e.g. after `shutdown`) is not an error.
                Err(err) if err.is_cancelled() => {}
                Ok(Err(err)) => {
                    warn!(?err, "task failed");
                    final_res = Err(Error::from(err));
                }
                Err(err) => {
                    warn!(?err, "task panicked");
                    final_res = Err(err.into());
                }
            }
        }

        final_res
    }
}
/// Resolve when either Ctrl+C or (on unix) SIGTERM is received, driving
/// axum's graceful shutdown.
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    // No SIGTERM on non-unix targets: this branch never resolves there.
    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    fn graceful_shutdown() {
        info!("Gracefully Shutting down..");
    }

    tokio::select! {
        _ = ctrl_c => graceful_shutdown(),
        _ = terminate => graceful_shutdown(),
    }
}

View File

@@ -0,0 +1,44 @@
[package]
name = "pubky"
version = "0.1.0"
edition = "2021"
description = "Pubky client"
license = "MIT"
repository = "https://github.com/pubky/pubky"
keywords = ["web", "dht", "dns", "decentralized", "identity"]
[lib]
crate-type = ["cdylib", "rlib"]
[dependencies]
thiserror = "1.0.62"
wasm-bindgen = "0.2.92"
url = "2.5.2"
bytes = "^1.7.1"
base64 = "0.22.1"
pubky-common = { version = "0.1.0", path = "../pubky-common" }
pkarr = { workspace = true, features = ["async"] }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
reqwest = { version = "0.12.5", features = ["cookies", "rustls-tls"], default-features = false }
tokio = { version = "1.37.0", features = ["full"] }
[target.'cfg(target_arch = "wasm32")'.dependencies]
reqwest = { version = "0.12.5", default-features = false }
js-sys = "0.3.69"
wasm-bindgen = "0.2.92"
wasm-bindgen-futures = "0.4.42"
[dev-dependencies]
pubky_homeserver = { path = "../pubky-homeserver" }
tokio = "1.37.0"
[features]
[package.metadata.docs.rs]
all-features = true
[package.metadata.wasm-pack.profile.release]
wasm-opt = ['-g', '-O']

6
rust/pubky/pubky/pkg/.gitignore vendored Normal file
View File

@@ -0,0 +1,6 @@
index.cjs
browser.js
coverage
node_modules
package-lock.json
pubky*

View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2023
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -0,0 +1,266 @@
# Pubky
JavaScript implementation of [Pubky](https://github.com/pubky/pubky).
## Table of Contents
- [Install](#install)
- [Getting Started](#getting-started)
- [API](#api)
- [Test and Development](#test-and-development)
## Install
```bash
npm install @synonymdev/pubky
```
### Prerequisites
For Nodejs, you need Node v20 or later.
## Getting Started
```js
import { PubkyClient, Keypair, PublicKey } from '../index.js'
// Initialize PubkyClient with Pkarr relay(s).
let client = new PubkyClient();
// Generate a keypair
let keypair = Keypair.random();
// Create a new account
let homeserver = PublicKey.from("8pinxxgqs41n4aididenw5apqp1urfmzdztr8jt4abrkdn435ewo");
await client.signup(keypair, homeserver)
const publicKey = keypair.publicKey();
// Pubky URL
let url = `pubky://${publicKey.z32()}/pub/example.com/arbitrary`;
// Verify that you are signed in.
const session = await client.session(publicKey)
const body = Buffer.from(JSON.stringify({ foo: 'bar' }))
// PUT public data, by authorized client
await client.put(url, body);
// GET public data without signup or signin
{
const client = new PubkyClient();
let response = await client.get(url);
}
// Delete public data, by authorized client
await client.delete(url);
```
## API
### PubkyClient
#### constructor
```js
let client = new PubkyClient()
```
#### signup
```js
await client.signup(keypair, homeserver)
```
- keypair: An instance of [Keypair](#keypair).
- homeserver: An instance of [PublicKey](#publickey) representing the homeserver.
Returns:
- session: An instance of [Session](#session).
#### signin
```js
let session = await client.signin(keypair)
```
- keypair: An instance of [Keypair](#keypair).
Returns:
- An instance of [Session](#session).
#### signout
```js
await client.signout(publicKey)
```
- publicKey: An instance of [PublicKey](#publickey).
#### authRequest
```js
let [pubkyauthUrl, sessionPromise] = client.authRequest(relay, capabilities);
showQr(pubkyauthUrl);
let session = await sessionPromise;
```
Sign in to a user's Homeserver, without access to their [Keypair](#keypair), nor even [PublicKey](#publickey),
instead request permissions (showing the user pubkyauthUrl), and await a Session after the user consenting to that request.
- relay: A URL to an [HTTP relay](https://httprelay.io/features/link/) endpoint.
- capabilities: A list of capabilities required for the app for example `/pub/pubky.app/:rw,/pub/example.com/:r`.
Returns:
- pubkyauthUrl: A url to show to the user to scan or paste into an Authenticator app holding the user [Keypair](#keypair)
- sessionPromise: A promise that resolves into a [Session](#session) on success.
#### sendAuthToken
```js
await client.sendAuthToken(keypair, pubkyauthUrl);
```
Consenting to authentication or authorization according to the required capabilities in the `pubkyauthUrl` , and sign and send an auth token to the requester.
- keypair: An instance of [KeyPair](#keypair)
- pubkyauthUrl: A string `pubkyauth://` url
#### session {#session-method}
```js
let session = await client.session(publicKey)
```
- publicKey: An instance of [PublicKey](#publickey).
- Returns: A [Session](#session) object if signed in, or undefined if not.
#### put
```js
let response = await client.put(url, body);
```
- url: A string representing the Pubky URL.
- body: A Buffer containing the data to be stored.
### get
```js
let response = await client.get(url)
```
- url: A string representing the Pubky URL.
- Returns: A response object containing the requested data.
### delete
```js
let response = await client.delete(url);
```
- url: A string representing the Pubky URL.
### list
```js
let response = await client.list(url, cursor, reverse, limit)
```
- url: A string representing the Pubky URL. The path in that url is the prefix that you want to list files within.
- cursor: Usually the last URL from previous calls. List urls after/before (depending on `reverse`) the cursor.
- reverse: Whether or not return urls in reverse order.
- limit: Number of urls to return.
- Returns: A list of URLs of the files in the `url` you passed.
### Keypair
#### random
```js
let keypair = Keypair.random()
```
- Returns: A new random Keypair.
#### fromSecretKey
```js
let keypair = Keypair.fromSecretKey(secretKey)
```
- secretKey: A 32 bytes Uint8array.
- Returns: A new Keypair.
#### publicKey {#publickey-method}
```js
let publicKey = keypair.publicKey()
```
- Returns: The [PublicKey](#publickey) associated with the Keypair.
#### secretKey
```js
let secretKey = keypair.secretKey()
```
- Returns: The Uint8array secret key associated with the Keypair.
### PublicKey
#### from
```js
let publicKey = PublicKey.from(string);
```
- string: A string representing the public key.
- Returns: A new PublicKey instance.
#### z32
```js
let pubky = publicKey.z32();
```
Returns: The z-base-32 encoded string representation of the PublicKey.
### Session
#### pubky
```js
let pubky = session.pubky();
```
Returns an instance of [PublicKey](#publickey)
#### capabilities
```js
let capabilities = session.capabilities();
```
Returns an array of capabilities, for example `["/pub/pubky.app/:rw"]`
### Helper functions
#### createRecoveryFile
```js
let recoveryFile = createRecoveryFile(keypair, passphrase)
```
- keypair: An instance of [Keypair](#keypair).
- passphrase: A utf-8 string [passphrase](https://www.useapassphrase.com/).
- Returns: A recovery file with a spec line and an encrypted secret key.
#### decryptRecoveryFile
```js
let keypair = decryptRecoveryFile(recoveryFile, passphrase)
```
```
- recoveryFile: An instance of Uint8Array containing the recovery file blob.
- passphrase: A utf-8 string [passphrase](https://www.useapassphrase.com/).
- Returns: An instance of [Keypair](#keypair).
## Test and Development
For test and development, you can run a local homeserver in a test network.
If you don't have Cargo Installed, start by installing it:
```bash
curl https://sh.rustup.rs -sSf | sh
```
Clone the Pubky repository:
```bash
git clone https://github.com/pubky/pubky
cd pubky/pkg
```
Run the local testnet server
```bash
npm run testnet
```
Use the logged addresses as inputs to `PubkyClient`
```js
import { PubkyClient } from '../index.js'
const client = PubkyClient.testnet();
```

View File

@@ -0,0 +1,41 @@
{
"name": "@synonymdev/pubky",
"type": "module",
"description": "Pubky client",
"version": "0.1.15",
"license": "MIT",
"repository": {
"type": "git",
"url": "https://github.com/pubky/pubky"
},
"scripts": {
"testnet": "cargo run -p pubky_homeserver -- --testnet",
"test": "npm run test-nodejs && npm run test-browser",
"test-nodejs": "tape test/*.js -cov",
"test-browser": "browserify test/*.js -p esmify | npx tape-run",
"build": "cargo run --bin bundle_pubky_npm",
"prepublishOnly": "npm run build && npm run test"
},
"files": [
"index.cjs",
"browser.js",
"pubky.d.ts",
"pubky_bg.wasm"
],
"main": "index.cjs",
"browser": "browser.js",
"types": "pubky.d.ts",
"keywords": [
"web",
"dht",
"dns",
"decentralized",
"identity"
],
"devDependencies": {
"browser-resolve": "^2.0.0",
"esmify": "^2.1.1",
"tape": "^5.8.1",
"tape-run": "^11.0.0"
}
}

View File

@@ -0,0 +1,63 @@
import test from 'tape'
import { PubkyClient, Keypair, PublicKey } from '../index.cjs'
const Homeserver = PublicKey.from('8pinxxgqs41n4aididenw5apqp1urfmzdztr8jt4abrkdn435ewo')
// Full auth lifecycle against the local testnet homeserver:
// signup -> session exists, signout -> session gone, signin -> session back.
test('auth', async (t) => {
  const client = PubkyClient.testnet();

  const keypair = Keypair.random()
  const publicKey = keypair.publicKey()

  await client.signup(keypair, Homeserver)

  const session = await client.session(publicKey)
  t.ok(session, "signup")

  {
    await client.signout(publicKey)

    const session = await client.session(publicKey)
    // Fixed assertion-message typo: "singout" -> "signout".
    t.notOk(session, "signout")
  }

  {
    await client.signin(keypair)

    const session = await client.session(publicKey)
    t.ok(session, "signin")
  }
})
// Third-party authorization flow: an app requests capabilities via an HTTP
// relay, and an "authenticator" holding the keypair signs and sends back an
// auth token; the app ends up with a capability-scoped session.
test("3rd party signin", async (t) => {
  let keypair = Keypair.random();
  let pubky = keypair.publicKey().z32();

  // Third party app side
  let capabilities = "/pub/pubky.app/:rw,/pub/foo.bar/file:r";
  let client = PubkyClient.testnet();
  let [pubkyauth_url, pubkyauthResponse] = client
    .authRequest("https://demo.httprelay.io/link", capabilities);

  if (globalThis.document) {
    // Skip `sendAuthToken` in browser
    // TODO: figure out why does it fail in browser unit tests
    // but not in real browser (check pubky-auth-widget.js commented part)
    return
  }

  // Authenticator side
  {
    // A separate client plays the role of the keypair-holding authenticator.
    let client = PubkyClient.testnet();

    await client.signup(keypair, Homeserver);

    await client.sendAuthToken(keypair, pubkyauth_url)
  }

  let session = await pubkyauthResponse;

  t.is(session.pubky().z32(), pubky)
  t.deepEqual(session.capabilities(), capabilities.split(','))
})

View File

@@ -0,0 +1,21 @@
import test from 'tape'
import { Keypair } from '../index.cjs'
// Deriving a keypair from a fixed 32-byte secret yields a stable public key.
test('generate keys from a seed', async (t) => {
  const secretkey = Buffer.from('5aa93b299a343aa2691739771f2b5b85e740ca14c685793d67870f88fa89dc51', 'hex')
  const keypair = Keypair.fromSecretKey(secretkey)
  const publicKey = keypair.publicKey()

  t.is(publicKey.z32(), 'gcumbhd7sqit6nn457jxmrwqx9pyymqwamnarekgo3xppqo6a19o')
})
// fromSecretKey validates both the argument type and the 32-byte length.
test('fromSecretKey error', async (t) => {
  const secretkey = Buffer.from('5aa93b299a343aa2691739771f2b5b', 'hex') // only 15 bytes

  t.throws(() => Keypair.fromSecretKey(null), /Expected secret_key to be an instance of Uint8Array/)
  t.throws(() => Keypair.fromSecretKey(secretkey), /Expected secret_key to be 32 bytes, got 15/)
})

View File

@@ -0,0 +1,351 @@
import test from 'tape'
import { PubkyClient, Keypair, PublicKey } from '../index.cjs'
const Homeserver = PublicKey.from('8pinxxgqs41n4aididenw5apqp1urfmzdztr8jt4abrkdn435ewo');
// Public storage round-trip: an authorized client PUTs, any anonymous client
// can GET, and after DELETE the anonymous GET returns nothing.
test('public: put/get', async (t) => {
  const client = PubkyClient.testnet();

  const keypair = Keypair.random();

  await client.signup(keypair, Homeserver);

  const publicKey = keypair.publicKey();

  let url = `pubky://${publicKey.z32()}/pub/example.com/arbitrary`;

  const body = Buffer.from(JSON.stringify({ foo: 'bar' }))

  // PUT public data, by authorized client
  await client.put(url, body);

  // A fresh client with no session reads the public data.
  const otherClient = PubkyClient.testnet();

  // GET public data without signup or signin
  {
    let response = await otherClient.get(url);

    t.ok(Buffer.from(response).equals(body))
  }

  // DELETE public data, by authorized client
  await client.delete(url);

  // GET public data without signup or signin
  {
    let response = await otherClient.get(url);

    t.notOk(response)
  }
})
// GETting a path that was never written resolves to a falsy value
// (presumably undefined rather than a thrown error — the catch is a guard).
test("not found", async (t) => {
  const client = PubkyClient.testnet();

  const keypair = Keypair.random();

  await client.signup(keypair, Homeserver);

  const publicKey = keypair.publicKey();

  let url = `pubky://${publicKey.z32()}/pub/example.com/arbitrary`;

  let result = await client.get(url).catch(e => e);

  t.notOk(result);
})
// A PUT after signout is rejected by the homeserver with 401 Unauthorized.
test("unauthorized", async (t) => {
  const client = PubkyClient.testnet();

  const keypair = Keypair.random()
  const publicKey = keypair.publicKey()

  await client.signup(keypair, Homeserver)

  const session = await client.session(publicKey)
  t.ok(session, "signup")

  // Drop the session before attempting the write.
  await client.signout(publicKey)

  const body = Buffer.from(JSON.stringify({ foo: 'bar' }))

  let url = `pubky://${publicKey.z32()}/pub/example.com/arbitrary`;

  // PUT public data, by authorized client
  let result = await client.put(url, body).catch(e => e);

  t.ok(result instanceof Error);
  t.is(
    result.message,
    `HTTP status client error (401 Unauthorized) for url (http://localhost:15411/${publicKey.z32()}/pub/example.com/arbitrary)`
  )
})
// Writing outside the `/pub/` namespace is rejected with 403 Forbidden,
// even for a signed-in client.
test("forbidden", async (t) => {
  const client = PubkyClient.testnet();

  const keypair = Keypair.random()
  const publicKey = keypair.publicKey()

  await client.signup(keypair, Homeserver)

  const session = await client.session(publicKey)
  t.ok(session, "signup")

  const body = Buffer.from(JSON.stringify({ foo: 'bar' }))

  // `/priv/` is outside the allowed `/pub/` prefix.
  let url = `pubky://${publicKey.z32()}/priv/example.com/arbitrary`;

  // PUT public data, by authorized client
  let result = await client.put(url, body).catch(e => e);

  t.ok(result instanceof Error);
  t.is(
    result.message,
    `HTTP status client error (403 Forbidden) for url (http://localhost:15411/${publicKey.z32()}/priv/example.com/arbitrary)`
  )
})
// Directory listing: only entries under the listed prefix are returned,
// sorted ascending; `limit`, forward/reverse order, and both suffix and
// full-URL cursors are exercised.
test("list", async (t) => {
  const client = PubkyClient.testnet();

  const keypair = Keypair.random()
  const publicKey = keypair.publicKey()
  const pubky = publicKey.z32()

  await client.signup(keypair, Homeserver)

  // The `.wrong` entries sort around the target prefix and must be excluded.
  let urls = [
    `pubky://${pubky}/pub/a.wrong/a.txt`,
    `pubky://${pubky}/pub/example.com/a.txt`,
    `pubky://${pubky}/pub/example.com/b.txt`,
    `pubky://${pubky}/pub/example.wrong/a.txt`,
    `pubky://${pubky}/pub/example.com/c.txt`,
    `pubky://${pubky}/pub/example.com/d.txt`,
    `pubky://${pubky}/pub/z.wrong/a.txt`,
  ]

  for (let url of urls) {
    await client.put(url, Buffer.from(""));
  }

  let url = `pubky://${pubky}/pub/example.com/`;

  {
    let list = await client.list(url);

    t.deepEqual(
      list,
      [
        `pubky://${pubky}/pub/example.com/a.txt`,
        `pubky://${pubky}/pub/example.com/b.txt`,
        `pubky://${pubky}/pub/example.com/c.txt`,
        `pubky://${pubky}/pub/example.com/d.txt`,
      ],
      "normal list with no limit or cursor"
    );
  }

  {
    let list = await client.list(url, null, null, 2);

    t.deepEqual(
      list,
      [
        `pubky://${pubky}/pub/example.com/a.txt`,
        `pubky://${pubky}/pub/example.com/b.txt`,
      ],
      "normal list with limit but no cursor"
    );
  }

  {
    // Cursor may be just the entry name relative to the listed directory.
    let list = await client.list(url, "a.txt", null, 2);

    t.deepEqual(
      list,
      [
        `pubky://${pubky}/pub/example.com/b.txt`,
        `pubky://${pubky}/pub/example.com/c.txt`,
      ],
      "normal list with limit and a suffix cursor"
    );
  }

  {
    // ...or the full pubky URL of the last-seen entry.
    let list = await client.list(url, `pubky://${pubky}/pub/example.com/a.txt`, null, 2);

    t.deepEqual(
      list,
      [
        `pubky://${pubky}/pub/example.com/b.txt`,
        `pubky://${pubky}/pub/example.com/c.txt`,
      ],
      "normal list with limit and a full url cursor"
    );
  }

  {
    let list = await client.list(url, null, true);

    t.deepEqual(
      list,
      [
        `pubky://${pubky}/pub/example.com/d.txt`,
        `pubky://${pubky}/pub/example.com/c.txt`,
        `pubky://${pubky}/pub/example.com/b.txt`,
        `pubky://${pubky}/pub/example.com/a.txt`,
      ],
      "reverse list with no limit or cursor"
    );
  }

  {
    let list = await client.list(url, null, true, 2);

    t.deepEqual(
      list,
      [
        `pubky://${pubky}/pub/example.com/d.txt`,
        `pubky://${pubky}/pub/example.com/c.txt`,
      ],
      "reverse list with limit but no cursor"
    );
  }

  {
    let list = await client.list(url, "d.txt", true, 2);

    t.deepEqual(
      list,
      [
        `pubky://${pubky}/pub/example.com/c.txt`,
        `pubky://${pubky}/pub/example.com/b.txt`,
      ],
      "reverse list with limit and a suffix cursor"
    );
  }
})
test('list shallow', async (t) => {
  // Fresh client and identity on the local testnet.
  const client = PubkyClient.testnet();
  const keypair = Keypair.random();
  const pubky = keypair.publicKey().z32();

  await client.signup(keypair, Homeserver);

  // Seed a mix of plain files and files nested one directory deep so a
  // shallow listing has both kinds of entries to report.
  const seeded = [
    `pubky://${pubky}/pub/a.com/a.txt`,
    `pubky://${pubky}/pub/example.com/a.txt`,
    `pubky://${pubky}/pub/example.com/b.txt`,
    `pubky://${pubky}/pub/example.com/c.txt`,
    `pubky://${pubky}/pub/example.com/d.txt`,
    `pubky://${pubky}/pub/example.con/d.txt`,
    `pubky://${pubky}/pub/example.con`,
    `pubky://${pubky}/pub/file`,
    `pubky://${pubky}/pub/file2`,
    `pubky://${pubky}/pub/z.com/a.txt`,
  ];
  for (const path of seeded) {
    await client.put(path, Buffer.from(""));
  }

  const base = `pubky://${pubky}/pub/`;

  // list(url, cursor, reverse, limit, shallow) — run one option combination
  // and compare the result against the expected entries.
  const expectShallow = async (cursor, reverse, limit, expected, message) => {
    const listed = await client.list(base, cursor, reverse, limit, true);
    t.deepEqual(listed, expected, message);
  };

  await expectShallow(null, false, null, [
    `pubky://${pubky}/pub/a.com/`,
    `pubky://${pubky}/pub/example.com/`,
    `pubky://${pubky}/pub/example.con`,
    `pubky://${pubky}/pub/example.con/`,
    `pubky://${pubky}/pub/file`,
    `pubky://${pubky}/pub/file2`,
    `pubky://${pubky}/pub/z.com/`,
  ], "normal list shallow");

  await expectShallow(null, false, 3, [
    `pubky://${pubky}/pub/a.com/`,
    `pubky://${pubky}/pub/example.com/`,
    `pubky://${pubky}/pub/example.con`,
  ], "normal list shallow with limit");

  await expectShallow("example.com/", false, null, [
    `pubky://${pubky}/pub/example.con`,
    `pubky://${pubky}/pub/example.con/`,
    `pubky://${pubky}/pub/file`,
    `pubky://${pubky}/pub/file2`,
    `pubky://${pubky}/pub/z.com/`,
  ], "normal list shallow with cursor");

  await expectShallow(null, true, null, [
    `pubky://${pubky}/pub/z.com/`,
    `pubky://${pubky}/pub/file2`,
    `pubky://${pubky}/pub/file`,
    `pubky://${pubky}/pub/example.con/`,
    `pubky://${pubky}/pub/example.con`,
    `pubky://${pubky}/pub/example.com/`,
    `pubky://${pubky}/pub/a.com/`,
  ], "normal list shallow");

  await expectShallow(null, true, 3, [
    `pubky://${pubky}/pub/z.com/`,
    `pubky://${pubky}/pub/file2`,
    `pubky://${pubky}/pub/file`,
  ], "normal list shallow with limit");
})

View File

@@ -0,0 +1,19 @@
import test from 'tape'
import { Keypair, createRecoveryFile, decryptRecoveryFile } from '../index.cjs'
test('recovery', async (t) => {
  const keypair = Keypair.random();

  // Encrypt the secret key under a passphrase.
  const file = createRecoveryFile(keypair, 'very secure password');

  // Container is 91 bytes: a 19-byte ASCII header plus the encrypted payload.
  t.is(file.length, 91);
  // The header bytes decode to "pubky.org/recovery\n".
  t.deepEqual(
    Array.from(file.slice(0, 19)),
    [112, 117, 98, 107, 121, 46, 111, 114, 103, 47, 114, 101, 99, 111, 118, 101, 114, 121, 10]
  );

  // Round-trip: decrypting with the same passphrase restores the keypair.
  const restored = decryptRecoveryFile(file, 'very secure password');
  t.is(restored.publicKey().z32(), keypair.publicKey().z32());
})

View File

@@ -0,0 +1,65 @@
use std::env;
use std::io;
use std::process::{Command, ExitStatus};
// If the process hangs, try `cargo clean` to remove all locks.
fn main() {
    println!("Building wasm for pubky...");

    // `build_wasm`/`patch` only surface *spawn* failures through `Err`; the
    // child process's own failure is reported via the returned ExitStatus
    // (they merely eprintln! it). Check it here so a failed wasm-pack build
    // or patch step aborts with a non-zero exit instead of silently
    // "succeeding" with stale or missing artifacts.
    let status = build_wasm("nodejs").unwrap();
    assert!(status.success(), "wasm-pack exited with {status}");

    let status = patch().unwrap();
    assert!(status.success(), "patch.mjs exited with {status}");
}
/// Run `wasm-pack build` for this crate against the given target
/// (e.g. "nodejs"), emitting artifacts into `pkg/<target>`.
///
/// Spawn failures surface as `Err`; a failing wasm-pack run is logged to
/// stderr and reported through the returned [ExitStatus].
fn build_wasm(target: &str) -> io::Result<ExitStatus> {
    let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set");
    let out_dir = format!("pkg/{target}");

    let result = Command::new("wasm-pack")
        .arg("build")
        .arg(&manifest_dir)
        .arg("--release")
        .args(["--target", target])
        .args(["--out-dir", &out_dir])
        .output()?;

    println!(
        "wasm-pack {target} output: {}",
        String::from_utf8_lossy(&result.stdout)
    );
    if !result.status.success() {
        eprintln!(
            "wasm-pack failed: {}",
            String::from_utf8_lossy(&result.stderr)
        );
    }

    Ok(result.status)
}
/// Run `src/bin/patch.mjs` under node to post-process the wasm-pack output.
///
/// Spawn failures surface as `Err`; a failing node run is logged to stderr
/// and reported through the returned [ExitStatus].
fn patch() -> io::Result<ExitStatus> {
    let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set");
    let script = format!("{manifest_dir}/src/bin/patch.mjs");

    // Echo the script path so failures are easy to trace.
    println!("{script}");

    let result = Command::new("node").arg(&script).output()?;

    println!(
        "patch.mjs output: {}",
        String::from_utf8_lossy(&result.stdout)
    );
    if !result.status.success() {
        eprintln!(
            "patch.mjs failed: {}",
            String::from_utf8_lossy(&result.stderr)
        );
    }

    Ok(result.status)
}

View File

@@ -0,0 +1,66 @@
// This script is used to generate isomorphic code for web and nodejs
//
// Based on hacks from [this issue](https://github.com/rustwasm/wasm-pack/issues/1334)

import { readFile, writeFile, rename } from "node:fs/promises";
import { fileURLToPath } from 'node:url';
import path, { dirname } from 'node:path';

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);

// Recover the artifact module name from Cargo.toml: wasm-pack names its
// output after the crate with '-' replaced by '_'.
const cargoTomlContent = await readFile(path.join(__dirname, "../../Cargo.toml"), "utf8");
const cargoPackageName = /\[package\]\nname = "(.*?)"/.exec(cargoTomlContent)[1]
const name = cargoPackageName.replace(/-/g, '_')

const content = await readFile(path.join(__dirname, `../../pkg/nodejs/${name}.js`), "utf8");

// Rewrite the CommonJS nodejs bundle into a browser-friendly ES module.
// NOTE(review): these string/regex replacements are tightly coupled to the
// exact shape of this wasm-bindgen/wasm-pack version's output — re-verify
// after toolchain upgrades.
const patched = content
  // use global TextDecoder TextEncoder
  .replace("require(`util`)", "globalThis")
  // attach to `imports` instead of module.exports
  .replace("= module.exports", "= imports")
  // Export classes
  .replace(/\nclass (.*?) \{/g, "\n export class $1 {")
  // Export functions
  .replace(/\nmodule.exports.(.*?) = function/g, "\nimports.$1 = $1;\nexport function $1")
  // Add exports to 'imports'
  .replace(/\nmodule\.exports\.(.*?)\s+/g, "\nimports.$1")
  // Export default
  .replace(/$/, 'export default imports')
  // inline wasm bytes: the injected snippet below replaces the nodejs
  // filesystem load with a base64 decoder plus the embedded binary, so the
  // browser build needs no fetch/fs access at load time
  .replace(
    /\nconst path.*\nconst bytes.*\n/,
    `
var __toBinary = /* @__PURE__ */ (() => {
var table = new Uint8Array(128);
for (var i = 0; i < 64; i++)
table[i < 26 ? i + 65 : i < 52 ? i + 71 : i < 62 ? i - 4 : i * 4 - 205] = i;
return (base64) => {
var n = base64.length, bytes = new Uint8Array((n - (base64[n - 1] == "=") - (base64[n - 2] == "=")) * 3 / 4 | 0);
for (var i2 = 0, j = 0; i2 < n; ) {
var c0 = table[base64.charCodeAt(i2++)], c1 = table[base64.charCodeAt(i2++)];
var c2 = table[base64.charCodeAt(i2++)], c3 = table[base64.charCodeAt(i2++)];
bytes[j++] = c0 << 2 | c1 >> 4;
bytes[j++] = c1 << 4 | c2 >> 2;
bytes[j++] = c2 << 6 | c3;
}
return bytes;
};
})();
const bytes = __toBinary(${JSON.stringify(await readFile(path.join(__dirname, `../../pkg/nodejs/${name}_bg.wasm`), "base64"))
});
`,
  );

// Browser build: the patched module with its exports also attached to a global.
await writeFile(path.join(__dirname, `../../pkg/browser.js`), patched + "\nglobalThis['pubky'] = imports");

// Move outside of nodejs
await Promise.all([".js", ".d.ts", "_bg.wasm"].map(suffix =>
  rename(
    path.join(__dirname, `../../pkg/nodejs/${name}${suffix}`),
    path.join(__dirname, `../../pkg/${suffix === '.js' ? "index.cjs" : (name + suffix)}`),
  ))
)

View File

@@ -0,0 +1,56 @@
//! Main Crate Error
use pkarr::dns::SimpleDnsError;
/// Alias Result to be the crate Result, defaulting the error type to [Error].
pub type Result<T, E = Error> = core::result::Result<T, E>;

#[derive(thiserror::Error, Debug)]
/// Error type for the pubky client crate.
pub enum Error {
    /// For starter, to remove as code matures.
    #[error("Generic error: {0}")]
    Generic(String),

    /// No usable endpoint (homeserver or service) could be resolved for the target.
    #[error("Could not resolve endpoint for {0}")]
    ResolveEndpoint(String),

    /// The given value could not be converted into a [url::Url].
    #[error("Could not convert the passed type into a Url")]
    InvalidUrl,

    // === Transparent ===
    /// DNS (de)serialization error from pkarr's simple-dns.
    #[error(transparent)]
    Dns(#[from] SimpleDnsError),

    /// Error bubbled up from the pkarr client.
    #[error(transparent)]
    Pkarr(#[from] pkarr::Error),

    /// URL parsing error.
    #[error(transparent)]
    Url(#[from] url::ParseError),

    /// HTTP transport or status error.
    #[error(transparent)]
    Reqwest(#[from] reqwest::Error),

    /// Session (de)serialization error.
    #[error(transparent)]
    Session(#[from] pubky_common::session::Error),

    /// Encryption/decryption error.
    #[error(transparent)]
    Crypto(#[from] pubky_common::crypto::Error),

    /// Recovery file encode/decode error.
    #[error(transparent)]
    RecoveryFile(#[from] pubky_common::recovery_file::Error),

    /// Auth token signing/verification error.
    #[error(transparent)]
    AuthToken(#[from] pubky_common::auth::Error),
}
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::JsValue;
#[cfg(target_arch = "wasm32")]
impl From<Error> for JsValue {
    /// Surface crate errors to JS callers as a proper `Error` object,
    /// carrying the Display message.
    fn from(error: Error) -> JsValue {
        js_sys::Error::new(&error.to_string()).into()
    }
}

View File

@@ -0,0 +1,36 @@
mod error;
mod shared;
#[cfg(not(target_arch = "wasm32"))]
mod native;
#[cfg(target_arch = "wasm32")]
mod wasm;
#[cfg(target_arch = "wasm32")]
use std::{
collections::HashSet,
sync::{Arc, RwLock},
};
use wasm_bindgen::prelude::*;
#[cfg(not(target_arch = "wasm32"))]
use ::pkarr::PkarrClientAsync;
pub use error::Error;
#[cfg(not(target_arch = "wasm32"))]
pub use crate::shared::list_builder::ListBuilder;
/// Client for the Pubky protocol: an HTTP client plus the Pkarr machinery
/// needed to resolve and publish `pubky://` identities.
///
/// The field set differs per target; native and wasm implementations live in
/// the `native` and `wasm` modules respectively.
#[derive(Debug, Clone)]
#[wasm_bindgen]
pub struct PubkyClient {
    /// HTTP client used for homeserver and relay requests.
    http: reqwest::Client,
    /// Async Pkarr client used to resolve/publish signed packets (native only).
    #[cfg(not(target_arch = "wasm32"))]
    pub(crate) pkarr: PkarrClientAsync,
    /// A cookie jar for nodejs fetch.
    #[cfg(target_arch = "wasm32")]
    pub(crate) session_cookies: Arc<RwLock<HashSet<String>>>,
    /// Pkarr relay base URLs — presumably queried over HTTP by the wasm
    /// implementation (not visible in this file); confirm in the `wasm` module.
    #[cfg(target_arch = "wasm32")]
    pub(crate) pkarr_relays: Vec<String>,
}

View File

@@ -0,0 +1,251 @@
use std::net::ToSocketAddrs;
use std::time::Duration;
use bytes::Bytes;
use pubky_common::{
capabilities::Capabilities,
recovery_file::{create_recovery_file, decrypt_recovery_file},
session::Session,
};
use reqwest::{RequestBuilder, Response};
use tokio::sync::oneshot;
use url::Url;
use pkarr::Keypair;
use ::pkarr::{mainline::dht::Testnet, PkarrClient, PublicKey, SignedPacket};
use crate::{
error::{Error, Result},
shared::list_builder::ListBuilder,
PubkyClient,
};
static DEFAULT_USER_AGENT: &str = concat!(env!("CARGO_PKG_NAME"), "/", env!("CARGO_PKG_VERSION"),);
/// Builder for configuring a [PubkyClient] before construction.
#[derive(Debug, Default)]
pub struct PubkyClientBuilder {
    /// Settings forwarded to the internal [pkarr::PkarrClient] on build.
    pkarr_settings: pkarr::Settings,
}
impl PubkyClientBuilder {
    /// Set Pkarr client [pkarr::Settings].
    pub fn pkarr_settings(mut self, settings: pkarr::Settings) -> Self {
        self.pkarr_settings = settings;
        self
    }

    /// Use the bootstrap nodes of a testnet, as the bootstrap nodes and
    /// resolvers in the internal Pkarr client.
    pub fn testnet(mut self, testnet: &Testnet) -> Self {
        self.pkarr_settings.dht.bootstrap = testnet.bootstrap.to_vec().into();

        // Every bootstrap node also acts as a resolver: expand each host
        // string into its socket addresses (hosts that fail to resolve are
        // silently skipped by `flat_map`).
        self.pkarr_settings.resolvers = testnet
            .bootstrap
            .iter()
            .flat_map(|resolver| resolver.to_socket_addrs())
            .flatten()
            .collect::<Vec<_>>()
            .into();

        self
    }

    /// Set the request_timeout of the UDP socket in the Mainline DHT client in
    /// the internal Pkarr client.
    ///
    /// Useful to speed up unit tests.
    /// Defaults to 2 seconds.
    pub fn dht_request_timeout(mut self, timeout: Duration) -> Self {
        self.pkarr_settings.dht.request_timeout = timeout.into();
        self
    }

    /// Build [PubkyClient].
    ///
    /// The HTTP client enables reqwest's cookie store (used for homeserver
    /// sessions) and sets the crate name/version as the user agent.
    pub fn build(self) -> PubkyClient {
        PubkyClient {
            http: reqwest::Client::builder()
                .cookie_store(true)
                .user_agent(DEFAULT_USER_AGENT)
                .build()
                .unwrap(),
            pkarr: PkarrClient::new(self.pkarr_settings).unwrap().as_async(),
        }
    }
}
impl Default for PubkyClient {
fn default() -> Self {
PubkyClient::builder().build()
}
}
// === Public API ===

impl PubkyClient {
    /// Returns a builder to edit settings before creating [PubkyClient].
    pub fn builder() -> PubkyClientBuilder {
        PubkyClientBuilder::default()
    }

    /// Create a client connected to the local network
    /// with the bootstrapping node: `localhost:6881`
    pub fn testnet() -> Self {
        Self::test(&Testnet {
            bootstrap: vec!["localhost:6881".to_string()],
            nodes: vec![],
        })
    }

    /// Creates a [PubkyClient] with:
    /// - DHT bootstrap nodes set to the `testnet` bootstrap nodes.
    /// - DHT request timeout set to 500 milliseconds. (unless in CI, then it is left as default 2000)
    ///
    /// For more control, you can use [PubkyClientBuilder::testnet]
    pub fn test(testnet: &Testnet) -> PubkyClient {
        let mut builder = PubkyClient::builder().testnet(testnet);

        // CI machines can be slow; keep the default timeout there.
        if std::env::var("CI").is_err() {
            builder = builder.dht_request_timeout(Duration::from_millis(500));
        }

        builder.build()
    }

    // === Auth ===

    /// Signup to a homeserver and update Pkarr accordingly.
    ///
    /// The homeserver is a Pkarr domain name, where the TLD is a Pkarr public key
    /// for example "pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy"
    pub async fn signup(&self, keypair: &Keypair, homeserver: &PublicKey) -> Result<Session> {
        self.inner_signup(keypair, homeserver).await
    }

    /// Check the current session for a given Pubky in its homeserver.
    ///
    /// Returns [Session] or `None` (if received `404 NOT_FOUND`),
    /// or [reqwest::Error] if the response has any other `>=400` status code.
    pub async fn session(&self, pubky: &PublicKey) -> Result<Option<Session>> {
        self.inner_session(pubky).await
    }

    /// Signout from a homeserver.
    pub async fn signout(&self, pubky: &PublicKey) -> Result<()> {
        self.inner_signout(pubky).await
    }

    /// Signin to a homeserver.
    pub async fn signin(&self, keypair: &Keypair) -> Result<Session> {
        self.inner_signin(keypair).await
    }

    // === Public data ===

    /// Upload a small payload to a given path.
    pub async fn put<T: TryInto<Url>>(&self, url: T, content: &[u8]) -> Result<()> {
        self.inner_put(url, content).await
    }

    /// Download a small payload from a given path relative to a pubky author.
    pub async fn get<T: TryInto<Url>>(&self, url: T) -> Result<Option<Bytes>> {
        self.inner_get(url).await
    }

    /// Delete a file at a path relative to a pubky author.
    pub async fn delete<T: TryInto<Url>>(&self, url: T) -> Result<()> {
        self.inner_delete(url).await
    }

    /// Returns a [ListBuilder] to help pass options before calling [ListBuilder::send].
    ///
    /// `url` sets the path you want to list within.
    pub fn list<T: TryInto<Url>>(&self, url: T) -> Result<ListBuilder> {
        self.inner_list(url)
    }

    // === Helpers ===

    /// Create a recovery file of the `keypair`, containing the secret key encrypted
    /// using the `passphrase`.
    pub fn create_recovery_file(keypair: &Keypair, passphrase: &str) -> Result<Vec<u8>> {
        Ok(create_recovery_file(keypair, passphrase)?)
    }

    /// Recover a keypair from a recovery file by decrypting the secret key using `passphrase`.
    pub fn decrypt_recovery_file(recovery_file: &[u8], passphrase: &str) -> Result<Keypair> {
        Ok(decrypt_recovery_file(recovery_file, passphrase)?)
    }

    /// Return `pubkyauth://` url and wait for the incoming [AuthToken]
    /// verifying that AuthToken, and if capabilities were requested, signing in to
    /// the Pubky's homeserver and returning the [Session] information.
    ///
    /// The waiting happens on a spawned tokio task; if the returned receiver is
    /// dropped, the task's `send` fails and the task ends with an error.
    pub fn auth_request(
        &self,
        relay: impl TryInto<Url>,
        capabilities: &Capabilities,
    ) -> Result<(Url, tokio::sync::oneshot::Receiver<Option<Session>>)> {
        let mut relay: Url = relay
            .try_into()
            .map_err(|_| Error::Generic("Invalid relay Url".into()))?;

        // Builds the deep link and derives the relay channel url (mutates `relay`).
        let (pubkyauth_url, client_secret) = self.create_auth_request(&mut relay, capabilities)?;

        let (tx, rx) = oneshot::channel::<Option<Session>>();

        let this = self.clone();

        // Subscribe to the relay channel in the background and forward the
        // resulting session (if any) through the oneshot channel.
        tokio::spawn(async move {
            let to_send = this
                .subscribe_to_auth_response(relay, &client_secret)
                .await?;

            tx.send(to_send)
                .map_err(|_| Error::Generic("Failed to send the session after signing in with token, since the receiver is dropped".into()))?;

            Ok::<(), Error>(())
        });

        Ok((pubkyauth_url, rx))
    }

    /// Sign an [pubky_common::auth::AuthToken], encrypt it and send it to the
    /// source of the pubkyauth request url.
    pub async fn send_auth_token<T: TryInto<Url>>(
        &self,
        keypair: &Keypair,
        pubkyauth_url: T,
    ) -> Result<()> {
        let url: Url = pubkyauth_url.try_into().map_err(|_| Error::InvalidUrl)?;

        self.inner_send_auth_token(keypair, url).await?;

        Ok(())
    }
}
// === Internals ===

impl PubkyClient {
    // === Pkarr ===

    /// Resolve the latest [SignedPacket] for `public_key` via the internal Pkarr client.
    pub(crate) async fn pkarr_resolve(
        &self,
        public_key: &PublicKey,
    ) -> Result<Option<SignedPacket>> {
        Ok(self.pkarr.resolve(public_key).await?)
    }

    /// Publish `signed_packet` via the internal Pkarr client.
    pub(crate) async fn pkarr_publish(&self, signed_packet: &SignedPacket) -> Result<()> {
        Ok(self.pkarr.publish(signed_packet).await?)
    }

    // === HTTP ===

    /// Start building an HTTP request with the shared client (cookies are
    /// handled by reqwest's cookie store — see [PubkyClientBuilder::build]).
    pub(crate) fn request(&self, method: reqwest::Method, url: Url) -> RequestBuilder {
        self.http.request(method, url)
    }

    /// No-op on native: session cookies are persisted by reqwest's cookie store.
    pub(crate) fn store_session(&self, _: &Response) {}

    /// No-op on native, for the same reason as [PubkyClient::store_session].
    pub(crate) fn remove_session(&self, _: &PublicKey) {}
}

View File

@@ -0,0 +1,343 @@
use std::collections::HashMap;
use base64::{alphabet::URL_SAFE, engine::general_purpose::NO_PAD, Engine};
use reqwest::{Method, StatusCode};
use url::Url;
use pkarr::{Keypair, PublicKey};
use pubky_common::{
auth::AuthToken,
capabilities::{Capabilities, Capability},
crypto::{decrypt, encrypt, hash, random_bytes},
session::Session,
};
use crate::{
error::{Error, Result},
PubkyClient,
};
use super::pkarr::Endpoint;
impl PubkyClient {
    /// Signup to a homeserver and update Pkarr accordingly.
    ///
    /// The homeserver is a Pkarr domain name, where the TLD is a Pkarr public key
    /// for example "pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy"
    pub(crate) async fn inner_signup(
        &self,
        keypair: &Keypair,
        homeserver: &PublicKey,
    ) -> Result<Session> {
        let homeserver = homeserver.to_string();

        let Endpoint { mut url, .. } = self.resolve_endpoint(&homeserver).await?;

        url.set_path("/signup");

        // A root-capability token signed by the keypair proves ownership.
        let body = AuthToken::sign(keypair, vec![Capability::root()]).serialize();

        let response = self
            .request(Method::POST, url.clone())
            .body(body)
            .send()
            .await?;

        self.store_session(&response);

        // Point `_pubky.<pk>` at this homeserver so others can resolve it.
        self.publish_pubky_homeserver(keypair, &homeserver).await?;

        let bytes = response.bytes().await?;

        Ok(Session::deserialize(&bytes)?)
    }

    /// Check the current session for a given Pubky in its homeserver.
    ///
    /// Returns None if not signed in (404), or [reqwest::Error]
    /// if the response has any other `>=400` status code.
    pub(crate) async fn inner_session(&self, pubky: &PublicKey) -> Result<Option<Session>> {
        let Endpoint { mut url, .. } = self.resolve_pubky_homeserver(pubky).await?;

        url.set_path(&format!("/{}/session", pubky));

        let res = self.request(Method::GET, url).send().await?;

        // 404 means "no session" rather than an error.
        if res.status() == StatusCode::NOT_FOUND {
            return Ok(None);
        }

        if !res.status().is_success() {
            res.error_for_status_ref()?;
        };

        let bytes = res.bytes().await?;

        Ok(Some(Session::deserialize(&bytes)?))
    }

    /// Signout from a homeserver by deleting the session resource.
    pub(crate) async fn inner_signout(&self, pubky: &PublicKey) -> Result<()> {
        let Endpoint { mut url, .. } = self.resolve_pubky_homeserver(pubky).await?;

        url.set_path(&format!("/{}/session", pubky));

        self.request(Method::DELETE, url).send().await?;

        self.remove_session(pubky);

        Ok(())
    }

    /// Signin to a homeserver with a fresh root-capability token.
    pub(crate) async fn inner_signin(&self, keypair: &Keypair) -> Result<Session> {
        let token = AuthToken::sign(keypair, vec![Capability::root()]);

        self.signin_with_authtoken(&token).await
    }

    /// Sign an auth token for the capabilities requested in `pubkyauth_url`,
    /// encrypt it with the embedded client secret, and POST it to the relay
    /// channel derived from that secret.
    ///
    /// NOTE(review): panics (via `expect`) on a missing/malformed `relay` or
    /// `secret` query param — consider returning an error instead.
    pub(crate) async fn inner_send_auth_token(
        &self,
        keypair: &Keypair,
        pubkyauth_url: Url,
    ) -> Result<()> {
        let query_params: HashMap<String, String> =
            pubkyauth_url.query_pairs().into_owned().collect();

        let relay = query_params
            .get("relay")
            .map(|r| url::Url::parse(r).expect("Relay query param to be valid URL"))
            .expect("Missing relay query param");

        // `secret` is URL-safe base64 (no padding) of exactly 32 bytes.
        let client_secret = query_params
            .get("secret")
            .map(|s| {
                let engine = base64::engine::GeneralPurpose::new(&URL_SAFE, NO_PAD);
                let bytes = engine.decode(s).expect("invalid client_secret");
                let arr: [u8; 32] = bytes.try_into().expect("invalid client_secret");

                arr
            })
            .expect("Missing client secret");

        // Unknown capability strings are silently dropped.
        let capabilities = query_params
            .get("caps")
            .map(|caps_string| {
                caps_string
                    .split(',')
                    .filter_map(|cap| Capability::try_from(cap).ok())
                    .collect::<Vec<_>>()
            })
            .unwrap_or_default();

        let token = AuthToken::sign(keypair, capabilities);

        let encrypted_token = encrypt(&token.serialize(), &client_secret)?;

        let engine = base64::engine::GeneralPurpose::new(&URL_SAFE, NO_PAD);

        // The channel id is the (base64) hash of the client secret appended to
        // the relay path, so only the secret holder can locate the channel.
        let mut callback = relay.clone();
        let mut path_segments = callback.path_segments_mut().unwrap();
        path_segments.pop_if_empty();
        let channel_id = engine.encode(hash(&client_secret).as_bytes());
        path_segments.push(&channel_id);
        drop(path_segments);

        self.request(Method::POST, callback)
            .body(encrypted_token)
            .send()
            .await?;

        Ok(())
    }

    /// Decrypt a token received out-of-band, sign in with it, and return the
    /// pubky of the token's signer.
    pub async fn inner_third_party_signin(
        &self,
        encrypted_token: &[u8],
        client_secret: &[u8; 32],
    ) -> Result<PublicKey> {
        let decrypted = decrypt(encrypted_token, client_secret)?;
        let token = AuthToken::deserialize(&decrypted)?;

        self.signin_with_authtoken(&token).await?;

        Ok(token.pubky().to_owned())
    }

    /// POST a serialized [AuthToken] to `https://<pubky>/session` (resolved
    /// through Pkarr) and deserialize the resulting [Session].
    pub async fn signin_with_authtoken(&self, token: &AuthToken) -> Result<Session> {
        let mut url = Url::parse(&format!("https://{}/session", token.pubky()))?;
        self.resolve_url(&mut url).await?;

        let response = self
            .request(Method::POST, url)
            .body(token.serialize())
            .send()
            .await?;

        self.store_session(&response);

        let bytes = response.bytes().await?;

        Ok(Session::deserialize(&bytes)?)
    }

    /// Build the `pubkyauth:///` deep link for `capabilities` and derive the
    /// relay channel URL (mutating `relay` in place) from a fresh client secret.
    ///
    /// Returns the deep link and the client secret the channel was derived from.
    pub(crate) fn create_auth_request(
        &self,
        relay: &mut Url,
        capabilities: &Capabilities,
    ) -> Result<(Url, [u8; 32])> {
        let engine = base64::engine::GeneralPurpose::new(&URL_SAFE, NO_PAD);

        let client_secret: [u8; 32] = random_bytes::<32>();

        let pubkyauth_url = Url::parse(&format!(
            "pubkyauth:///?caps={capabilities}&secret={}&relay={relay}",
            engine.encode(client_secret)
        ))?;

        let mut segments = relay
            .path_segments_mut()
            .map_err(|_| Error::Generic("Invalid relay".into()))?;

        // remove trailing slash if any.
        segments.pop_if_empty();
        // Channel id mirrors the derivation in `inner_send_auth_token`.
        let channel_id = &engine.encode(hash(&client_secret).as_bytes());
        segments.push(channel_id);
        drop(segments);

        Ok((pubkyauth_url, client_secret))
    }

    /// Await the encrypted [AuthToken] from the relay channel, verify it, and
    /// sign in with it if it carries any capabilities.
    ///
    /// Returns `None` for an empty-capability (authentication-only) token.
    pub(crate) async fn subscribe_to_auth_response(
        &self,
        relay: Url,
        client_secret: &[u8; 32],
    ) -> Result<Option<Session>> {
        let response = self.http.request(Method::GET, relay).send().await?;

        let encrypted_token = response.bytes().await?;
        let token_bytes = decrypt(&encrypted_token, client_secret)?;
        let token = AuthToken::verify(&token_bytes)?;

        if token.capabilities().is_empty() {
            Ok(None)
        } else {
            let session = self.signin_with_authtoken(&token).await?;

            Ok(Some(session))
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::*;

    use pkarr::{mainline::Testnet, Keypair};
    use pubky_common::capabilities::{Capabilities, Capability};
    use pubky_homeserver::Homeserver;
    use reqwest::StatusCode;

    // End-to-end signup / session / signout / signin against a test homeserver.
    #[tokio::test]
    async fn basic_authn() {
        let testnet = Testnet::new(10);
        let server = Homeserver::start_test(&testnet).await.unwrap();

        let client = PubkyClient::test(&testnet);

        let keypair = Keypair::random();

        client.signup(&keypair, &server.public_key()).await.unwrap();

        let session = client
            .session(&keypair.public_key())
            .await
            .unwrap()
            .unwrap();

        // Signup grants the root capability.
        assert!(session.capabilities().contains(&Capability::root()));

        client.signout(&keypair.public_key()).await.unwrap();

        {
            // After signout there is no session anymore.
            let session = client.session(&keypair.public_key()).await.unwrap();

            assert!(session.is_none());
        }

        client.signin(&keypair).await.unwrap();

        {
            // Signin restores a root-capability session for the same pubky.
            let session = client
                .session(&keypair.public_key())
                .await
                .unwrap()
                .unwrap();

            assert_eq!(session.pubky(), &keypair.public_key());
            assert!(session.capabilities().contains(&Capability::root()));
        }
    }

    // Third-party authorization flow: an app requests limited capabilities via
    // a relay, the key holder approves, and access control is then enforced.
    #[tokio::test]
    async fn authz() {
        let testnet = Testnet::new(10);
        let server = Homeserver::start_test(&testnet).await.unwrap();

        let keypair = Keypair::random();
        let pubky = keypair.public_key();

        // Third party app side
        let capabilities: Capabilities =
            "/pub/pubky.app/:rw,/pub/foo.bar/file:r".try_into().unwrap();

        let client = PubkyClient::test(&testnet);

        // NOTE(review): this uses a public relay (demo.httprelay.io), so the
        // test depends on external network availability.
        let (pubkyauth_url, pubkyauth_response) = client
            .auth_request("https://demo.httprelay.io/link", &capabilities)
            .unwrap();

        // Authenticator side
        {
            let client = PubkyClient::test(&testnet);

            client.signup(&keypair, &server.public_key()).await.unwrap();

            client
                .send_auth_token(&keypair, pubkyauth_url)
                .await
                .unwrap();
        }

        let session = pubkyauth_response.await.unwrap().unwrap();

        assert_eq!(session.pubky(), &pubky);
        assert_eq!(session.capabilities(), &capabilities.0);

        // Test access control enforcement

        // Writing inside the granted rw scope succeeds.
        client
            .put(format!("pubky://{pubky}/pub/pubky.app/foo").as_str(), &[])
            .await
            .unwrap();

        // Writing to the scope path itself is forbidden.
        assert_eq!(
            client
                .put(format!("pubky://{pubky}/pub/pubky.app").as_str(), &[])
                .await
                .map_err(|e| match e {
                    crate::Error::Reqwest(e) => e.status(),
                    _ => None,
                }),
            Err(Some(StatusCode::FORBIDDEN))
        );

        // A read-only grant does not allow writes.
        assert_eq!(
            client
                .put(format!("pubky://{pubky}/pub/foo.bar/file").as_str(), &[])
                .await
                .map_err(|e| match e {
                    crate::Error::Reqwest(e) => e.status(),
                    _ => None,
                }),
            Err(Some(StatusCode::FORBIDDEN))
        );
    }
}

View File

@@ -0,0 +1,105 @@
use reqwest::Method;
use url::Url;
use crate::{error::Result, PubkyClient};
/// Request builder for listing entries under a directory-like Pubky URL.
#[derive(Debug)]
pub struct ListBuilder<'a> {
    /// Base `pubky://` url of the path to list within.
    url: Url,
    /// Append the `reverse` query flag when true.
    reverse: bool,
    /// Maximum number of entries; omitted from the query when `None`.
    limit: Option<u16>,
    /// Pagination cursor: a full `pubky://` url or a path relative to `url`.
    cursor: Option<&'a str>,
    /// Client used to send the request.
    client: &'a PubkyClient,
    /// Append the `shallow` query flag when true.
    shallow: bool,
}
impl<'a> ListBuilder<'a> {
    /// Create a new List request builder with default options
    /// (no limit, no cursor, forward order, not shallow).
    pub(crate) fn new(client: &'a PubkyClient, url: Url) -> Self {
        Self {
            client,
            url,
            limit: None,
            cursor: None,
            reverse: false,
            shallow: false,
        }
    }

    /// Set the `reverse` option.
    pub fn reverse(mut self, reverse: bool) -> Self {
        self.reverse = reverse;
        self
    }

    /// Set the `limit` value.
    pub fn limit(mut self, limit: u16) -> Self {
        self.limit = limit.into();
        self
    }

    /// Set the `cursor` value.
    ///
    /// Either a full `pubky://` Url (from previous list response),
    /// or a path (to a file or directory) relative to the `url`
    pub fn cursor(mut self, cursor: &'a str) -> Self {
        self.cursor = cursor.into();
        self
    }

    /// Set the `shallow` option.
    pub fn shallow(mut self, shallow: bool) -> Self {
        self.shallow = shallow;
        self
    }

    /// Send the list request.
    ///
    /// Returns a list of Pubky URLs of the files in the path of the `url`
    /// respecting [ListBuilder::reverse], [ListBuilder::limit] and [ListBuilder::cursor]
    /// options.
    pub async fn send(self) -> Result<Vec<String>> {
        let mut url = self.client.pubky_to_http(self.url).await?;

        // If the url points at a file rather than a directory, list its
        // parent: drop the final path segment and keep a trailing slash.
        if !url.path().ends_with('/') {
            let path = url.path().to_string();

            let mut parts = path.split('/').collect::<Vec<&str>>();
            parts.pop();

            let path = format!("{}/", parts.join("/"));

            url.set_path(&path)
        }

        // Encode the options as query parameters.
        let mut query = url.query_pairs_mut();
        if self.reverse {
            query.append_key_only("reverse");
        }
        if self.shallow {
            query.append_key_only("shallow");
        }
        if let Some(limit) = self.limit {
            query.append_pair("limit", &limit.to_string());
        }
        if let Some(cursor) = self.cursor {
            query.append_pair("cursor", cursor);
        }
        drop(query);

        let response = self.client.request(Method::GET, url).send().await?;

        response.error_for_status_ref()?;

        // TODO: bail on too large files.
        let bytes = response.bytes().await?;

        // The server responds with one URL per line.
        Ok(String::from_utf8_lossy(&bytes)
            .lines()
            .map(String::from)
            .collect())
    }
}

View File

@@ -0,0 +1,4 @@
pub mod auth;
pub mod list_builder;
pub mod pkarr;
pub mod public;

View File

@@ -0,0 +1,339 @@
use url::Url;
use pkarr::{
dns::{rdata::SVCB, Packet},
Keypair, PublicKey, SignedPacket,
};
use crate::{
error::{Error, Result},
PubkyClient,
};
const MAX_ENDPOINT_RESOLUTION_RECURSION: u8 = 3;
impl PubkyClient {
    /// Publish the SVCB record for `_pubky.<public_key>`.
    ///
    /// Existing records with other names are preserved by copying them into
    /// the new packet before signing.
    pub(crate) async fn publish_pubky_homeserver(
        &self,
        keypair: &Keypair,
        host: &str,
    ) -> Result<()> {
        let existing = self.pkarr_resolve(&keypair.public_key()).await?;

        let mut packet = Packet::new_reply(0);

        if let Some(existing) = existing {
            for answer in existing.packet().answers.iter().cloned() {
                // Keep everything except previous `_pubky*` records, which
                // are replaced by the record pushed below.
                if !answer.name.to_string().starts_with("_pubky") {
                    packet.answers.push(answer.into_owned())
                }
            }
        }

        let svcb = SVCB::new(0, host.try_into()?);

        packet.answers.push(pkarr::dns::ResourceRecord::new(
            "_pubky".try_into().unwrap(),
            pkarr::dns::CLASS::IN,
            60 * 60, // 1 hour TTL
            pkarr::dns::rdata::RData::SVCB(svcb),
        ));

        let signed_packet = SignedPacket::from_packet(keypair, &packet)?;

        self.pkarr_publish(&signed_packet).await?;

        Ok(())
    }

    /// Resolve the homeserver for a pubky by resolving `_pubky.<pubky>`.
    pub(crate) async fn resolve_pubky_homeserver(&self, pubky: &PublicKey) -> Result<Endpoint> {
        let target = format!("_pubky.{pubky}");

        self.resolve_endpoint(&target)
            .await
            .map_err(|_| Error::Generic("Could not resolve homeserver".to_string()))
    }

    /// Resolve a service's public_key and "non-pkarr url" from a Pubky domain
    ///
    /// "non-pkarr" url is any URL where the hostname isn't a 52 z-base32 character,
    /// usually an IPv4, IPv6 or ICANN domain, but could also be any other unknown hostname.
    ///
    /// Recursively resolve SVCB and HTTPS endpoints, with [MAX_ENDPOINT_RESOLUTION_RECURSION] limit.
    pub(crate) async fn resolve_endpoint(&self, target: &str) -> Result<Endpoint> {
        let original_target = target;
        // TODO: cache the result of this function?

        let mut target = target.to_string();
        let mut endpoint_public_key = None;
        let mut origin = target.clone();

        let mut step = 0;

        // PublicKey is very good at extracting the Pkarr TLD from a string.
        while let Ok(public_key) = PublicKey::try_from(target.clone()) {
            if step >= MAX_ENDPOINT_RESOLUTION_RECURSION {
                break;
            };
            step += 1;

            if let Some(signed_packet) = self
                .pkarr_resolve(&public_key)
                .await
                .map_err(|_| Error::ResolveEndpoint(original_target.into()))?
            {
                // Choose most prior SVCB record
                // NOTE(review): this keeps the record with the numerically
                // *highest* priority (a 0-priority record wins immediately);
                // SVCB/HTTPS semantics elsewhere treat smaller non-zero
                // priority values as more preferred — confirm this is intended.
                let svcb = signed_packet.resource_records(&target).fold(
                    None,
                    |prev: Option<SVCB>, answer| {
                        if let Some(svcb) = match &answer.rdata {
                            pkarr::dns::rdata::RData::SVCB(svcb) => Some(svcb),
                            pkarr::dns::rdata::RData::HTTPS(curr) => Some(&curr.0),
                            _ => None,
                        } {
                            let curr = svcb.clone();

                            if curr.priority == 0 {
                                return Some(curr);
                            }
                            if let Some(prev) = &prev {
                                // TODO return random if priority is the same
                                if curr.priority >= prev.priority {
                                    return Some(curr);
                                }
                            } else {
                                return Some(curr);
                            }
                        }

                        prev
                    },
                );

                if let Some(svcb) = svcb {
                    endpoint_public_key = Some(public_key.clone());
                    // Follow the SVCB target for the next iteration.
                    target = svcb.target.to_string();

                    // A PORT param (big-endian u16) overrides the default port.
                    if let Some(port) = svcb.get_param(pkarr::dns::rdata::SVCB::PORT) {
                        if port.len() < 2 {
                            // TODO: debug! Error encoding port!
                        }
                        let port = u16::from_be_bytes([port[0], port[1]]);

                        origin = format!("{target}:{port}");
                    } else {
                        origin.clone_from(&target);
                    };

                    if step >= MAX_ENDPOINT_RESOLUTION_RECURSION {
                        continue;
                    };
                }
            } else {
                break;
            }
        }

        // Resolution ran out of budget while still pointing at a Pkarr key.
        if PublicKey::try_from(origin.as_str()).is_ok() {
            return Err(Error::ResolveEndpoint(original_target.into()));
        }

        if let Some(public_key) = endpoint_public_key {
            // Plain http for local test servers, https otherwise.
            let url = Url::parse(&format!(
                "{}://{}",
                if origin.starts_with("localhost") {
                    "http"
                } else {
                    "https"
                },
                origin
            ))?;

            return Ok(Endpoint { public_key, url });
        }

        Err(Error::ResolveEndpoint(original_target.into()))
    }

    /// If `url`'s host is a Pkarr public key, rewrite its host, port and
    /// scheme in place to the resolved homeserver endpoint; otherwise leave
    /// the url untouched.
    pub(crate) async fn resolve_url(&self, url: &mut Url) -> Result<()> {
        if let Some(Ok(pubky)) = url.host_str().map(PublicKey::try_from) {
            let Endpoint { url: x, .. } = self.resolve_endpoint(&format!("_pubky.{pubky}")).await?;

            url.set_host(x.host_str())?;
            url.set_port(x.port()).expect("should work!");
            url.set_scheme(x.scheme()).expect("should work!");
        };

        Ok(())
    }
}
/// A resolved endpoint: the Pkarr public key that announced it and the
/// concrete http(s) [Url] it is reachable at.
#[derive(Debug)]
pub(crate) struct Endpoint {
    // TODO: we don't use this at all?
    pub public_key: PublicKey,
    pub url: Url,
}
#[cfg(test)]
mod tests {
    use super::*;

    use pkarr::{
        dns::{
            rdata::{HTTPS, SVCB},
            Packet,
        },
        mainline::{dht::DhtSettings, Testnet},
        Keypair, PkarrClient, Settings, SignedPacket,
    };
    use pubky_homeserver::Homeserver;

    // Follow a chain of SVCB/HTTPS records across three keys down to an
    // ICANN domain ("example.com").
    #[tokio::test]
    async fn resolve_endpoint_https() {
        let testnet = Testnet::new(10);

        let pkarr_client = PkarrClient::new(Settings {
            dht: DhtSettings {
                bootstrap: Some(testnet.bootstrap.clone()),
                ..Default::default()
            },
            ..Default::default()
        })
        .unwrap()
        .as_async();

        let domain = "example.com";
        let mut target;

        // Server
        {
            let keypair = Keypair::random();

            // Terminal hop: an HTTPS record pointing at the ICANN domain.
            let https = HTTPS(SVCB::new(0, domain.try_into().unwrap()));
            let mut packet = Packet::new_reply(0);
            packet.answers.push(pkarr::dns::ResourceRecord::new(
                "foo".try_into().unwrap(),
                pkarr::dns::CLASS::IN,
                60 * 60,
                pkarr::dns::rdata::RData::HTTPS(https),
            ));
            let signed_packet = SignedPacket::from_packet(&keypair, &packet).unwrap();
            pkarr_client.publish(&signed_packet).await.unwrap();

            target = format!("foo.{}", keypair.public_key());
        }

        // intermediate
        {
            let keypair = Keypair::random();

            let svcb = SVCB::new(0, target.as_str().try_into().unwrap());
            let mut packet = Packet::new_reply(0);
            packet.answers.push(pkarr::dns::ResourceRecord::new(
                "bar".try_into().unwrap(),
                pkarr::dns::CLASS::IN,
                60 * 60,
                pkarr::dns::rdata::RData::SVCB(svcb),
            ));
            let signed_packet = SignedPacket::from_packet(&keypair, &packet).unwrap();
            pkarr_client.publish(&signed_packet).await.unwrap();

            target = format!("bar.{}", keypair.public_key())
        }

        // entry hop pointing at the intermediate
        {
            let keypair = Keypair::random();

            let svcb = SVCB::new(0, target.as_str().try_into().unwrap());
            let mut packet = Packet::new_reply(0);
            packet.answers.push(pkarr::dns::ResourceRecord::new(
                "pubky".try_into().unwrap(),
                pkarr::dns::CLASS::IN,
                60 * 60,
                pkarr::dns::rdata::RData::SVCB(svcb),
            ));
            let signed_packet = SignedPacket::from_packet(&keypair, &packet).unwrap();
            pkarr_client.publish(&signed_packet).await.unwrap();

            target = format!("pubky.{}", keypair.public_key())
        }

        let client = PubkyClient::test(&testnet);

        let endpoint = client.resolve_endpoint(&target).await.unwrap();

        assert_eq!(endpoint.url.host_str().unwrap(), domain);
    }

    // Resolution should also work when `_pubky` points at an intermediate
    // key rather than directly at the homeserver.
    #[tokio::test]
    async fn resolve_homeserver() {
        let testnet = Testnet::new(10);
        let server = Homeserver::start_test(&testnet).await.unwrap();

        // Publish an intermediate controller of the homeserver
        let pkarr_client = PkarrClient::new(Settings {
            dht: DhtSettings {
                bootstrap: Some(testnet.bootstrap.clone()),
                ..Default::default()
            },
            ..Default::default()
        })
        .unwrap()
        .as_async();

        let intermediate = Keypair::random();

        let mut packet = Packet::new_reply(0);

        let server_tld = server.public_key().to_string();

        let svcb = SVCB::new(0, server_tld.as_str().try_into().unwrap());

        packet.answers.push(pkarr::dns::ResourceRecord::new(
            "pubky".try_into().unwrap(),
            pkarr::dns::CLASS::IN,
            60 * 60,
            pkarr::dns::rdata::RData::SVCB(svcb),
        ));

        let signed_packet = SignedPacket::from_packet(&intermediate, &packet).unwrap();

        pkarr_client.publish(&signed_packet).await.unwrap();

        {
            let client = PubkyClient::test(&testnet);

            let pubky = Keypair::random();

            client
                .publish_pubky_homeserver(&pubky, &format!("pubky.{}", &intermediate.public_key()))
                .await
                .unwrap();

            let Endpoint { public_key, url } = client
                .resolve_pubky_homeserver(&pubky.public_key())
                .await
                .unwrap();

            // The chain ends at the actual homeserver's key and local address.
            assert_eq!(public_key, server.public_key());
            assert_eq!(url.host_str(), Some("localhost"));
            assert_eq!(url.port(), Some(server.port()));
        }
    }
}

View File

@@ -0,0 +1,768 @@
use bytes::Bytes;
use pkarr::PublicKey;
use reqwest::{Method, StatusCode};
use url::Url;
use crate::{
error::{Error, Result},
PubkyClient,
};
use super::{list_builder::ListBuilder, pkarr::Endpoint};
impl PubkyClient {
    /// PUT `content` at `url`, translating a `pubky://` url to the owning
    /// homeserver's HTTP endpoint first. Any non-2xx response is an error.
    pub(crate) async fn inner_put<T: TryInto<Url>>(&self, url: T, content: &[u8]) -> Result<()> {
        let url = self.pubky_to_http(url).await?;
        let response = self
            .request(Method::PUT, url)
            .body(content.to_owned())
            .send()
            .await?;
        response.error_for_status()?;
        Ok(())
    }

    /// GET the bytes stored at `url`.
    ///
    /// Returns `Ok(None)` on `404 NOT_FOUND`; any other non-2xx status is
    /// returned as an error.
    pub(crate) async fn inner_get<T: TryInto<Url>>(&self, url: T) -> Result<Option<Bytes>> {
        let url = self.pubky_to_http(url).await?;
        let response = self.request(Method::GET, url).send().await?;
        if response.status() == StatusCode::NOT_FOUND {
            return Ok(None);
        }
        response.error_for_status_ref()?;
        // TODO: bail on too large files.
        let bytes = response.bytes().await?;
        Ok(Some(bytes))
    }

    /// DELETE the resource at `url`. Any non-2xx response is an error.
    pub(crate) async fn inner_delete<T: TryInto<Url>>(&self, url: T) -> Result<()> {
        let url = self.pubky_to_http(url).await?;
        let response = self.request(Method::DELETE, url).send().await?;
        response.error_for_status_ref()?;
        Ok(())
    }

    /// Start building a listing request for the directory at `url`.
    pub(crate) fn inner_list<T: TryInto<Url>>(&self, url: T) -> Result<ListBuilder> {
        Ok(ListBuilder::new(
            self,
            url.try_into().map_err(|_| Error::InvalidUrl)?,
        ))
    }

    /// Map a `pubky://<pk>/<path>` url to the owning homeserver's HTTP url,
    /// appending `<pk>/<path>` to the resolved endpoint's path.
    ///
    /// Urls whose host is not a valid public key are returned unchanged.
    pub(crate) async fn pubky_to_http<T: TryInto<Url>>(&self, url: T) -> Result<Url> {
        let original_url: Url = url.try_into().map_err(|_| Error::InvalidUrl)?;
        let pubky = original_url
            .host_str()
            .ok_or(Error::Generic("Missing Pubky Url host".to_string()))?;
        if let Ok(public_key) = PublicKey::try_from(pubky) {
            let Endpoint { mut url, .. } = self.resolve_pubky_homeserver(&public_key).await?;
            // TODO: remove if we move to subdomains instead of paths.
            if original_url.scheme() == "pubky" {
                let path = original_url.path_segments();
                // `unwrap`: presumably the resolved endpoint is always an
                // http(s) url (which can be a base) — TODO confirm.
                let mut split = url.path_segments_mut().unwrap();
                split.push(pubky);
                if let Some(segments) = path {
                    for segment in segments {
                        split.push(segment);
                    }
                }
                // Release the mutable path borrow before returning `url`.
                drop(split);
            }
            return Ok(url);
        }
        Ok(original_url)
    }
}
#[cfg(test)]
mod tests {
use core::panic;
use crate::*;
use pkarr::{mainline::Testnet, Keypair};
use pubky_homeserver::Homeserver;
use reqwest::{Method, StatusCode};
#[tokio::test]
async fn put_get_delete() {
    // Round-trip a small payload through a test homeserver:
    // PUT, GET back the same bytes, DELETE, then GET returns None.
    let testnet = Testnet::new(10);
    let server = Homeserver::start_test(&testnet).await.unwrap();
    let client = PubkyClient::test(&testnet);
    let keypair = Keypair::random();
    client.signup(&keypair, &server.public_key()).await.unwrap();
    let url = format!("pubky://{}/pub/foo.txt", keypair.public_key());
    let url = url.as_str();
    client.put(url, &[0, 1, 2, 3, 4]).await.unwrap();
    let response = client.get(url).await.unwrap().unwrap();
    assert_eq!(response, bytes::Bytes::from(vec![0, 1, 2, 3, 4]));
    client.delete(url).await.unwrap();
    let response = client.get(url).await.unwrap();
    assert_eq!(response, None);
}
#[tokio::test]
async fn unauthorized_put_delete() {
    // A signed-up user must not be able to PUT or DELETE under another
    // user's namespace; both attempts must fail with 401 UNAUTHORIZED
    // and leave the owner's data readable.
    let testnet = Testnet::new(10);
    let server = Homeserver::start_test(&testnet).await.unwrap();
    let client = PubkyClient::test(&testnet);
    let keypair = Keypair::random();
    client.signup(&keypair, &server.public_key()).await.unwrap();
    let public_key = keypair.public_key();
    let url = format!("pubky://{public_key}/pub/foo.txt");
    let url = url.as_str();
    let other_client = PubkyClient::test(&testnet);
    // PUT to someone else's path before any data exists.
    {
        let other = Keypair::random();
        // TODO: remove extra client after switching to subdomains.
        other_client
            .signup(&other, &server.public_key())
            .await
            .unwrap();
        let response = other_client.put(url, &[0, 1, 2, 3, 4]).await;
        match response {
            Err(Error::Reqwest(error)) => {
                assert!(error.status() == Some(StatusCode::UNAUTHORIZED))
            }
            _ => {
                panic!("expected error StatusCode::UNAUTHORIZED")
            }
        }
    }
    // Owner writes the file for real.
    client.put(url, &[0, 1, 2, 3, 4]).await.unwrap();
    // DELETE of someone else's existing file must also be rejected.
    {
        let other = Keypair::random();
        // TODO: remove extra client after switching to subdomains.
        other_client
            .signup(&other, &server.public_key())
            .await
            .unwrap();
        let response = other_client.delete(url).await;
        match response {
            Err(Error::Reqwest(error)) => {
                assert!(error.status() == Some(StatusCode::UNAUTHORIZED))
            }
            _ => {
                panic!("expected error StatusCode::UNAUTHORIZED")
            }
        }
    }
    // The owner's data survived both unauthorized attempts.
    let response = client.get(url).await.unwrap().unwrap();
    assert_eq!(response, bytes::Bytes::from(vec![0, 1, 2, 3, 4]));
}
#[tokio::test]
async fn list() {
    // Exercise flat listing under `/pub/example.com/`: default order,
    // `limit`, `reverse`, and every accepted cursor form (file name,
    // directory name, full pubky url, and leading-slash path).
    let testnet = Testnet::new(10);
    let server = Homeserver::start_test(&testnet).await.unwrap();
    let client = PubkyClient::test(&testnet);
    let keypair = Keypair::random();
    client.signup(&keypair, &server.public_key()).await.unwrap();
    let pubky = keypair.public_key();
    // The `.wrong` entries must never appear in `example.com` listings.
    let urls = vec![
        format!("pubky://{pubky}/pub/a.wrong/a.txt"),
        format!("pubky://{pubky}/pub/example.com/a.txt"),
        format!("pubky://{pubky}/pub/example.com/b.txt"),
        format!("pubky://{pubky}/pub/example.com/cc-nested/z.txt"),
        format!("pubky://{pubky}/pub/example.wrong/a.txt"),
        format!("pubky://{pubky}/pub/example.com/c.txt"),
        format!("pubky://{pubky}/pub/example.com/d.txt"),
        format!("pubky://{pubky}/pub/z.wrong/a.txt"),
    ];
    for url in urls {
        client.put(url.as_str(), &[0]).await.unwrap();
    }
    // NOTE(review): the listed url deliberately ends in `/extra`, a path
    // that was never written — presumably the server treats the final
    // segment as a starting point within the directory; confirm intended.
    let url = format!("pubky://{pubky}/pub/example.com/extra");
    let url = url.as_str();
    {
        let list = client.list(url).unwrap().send().await.unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.com/a.txt"),
                format!("pubky://{pubky}/pub/example.com/b.txt"),
                format!("pubky://{pubky}/pub/example.com/c.txt"),
                format!("pubky://{pubky}/pub/example.com/cc-nested/z.txt"),
                format!("pubky://{pubky}/pub/example.com/d.txt"),
            ],
            "normal list with no limit or cursor"
        );
    }
    {
        let list = client.list(url).unwrap().limit(2).send().await.unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.com/a.txt"),
                format!("pubky://{pubky}/pub/example.com/b.txt"),
            ],
            "normal list with limit but no cursor"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .limit(2)
            .cursor("a.txt")
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.com/b.txt"),
                format!("pubky://{pubky}/pub/example.com/c.txt"),
            ],
            "normal list with limit and a file cursor"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .limit(2)
            .cursor("cc-nested/")
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.com/cc-nested/z.txt"),
                format!("pubky://{pubky}/pub/example.com/d.txt"),
            ],
            "normal list with limit and a directory cursor"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .limit(2)
            .cursor(&format!("pubky://{pubky}/pub/example.com/a.txt"))
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.com/b.txt"),
                format!("pubky://{pubky}/pub/example.com/c.txt"),
            ],
            "normal list with limit and a full url cursor"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .limit(2)
            .cursor("/a.txt")
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.com/b.txt"),
                format!("pubky://{pubky}/pub/example.com/c.txt"),
            ],
            "normal list with limit and a leading / cursor"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .reverse(true)
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.com/d.txt"),
                format!("pubky://{pubky}/pub/example.com/cc-nested/z.txt"),
                format!("pubky://{pubky}/pub/example.com/c.txt"),
                format!("pubky://{pubky}/pub/example.com/b.txt"),
                format!("pubky://{pubky}/pub/example.com/a.txt"),
            ],
            "reverse list with no limit or cursor"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .reverse(true)
            .limit(2)
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.com/d.txt"),
                format!("pubky://{pubky}/pub/example.com/cc-nested/z.txt"),
            ],
            "reverse list with limit but no cursor"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .reverse(true)
            .limit(2)
            .cursor("d.txt")
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.com/cc-nested/z.txt"),
                format!("pubky://{pubky}/pub/example.com/c.txt"),
            ],
            "reverse list with limit and cursor"
        );
    }
}
#[tokio::test]
async fn list_shallow() {
    // Exercise shallow listing under `/pub/`: directories are collapsed
    // to a single `name/` entry, files are listed as-is. Note that
    // `example.con` exists both as a file and a directory, so both forms
    // appear in the expectations.
    let testnet = Testnet::new(10);
    let server = Homeserver::start_test(&testnet).await.unwrap();
    let client = PubkyClient::test(&testnet);
    let keypair = Keypair::random();
    client.signup(&keypair, &server.public_key()).await.unwrap();
    let pubky = keypair.public_key();
    let urls = vec![
        format!("pubky://{pubky}/pub/a.com/a.txt"),
        format!("pubky://{pubky}/pub/example.com/a.txt"),
        format!("pubky://{pubky}/pub/example.com/b.txt"),
        format!("pubky://{pubky}/pub/example.com/c.txt"),
        format!("pubky://{pubky}/pub/example.com/d.txt"),
        format!("pubky://{pubky}/pub/example.con/d.txt"),
        format!("pubky://{pubky}/pub/example.con"),
        format!("pubky://{pubky}/pub/file"),
        format!("pubky://{pubky}/pub/file2"),
        format!("pubky://{pubky}/pub/z.com/a.txt"),
    ];
    for url in urls {
        client.put(url.as_str(), &[0]).await.unwrap();
    }
    let url = format!("pubky://{pubky}/pub/");
    let url = url.as_str();
    {
        let list = client
            .list(url)
            .unwrap()
            .shallow(true)
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/a.com/"),
                format!("pubky://{pubky}/pub/example.com/"),
                format!("pubky://{pubky}/pub/example.con"),
                format!("pubky://{pubky}/pub/example.con/"),
                format!("pubky://{pubky}/pub/file"),
                format!("pubky://{pubky}/pub/file2"),
                format!("pubky://{pubky}/pub/z.com/"),
            ],
            "normal list shallow"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .shallow(true)
            .limit(2)
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/a.com/"),
                format!("pubky://{pubky}/pub/example.com/"),
            ],
            "normal list shallow with limit but no cursor"
        );
    }
    {
        // A cursor pointing at a file inside a directory resumes after
        // that whole directory.
        let list = client
            .list(url)
            .unwrap()
            .shallow(true)
            .limit(2)
            .cursor("example.com/a.txt")
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.com/"),
                format!("pubky://{pubky}/pub/example.con"),
            ],
            "normal list shallow with limit and a file cursor"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .shallow(true)
            .limit(3)
            .cursor("example.com/")
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.con"),
                format!("pubky://{pubky}/pub/example.con/"),
                format!("pubky://{pubky}/pub/file"),
            ],
            "normal list shallow with limit and a directory cursor"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .reverse(true)
            .shallow(true)
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/z.com/"),
                format!("pubky://{pubky}/pub/file2"),
                format!("pubky://{pubky}/pub/file"),
                format!("pubky://{pubky}/pub/example.con/"),
                format!("pubky://{pubky}/pub/example.con"),
                format!("pubky://{pubky}/pub/example.com/"),
                format!("pubky://{pubky}/pub/a.com/"),
            ],
            "reverse list shallow"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .reverse(true)
            .shallow(true)
            .limit(2)
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/z.com/"),
                format!("pubky://{pubky}/pub/file2"),
            ],
            "reverse list shallow with limit but no cursor"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .shallow(true)
            .reverse(true)
            .limit(2)
            .cursor("file2")
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/file"),
                format!("pubky://{pubky}/pub/example.con/"),
            ],
            "reverse list shallow with limit and a file cursor"
        );
    }
    {
        let list = client
            .list(url)
            .unwrap()
            .shallow(true)
            .reverse(true)
            .limit(2)
            .cursor("example.con/")
            .send()
            .await
            .unwrap();
        assert_eq!(
            list,
            vec![
                format!("pubky://{pubky}/pub/example.con"),
                format!("pubky://{pubky}/pub/example.com/"),
            ],
            "reverse list shallow with limit and a directory cursor"
        );
    }
}
#[tokio::test]
async fn list_events() {
    // The homeserver's `/events/` feed must report one PUT and one DEL
    // line per operation, in order, and support pagination via the
    // `cursor: <id>` trailer of each page.
    let testnet = Testnet::new(10);
    let server = Homeserver::start_test(&testnet).await.unwrap();
    let client = PubkyClient::test(&testnet);
    let keypair = Keypair::random();
    client.signup(&keypair, &server.public_key()).await.unwrap();
    let pubky = keypair.public_key();
    let urls = vec![
        format!("pubky://{pubky}/pub/a.com/a.txt"),
        format!("pubky://{pubky}/pub/example.com/a.txt"),
        format!("pubky://{pubky}/pub/example.com/b.txt"),
        format!("pubky://{pubky}/pub/example.com/c.txt"),
        format!("pubky://{pubky}/pub/example.com/d.txt"),
        format!("pubky://{pubky}/pub/example.con/d.txt"),
        format!("pubky://{pubky}/pub/example.con"),
        format!("pubky://{pubky}/pub/file"),
        format!("pubky://{pubky}/pub/file2"),
        format!("pubky://{pubky}/pub/z.com/a.txt"),
    ];
    // Each url generates exactly two events: a PUT then a DEL.
    for url in urls {
        client.put(url.as_str(), &[0]).await.unwrap();
        client.delete(url.as_str()).await.unwrap();
    }
    let feed_url = format!("http://localhost:{}/events/", server.port());
    let feed_url = feed_url.as_str();
    // A fresh (unauthenticated) client can read the public feed.
    let client = PubkyClient::test(&testnet);
    let cursor;
    {
        let response = client
            .request(
                Method::GET,
                format!("{feed_url}?limit=10").as_str().try_into().unwrap(),
            )
            .send()
            .await
            .unwrap();
        let text = response.text().await.unwrap();
        let lines = text.split('\n').collect::<Vec<_>>();
        // The last line is `cursor: <id>`; keep the id for the next page.
        cursor = lines.last().unwrap().split(" ").last().unwrap().to_string();
        assert_eq!(
            lines,
            vec![
                format!("PUT pubky://{pubky}/pub/a.com/a.txt"),
                format!("DEL pubky://{pubky}/pub/a.com/a.txt"),
                format!("PUT pubky://{pubky}/pub/example.com/a.txt"),
                format!("DEL pubky://{pubky}/pub/example.com/a.txt"),
                format!("PUT pubky://{pubky}/pub/example.com/b.txt"),
                format!("DEL pubky://{pubky}/pub/example.com/b.txt"),
                format!("PUT pubky://{pubky}/pub/example.com/c.txt"),
                format!("DEL pubky://{pubky}/pub/example.com/c.txt"),
                format!("PUT pubky://{pubky}/pub/example.com/d.txt"),
                format!("DEL pubky://{pubky}/pub/example.com/d.txt"),
                format!("cursor: {cursor}",)
            ]
        );
    }
    {
        // Second page resumes exactly where the cursor left off.
        let response = client
            .request(
                Method::GET,
                format!("{feed_url}?limit=10&cursor={cursor}")
                    .as_str()
                    .try_into()
                    .unwrap(),
            )
            .send()
            .await
            .unwrap();
        let text = response.text().await.unwrap();
        let lines = text.split('\n').collect::<Vec<_>>();
        assert_eq!(
            lines,
            vec![
                format!("PUT pubky://{pubky}/pub/example.con/d.txt"),
                format!("DEL pubky://{pubky}/pub/example.con/d.txt"),
                format!("PUT pubky://{pubky}/pub/example.con"),
                format!("DEL pubky://{pubky}/pub/example.con"),
                format!("PUT pubky://{pubky}/pub/file"),
                format!("DEL pubky://{pubky}/pub/file"),
                format!("PUT pubky://{pubky}/pub/file2"),
                format!("DEL pubky://{pubky}/pub/file2"),
                format!("PUT pubky://{pubky}/pub/z.com/a.txt"),
                format!("DEL pubky://{pubky}/pub/z.com/a.txt"),
                // The trailing cursor line's id is not predictable here;
                // compare it against itself.
                lines.last().unwrap().to_string()
            ]
        )
    }
}
#[tokio::test]
async fn read_after_event() {
let testnet = Testnet::new(10);
let server = Homeserver::start_test(&testnet).await.unwrap();
let client = PubkyClient::test(&testnet);
let keypair = Keypair::random();
client.signup(&keypair, &server.public_key()).await.unwrap();
let pubky = keypair.public_key();
let url = format!("pubky://{pubky}/pub/a.com/a.txt");
client.put(url.as_str(), &[0]).await.unwrap();
let feed_url = format!("http://localhost:{}/events/", server.port());
let feed_url = feed_url.as_str();
let client = PubkyClient::test(&testnet);
{
let response = client
.request(
Method::GET,
format!("{feed_url}?limit=10").as_str().try_into().unwrap(),
)
.send()
.await
.unwrap();
let text = response.text().await.unwrap();
let lines = text.split('\n').collect::<Vec<_>>();
let cursor = lines.last().unwrap().split(" ").last().unwrap().to_string();
assert_eq!(
lines,
vec![
format!("PUT pubky://{pubky}/pub/a.com/a.txt"),
format!("cursor: {cursor}",)
]
);
}
let get = client.get(url.as_str()).await.unwrap();
dbg!(get);
}
}

View File

@@ -0,0 +1,255 @@
use std::{
collections::HashSet,
sync::{Arc, RwLock},
};
use js_sys::{Array, Uint8Array};
use wasm_bindgen::prelude::*;
use url::Url;
use pubky_common::capabilities::Capabilities;
use crate::error::Error;
use crate::PubkyClient;
mod http;
mod keys;
mod pkarr;
mod recovery_file;
mod session;
use keys::{Keypair, PublicKey};
use session::Session;
impl Default for PubkyClient {
    // Defaults to the mainnet relay configuration built by `PubkyClient::new`.
    fn default() -> Self {
        Self::new()
    }
}
static DEFAULT_RELAYS: [&str; 1] = ["https://relay.pkarr.org"];
static TESTNET_RELAYS: [&str; 1] = ["http://localhost:15411/pkarr"];
#[wasm_bindgen]
impl PubkyClient {
    /// Create a client configured with the default mainnet Pkarr relays.
    #[wasm_bindgen(constructor)]
    pub fn new() -> Self {
        Self {
            http: reqwest::Client::builder().build().unwrap(),
            session_cookies: Arc::new(RwLock::new(HashSet::new())),
            pkarr_relays: DEFAULT_RELAYS.into_iter().map(|s| s.to_string()).collect(),
        }
    }

    /// Create a client with configurations appropriate for local testing:
    /// - set Pkarr relays to `["http://localhost:15411/pkarr"]` instead of default relay.
    #[wasm_bindgen]
    pub fn testnet() -> Self {
        Self {
            http: reqwest::Client::builder().build().unwrap(),
            session_cookies: Arc::new(RwLock::new(HashSet::new())),
            pkarr_relays: TESTNET_RELAYS.into_iter().map(|s| s.to_string()).collect(),
        }
    }

    /// Set Pkarr relays used for publishing and resolving Pkarr packets.
    ///
    /// By default, [PubkyClient] will use `["https://relay.pkarr.org"]`
    #[wasm_bindgen(js_name = "setPkarrRelays")]
    pub fn set_pkarr_relays(mut self, relays: Vec<String>) -> Self {
        self.pkarr_relays = relays;
        self
    }

    /// Read the set of pkarr relays used by this client.
    #[wasm_bindgen(js_name = "getPkarrRelays")]
    pub fn get_pkarr_relays(&self) -> Vec<String> {
        self.pkarr_relays.clone()
    }

    /// Signup to a homeserver and update Pkarr accordingly.
    ///
    /// The homeserver is a Pkarr domain name, where the TLD is a Pkarr public key
    /// for example "pubky.o4dksfbqk85ogzdb5osziw6befigbuxmuxkuxq8434q89uj56uyy"
    #[wasm_bindgen]
    pub async fn signup(
        &self,
        keypair: &Keypair,
        homeserver: &PublicKey,
    ) -> Result<Session, JsValue> {
        Ok(Session(
            self.inner_signup(keypair.as_inner(), homeserver.as_inner())
                .await
                // (was `|e| JsValue::from(e)` — redundant closure)
                .map_err(JsValue::from)?,
        ))
    }

    /// Check the current session for a given Pubky in its homeserver.
    ///
    /// Returns [Session] or `None` (if received `404 NOT_FOUND`),
    /// or throws the received error if the response has any other `>=400` status code.
    #[wasm_bindgen]
    pub async fn session(&self, pubky: &PublicKey) -> Result<Option<Session>, JsValue> {
        self.inner_session(pubky.as_inner())
            .await
            .map(|s| s.map(Session))
            .map_err(|e| e.into())
    }

    /// Signout from a homeserver.
    #[wasm_bindgen]
    pub async fn signout(&self, pubky: &PublicKey) -> Result<(), JsValue> {
        self.inner_signout(pubky.as_inner())
            .await
            .map_err(|e| e.into())
    }

    /// Signin to a homeserver using the root Keypair.
    #[wasm_bindgen]
    pub async fn signin(&self, keypair: &Keypair) -> Result<(), JsValue> {
        self.inner_signin(keypair.as_inner())
            .await
            .map(|_| ())
            .map_err(|e| e.into())
    }

    /// Return `pubkyauth://` url and wait for the incoming [AuthToken]
    /// verifying that AuthToken, and if capabilities were requested, signing in to
    /// the Pubky's homeserver and returning the [Session] information.
    ///
    /// Returns a tuple of [pubkyAuthUrl, Promise<Session>]
    #[wasm_bindgen(js_name = "authRequest")]
    pub fn auth_request(&self, relay: &str, capabilities: &str) -> Result<js_sys::Array, JsValue> {
        let mut relay: Url = relay
            .try_into()
            .map_err(|_| Error::Generic("Invalid relay Url".into()))?;
        let (pubkyauth_url, client_secret) = self.create_auth_request(
            &mut relay,
            // (fixed misspelled error message: "capaiblities")
            &Capabilities::try_from(capabilities).map_err(|_| "Invalid capabilities")?,
        )?;
        // Clone so the background future owns a client independent of `self`.
        let this = self.clone();
        let future = async move {
            this.subscribe_to_auth_response(relay, &client_secret)
                .await
                .map(|opt| {
                    opt.map_or_else(
                        || JsValue::NULL, // Convert `None` to `JsValue::NULL`
                        |session| JsValue::from(Session(session)),
                    )
                })
                .map_err(|err| JsValue::from_str(&format!("{:?}", err)))
        };
        let promise = wasm_bindgen_futures::future_to_promise(future);
        // Return the URL and the promise
        let js_tuple = js_sys::Array::new();
        js_tuple.push(&JsValue::from_str(pubkyauth_url.as_ref()));
        js_tuple.push(&promise);
        Ok(js_tuple)
    }

    /// Sign an [pubky_common::auth::AuthToken], encrypt it and send it to the
    /// source of the pubkyauth request url.
    #[wasm_bindgen(js_name = "sendAuthToken")]
    pub async fn send_auth_token(
        &self,
        keypair: &Keypair,
        pubkyauth_url: &str,
    ) -> Result<(), JsValue> {
        let pubkyauth_url: Url = pubkyauth_url
            .try_into()
            // (was "Invalid relay Url" — copy-pasted from `auth_request`;
            // this parses the pubkyauth url, not the relay)
            .map_err(|_| Error::Generic("Invalid pubkyauth Url".into()))?;
        self.inner_send_auth_token(keypair.as_inner(), pubkyauth_url)
            .await?;
        Ok(())
    }

    // === Public data ===

    /// Upload a small payload to a given path.
    #[wasm_bindgen]
    pub async fn put(&self, url: &str, content: &[u8]) -> Result<(), JsValue> {
        self.inner_put(url, content).await.map_err(|e| e.into())
    }

    /// Download a small payload from a given path relative to a pubky author.
    #[wasm_bindgen]
    pub async fn get(&self, url: &str) -> Result<Option<Uint8Array>, JsValue> {
        self.inner_get(url)
            .await
            .map(|b| b.map(|b| (&*b).into()))
            .map_err(|e| e.into())
    }

    /// Delete a file at a path relative to a pubky author.
    #[wasm_bindgen]
    pub async fn delete(&self, url: &str) -> Result<(), JsValue> {
        self.inner_delete(url).await.map_err(|e| e.into())
    }

    /// Returns a list of Pubky urls (as strings).
    ///
    /// - `url`: The Pubky url (string) to the directory you want to list its content.
    /// - `cursor`: Either a full `pubky://` Url (from previous list response),
    ///   or a path (to a file or directory) relative to the `url`
    /// - `reverse`: List in reverse order
    /// - `limit` Limit the number of urls in the response
    /// - `shallow`: List directories and files, instead of flat list of files.
    #[wasm_bindgen]
    pub async fn list(
        &self,
        url: &str,
        cursor: Option<String>,
        reverse: Option<bool>,
        limit: Option<u16>,
        shallow: Option<bool>,
    ) -> Result<Array, JsValue> {
        // TODO: try later to return Vec<String> from async function.
        // The cursor-bearing branch is duplicated because the builder's
        // `cursor` borrows the local string for the whole chain.
        if let Some(cursor) = cursor {
            return self
                .inner_list(url)?
                .reverse(reverse.unwrap_or(false))
                .limit(limit.unwrap_or(u16::MAX))
                .cursor(&cursor)
                .shallow(shallow.unwrap_or(false))
                .send()
                .await
                .map(|urls| {
                    let js_array = Array::new();
                    for url in urls {
                        js_array.push(&JsValue::from_str(&url));
                    }
                    js_array
                })
                .map_err(|e| e.into());
        }
        self.inner_list(url)?
            .reverse(reverse.unwrap_or(false))
            .limit(limit.unwrap_or(u16::MAX))
            .shallow(shallow.unwrap_or(false))
            .send()
            .await
            .map(|urls| {
                let js_array = Array::new();
                for url in urls {
                    js_array.push(&JsValue::from_str(&url));
                }
                js_array
            })
            .map_err(|e| e.into())
    }
}

View File

@@ -0,0 +1,40 @@
use crate::PubkyClient;
use reqwest::{Method, RequestBuilder, Response};
use url::Url;
impl PubkyClient {
    /// Build a request with credentials included and every stored session
    /// cookie attached as a `Cookie` header.
    pub(crate) fn request(&self, method: Method, url: Url) -> RequestBuilder {
        let base = self.http.request(method, url).fetch_credentials_include();
        self.session_cookies
            .read()
            .unwrap()
            .iter()
            .fold(base, |builder, cookie| builder.header("Cookie", cookie))
    }

    // Support cookies for nodejs
    /// Extract the cookie name=value pair (everything before the first `;`)
    /// from a `set-cookie` response header and remember it.
    pub(crate) fn store_session(&self, response: &Response) {
        let cookie = response
            .headers()
            .get("set-cookie")
            .and_then(|header| header.to_str().ok())
            .and_then(|raw| raw.split(';').next());
        if let Some(cookie) = cookie {
            self.session_cookies
                .write()
                .unwrap()
                .insert(cookie.to_string());
        }
    }

    /// Forget every stored cookie whose name starts with this pubky's
    /// z-base32 public key.
    pub(crate) fn remove_session(&self, pubky: &pkarr::PublicKey) {
        let prefix = pubky.to_string();
        self.session_cookies
            .write()
            .unwrap()
            .retain(|stored| !stored.starts_with(&prefix));
    }
}

View File

@@ -0,0 +1,99 @@
use wasm_bindgen::prelude::*;
use crate::Error;
#[wasm_bindgen]
pub struct Keypair(pkarr::Keypair);
#[wasm_bindgen]
impl Keypair {
    #[wasm_bindgen]
    /// Generate a random [Keypair]
    pub fn random() -> Self {
        Self(pkarr::Keypair::random())
    }

    /// Generate a [Keypair] from a secret key.
    ///
    /// Errors if `secret_key` is not a `Uint8Array` of exactly 32 bytes.
    #[wasm_bindgen(js_name = "fromSecretKey")]
    pub fn from_secret_key(secret_key: js_sys::Uint8Array) -> Result<Keypair, JsValue> {
        // Guard against JS callers passing a non-Uint8Array value.
        if !js_sys::Uint8Array::instanceof(&secret_key) {
            return Err("Expected secret_key to be an instance of Uint8Array".into());
        }
        let len = secret_key.byte_length();
        if len != 32 {
            // (was `return Err(format!(...))?` — a redundant `?` on a
            // freshly-built `Err`; convert to JsValue directly instead)
            return Err(format!("Expected secret_key to be 32 bytes, got {len}").into());
        }
        let mut bytes = [0; 32];
        secret_key.copy_to(&mut bytes);
        Ok(Self(pkarr::Keypair::from_secret_key(&bytes)))
    }

    /// Returns the secret key of this keypair.
    #[wasm_bindgen(js_name = "secretKey")]
    pub fn secret_key(&self) -> js_sys::Uint8Array {
        self.0.secret_key().as_slice().into()
    }

    /// Returns the [PublicKey] of this keypair.
    #[wasm_bindgen(js_name = "publicKey")]
    pub fn public_key(&self) -> PublicKey {
        PublicKey(self.0.public_key())
    }
}
impl Keypair {
    /// Borrow the wrapped [pkarr::Keypair] (Rust-side only; not exported to JS).
    pub fn as_inner(&self) -> &pkarr::Keypair {
        &self.0
    }
}
// Wrap a native pkarr keypair for the wasm boundary.
impl From<pkarr::Keypair> for Keypair {
    fn from(keypair: pkarr::Keypair) -> Self {
        Self(keypair)
    }
}
#[wasm_bindgen]
pub struct PublicKey(pub(crate) pkarr::PublicKey);
#[wasm_bindgen]
impl PublicKey {
    #[wasm_bindgen]
    /// Convert the PublicKey to Uint8Array
    pub fn to_uint8array(&self) -> js_sys::Uint8Array {
        js_sys::Uint8Array::from(self.0.as_bytes().as_slice())
    }

    #[wasm_bindgen]
    /// Returns the z-base32 encoding of this public key
    pub fn z32(&self) -> String {
        self.0.to_string()
    }

    #[wasm_bindgen(js_name = "from")]
    /// Parse a [PublicKey] from a JS string (z-base32).
    /// @throws
    pub fn try_from(value: JsValue) -> Result<PublicKey, JsValue> {
        // Only string values are accepted; anything else is rejected here.
        let string = value
            .as_string()
            .ok_or("Couldn't create a PublicKey from this type of value")?;
        Ok(PublicKey(
            pkarr::PublicKey::try_from(string).map_err(Error::Pkarr)?,
        ))
    }
}
impl PublicKey {
    /// Borrow the wrapped [pkarr::PublicKey] (Rust-side only; not exported to JS).
    pub fn as_inner(&self) -> &pkarr::PublicKey {
        &self.0
    }
}
// Wrap a native pkarr public key for the wasm boundary.
impl From<pkarr::PublicKey> for PublicKey {
    fn from(value: pkarr::PublicKey) -> Self {
        PublicKey(value)
    }
}

View File

@@ -0,0 +1,48 @@
use reqwest::StatusCode;
pub use pkarr::{PublicKey, SignedPacket};
use crate::error::Result;
use crate::PubkyClient;
// TODO: Add an in memory cache of packets
impl PubkyClient {
    //TODO: migrate to pkarr::PkarrRelayClient
    /// Resolve a signed packet for `public_key` from the first configured
    /// Pkarr relay. Returns `Ok(None)` on 404; other error statuses are
    /// surfaced as errors instead of being parsed as relay payloads.
    pub(crate) async fn pkarr_resolve(
        &self,
        public_key: &PublicKey,
    ) -> Result<Option<SignedPacket>> {
        //TODO: Allow multiple relays in parallel
        let relay = self.pkarr_relays.first().expect("initialized with relays");
        let res = self
            .http
            .get(format!("{relay}/{}", public_key))
            .send()
            .await?;
        if res.status() == StatusCode::NOT_FOUND {
            return Ok(None);
        };
        // Previously a 4xx/5xx body fell through to `from_relay_payload`
        // and produced a confusing parse error; fail on the status itself.
        let res = res.error_for_status()?;
        // TODO: guard against too large responses.
        let bytes = res.bytes().await?;
        let existing = SignedPacket::from_relay_payload(public_key, &bytes)?;
        Ok(Some(existing))
    }

    /// Publish `signed_packet` to the first configured Pkarr relay.
    pub(crate) async fn pkarr_publish(&self, signed_packet: &SignedPacket) -> Result<()> {
        let relay = self.pkarr_relays.first().expect("initialized with relays");
        // Previously the HTTP status was ignored, so a relay rejection
        // silently "succeeded"; report non-2xx responses to the caller.
        self.http
            .put(format!("{relay}/{}", signed_packet.public_key()))
            .body(signed_packet.to_relay_payload())
            .send()
            .await?
            .error_for_status()?;
        Ok(())
    }
}

View File

@@ -0,0 +1,24 @@
use js_sys::Uint8Array;
use wasm_bindgen::prelude::{wasm_bindgen, JsValue};
use crate::error::Error;
use super::keys::Keypair;
/// Create a recovery file of the `keypair`, containing the secret key encrypted
/// using the `passphrase`. Returns the encrypted bytes as a `Uint8Array`.
#[wasm_bindgen(js_name = "createRecoveryFile")]
pub fn create_recovery_file(keypair: &Keypair, passphrase: &str) -> Result<Uint8Array, JsValue> {
    pubky_common::recovery_file::create_recovery_file(keypair.as_inner(), passphrase)
        .map(|b| b.as_slice().into())
        .map_err(|e| Error::from(e).into())
}
/// Decrypt a recovery file using the `passphrase` and return the [Keypair]
/// it contains. (Doc was copy-pasted from `create_recovery_file`.)
#[wasm_bindgen(js_name = "decryptRecoveryFile")]
pub fn decrypt_recovery_file(recovery_file: &[u8], passphrase: &str) -> Result<Keypair, JsValue> {
    pubky_common::recovery_file::decrypt_recovery_file(recovery_file, passphrase)
        .map(Keypair::from)
        .map_err(|e| Error::from(e).into())
}

View File

@@ -0,0 +1,27 @@
use pubky_common::session;
use wasm_bindgen::prelude::*;
use super::keys::PublicKey;
#[wasm_bindgen]
pub struct Session(pub(crate) session::Session);
#[wasm_bindgen]
impl Session {
    /// Return the [PublicKey] of this session
    #[wasm_bindgen]
    pub fn pubky(&self) -> PublicKey {
        self.0.pubky().clone().into()
    }

    /// Return the capabilities that this session has.
    ///
    /// Capabilities are returned as their string representations for the
    /// JS side.
    #[wasm_bindgen]
    pub fn capabilities(&self) -> Vec<String> {
        self.0
            .capabilities()
            .iter()
            .map(|c| c.to_string())
            .collect()
    }
}

View File

@@ -0,0 +1,3 @@
fn main() {
    // Entry point for the `uniffi-bindgen` CLI used to generate
    // foreign-language bindings (Kotlin/Swift) for this crate.
    uniffi::uniffi_bindgen_main()
}

40
rust/src/lib.rs Normal file
View File

@@ -0,0 +1,40 @@
uniffi::setup_scaffolding!();
use url::Url;
use pubky::PubkyClient;
#[uniffi::export]
/// Sign and send an auth token to the callback of a `pubkyauth://` request.
///
/// Returns a two-element vector: `["success", msg]` or `["error", msg]`
/// (uniffi-friendly status/payload shape).
async fn auth(url: String, secret_key: String) -> Vec<String> {
    // Decode the hex-encoded secret key and require exactly 32 bytes.
    let decoded = match hex::decode(&secret_key) {
        Ok(raw) => raw,
        Err(_) => return create_response_vector(true, "Failed to decode secret key".to_string()),
    };
    let secret: [u8; 32] = match decoded.try_into() {
        Ok(array) => array,
        Err(_) => {
            return create_response_vector(
                true,
                "Failed to convert secret key to 32-byte array".to_string(),
            )
        }
    };
    // Validate the pubkyauth callback URL before contacting the homeserver.
    let auth_url = match Url::parse(&url) {
        Ok(parsed) => parsed,
        Err(_) => return create_response_vector(true, "Failed to parse URL".to_string()),
    };
    let keypair = pkarr::Keypair::from_secret_key(&secret);
    let client = PubkyClient::testnet();
    match client.send_auth_token(&keypair, auth_url).await {
        Ok(_) => create_response_vector(false, "Auth token sent successfully".to_string()),
        Err(error) => create_response_vector(true, format!("Error sending auth token: {:?}", error)),
    }
}
/// Build the two-element `[status, payload]` vector returned across the
/// uniffi boundary: status is `"error"` when `error` is true, `"success"`
/// otherwise.
fn create_response_vector(error: bool, data: String) -> Vec<String> {
    let status = if error { "error" } else { "success" };
    vec![status.to_string(), data]
}