added FetcherDB pipeline stage: batch-fetches events for pubkeys from the event store

This commit is contained in:
pippellia-btc
2025-06-10 16:15:22 +02:00
parent 376d55cc81
commit a0d4dd5ad8
4 changed files with 84 additions and 23 deletions

View File

@@ -74,6 +74,8 @@ func main() {
log.Printf("correctly added %d init pubkeys", len(config.InitPubkeys)) log.Printf("correctly added %d init pubkeys", len(config.InitPubkeys))
} }
go printStats(ctx, events, pubkeys)
var producers sync.WaitGroup var producers sync.WaitGroup
var consumers sync.WaitGroup var consumers sync.WaitGroup
@@ -91,7 +93,7 @@ func main() {
go func() { go func() {
defer producers.Done() defer producers.Done()
pipe.Arbiter(ctx, config.Arbiter, db, enqueue(pubkeys)) pipe.Arbiter(ctx, config.Arbiter, db, enqueue(pubkeys))
close(pubkeys) // Arbiter is the only pubkey sender close(pubkeys) // Arbiter is the only pubkey producer
}() }()
consumers.Add(1) consumers.Add(1)

View File

@@ -85,36 +85,29 @@ func main() {
nostr.KindFollowList, // no need to sync other event kinds nostr.KindFollowList, // no need to sync other event kinds
} }
var producers sync.WaitGroup go printStats(ctx, events, pubkeys)
var consumers sync.WaitGroup
var wg sync.WaitGroup
wg.Add(3)
producers.Add(3)
go func() { go func() {
defer producers.Done() defer wg.Done()
pipe.Firehose(ctx, config.Firehose, db, enqueue(events)) pipe.FetcherDB(ctx, config.Fetcher, store, pubkeys, enqueue(events))
close(events) // FetcherDB is the only event producer
}() }()
go func() { go func() {
defer producers.Done() defer wg.Done()
pipe.Fetcher(ctx, config.Fetcher, pubkeys, enqueue(events)) // TODO: fetch from the event store
}()
go func() {
defer producers.Done()
pipe.Arbiter(ctx, config.Arbiter, db, enqueue(pubkeys)) pipe.Arbiter(ctx, config.Arbiter, db, enqueue(pubkeys))
close(pubkeys) // Arbiter is the only pubkey sender close(pubkeys) // Arbiter is the only pubkey producer
}() }()
consumers.Add(1)
go func() { go func() {
defer consumers.Done() defer wg.Done()
pipe.GraphBuilder(ctx, config.Engine, store, db, events) pipe.GraphBuilder(ctx, config.Engine, db, events)
}() }()
producers.Wait() wg.Wait()
close(events)
consumers.Wait()
} }
// handleSignals listens for OS signals and triggers context cancellation. // handleSignals listens for OS signals and triggers context cancellation.

View File

@@ -54,7 +54,7 @@ func Engine(
graphEvents := make(chan *nostr.Event, config.BuilderCapacity) graphEvents := make(chan *nostr.Event, config.BuilderCapacity)
defer close(graphEvents) defer close(graphEvents)
go GraphBuilder(ctx, config, store, db, graphEvents) go GraphBuilder(ctx, config, db, graphEvents)
log.Println("Engine: ready to process events") log.Println("Engine: ready to process events")
Archiver(ctx, config, store, events, func(e *nostr.Event) error { Archiver(ctx, config, store, events, func(e *nostr.Event) error {
@@ -144,7 +144,6 @@ func archive(
func GraphBuilder( func GraphBuilder(
ctx context.Context, ctx context.Context,
config EngineConfig, config EngineConfig,
store *eventstore.Store,
db redb.RedisDB, db redb.RedisDB,
events chan *nostr.Event) { events chan *nostr.Event) {

View File

@@ -8,6 +8,7 @@ import (
"time" "time"
"github.com/nbd-wtf/go-nostr" "github.com/nbd-wtf/go-nostr"
"github.com/vertex-lab/relay/pkg/eventstore"
) )
var ( var (
@@ -157,7 +158,7 @@ func (c FetcherConfig) Print() {
fmt.Printf(" Interval: %v\n", c.Interval) fmt.Printf(" Interval: %v\n", c.Interval)
} }
// Fetcher extracts pubkeys from the channel and queries for their events: // Fetcher extracts pubkeys from the channel and queries relays for their events:
// - when the batch is bigger than config.Batch // - when the batch is bigger than config.Batch
// - after config.Interval since the last query. // - after config.Interval since the last query.
func Fetcher(ctx context.Context, config FetcherConfig, pubkeys <-chan string, send func(*nostr.Event) error) { func Fetcher(ctx context.Context, config FetcherConfig, pubkeys <-chan string, send func(*nostr.Event) error) {
@@ -250,6 +251,72 @@ func fetch(ctx context.Context, pool *nostr.SimplePool, relays, pubkeys []string
return events, nil return events, nil
} }
// FetcherDB extracts pubkeys from the channel and queries the store for their events:
// - when the batch is bigger than config.Batch
// - after config.Interval since the last query.
//
// Events found are forwarded via send; query and send errors are logged and
// the loop continues. The function returns when ctx is cancelled or the
// pubkeys channel is closed (flushing any pending batch first).
func FetcherDB(
	ctx context.Context,
	config FetcherConfig,
	store *eventstore.Store,
	pubkeys <-chan string,
	send func(*nostr.Event) error) {

	defer log.Println("FetcherDB: shutting down...")

	batch := make([]string, 0, config.Batch)
	timer := time.After(config.Interval)

	// flush queries the store for the batched pubkeys, forwards the resulting
	// events, and resets both the batch and the interval timer.
	flush := func() {
		events, err := store.Query(ctx, &nostr.Filter{Kinds: Kinds, Authors: batch})
		if err != nil {
			log.Printf("FetcherDB: %v", err)
		}

		for i := range events {
			// Address the slice element directly: taking the address of a
			// range variable would alias one shared loop variable pre-Go 1.22.
			if err := send(&events[i]); err != nil {
				log.Printf("FetcherDB: %v", err)
			}
		}

		batch = make([]string, 0, config.Batch)
		timer = time.After(config.Interval)
	}

	for {
		select {
		case <-ctx.Done():
			return

		case pubkey, ok := <-pubkeys:
			if !ok {
				// Channel closed: drain the pending batch so the last
				// pubkeys are not silently dropped on shutdown.
				if len(batch) > 0 {
					flush()
				}
				return
			}

			batch = append(batch, pubkey)
			if len(batch) < config.Batch {
				continue
			}
			flush()

		case <-timer:
			if len(batch) == 0 {
				// Nothing buffered; note the timer is not re-armed here,
				// matching the original behavior (next flush re-arms it).
				continue
			}
			flush()
		}
	}
}
// Shutdown iterates over the relays in the pool and closes all connections. // Shutdown iterates over the relays in the pool and closes all connections.
func shutdown(pool *nostr.SimplePool) { func shutdown(pool *nostr.SimplePool) {
pool.Relays.Range(func(_ string, relay *nostr.Relay) bool { pool.Relays.Range(func(_ string, relay *nostr.Relay) bool {