mirror of
https://github.com/lollipopkit/flutter_server_box.git
synced 2026-01-17 22:44:34 +01:00
* refactor(server): replace Future.wait with an explicit list of futures to enhance readability
  Turn the nested map/async calls into explicit for loops and future lists so the code's logic is clearer.
* fix(server): fix the auto-refresh logic and concurrency control
  - Add `_refreshCompleter` to prevent concurrent refreshes
  - Fix the status not updating after the auto-refresh timer was cancelled
  - Remove the invalid check for `duration == 1`
* refactor(server): optimize the refresh logic by filtering out servers that do not need refreshing up front
  Move the filtering outside the loop and use `where` to select the servers that need a refresh, avoiding repeated condition checks inside the loop.
* refactor: break complex conditionals into clearer steps to enhance readability
  Separate the server-refresh logic from the rate-limiter reset, and replace chained calls with explicit loops so the code is easier to maintain and understand.
* refactor(server): remove `updateFuture` from `ServerState` and use an `_isRefreshing` flag instead
  Simplify the refresh logic by tracking progress with a boolean flag rather than a Future, avoiding unnecessary state updates.
* refactor(server_detail): extract repeatedly read settings into local variables to improve performance
  Reading each global setting once per build reduces redundant state lookups on the detail page.
* refactor: rename `_displayCpuIndexSetting` to `_displayCpuIndex` for consistency
* refactor(server): fix parallel blocking in the server refresh
  The old code used Future.wait to wait for all refresh operations to complete, but their results are never needed; calling ignore() on each future instead avoids blocking on the slowest server.
* fix: adjust the order of logging and default-value assignment
  Record the invalid-duration warning first, then fall back to the default value.
* refactor(server): rename _refreshCompleter to _refreshInProgress to enhance readability
  The new name reflects the variable's actual purpose: indicating whether a refresh operation is in progress.
* refactor(server): remove unnecessary refresh-progress state management
  Drop the unused _refreshInProgress variable and the related Completer handling, leaving the refresh logic more concise and straightforward.
* chore: update dependency versions
  - camera_web: 0.3.5 -> 0.3.5+3
  - ffi: 2.1.4 -> 2.1.5
  - hive_ce_flutter: 2.3.3 -> 2.3.4
  - watcher: 1.1.4 -> 1.2.1
* opt.

---------

Co-authored-by: lollipopkit🏳️⚧️ <10864310+lollipopkit@users.noreply.github.com>
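A minimal sketch of the Future.wait change described above (the refreshAll helper and Server type are illustrative stand-ins, not this app's actual API):

    // Before: waiting on every refresh blocks until the slowest server
    // answers, even though none of the results are used:
    //   await Future.wait(servers.map((s) => s.refresh()));
    //
    // After: start each refresh and explicitly discard its result with
    // Future.ignore(), so the caller returns immediately.
    void refreshAll(List<Server> servers) {
      for (final s in servers) {
        s.refresh().ignore();
      }
    }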
330 lines
9.4 KiB
Dart
import 'dart:async';

import 'package:fl_lib/fl_lib.dart';
import 'package:freezed_annotation/freezed_annotation.dart';
import 'package:riverpod_annotation/riverpod_annotation.dart';
import 'package:server_box/core/sync.dart';
import 'package:server_box/data/model/server/server.dart';
import 'package:server_box/data/model/server/server_private_info.dart';
import 'package:server_box/data/model/server/try_limiter.dart';
import 'package:server_box/data/provider/server/single.dart';
import 'package:server_box/data/res/store.dart';
import 'package:server_box/data/ssh/session_manager.dart';

part 'all.freezed.dart';
part 'all.g.dart';

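/// Immutable state for all configured servers: the server map, their display
/// order, the set of known tags, and the ids the user manually disconnected.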
@freezed
abstract class ServersState with _$ServersState {
  const factory ServersState({
    @Default({}) Map<String, Spi> servers,
    @Default([]) List<String> serverOrder,
    @Default(<String>{}) Set<String> tags,
    @Default(<String>{}) Set<String> manualDisconnectedIds,
    Timer? autoRefreshTimer,
  }) = _ServersState;
}

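/// Manages the server list: loading from the store, ordering, tagging,
/// refreshing, and the auto-refresh timer.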
@Riverpod(keepAlive: true)
class ServersNotifier extends _$ServersNotifier {
  @override
  ServersState build() {
    return _load();
  }

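  /// Reload the server list from the store and refresh if anything changed.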
  Future<void> reload() async {
    final newState = _load();
    if (newState == state) return;
    state = newState;
    await refresh();
  }

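  /// Build a [ServersState] from the store, restoring the persisted order
  /// and re-persisting it if it has drifted from the stored list.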
  ServersState _load() {
    final spis = Stores.server.fetch();
    final newServers = <String, Spi>{};
    final newServerOrder = <String>[];

    for (final spi in spis) {
      newServers[spi.id] = spi;
    }

    final serverOrder_ = Stores.setting.serverOrder.fetch();
    if (serverOrder_.isNotEmpty) {
      spis.reorder(order: serverOrder_, finder: (n, id) => n.id == id);
      newServerOrder.addAll(spis.map((e) => e.id));
    } else {
      newServerOrder.addAll(newServers.keys);
    }

    // Must use [equals] to compare [Order] here.
    if (!newServerOrder.equals(serverOrder_)) {
      Stores.setting.serverOrder.put(newServerOrder);
    }

    final newTags = _calculateTags(newServers);

    return stateOrNull?.copyWith(servers: newServers, serverOrder: newServerOrder, tags: newTags) ??
        ServersState(servers: newServers, serverOrder: newServerOrder, tags: newTags);
  }

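  /// Collect the union of all tags across [servers].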
  Set<String> _calculateTags(Map<String, Spi> servers) {
    final tags = <String>{};
    for (final spi in servers.values) {
      final spiTags = spi.tags;
      if (spiTags == null) continue;
      for (final t in spiTags) {
        tags.add(t);
      }
    }
    return tags;
  }

  /// Get a [Spi] by [spi] or [id].
  ///
  /// Priority: [spi] > [id]
  Spi? pick({Spi? spi, String? id}) {
    if (spi != null) {
      return state.servers[spi.id];
    }
    if (id != null) {
      return state.servers[id];
    }
    return null;
  }

  /// If [spi] is specified, only refresh that server.
  /// If [onlyFailed] is true, only refresh servers whose connection failed.
  Future<void> refresh({Spi? spi, bool onlyFailed = false}) async {
    if (spi != null) {
      final newManualDisconnected = Set<String>.from(state.manualDisconnectedIds)..remove(spi.id);
      state = state.copyWith(manualDisconnectedIds: newManualDisconnected);
      final serverNotifier = ref.read(serverProvider(spi.id).notifier);
      await serverNotifier.refresh();
      return;
    }

    // Collect the targets first so condition checks are not repeated inside
    // the refresh loop below.
    final serversToRefresh = <MapEntry<String, Spi>>[];
    final idsToResetLimiter = <String>[];

    for (final entry in state.servers.entries) {
      final serverId = entry.key;
      final spi = entry.value;

      if (state.manualDisconnectedIds.contains(serverId)) continue;

      final serverState = ref.read(serverProvider(serverId));

      if (onlyFailed) {
        if (serverState.conn != ServerConn.failed) continue;
        idsToResetLimiter.add(serverId);
      }

      if (serverState.conn == ServerConn.disconnected && !spi.autoConnect) continue;

      serversToRefresh.add(entry);
    }

    for (final id in idsToResetLimiter) {
      TryLimiter.reset(id);
    }

    for (final entry in serversToRefresh) {
      final serverNotifier = ref.read(serverProvider(entry.key).notifier);
      // Fire and forget: awaiting here would block on the slowest server.
      serverNotifier.refresh().ignore();
    }
  }

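  /// (Re)start the periodic status refresh using the interval from settings.
  /// An interval of 0 disables auto-refresh.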
  Future<void> startAutoRefresh() async {
    var duration = Stores.setting.serverStatusUpdateInterval.fetch();
    stopAutoRefresh();
    if (duration == 0) return;
    if (duration <= 1 || duration > 10) {
      Loggers.app.warning('Invalid duration: $duration, use default 3');
      duration = 3;
    }
    final timer = Timer.periodic(Duration(seconds: duration), (_) async {
      await refresh();
    });
    state = state.copyWith(autoRefreshTimer: timer);
  }

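  /// Cancel the auto-refresh timer, if any, and clear it from the state.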
  void stopAutoRefresh() {
    final timer = state.autoRefreshTimer;
    if (timer != null) {
      timer.cancel();
    }
    state = state.copyWith(autoRefreshTimer: null);
  }

  bool get isAutoRefreshOn => state.autoRefreshTimer != null;

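  /// Mark every server and its SSH session as disconnected.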
  void setDisconnected() {
    for (final serverId in state.servers.keys) {
      final serverNotifier = ref.read(serverProvider(serverId).notifier);
      serverNotifier.updateConnection(ServerConn.disconnected);

      // Update SSH session status to disconnected
      final sessionId = 'ssh_$serverId';
      TermSessionManager.updateStatus(sessionId, TermSessionStatus.disconnected);
    }
    //TryLimiter.clear();
  }

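  /// Close one server by [id], or every server when [id] is null.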
  void closeServer({String? id}) {
    if (id == null) {
      for (final serverId in state.servers.keys) {
        closeOneServer(serverId);
      }
      return;
    }
    closeOneServer(id);
  }

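  /// Close a single server's connection and remember that the user
  /// disconnected it manually, so [refresh] skips it.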
  void closeOneServer(String id) {
    final spi = state.servers[id];
    if (spi == null) {
      Loggers.app.warning('Server with id $id not found');
      return;
    }

    final serverNotifier = ref.read(serverProvider(id).notifier);
    serverNotifier.closeConnection();

    final newManualDisconnected = Set<String>.from(state.manualDisconnectedIds)..add(id);
    state = state.copyWith(manualDisconnectedIds: newManualDisconnected);

    // Remove SSH session when server is manually closed
    final sessionId = 'ssh_$id';
    TermSessionManager.remove(sessionId);
  }

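  /// Add [spi] to the state and the store, refresh it, and schedule a
  /// backup sync.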
  void addServer(Spi spi) {
    final newServers = Map<String, Spi>.from(state.servers);
    newServers[spi.id] = spi;

    final newOrder = List<String>.from(state.serverOrder)..add(spi.id);
    final newTags = _calculateTags(newServers);

    state = state.copyWith(servers: newServers, serverOrder: newOrder, tags: newTags);

    Stores.server.put(spi);
    Stores.setting.serverOrder.put(newOrder);
    refresh(spi: spi);
    bakSync.sync(milliDelay: 1000);
  }

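  /// Remove the server with [id] from the state, the store, and the SSH
  /// session manager, then schedule a backup sync.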
  void delServer(String id) {
    final newServers = Map<String, Spi>.from(state.servers);
    newServers.remove(id);

    final newOrder = List<String>.from(state.serverOrder)..remove(id);
    final newTags = _calculateTags(newServers);

    state = state.copyWith(servers: newServers, serverOrder: newOrder, tags: newTags);

    Stores.setting.serverOrder.put(newOrder);
    Stores.server.delete(id);

    // Remove SSH session when server is deleted
    final sessionId = 'ssh_$id';
    TermSessionManager.remove(sessionId);

    bakSync.sync(milliDelay: 1000);
  }

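  /// Remove all servers, their SSH sessions, and the persisted order.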
  void deleteAll() {
    // Remove all SSH sessions before clearing servers
    for (final id in state.servers.keys) {
      final sessionId = 'ssh_$id';
      TermSessionManager.remove(sessionId);
    }

    state = const ServersState();

    Stores.setting.serverOrder.put([]);
    Stores.server.clear();
    bakSync.sync(milliDelay: 1000);
  }

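  /// Apply a user-provided [order]: drop unknown and duplicate ids, append
  /// any servers missing from [order], and persist the result if it changed.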
  void updateServerOrder(List<String> order) {
    final seen = <String>{};
    final newOrder = <String>[];

    for (final id in order) {
      if (!state.servers.containsKey(id)) {
        continue;
      }
      if (!seen.add(id)) {
        continue;
      }
      newOrder.add(id);
    }

    for (final id in state.servers.keys) {
      if (seen.add(id)) {
        newOrder.add(id);
      }
    }

    if (_isSameOrder(newOrder, state.serverOrder)) {
      return;
    }

    state = state.copyWith(serverOrder: newOrder);
    Stores.setting.serverOrder.put(newOrder);
    bakSync.sync(milliDelay: 1000);
  }

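  /// Element-wise equality check for two id lists.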
  bool _isSameOrder(List<String> a, List<String> b) {
    if (identical(a, b)) {
      return true;
    }
    if (a.length != b.length) {
      return false;
    }
    for (var i = 0; i < a.length; i++) {
      if (a[i] != b[i]) {
        return false;
      }
    }
    return true;
  }

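  /// Replace [old] with [newSpi] in the state and the store, handling id
  /// changes, and reconnect only when the change requires it.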
  Future<void> updateServer(Spi old, Spi newSpi) async {
    if (old != newSpi) {
      Stores.server.update(old, newSpi);

      final newServers = Map<String, Spi>.from(state.servers);
      final newOrder = List<String>.from(state.serverOrder);

      if (newSpi.id != old.id) {
        newServers[newSpi.id] = newSpi;
        newServers.remove(old.id);
        newOrder.update(old.id, newSpi.id);
        Stores.setting.serverOrder.put(newOrder);

        // Update SSH session ID when server ID changes
        final oldSessionId = 'ssh_${old.id}';
        TermSessionManager.remove(oldSessionId);
        // Session will be re-added when reconnecting if necessary
      } else {
        newServers[old.id] = newSpi;
        // Update SPI in the corresponding IndividualServerNotifier
        final serverNotifier = ref.read(serverProvider(old.id).notifier);
        serverNotifier.updateSpi(newSpi);
      }

      final newTags = _calculateTags(newServers);
      state = state.copyWith(servers: newServers, serverOrder: newOrder, tags: newTags);

      // Only reconnect if necessary
      if (newSpi.shouldReconnect(old)) {
        // Use [newSpi.id] instead of [old.id] because [old.id] may be changed
        TryLimiter.reset(newSpi.id);
        refresh(spi: newSpi);
      }
    }
    bakSync.sync(milliDelay: 1000);
  }
}