new: parse disk info via lsblk output. Fixes #709 (#760)

Author: lollipopkit🏳️‍⚧️
Date: 2025-05-17 00:45:38 +08:00
Committed by: GitHub
Parent: d88e97e699
Commit: 7e16d2f159
6 changed files with 685 additions and 184 deletions
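
As a quick orientation (not part of the commit), a minimal sketch of how the new lsblk-based parser can be exercised; Disk.parse, the field names, and the package import come from the diff and tests below, while the sample JSON is an illustrative fixture:

import 'package:server_box/data/model/server/disk.dart';

void main() {
  // Illustrative lsblk --json --bytes output for a single ext4 root partition.
  const raw = '''
{
  "blockdevices": [
    {
      "name": "sda1",
      "kname": "sda1",
      "path": "/dev/sda1",
      "fstype": "ext4",
      "mountpoint": "/",
      "fssize": "982141468672",
      "fsused": "552718364672",
      "fsavail": "379457622016",
      "fsuse%": "56%",
      "uuid": "example-uuid"
    }
  ]
}
''';

  final disks = Disk.parse(raw);
  for (final d in disks) {
    // Sizes are stored in KiB (bytes reported by lsblk divided by 1024).
    print('${d.path} mounted at ${d.mount}: ${d.usedPercent}% used');
  }
}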


@@ -30,8 +30,7 @@ enum ShellFunc {
/// Default is [scriptDirTmp]/[scriptFile], if this path is not accessible,
/// it will be changed to [scriptDirHome]/[scriptFile].
static String getScriptDir(String id) {
final customScriptDir =
ServerProvider.pick(id: id)?.value.spi.custom?.scriptDir;
final customScriptDir = ServerProvider.pick(id: id)?.value.spi.custom?.scriptDir;
if (customScriptDir != null) return customScriptDir;
return _scriptDirMap.putIfAbsent(id, () {
return scriptDirTmp;
@@ -164,9 +163,7 @@ exec 2>/dev/null
// Write each func
for (final func in values) {
final customCmdsStr = () {
if (func == ShellFunc.status &&
customCmds != null &&
customCmds.isNotEmpty) {
if (func == ShellFunc.status && customCmds != null && customCmds.isNotEmpty) {
return '$cmdDivider\n\t${customCmds.values.join(cmdDivider)}';
}
return '';
@@ -213,14 +210,13 @@ enum StatusCmdType {
cpu._('cat /proc/stat | grep cpu'),
uptime._('uptime'),
conn._('cat /proc/net/snmp'),
disk._('df'),
disk._('lsblk --bytes --json --output FSTYPE,PATH,NAME,KNAME,MOUNTPOINT,FSSIZE,FSUSED,FSAVAIL,FSUSE%,UUID'),
mem._("cat /proc/meminfo | grep -E 'Mem|Swap'"),
tempType._('cat /sys/class/thermal/thermal_zone*/type'),
tempVal._('cat /sys/class/thermal/thermal_zone*/temp'),
host._('cat /etc/hostname'),
diskio._('cat /proc/diskstats'),
battery._(
'for f in /sys/class/power_supply/*/uevent; do cat "\$f"; echo; done'),
battery._('for f in /sys/class/power_supply/*/uevent; do cat "\$f"; echo; done'),
nvidia._('nvidia-smi -q -x'),
sensors._('sensors'),
cpuBrand._('cat /proc/cpuinfo | grep "model name"'),
@@ -238,6 +234,7 @@ enum BSDStatusCmdType {
sys._('uname -or'),
cpu._('top -l 1 | grep "CPU usage"'),
uptime._('uptime'),
// Keep df -k for BSD systems as lsblk is not available on macOS/BSD
disk._('df -k'),
mem._('top -l 1 | grep PhysMem'),
//temp,
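
For context on units, an illustrative sketch (not part of the commit): lsblk is run with --bytes, so the JSON carries exact byte counts, and the parser in the next file stores them as KiB by dividing by 1024, matching the expectations in the updated tests:

void main() {
  // fssize as reported by lsblk --bytes for the root filesystem in the tests below.
  final bytes = BigInt.parse('982141468672');
  // The parser divides by 1024, so Disk.size/used/avail hold KiB values.
  final kib = bytes ~/ BigInt.from(1024);
  print(kib); // 959122528
}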


@@ -1,29 +1,208 @@
import 'dart:convert';
import 'package:equatable/equatable.dart';
import 'package:fl_lib/fl_lib.dart';
import 'package:server_box/data/model/server/time_seq.dart';
import 'package:server_box/data/res/misc.dart';
class Disk {
final String fs;
class Disk with EquatableMixin {
final String path;
final String? fsTyp;
final String mount;
final int usedPercent;
final BigInt used;
final BigInt size;
final BigInt avail;
/// Device name (e.g., sda1, nvme0n1p1)
final String? name;
/// Internal kernel device name
final String? kname;
/// Filesystem UUID
final String? uuid;
/// Child disks (partitions)
final List<Disk> children;
const Disk({
required this.fs,
required this.path,
this.fsTyp,
required this.mount,
required this.usedPercent,
required this.used,
required this.size,
required this.avail,
this.name,
this.kname,
this.uuid,
this.children = const [],
});
static List<Disk> parse(String raw) {
final list = <Disk>[];
raw = raw.trim();
try {
if (raw.startsWith('{')) {
// Parse JSON output from lsblk command
final Map<String, dynamic> jsonData = json.decode(raw);
final List<dynamic> blockdevices = jsonData['blockdevices'] ?? [];
for (final device in blockdevices) {
// Process each device
_processTopLevelDevice(device, list);
}
} else {
// Fallback to the old parsing method in case of non-JSON output
return _parseWithOldMethod(raw);
}
} catch (e) {
Loggers.app.warning('Failed to parse disk info: $e', e);
}
return list;
}
/// Process a top-level device and add all valid disks to the list
static void _processTopLevelDevice(Map<String, dynamic> device, List<Disk> list) {
final disk = _processDiskDevice(device);
if (disk != null) {
list.add(disk);
}
// For devices with children (like physical disks with partitions),
// also process each child individually to ensure BTRFS RAID disks are properly handled
final List<dynamic> childDevices = device['children'] ?? [];
for (final childDevice in childDevices) {
final String childPath = childDevice['path']?.toString() ?? '';
final String childFsType = childDevice['fstype']?.toString() ?? '';
// If this is a BTRFS partition, add it directly to ensure it's properly represented
if (childFsType == 'btrfs' && childPath.isNotEmpty) {
final childDisk = _processSingleDevice(childDevice);
if (childDisk != null) {
list.add(childDisk);
}
}
}
}
/// Process a single device without recursively processing its children
static Disk? _processSingleDevice(Map<String, dynamic> device) {
final fstype = device['fstype']?.toString();
final String mountpoint = device['mountpoint']?.toString() ?? '';
final String path = device['path']?.toString() ?? '';
if (path.isEmpty || (fstype == null && mountpoint.isEmpty)) {
return null;
}
if (!_shouldCalc(fstype ?? '', mountpoint)) {
return null;
}
final sizeStr = device['fssize']?.toString() ?? '0';
final size = (BigInt.tryParse(sizeStr) ?? BigInt.zero) ~/ BigInt.from(1024);
final usedStr = device['fsused']?.toString() ?? '0';
final used = (BigInt.tryParse(usedStr) ?? BigInt.zero) ~/ BigInt.from(1024);
final availStr = device['fsavail']?.toString() ?? '0';
final avail = (BigInt.tryParse(availStr) ?? BigInt.zero) ~/ BigInt.from(1024);
// Parse fsuse% which is usually in the format "45%"
String usePercentStr = device['fsuse%']?.toString() ?? '0';
usePercentStr = usePercentStr.replaceAll('%', '');
final usedPercent = int.tryParse(usePercentStr) ?? 0;
final name = device['name']?.toString();
final kname = device['kname']?.toString();
final uuid = device['uuid']?.toString();
return Disk(
path: path,
fsTyp: fstype,
mount: mountpoint,
usedPercent: usedPercent,
used: used,
size: size,
avail: avail,
name: name,
kname: kname,
uuid: uuid,
children: const [], // No children for direct device
);
}
static Disk? _processDiskDevice(Map<String, dynamic> device) {
final fstype = device['fstype']?.toString();
final String mountpoint = device['mountpoint']?.toString() ?? '';
// For parent devices that don't have a mountpoint themselves
final String path = device['path']?.toString() ?? '';
final String mount = mountpoint;
final List<Disk> childDisks = [];
// Process children devices recursively
final List<dynamic> childDevices = device['children'] ?? [];
for (final childDevice in childDevices) {
final childDisk = _processDiskDevice(childDevice);
if (childDisk != null) {
childDisks.add(childDisk);
}
}
// Handle common filesystem cases or parent devices with children
if ((fstype != null && _shouldCalc(fstype, mount)) ||
(childDisks.isNotEmpty && path.isNotEmpty)) {
final sizeStr = device['fssize']?.toString() ?? '0';
final size = (BigInt.tryParse(sizeStr) ?? BigInt.zero) ~/ BigInt.from(1024);
final usedStr = device['fsused']?.toString() ?? '0';
final used = (BigInt.tryParse(usedStr) ?? BigInt.zero) ~/ BigInt.from(1024);
final availStr = device['fsavail']?.toString() ?? '0';
final avail = (BigInt.tryParse(availStr) ?? BigInt.zero) ~/ BigInt.from(1024);
// Parse fsuse% which is usually in the format "45%"
String usePercentStr = device['fsuse%']?.toString() ?? '0';
usePercentStr = usePercentStr.replaceAll('%', '');
final usedPercent = int.tryParse(usePercentStr) ?? 0;
final name = device['name']?.toString();
final kname = device['kname']?.toString();
final uuid = device['uuid']?.toString();
return Disk(
path: path,
fsTyp: fstype,
mount: mount,
usedPercent: usedPercent,
used: used,
size: size,
avail: avail,
name: name,
kname: kname,
uuid: uuid,
children: childDisks,
);
} else if (childDisks.isNotEmpty) {
// If this is a parent device with no filesystem but has children,
// return the first valid child instead
return childDisks.first;
}
return null;
}
// Fallback parser for traditional df output, used when the raw data is not lsblk JSON
static List<Disk> _parseWithOldMethod(String raw) {
final list = <Disk>[];
final items = raw.split('\n');
items.removeAt(0);
if (items.isNotEmpty) items.removeAt(0);
var pathCache = '';
for (var item in items) {
if (item.isEmpty) {
@@ -43,12 +222,12 @@ class Disk {
final mount = vals[5];
if (!_shouldCalc(fs, mount)) continue;
list.add(Disk(
fs: fs,
path: fs,
mount: mount,
usedPercent: int.parse(vals[4].replaceFirst('%', '')),
used: BigInt.parse(vals[2]),
size: BigInt.parse(vals[1]),
avail: BigInt.parse(vals[3]),
used: BigInt.parse(vals[2]) ~/ BigInt.from(1024),
size: BigInt.parse(vals[1]) ~/ BigInt.from(1024),
avail: BigInt.parse(vals[3]) ~/ BigInt.from(1024),
));
} catch (e) {
continue;
@@ -58,9 +237,8 @@ class Disk {
}
@override
String toString() {
return 'Disk{dev: $fs, mount: $mount, usedPercent: $usedPercent, used: $used, size: $size, avail: $avail}';
}
List<Object?> get props =>
[path, name, kname, fsTyp, mount, usedPercent, used, size, avail, uuid, children];
}
class DiskIO extends TimeSeq<List<DiskIOPiece>> {
@@ -72,9 +250,16 @@ class DiskIO extends TimeSeq<List<DiskIOPiece>> {
}
(double?, double?) _getSpeed(String dev) {
if (dev.startsWith('/dev/')) dev = dev.substring(5);
final old = pre.firstWhereOrNull((e) => e.dev == dev);
final new_ = now.firstWhereOrNull((e) => e.dev == dev);
// Extract the device name from path if needed
String searchDev = dev;
if (dev.startsWith('/dev/')) {
searchDev = dev.substring(5);
}
// Try to find by exact device name first
final old = pre.firstWhereOrNull((e) => e.dev == searchDev);
final new_ = now.firstWhereOrNull((e) => e.dev == searchDev);
if (old == null || new_ == null) return (null, null);
final sectorsRead = new_.sectorsRead - old.sectorsRead;
final sectorsWrite = new_.sectorsWrite - old.sectorsWrite;
@@ -111,6 +296,7 @@ class DiskIO extends TimeSeq<List<DiskIOPiece>> {
read += read_ ?? 0;
write += write_ ?? 0;
}
final readStr = '${read.bytes2Str}/s';
final writeStr = '${write.bytes2Str}/s';
return (readStr, writeStr);
@@ -168,7 +354,11 @@ class DiskUsage {
required this.size,
});
double get usedPercent => used / size * 100;
double get usedPercent {
// Avoid division by zero
if (size == BigInt.zero) return 0;
return used / size * 100;
}
/// Find all devs, add their used and size
static DiskUsage parse(List<Disk> disks) {
@@ -176,9 +366,12 @@ class DiskUsage {
var used = BigInt.zero;
var size = BigInt.zero;
for (var disk in disks) {
if (!_shouldCalc(disk.fs, disk.mount)) continue;
if (devs.contains(disk.fs)) continue;
devs.add(disk.fs);
if (!_shouldCalc(disk.path, disk.mount)) continue;
// Use a combination of path and kernel name to uniquely identify disks
// This helps distinguish between multiple physical disks in BTRFS RAID setups
final uniqueId = '${disk.path}:${disk.kname ?? "unknown"}';
if (devs.contains(uniqueId)) continue;
devs.add(uniqueId);
used += disk.used;
size += disk.size;
}
@@ -187,12 +380,24 @@ class DiskUsage {
}
bool _shouldCalc(String fs, String mount) {
// Skip swap partitions
// if (mount == '[SWAP]') return false;
// Include standard filesystems
if (fs.startsWith('/dev')) return true;
// Some NAS may have mounted path like this `//192.168.1.2/`
if (fs.startsWith('//')) return true;
if (mount.startsWith('/mnt')) return true;
// if (fs.startsWith('shm') ||
// fs.startsWith('overlay') ||
// fs.startsWith('tmpfs')) return false;
// Include common filesystem types
// final commonFsTypes = ['ext2', 'ext3', 'ext4', 'xfs', 'btrfs', 'zfs', 'ntfs', 'fat', 'vfat'];
// if (commonFsTypes.any((type) => fs.toLowerCase() == type)) return true;
// Skip special filesystems
// if (fs == 'LVM2_member' || fs == 'crypto_LUKS') return false;
if (fs.startsWith('shm') || fs.startsWith('overlay') || fs.startsWith('tmpfs')) {
return false;
}
return true;
}
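
To illustrate the new dedup rule (a hedged sketch: Disk, DiskUsage, and their fields are taken from the diff above, the sample values are made up), DiskUsage.parse keys each disk by path plus kernel name, so two BTRFS RAID members that share a filesystem UUID are still both counted:

import 'package:server_box/data/model/server/disk.dart';

void main() {
  // Two partitions of the same BTRFS RAID1 volume: same UUID, different paths and knames.
  final a = Disk(
    path: '/dev/nvme1n1p1',
    kname: 'nvme1n1p1',
    mount: '/mnt/raid',
    usedPercent: 20,
    used: BigInt.from(100),
    size: BigInt.from(500),
    avail: BigInt.from(400),
    uuid: 'btrfs-raid-uuid-1234-5678',
  );
  final b = Disk(
    path: '/dev/nvme2n1p1',
    kname: 'nvme2n1p1',
    mount: '/mnt/raid',
    usedPercent: 20,
    used: BigInt.from(100),
    size: BigInt.from(500),
    avail: BigInt.from(400),
    uuid: 'btrfs-raid-uuid-1234-5678',
  );
  final usage = DiskUsage.parse([a, b]);
  print(usage.size); // 1000: both RAID members contribute, despite the shared UUID
}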


@@ -42,7 +42,7 @@ abstract final class InitStatus {
),
disk: [
Disk(
fs: '/',
path: '/',
mount: '/',
usedPercent: 0,
used: BigInt.zero,


@@ -172,8 +172,8 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
Widget _buildAbout(Server si) {
final ss = si.status;
return CardX(
child: ExpandTile(
return ExpandTile(
key: ValueKey(ss.more.hashCode), // Use hashCode to avoid perf issue
leading: const Icon(MingCute.information_fill, size: 20),
initiallyExpanded: _getInitExpand(ss.more.entries.length),
title: Text(libL10n.about),
@@ -203,8 +203,7 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
),
)
.toList(),
),
);
).cardx;
}
Widget _buildCPUView(Server si) {
@@ -247,8 +246,7 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
).paddingOnly(top: 13));
}
return CardX(
child: ExpandTile(
return ExpandTile(
title: Align(
alignment: Alignment.centerLeft,
child: _buildAnimatedText(
@@ -264,8 +262,7 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
children: details,
),
children: children,
),
);
).cardx;
}
Widget _buildCpuModelItem(MapEntry<String, int> e) {
@@ -396,8 +393,7 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
],
);
return CardX(
child: Padding(
return Padding(
padding: UIs.roundRectCardPadding,
child: Column(
crossAxisAlignment: CrossAxisAlignment.center,
@@ -420,8 +416,7 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
_buildProgress(used)
],
),
),
);
).cardx;
}
Widget _buildSwapView(Server si) {
@@ -441,8 +436,7 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
],
);
return CardX(
child: Padding(
return Padding(
padding: UIs.roundRectCardPadding,
child: Column(
crossAxisAlignment: CrossAxisAlignment.center,
@@ -459,22 +453,19 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
_buildProgress(used)
],
),
),
);
).cardx;
}
Widget _buildGpuView(Server si) {
final ss = si.status;
if (ss.nvidia == null || ss.nvidia?.isEmpty == true) return UIs.placeholder;
final children = ss.nvidia?.map((e) => _buildGpuItem(e)).toList() ?? [];
return CardX(
child: ExpandTile(
return ExpandTile(
title: const Text('GPU'),
leading: const Icon(Icons.memory, size: 17),
initiallyExpanded: _getInitExpand(children.length, 3),
children: children,
),
);
).cardx;
}
Widget _buildGpuItem(NvidiaSmiItem item) {
@@ -529,20 +520,44 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
Widget _buildDiskView(Server si) {
final ss = si.status;
final children = List.generate(ss.disk.length, (idx) => _buildDiskItem(ss.disk[idx], ss));
return CardX(
child: ExpandTile(
final children = <Widget>[];
// Create widgets for each top-level disk
for (int idx = 0; idx < ss.disk.length; idx++) {
final disk = ss.disk[idx];
children.add(_buildDiskItemWithHierarchy(disk, ss, 0));
}
if (children.isEmpty) return UIs.placeholder;
return ExpandTile(
title: Text(l10n.disk),
childrenPadding: const EdgeInsets.only(bottom: 7),
leading: Icon(ServerDetailCards.disk.icon, size: 17),
initiallyExpanded: _getInitExpand(children.length),
children: children,
),
);
).cardx;
}
Widget _buildDiskItem(Disk disk, ServerStatus ss) {
final (read, write) = ss.diskIO.getSpeed(disk.fs);
Widget _buildDiskItemWithHierarchy(Disk disk, ServerStatus ss, int depth) {
// Create a list to hold this disk and its children
final items = <Widget>[];
// Add the current disk
items.add(_buildDiskItem(disk, ss, depth));
// Recursively add child disks with increased indentation
if (disk.children.isNotEmpty) {
for (final childDisk in disk.children) {
items.add(_buildDiskItemWithHierarchy(childDisk, ss, depth + 1));
}
}
return Column(children: items);
}
Widget _buildDiskItem(Disk disk, ServerStatus ss, int depth) {
final (read, write) = ss.diskIO.getSpeed(disk.path);
final text = () {
final use = '${l10n.used} ${disk.used.kb2Str} / ${disk.size.kb2Str}';
if (read == null || write == null) return use;
@@ -550,17 +565,23 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
}();
return Padding(
padding: const EdgeInsets.symmetric(horizontal: 17, vertical: 5),
padding: EdgeInsets.only(
left: 17.0 + (depth * 15.0), // Indent based on depth
right: 17.0,
top: 5.0,
bottom: 5.0,
),
child: Row(
mainAxisAlignment: MainAxisAlignment.spaceBetween,
crossAxisAlignment: CrossAxisAlignment.center,
children: [
Column(
Expanded(
child: Column(
mainAxisSize: MainAxisSize.min,
crossAxisAlignment: CrossAxisAlignment.start,
children: [
Text(
disk.fs,
disk.mount.isEmpty ? disk.path : '${disk.path} (${disk.mount})',
style: UIs.text12,
textScaler: _textFactor,
),
@@ -571,6 +592,8 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
)
],
),
),
if (disk.size > BigInt.zero)
SizedBox(
height: 41,
width: 41,
@@ -597,6 +620,8 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
final ns = ss.netSpeed;
final children = <Widget>[];
final devices = ns.devices;
if (devices.isEmpty) return UIs.placeholder;
devices.sort(_netSortType.value.getSortFunc(ns));
children.addAll(devices.map((e) => _buildNetSpeedItem(ns, e)));
@@ -770,21 +795,20 @@ class _ServerDetailPageState extends State<ServerDetailPage> with SingleTickerPr
);
}
final itemW = Expanded(
child: Column(
final itemW = Column(
crossAxisAlignment: CrossAxisAlignment.start,
mainAxisSize: MainAxisSize.min,
children: [
Row(
children: [
Text(si.device, style: UIs.text15Bold),
Text(si.device, style: UIs.text15),
UIs.width7,
Text('(${si.adapter.raw})', style: UIs.text13Grey),
],
),
Text(si.summary ?? '', style: UIs.text13Grey),
],
));
).expanded();
return InkWell(
onTap: () => _onTapSensorItem(si),
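
A small note on the hierarchy rendering added above (sketch only; the constants are copied from the padding logic in the diff): each nesting level of a child partition adds 15 logical pixels of left indent on top of the 17 px base padding:

// Mirrors EdgeInsets.only(left: 17.0 + (depth * 15.0)) from _buildDiskItem above.
double diskItemLeftPadding(int depth) => 17.0 + depth * 15.0;

void main() {
  print(diskItemLeftPadding(0)); // 17.0, top-level device, e.g. /dev/nvme0n1
  print(diskItemLeftPadding(1)); // 32.0, its partitions
}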

test/btrfs_test.dart (new file, 92 lines added)

@@ -0,0 +1,92 @@
// ignore_for_file: avoid_print
import 'package:flutter_test/flutter_test.dart';
import 'package:server_box/data/model/server/disk.dart';
void main() {
group('BTRFS RAID1 disk parsing', () {
test('correctly handles BTRFS RAID1 with same UUID', () {
final disks = Disk.parse(_btrfsRaidJsonOutput);
expect(disks, isNotEmpty);
expect(disks.length, 4); // Should have 2 parent disks + 2 BTRFS partitions
// We should get two distinct disks with the same UUID but different paths
final nvme1Disk = disks.firstWhere((disk) => disk.path == '/dev/nvme1n1p1');
final nvme2Disk = disks.firstWhere((disk) => disk.path == '/dev/nvme2n1p1');
// Both should exist
expect(nvme1Disk, isNotNull);
expect(nvme2Disk, isNotNull);
// They should have the same UUID (since they're part of the same BTRFS volume)
expect(nvme1Disk.uuid, nvme2Disk.uuid);
// But they should be treated as distinct disks
expect(identical(nvme1Disk, nvme2Disk), isFalse);
// Verify DiskUsage counts physical disks correctly
final usage = DiskUsage.parse(disks);
// With our unique path+kname identifier, both disks should be counted
expect(usage.size, nvme1Disk.size + nvme2Disk.size);
expect(usage.used, nvme1Disk.used + nvme2Disk.used);
});
});
}
// Simulated BTRFS RAID1 lsblk JSON output
const _btrfsRaidJsonOutput = '''
{
"blockdevices": [
{
"name": "nvme1n1",
"kname": "nvme1n1",
"path": "/dev/nvme1n1",
"fstype": null,
"mountpoint": null,
"fssize": null,
"fsused": null,
"fsavail": null,
"fsuse%": null,
"children": [
{
"name": "nvme1n1p1",
"kname": "nvme1n1p1",
"path": "/dev/nvme1n1p1",
"fstype": "btrfs",
"mountpoint": "/mnt/raid",
"fssize": "500000000000",
"fsused": "100000000000",
"fsavail": "400000000000",
"fsuse%": "20%",
"uuid": "btrfs-raid-uuid-1234-5678"
}
]
},
{
"name": "nvme2n1",
"kname": "nvme2n1",
"path": "/dev/nvme2n1",
"fstype": null,
"mountpoint": null,
"fssize": null,
"fsused": null,
"fsavail": null,
"fsuse%": null,
"children": [
{
"name": "nvme2n1p1",
"kname": "nvme2n1p1",
"path": "/dev/nvme2n1p1",
"fstype": "btrfs",
"mountpoint": "/mnt/raid",
"fssize": "500000000000",
"fsused": "100000000000",
"fsavail": "400000000000",
"fsuse%": "20%",
"uuid": "btrfs-raid-uuid-1234-5678"
}
]
}
]
}
''';


@@ -4,16 +4,199 @@ import 'package:flutter_test/flutter_test.dart';
import 'package:server_box/data/model/server/disk.dart';
void main() {
test('parse disk', () {
group('Disk parsing', () {
test('parse traditional df output', () {
for (final raw in _raws) {
print('---' * 10);
final disks = Disk.parse(raw);
print(disks.join('\n'));
print('\n');
expect(disks, isNotEmpty);
}
});
test('parse lsblk JSON output', () {
final disks = Disk.parse(_jsonLsblkOutput);
expect(disks, isNotEmpty);
expect(disks.length, 6); // LVM2_member, ext4 root, swap, vfat efi, ext2 boot and crypto_LUKS all pass the filter
// Verify root filesystem
final rootFs = disks.firstWhere((disk) => disk.mount == '/');
expect(rootFs.fsTyp, 'ext4');
expect(rootFs.size, BigInt.parse('982141468672') ~/ BigInt.from(1024));
expect(rootFs.used, BigInt.parse('552718364672') ~/ BigInt.from(1024));
expect(rootFs.avail, BigInt.parse('379457622016') ~/ BigInt.from(1024));
expect(rootFs.usedPercent, 56);
// Verify boot/efi filesystem
final efiFs = disks.firstWhere((disk) => disk.mount == '/boot/efi');
expect(efiFs.fsTyp, 'vfat');
expect(efiFs.size, BigInt.parse('535805952') ~/ BigInt.from(1024));
expect(efiFs.usedPercent, 1);
// Verify boot filesystem
final bootFs = disks.firstWhere((disk) => disk.mount == '/boot');
expect(bootFs.fsTyp, 'ext2');
expect(bootFs.usedPercent, 34);
});
test('parse nested lsblk JSON output with parent/child relationships', () {
final disks = Disk.parse(_nestedJsonLsblkOutput);
expect(disks, isNotEmpty);
// Check parent device with children
final parentDisk = disks.firstWhere((disk) => disk.path == '/dev/nvme0n1');
expect(parentDisk.children, isNotEmpty);
expect(parentDisk.children.length, 3);
// Check one of the children
final rootPartition = parentDisk.children.firstWhere((disk) => disk.mount == '/');
expect(rootPartition.fsTyp, 'ext4');
expect(rootPartition.path, '/dev/nvme0n1p2');
expect(rootPartition.usedPercent, 45);
// Verify we have a child partition with UUID
final bootPartition = parentDisk.children.firstWhere((disk) => disk.mount == '/boot');
expect(bootPartition.uuid, '12345678-abcd-1234-abcd-1234567890ab');
});
test('DiskUsage handles zero size correctly', () {
final usage = DiskUsage(used: BigInt.from(1000), size: BigInt.zero);
expect(usage.usedPercent, 0); // Should return 0 instead of throwing
});
test('DiskUsage handles null kname', () {
final disks = [
Disk(
path: '/dev/sda1',
mount: '/mnt',
usedPercent: 50,
used: BigInt.from(5000),
size: BigInt.from(10000),
avail: BigInt.from(5000),
kname: null, // Explicitly null kname
),
];
final usage = DiskUsage.parse(disks);
expect(usage.used, BigInt.from(5000));
expect(usage.size, BigInt.from(10000));
expect(usage.usedPercent, 50);
// This would use the "unknown" fallback for kname
});
});
}
const _jsonLsblkOutput = '''
{
"blockdevices": [
{
"fstype": "LVM2_member",
"mountpoint": null,
"fssize": null,
"fsused": null,
"fsavail": null,
"fsuse%": null
},{
"fstype": "ext4",
"mountpoint": "/",
"fssize": 982141468672,
"fsused": 552718364672,
"fsavail": 379457622016,
"fsuse%": "56%"
},{
"fstype": "swap",
"mountpoint": "[SWAP]",
"fssize": null,
"fsused": null,
"fsavail": null,
"fsuse%": null
},{
"fstype": null,
"mountpoint": null,
"fssize": null,
"fsused": null,
"fsavail": null,
"fsuse%": null
},{
"fstype": "vfat",
"mountpoint": "/boot/efi",
"fssize": 535805952,
"fsused": 6127616,
"fsavail": 529678336,
"fsuse%": "1%"
},{
"fstype": "ext2",
"mountpoint": "/boot",
"fssize": 477210624,
"fsused": 161541120,
"fsavail": 290084864,
"fsuse%": "34%"
},{
"fstype": "crypto_LUKS",
"mountpoint": null,
"fssize": null,
"fsused": null,
"fsavail": null,
"fsuse%": null
}
]
}
''';
const _nestedJsonLsblkOutput = '''
{
"blockdevices": [
{
"name": "nvme0n1",
"kname": "nvme0n1",
"path": "/dev/nvme0n1",
"fstype": null,
"mountpoint": null,
"fssize": null,
"fsused": null,
"fsavail": null,
"fsuse%": null,
"children": [
{
"name": "nvme0n1p1",
"kname": "nvme0n1p1",
"path": "/dev/nvme0n1p1",
"fstype": "vfat",
"mountpoint": "/boot/efi",
"fssize": "512000000",
"fsused": "25600000",
"fsavail": "486400000",
"fsuse%": "5%",
"uuid": "98765432-dcba-4321-dcba-0987654321fe"
},
{
"name": "nvme0n1p2",
"kname": "nvme0n1p2",
"path": "/dev/nvme0n1p2",
"fstype": "ext4",
"mountpoint": "/",
"fssize": "500000000000",
"fsused": "225000000000",
"fsavail": "275000000000",
"fsuse%": "45%",
"uuid": "abcdef12-3456-7890-abcd-ef1234567890"
},
{
"name": "nvme0n1p3",
"kname": "nvme0n1p3",
"path": "/dev/nvme0n1p3",
"fstype": "ext4",
"mountpoint": "/boot",
"fssize": "1000000000",
"fsused": "500000000",
"fsavail": "500000000",
"fsuse%": "50%",
"uuid": "12345678-abcd-1234-abcd-1234567890ab"
}
]
}
]
}
''';
const _raws = [
// '''
// Filesystem 1K-blocks Used Available Use% Mounted on