Compare commits

...

14 commits

Author SHA1 Message Date
Albert Armea
f7ae46cda2
Age verification is a non-goal 2026-03-04 22:46:23 -05:00
Albert Armea
873560bdeb
Merge pull request #29 from aarmea/u/aarmea/fix/nvidia
Always pass `--unsupported-gpu` to Sway
2026-02-14 23:03:15 -05:00
Albert Armea
d3cd4a9015 Always pass --unsupported-gpu to Sway
On NVIDIA GPUs, this allows the launcher to come up at all. If it is not
provided, Sway will exit immediately after printing a warning to the
journal. This can be very hard to debug if you don't know what you're
looking for.

On other GPUs, including those that play nice with Linux like Intel and
AMD, the option is completely harmless.
2026-02-14 22:26:10 -05:00
Albert Armea
af10a1ca19
Merge pull request #28 from aarmea/u/aarmea/20/keyboard-gamepad-2
Add keyboard and gamepad launch support
2026-02-08 14:24:25 -05:00
Albert Armea
becb091612 Update packages for gamepad support 2026-02-08 14:20:35 -05:00
Albert Armea
fbb01127fa Fix rustfmt in CI
Just use --check, don't call git
2026-02-08 14:12:49 -05:00
Albert Armea
3861092d3d Run rustfmt 2026-02-08 14:01:49 -05:00
Albert Armea
c8675be472 Make CI and agents run rustfmt 2026-02-08 14:01:15 -05:00
Albert Armea
b840a7d694 Implement Ctrl+w and Alt+F4 to close the current activity
Mouse not required

This is done by sending StopCommand to shepherdd, not by natively
closing the window. Sway initiates this to avoid any issues with
shepherd-launcher not having focus (as it won't when an activity is
running).
2026-02-08 13:54:17 -05:00
Albert Armea
64ee1d7bc6 Implement controller exit
Use the "Home" button (or Xbox or PlayStation, depending on the controller)
2026-02-08 13:43:17 -05:00
Albert Armea
b12b42b13d Fix analog stick up/down
It's not inverted anymore
2026-02-08 12:45:12 -05:00
Albert Armea
2538403acd Fix up/down moves 2026-02-08 12:11:20 -05:00
Albert Armea
e5a4dbdce7 WIP: keyboard and controller support 2026-02-08 11:14:09 -05:00
Albert Armea
8bba628d98
Merge pull request #26 from aarmea/u/aarmea/9/connectivity-check-new
Internet connectivity check
2026-02-07 17:53:35 -05:00
49 changed files with 1523 additions and 685 deletions

View file

@ -120,6 +120,46 @@ jobs:
. "$HOME/.cargo/env"
cargo clippy --all-targets -- -D warnings
fmt:
name: Rustfmt
runs-on: ubuntu-latest
container:
image: ubuntu:25.10
steps:
- name: Install git
run: |
apt-get update
apt-get install -y git curl
- uses: actions/checkout@v4
- name: Install build dependencies
run: ./scripts/shepherd deps install build
- name: Add rustfmt component
run: |
. "$HOME/.cargo/env"
rustup component add rustfmt
- name: Add Rust to PATH
run: echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- name: Cache cargo registry and build
uses: actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-cargo-
- name: Check formatting
run: |
. "$HOME/.cargo/env"
cargo fmt --all -- --check
shellcheck:
name: ShellCheck
runs-on: ubuntu-latest

View file

@ -2,7 +2,9 @@ Agents: please use the existing documentation for setup.
<CONTRIBUTING.md> describes environment setup and build, test, and lint, including helper scripts and exact commands.
Please ensure that your changes build and pass tests and lint. If you changed the example configuration at <config.example.toml>, make sure that it passes config validation.
Please ensure that your changes build and pass tests and lint, and run `cargo fmt --all` to match your changes to the rest of the code.
If you changed the example configuration at <config.example.toml>, make sure that it passes config validation.
Each of the Rust crates in <crates> contains a README.md that describes each at a high level.

174
Cargo.lock generated
View file

@ -315,6 +315,12 @@ version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "645cbb3a84e60b7531617d5ae4e57f7e27308f6445f5abf653209ea76dec8dff"
[[package]]
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "futures-channel"
version = "0.3.31"
@ -459,6 +465,40 @@ dependencies = [
"wasip2",
]
[[package]]
name = "gilrs"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fa85c2e35dc565c90511917897ea4eae16b77f2773d5223536f7b602536d462"
dependencies = [
"fnv",
"gilrs-core",
"log",
"uuid",
"vec_map",
]
[[package]]
name = "gilrs-core"
version = "0.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d23f2cc5144060a7f8d9e02d3fce5d06705376568256a509cdbc3c24d47e4f04"
dependencies = [
"inotify",
"js-sys",
"libc",
"libudev-sys",
"log",
"nix 0.30.1",
"objc2-core-foundation",
"objc2-io-kit",
"uuid",
"vec_map",
"wasm-bindgen",
"web-sys",
"windows",
]
[[package]]
name = "gio"
version = "0.20.12"
@ -762,6 +802,26 @@ dependencies = [
"hashbrown 0.16.1",
]
[[package]]
name = "inotify"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3"
dependencies = [
"bitflags",
"inotify-sys",
"libc",
]
[[package]]
name = "inotify-sys"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb"
dependencies = [
"libc",
]
[[package]]
name = "is_terminal_polyfill"
version = "1.70.2"
@ -823,6 +883,16 @@ dependencies = [
"vcpkg",
]
[[package]]
name = "libudev-sys"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c8469b4a23b962c1396b9b451dda50ef5b283e8dd309d69033475fa9b334324"
dependencies = [
"libc",
"pkg-config",
]
[[package]]
name = "linux-raw-sys"
version = "0.11.0"
@ -892,6 +962,18 @@ dependencies = [
"memoffset",
]
[[package]]
name = "nix"
version = "0.30.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6"
dependencies = [
"bitflags",
"cfg-if",
"cfg_aliases",
"libc",
]
[[package]]
name = "nu-ansi-term"
version = "0.50.3"
@ -910,6 +992,26 @@ dependencies = [
"autocfg",
]
[[package]]
name = "objc2-core-foundation"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a180dd8642fa45cdb7dd721cd4c11b1cadd4929ce112ebd8b9f5803cc79d536"
dependencies = [
"bitflags",
]
[[package]]
name = "objc2-io-kit"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33fafba39597d6dc1fb709123dfa8289d39406734be322956a69f0931c73bb15"
dependencies = [
"bitflags",
"libc",
"objc2-core-foundation",
]
[[package]]
name = "once_cell"
version = "1.21.3"
@ -1256,7 +1358,7 @@ version = "0.1.0"
dependencies = [
"async-trait",
"dirs",
"nix",
"nix 0.29.0",
"serde",
"shell-escape",
"shepherd-api",
@ -1291,7 +1393,7 @@ dependencies = [
name = "shepherd-ipc"
version = "0.1.0"
dependencies = [
"nix",
"nix 0.29.0",
"serde",
"serde_json",
"shepherd-api",
@ -1310,6 +1412,7 @@ dependencies = [
"chrono",
"clap",
"dirs",
"gilrs",
"gtk4",
"serde",
"serde_json",
@ -1720,6 +1823,12 @@ version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
[[package]]
name = "version-compare"
version = "0.2.1"
@ -1792,6 +1901,37 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "web-sys"
version = "0.3.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "windows"
version = "0.62.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "527fadee13e0c05939a6a05d5bd6eec6cd2e3dbd648b9f8e447c6518133d8580"
dependencies = [
"windows-collections",
"windows-core",
"windows-future",
"windows-numerics",
]
[[package]]
name = "windows-collections"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b2d95af1a8a14a3c7367e1ed4fc9c20e0a26e79551b1454d72583c97cc6610"
dependencies = [
"windows-core",
]
[[package]]
name = "windows-core"
version = "0.62.2"
@ -1805,6 +1945,17 @@ dependencies = [
"windows-strings",
]
[[package]]
name = "windows-future"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1d6f90251fe18a279739e78025bd6ddc52a7e22f921070ccdc67dde84c605cb"
dependencies = [
"windows-core",
"windows-link",
"windows-threading",
]
[[package]]
name = "windows-implement"
version = "0.60.2"
@ -1833,6 +1984,16 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
[[package]]
name = "windows-numerics"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e2e40844ac143cdb44aead537bbf727de9b044e107a0f1220392177d15b0f26"
dependencies = [
"windows-core",
"windows-link",
]
[[package]]
name = "windows-result"
version = "0.4.1"
@ -1935,6 +2096,15 @@ dependencies = [
"windows_x86_64_msvc 0.53.1",
]
[[package]]
name = "windows-threading"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3949bd5b99cafdf1c7ca86b43ca564028dfe27d66958f2470940f73d86d75b37"
dependencies = [
"windows-link",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.5"

View file

@ -84,9 +84,37 @@ If it can run on Linux in *any way, shape, or form*, it can be supervised by
## Non-goals
* Modifying or patching third-party applications
* Circumventing DRM or platform protections
* Replacing parental involvement with automation
1. Modifying or patching third-party applications
2. Circumventing DRM or platform protections
3. Replacing parental involvement with automation or third-party content moderation
4. Remotely monitoring users with telemetry
5. Collecting, storing, or reporting personally identifying information (PII)
### Regarding age verification
`shepherd-launcher` may be considered "operating system software" under the
[Digital Age Assurance Act][age-california] and similar legislation,
and therefore subject to an age verification requirement.
[age-california]: https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260AB1043
As legislated, such requirements are fundamentally incompatible with non-goals 3, 4, and 5.
`shepherd-launcher` will *never* collect telemetry or PII, and as such, it will never implement this type of age verification.
As a result, `shepherd-launcher` is not licensed for use in any region that requires OS-level age verification by law.
**If you reside in any such region, you may not download, install, or redistribute `shepherd-launcher`.**
This includes, but is not limited to:
* California
* Louisiana
* Texas
* Utah
[Many other states are considering similar legislation.](https://actonline.org/2025/01/14/the-abcs-of-age-verification-in-the-united-states/)
If you disagree with this assessment and you reside in an affected region, **please contact your representatives.**
## Installation

View file

@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize};
use shepherd_util::{ClientId, EntryId};
use std::time::Duration;
use crate::{ClientRole, StopMode, API_VERSION};
use crate::{API_VERSION, ClientRole, StopMode};
/// Request wrapper with metadata
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -128,7 +128,6 @@ pub enum Command {
GetHealth,
// Volume control commands
/// Get current volume status
GetVolume,
@ -142,7 +141,6 @@ pub enum Command {
SetMute { muted: bool },
// Admin commands
/// Extend the current session (admin only)
ExtendCurrent { by: Duration },

View file

@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize};
use shepherd_util::{EntryId, SessionId};
use std::time::Duration;
use crate::{ServiceStateSnapshot, SessionEndReason, WarningSeverity, API_VERSION};
use crate::{API_VERSION, ServiceStateSnapshot, SessionEndReason, WarningSeverity};
/// Event envelope
#[derive(Debug, Clone, Serialize, Deserialize)]
@ -51,9 +51,7 @@ pub enum EventPayload {
},
/// Session is expiring (termination initiated)
SessionExpiring {
session_id: SessionId,
},
SessionExpiring { session_id: SessionId },
/// Session has ended
SessionEnded {
@ -64,21 +62,13 @@ pub enum EventPayload {
},
/// Policy was reloaded
PolicyReloaded {
entry_count: usize,
},
PolicyReloaded { entry_count: usize },
/// Entry availability changed (for UI updates)
EntryAvailabilityChanged {
entry_id: EntryId,
enabled: bool,
},
EntryAvailabilityChanged { entry_id: EntryId, enabled: bool },
/// Volume status changed
VolumeChanged {
percent: u8,
muted: bool,
},
VolumeChanged { percent: u8, muted: bool },
/// Service is shutting down
Shutdown,
@ -107,7 +97,10 @@ mod tests {
let parsed: Event = serde_json::from_str(&json).unwrap();
assert_eq!(parsed.api_version, API_VERSION);
assert!(matches!(parsed.payload, EventPayload::SessionStarted { .. }));
assert!(matches!(
parsed.payload,
EventPayload::SessionStarted { .. }
));
}
#[test]

View file

@ -125,14 +125,9 @@ pub enum ReasonCode {
next_window_start: Option<DateTime<Local>>,
},
/// Daily quota exhausted
QuotaExhausted {
used: Duration,
quota: Duration,
},
QuotaExhausted { used: Duration, quota: Duration },
/// Cooldown period active
CooldownActive {
available_at: DateTime<Local>,
},
CooldownActive { available_at: DateTime<Local> },
/// Another session is active
SessionActive {
entry_id: EntryId,
@ -140,17 +135,11 @@ pub enum ReasonCode {
remaining: Option<Duration>,
},
/// Host doesn't support this entry kind
UnsupportedKind {
kind: EntryKindTag,
},
UnsupportedKind { kind: EntryKindTag },
/// Entry is explicitly disabled
Disabled {
reason: Option<String>,
},
Disabled { reason: Option<String> },
/// Internet connectivity is required but unavailable
InternetUnavailable {
check: Option<String>,
},
InternetUnavailable { check: Option<String> },
}
/// Warning severity level

View file

@ -29,7 +29,10 @@ fn main() -> ExitCode {
// Check file exists
if !config_path.exists() {
eprintln!("Error: Configuration file not found: {}", config_path.display());
eprintln!(
"Error: Configuration file not found: {}",
config_path.display()
);
return ExitCode::from(1);
}
@ -39,7 +42,10 @@ fn main() -> ExitCode {
println!("✓ Configuration is valid");
println!();
println!("Summary:");
println!(" Config version: {}", shepherd_config::CURRENT_CONFIG_VERSION);
println!(
" Config version: {}",
shepherd_config::CURRENT_CONFIG_VERSION
);
println!(" Entries: {}", policy.entries.len());
// Show entry summary

View file

@ -65,8 +65,9 @@ impl InternetCheckTarget {
let (host, port_opt) = parse_host_port(host_port)?;
let port = match scheme {
InternetCheckScheme::Tcp => port_opt
.ok_or_else(|| "tcp check requires explicit port".to_string())?,
InternetCheckScheme::Tcp => {
port_opt.ok_or_else(|| "tcp check requires explicit port".to_string())?
}
_ => port_opt.unwrap_or_else(|| scheme.default_port()),
};
@ -149,4 +150,3 @@ pub struct EntryInternetPolicy {
pub required: bool,
pub check: Option<InternetCheckTarget>,
}

View file

@ -6,15 +6,15 @@
//! - Time windows, limits, and warnings
//! - Validation with clear error messages
mod internet;
mod policy;
mod schema;
mod validation;
mod internet;
pub use internet::*;
pub use policy::*;
pub use schema::*;
pub use validation::*;
pub use internet::*;
use std::path::Path;
use thiserror::Error;

View file

@ -1,16 +1,19 @@
//! Validated policy structures
use crate::schema::{
RawConfig, RawEntry, RawEntryKind, RawInternetConfig, RawVolumeConfig, RawServiceConfig,
RawWarningThreshold,
};
use crate::internet::{
EntryInternetPolicy, InternetCheckTarget, InternetConfig, DEFAULT_INTERNET_CHECK_INTERVAL,
DEFAULT_INTERNET_CHECK_TIMEOUT,
DEFAULT_INTERNET_CHECK_INTERVAL, DEFAULT_INTERNET_CHECK_TIMEOUT, EntryInternetPolicy,
InternetCheckTarget, InternetConfig,
};
use crate::schema::{
RawConfig, RawEntry, RawEntryKind, RawInternetConfig, RawServiceConfig, RawVolumeConfig,
RawWarningThreshold,
};
use crate::validation::{parse_days, parse_time};
use shepherd_api::{EntryKind, WarningSeverity, WarningThreshold};
use shepherd_util::{DaysOfWeek, EntryId, TimeWindow, WallClock, default_data_dir, default_log_dir, socket_path_without_env};
use shepherd_util::{
DaysOfWeek, EntryId, TimeWindow, WallClock, default_data_dir, default_log_dir,
socket_path_without_env,
};
use std::path::PathBuf;
use std::time::Duration;
@ -94,24 +97,17 @@ pub struct ServiceConfig {
impl ServiceConfig {
fn from_raw(raw: RawServiceConfig) -> Self {
let log_dir = raw
.log_dir
.clone()
.unwrap_or_else(default_log_dir);
let log_dir = raw.log_dir.clone().unwrap_or_else(default_log_dir);
let child_log_dir = raw
.child_log_dir
.unwrap_or_else(|| log_dir.join("sessions"));
let internet = convert_internet_config(raw.internet.as_ref());
Self {
socket_path: raw
.socket_path
.unwrap_or_else(socket_path_without_env),
socket_path: raw.socket_path.unwrap_or_else(socket_path_without_env),
log_dir,
capture_child_output: raw.capture_child_output,
child_log_dir,
data_dir: raw
.data_dir
.unwrap_or_else(default_data_dir),
data_dir: raw.data_dir.unwrap_or_else(default_data_dir),
internet,
}
}
@ -126,7 +122,11 @@ impl Default for ServiceConfig {
log_dir,
data_dir: default_data_dir(),
capture_child_output: false,
internet: InternetConfig::new(None, DEFAULT_INTERNET_CHECK_INTERVAL, DEFAULT_INTERNET_CHECK_TIMEOUT),
internet: InternetConfig::new(
None,
DEFAULT_INTERNET_CHECK_INTERVAL,
DEFAULT_INTERNET_CHECK_TIMEOUT,
),
}
}
}
@ -212,10 +212,7 @@ impl AvailabilityPolicy {
}
/// Get remaining time in current window
pub fn remaining_in_window(
&self,
dt: &chrono::DateTime<chrono::Local>,
) -> Option<Duration> {
pub fn remaining_in_window(&self, dt: &chrono::DateTime<chrono::Local>) -> Option<Duration> {
if self.always {
return None; // No limit from windows
}
@ -269,8 +266,28 @@ impl VolumePolicy {
fn convert_entry_kind(raw: RawEntryKind) -> EntryKind {
match raw {
RawEntryKind::Process { command, args, env, cwd } => EntryKind::Process { command, args, env, cwd },
RawEntryKind::Snap { snap_name, command, args, env } => EntryKind::Snap { snap_name, command, args, env },
RawEntryKind::Process {
command,
args,
env,
cwd,
} => EntryKind::Process {
command,
args,
env,
cwd,
},
RawEntryKind::Snap {
snap_name,
command,
args,
env,
} => EntryKind::Snap {
snap_name,
command,
args,
env,
},
RawEntryKind::Steam { app_id, args, env } => EntryKind::Steam { app_id, args, env },
RawEntryKind::Flatpak { app_id, args, env } => EntryKind::Flatpak { app_id, args, env },
RawEntryKind::Vm { driver, args } => EntryKind::Vm { driver, args },
@ -347,7 +364,10 @@ fn seconds_to_duration_or_unlimited(secs: u64) -> Option<Duration> {
}
}
fn convert_limits(raw: crate::schema::RawLimits, default_max_run: Option<Duration>) -> LimitsPolicy {
fn convert_limits(
raw: crate::schema::RawLimits,
default_max_run: Option<Duration>,
) -> LimitsPolicy {
LimitsPolicy {
max_run: raw
.max_run_seconds

View file

@ -1,7 +1,7 @@
//! Configuration validation
use crate::schema::{RawConfig, RawDays, RawEntry, RawEntryKind, RawTimeWindow};
use crate::internet::InternetCheckTarget;
use crate::schema::{RawConfig, RawDays, RawEntry, RawEntryKind, RawTimeWindow};
use std::collections::HashSet;
use thiserror::Error;
@ -38,26 +38,29 @@ pub fn validate_config(config: &RawConfig) -> Vec<ValidationError> {
// Validate global internet check (if set)
if let Some(internet) = &config.service.internet
&& let Some(check) = &internet.check
&& let Err(e) = InternetCheckTarget::parse(check) {
errors.push(ValidationError::GlobalError(format!(
"Invalid internet check '{}': {}",
check, e
)));
}
&& let Err(e) = InternetCheckTarget::parse(check)
{
errors.push(ValidationError::GlobalError(format!(
"Invalid internet check '{}': {}",
check, e
)));
}
if let Some(internet) = &config.service.internet {
if let Some(interval) = internet.interval_seconds
&& interval == 0 {
errors.push(ValidationError::GlobalError(
"Internet check interval_seconds must be > 0".into(),
));
}
&& interval == 0
{
errors.push(ValidationError::GlobalError(
"Internet check interval_seconds must be > 0".into(),
));
}
if let Some(timeout) = internet.timeout_ms
&& timeout == 0 {
errors.push(ValidationError::GlobalError(
"Internet check timeout_ms must be > 0".into(),
));
}
&& timeout == 0
{
errors.push(ValidationError::GlobalError(
"Internet check timeout_ms must be > 0".into(),
));
}
}
// Check for duplicate entry IDs
@ -156,28 +159,30 @@ fn validate_entry(entry: &RawEntry, config: &RawConfig) -> Vec<ValidationError>
// Only validate warnings if max_run is Some and not 0 (unlimited)
if let (Some(warnings), Some(max_run)) = (&entry.warnings, max_run)
&& max_run > 0 {
for warning in warnings {
if warning.seconds_before >= max_run {
errors.push(ValidationError::WarningExceedsMaxRun {
entry_id: entry.id.clone(),
seconds: warning.seconds_before,
max_run,
});
}
&& max_run > 0
{
for warning in warnings {
if warning.seconds_before >= max_run {
errors.push(ValidationError::WarningExceedsMaxRun {
entry_id: entry.id.clone(),
seconds: warning.seconds_before,
max_run,
});
}
}
// Note: warnings are ignored for unlimited entries (max_run = 0)
}
// Validate internet requirements
if let Some(internet) = &entry.internet {
if let Some(check) = &internet.check
&& let Err(e) = InternetCheckTarget::parse(check) {
errors.push(ValidationError::EntryError {
entry_id: entry.id.clone(),
message: format!("Invalid internet check '{}': {}", check, e),
});
}
&& let Err(e) = InternetCheckTarget::parse(check)
{
errors.push(ValidationError::EntryError {
entry_id: entry.id.clone(),
message: format!("Invalid internet check '{}': {}", check, e),
});
}
if internet.required {
let has_check = internet.check.is_some()
@ -236,12 +241,8 @@ pub fn parse_time(s: &str) -> Result<(u8, u8), String> {
return Err("Expected HH:MM format".into());
}
let hour: u8 = parts[0]
.parse()
.map_err(|_| "Invalid hour".to_string())?;
let minute: u8 = parts[1]
.parse()
.map_err(|_| "Invalid minute".to_string())?;
let hour: u8 = parts[0].parse().map_err(|_| "Invalid hour".to_string())?;
let minute: u8 = parts[1].parse().map_err(|_| "Invalid minute".to_string())?;
if hour >= 24 {
return Err("Hour must be 0-23".into());
@ -299,12 +300,23 @@ mod tests {
#[test]
fn test_parse_days() {
assert_eq!(parse_days(&RawDays::Preset("weekdays".into())).unwrap(), 0x1F);
assert_eq!(parse_days(&RawDays::Preset("weekends".into())).unwrap(), 0x60);
assert_eq!(
parse_days(&RawDays::Preset("weekdays".into())).unwrap(),
0x1F
);
assert_eq!(
parse_days(&RawDays::Preset("weekends".into())).unwrap(),
0x60
);
assert_eq!(parse_days(&RawDays::Preset("all".into())).unwrap(), 0x7F);
assert_eq!(
parse_days(&RawDays::List(vec!["mon".into(), "wed".into(), "fri".into()])).unwrap(),
parse_days(&RawDays::List(vec![
"mon".into(),
"wed".into(),
"fri".into()
]))
.unwrap(),
0b10101
);
}
@ -355,6 +367,10 @@ mod tests {
};
let errors = validate_config(&config);
assert!(errors.iter().any(|e| matches!(e, ValidationError::DuplicateEntryId(_))));
assert!(
errors
.iter()
.any(|e| matches!(e, ValidationError::DuplicateEntryId(_)))
);
}
}

View file

@ -2,10 +2,9 @@
use chrono::{DateTime, Local};
use shepherd_api::{
ServiceStateSnapshot, EntryView, ReasonCode, SessionEndReason,
WarningSeverity, API_VERSION,
API_VERSION, EntryView, ReasonCode, ServiceStateSnapshot, SessionEndReason, WarningSeverity,
};
use shepherd_config::{Entry, Policy, InternetCheckTarget};
use shepherd_config::{Entry, InternetCheckTarget, Policy};
use shepherd_host_api::{HostCapabilities, HostSessionHandle};
use shepherd_store::{AuditEvent, AuditEventType, Store};
use shepherd_util::{EntryId, MonotonicInstant, SessionId};
@ -44,11 +43,7 @@ pub struct CoreEngine {
impl CoreEngine {
/// Create a new core engine
pub fn new(
policy: Policy,
store: Arc<dyn Store>,
capabilities: HostCapabilities,
) -> Self {
pub fn new(policy: Policy, store: Arc<dyn Store>, capabilities: HostCapabilities) -> Self {
info!(
entry_count = policy.entries.len(),
"Core engine initialized"
@ -79,9 +74,11 @@ impl CoreEngine {
let entry_count = policy.entries.len();
self.policy = policy;
let _ = self.store.append_audit(AuditEvent::new(AuditEventType::PolicyLoaded {
entry_count,
}));
let _ = self
.store
.append_audit(AuditEvent::new(AuditEventType::PolicyLoaded {
entry_count,
}));
info!(entry_count, "Policy reloaded");
@ -137,11 +134,12 @@ impl CoreEngine {
// Check internet requirement
if entry.internet.required {
let check = entry
let check = entry.internet.check.as_ref().or(self
.policy
.service
.internet
.check
.as_ref()
.or(self.policy.service.internet.check.as_ref());
.as_ref());
let available = check
.map(|target| self.internet_available(target))
.unwrap_or(false);
@ -165,19 +163,23 @@ impl CoreEngine {
// Check cooldown
if let Ok(Some(until)) = self.store.get_cooldown_until(&entry.id)
&& until > now {
enabled = false;
reasons.push(ReasonCode::CooldownActive { available_at: until });
}
&& until > now
{
enabled = false;
reasons.push(ReasonCode::CooldownActive {
available_at: until,
});
}
// Check daily quota
if let Some(quota) = entry.limits.daily_quota {
let today = now.date_naive();
if let Ok(used) = self.store.get_usage(&entry.id, today)
&& used >= quota {
enabled = false;
reasons.push(ReasonCode::QuotaExhausted { used, quota });
}
&& used >= quota
{
enabled = false;
reasons.push(ReasonCode::QuotaExhausted { used, quota });
}
}
// Calculate max run if enabled (None when disabled, Some(None) flattened for unlimited)
@ -227,11 +229,7 @@ impl CoreEngine {
}
/// Request to launch an entry
pub fn request_launch(
&self,
entry_id: &EntryId,
now: DateTime<Local>,
) -> LaunchDecision {
pub fn request_launch(&self, entry_id: &EntryId, now: DateTime<Local>) -> LaunchDecision {
// Find entry
let entry = match self.policy.get_entry(entry_id) {
Some(e) => e,
@ -249,10 +247,12 @@ impl CoreEngine {
if !view.enabled {
// Log denial
let _ = self.store.append_audit(AuditEvent::new(AuditEventType::LaunchDenied {
entry_id: entry_id.clone(),
reasons: view.reasons.iter().map(|r| format!("{:?}", r)).collect(),
}));
let _ = self
.store
.append_audit(AuditEvent::new(AuditEventType::LaunchDenied {
entry_id: entry_id.clone(),
reasons: view.reasons.iter().map(|r| format!("{:?}", r)).collect(),
}));
return LaunchDecision::Denied {
reasons: view.reasons,
@ -302,12 +302,14 @@ impl CoreEngine {
};
// Log to audit
let _ = self.store.append_audit(AuditEvent::new(AuditEventType::SessionStarted {
session_id: session.plan.session_id.clone(),
entry_id: session.plan.entry_id.clone(),
label: session.plan.label.clone(),
deadline: session.deadline,
}));
let _ = self
.store
.append_audit(AuditEvent::new(AuditEventType::SessionStarted {
session_id: session.plan.session_id.clone(),
entry_id: session.plan.entry_id.clone(),
label: session.plan.label.clone(),
deadline: session.deadline,
}));
if let Some(deadline) = session.deadline {
info!(
@ -384,10 +386,12 @@ impl CoreEngine {
session.mark_warning_issued(threshold);
// Log to audit
let _ = self.store.append_audit(AuditEvent::new(AuditEventType::WarningIssued {
session_id: session.plan.session_id.clone(),
threshold_seconds: threshold,
}));
let _ = self
.store
.append_audit(AuditEvent::new(AuditEventType::WarningIssued {
session_id: session.plan.session_id.clone(),
threshold_seconds: threshold,
}));
info!(
session_id = %session.plan.session_id,
@ -443,22 +447,27 @@ impl CoreEngine {
// Update usage accounting
let today = now.date_naive();
let _ = self.store.add_usage(&session.plan.entry_id, today, duration);
let _ = self
.store
.add_usage(&session.plan.entry_id, today, duration);
// Set cooldown if configured
if let Some(entry) = self.policy.get_entry(&session.plan.entry_id)
&& let Some(cooldown) = entry.limits.cooldown {
let until = now + chrono::Duration::from_std(cooldown).unwrap();
let _ = self.store.set_cooldown_until(&session.plan.entry_id, until);
}
&& let Some(cooldown) = entry.limits.cooldown
{
let until = now + chrono::Duration::from_std(cooldown).unwrap();
let _ = self.store.set_cooldown_until(&session.plan.entry_id, until);
}
// Log to audit
let _ = self.store.append_audit(AuditEvent::new(AuditEventType::SessionEnded {
session_id: session.plan.session_id.clone(),
entry_id: session.plan.entry_id.clone(),
reason: reason.clone(),
duration,
}));
let _ = self
.store
.append_audit(AuditEvent::new(AuditEventType::SessionEnded {
session_id: session.plan.session_id.clone(),
entry_id: session.plan.entry_id.clone(),
reason: reason.clone(),
duration,
}));
info!(
session_id = %session.plan.session_id,
@ -492,22 +501,27 @@ impl CoreEngine {
// Update usage accounting
let today = now.date_naive();
let _ = self.store.add_usage(&session.plan.entry_id, today, duration);
let _ = self
.store
.add_usage(&session.plan.entry_id, today, duration);
// Set cooldown if configured
if let Some(entry) = self.policy.get_entry(&session.plan.entry_id)
&& let Some(cooldown) = entry.limits.cooldown {
let until = now + chrono::Duration::from_std(cooldown).unwrap();
let _ = self.store.set_cooldown_until(&session.plan.entry_id, until);
}
&& let Some(cooldown) = entry.limits.cooldown
{
let until = now + chrono::Duration::from_std(cooldown).unwrap();
let _ = self.store.set_cooldown_until(&session.plan.entry_id, until);
}
// Log to audit
let _ = self.store.append_audit(AuditEvent::new(AuditEventType::SessionEnded {
session_id: session.plan.session_id.clone(),
entry_id: session.plan.entry_id.clone(),
reason: reason.clone(),
duration,
}));
let _ = self
.store
.append_audit(AuditEvent::new(AuditEventType::SessionEnded {
session_id: session.plan.session_id.clone(),
entry_id: session.plan.entry_id.clone(),
reason: reason.clone(),
duration,
}));
info!(
session_id = %session.plan.session_id,
@ -525,9 +539,10 @@ impl CoreEngine {
/// Get current service state snapshot
pub fn get_state(&self) -> ServiceStateSnapshot {
let current_session = self.current_session.as_ref().map(|s| {
s.to_session_info(MonotonicInstant::now())
});
let current_session = self
.current_session
.as_ref()
.map(|s| s.to_session_info(MonotonicInstant::now()));
// Build entry views for the snapshot
let entries = self.list_entries(shepherd_util::now());
@ -577,11 +592,13 @@ impl CoreEngine {
session.deadline = Some(new_deadline);
// Log to audit
let _ = self.store.append_audit(AuditEvent::new(AuditEventType::SessionExtended {
session_id: session.plan.session_id.clone(),
extended_by: by,
new_deadline,
}));
let _ = self
.store
.append_audit(AuditEvent::new(AuditEventType::SessionExtended {
session_id: session.plan.session_id.clone(),
extended_by: by,
new_deadline,
}));
info!(
session_id = %session.plan.session_id,
@ -597,8 +614,8 @@ impl CoreEngine {
#[cfg(test)]
mod tests {
use super::*;
use shepherd_config::{AvailabilityPolicy, Entry, LimitsPolicy};
use shepherd_api::EntryKind;
use shepherd_config::{AvailabilityPolicy, Entry, LimitsPolicy};
use shepherd_store::SqliteStore;
use std::collections::HashMap;
@ -736,19 +753,34 @@ mod tests {
// No warnings initially (first tick may emit AvailabilitySetChanged)
let events = engine.tick(now_mono, now);
// Filter to just warning events for this test
let warning_events: Vec<_> = events.iter().filter(|e| matches!(e, CoreEvent::Warning { .. })).collect();
let warning_events: Vec<_> = events
.iter()
.filter(|e| matches!(e, CoreEvent::Warning { .. }))
.collect();
assert!(warning_events.is_empty());
// At 70 seconds (10 seconds past warning threshold), warning should fire
let later = now_mono + Duration::from_secs(70);
let events = engine.tick(later, now);
let warning_events: Vec<_> = events.iter().filter(|e| matches!(e, CoreEvent::Warning { .. })).collect();
let warning_events: Vec<_> = events
.iter()
.filter(|e| matches!(e, CoreEvent::Warning { .. }))
.collect();
assert_eq!(warning_events.len(), 1);
assert!(matches!(warning_events[0], CoreEvent::Warning { threshold_seconds: 60, .. }));
assert!(matches!(
warning_events[0],
CoreEvent::Warning {
threshold_seconds: 60,
..
}
));
// Warning shouldn't fire twice
let events = engine.tick(later, now);
let warning_events: Vec<_> = events.iter().filter(|e| matches!(e, CoreEvent::Warning { .. })).collect();
let warning_events: Vec<_> = events
.iter()
.filter(|e| matches!(e, CoreEvent::Warning { .. }))
.collect();
assert!(warning_events.is_empty());
}
@ -803,7 +835,10 @@ mod tests {
let later = now_mono + Duration::from_secs(61);
let events = engine.tick(later, now);
// Filter to just expiry events for this test
let expiry_events: Vec<_> = events.iter().filter(|e| matches!(e, CoreEvent::ExpireDue { .. })).collect();
let expiry_events: Vec<_> = events
.iter()
.filter(|e| matches!(e, CoreEvent::ExpireDue { .. }))
.collect();
assert_eq!(expiry_events.len(), 1);
assert!(matches!(expiry_events[0], CoreEvent::ExpireDue { .. }));
}

View file

@ -30,9 +30,7 @@ pub enum CoreEvent {
},
/// Session is expiring (termination initiated)
ExpireDue {
session_id: SessionId,
},
ExpireDue { session_id: SessionId },
/// Session has ended
SessionEnded {
@ -43,13 +41,8 @@ pub enum CoreEvent {
},
/// Entry availability changed
EntryAvailabilityChanged {
entry_id: EntryId,
enabled: bool,
},
EntryAvailabilityChanged { entry_id: EntryId, enabled: bool },
/// Policy was reloaded
PolicyReloaded {
entry_count: usize,
},
PolicyReloaded { entry_count: usize },
}

View file

@ -29,8 +29,7 @@ impl SessionPlan {
.iter()
.filter(|w| Duration::from_secs(w.seconds_before) < max_duration)
.map(|w| {
let trigger_after =
max_duration - Duration::from_secs(w.seconds_before);
let trigger_after = max_duration - Duration::from_secs(w.seconds_before);
(w.seconds_before, trigger_after)
})
.collect()
@ -67,11 +66,7 @@ pub struct ActiveSession {
impl ActiveSession {
/// Create a new session from an approved plan
pub fn new(
plan: SessionPlan,
now: DateTime<Local>,
now_mono: MonotonicInstant,
) -> Self {
pub fn new(plan: SessionPlan, now: DateTime<Local>, now_mono: MonotonicInstant) -> Self {
let (deadline, deadline_mono) = match plan.max_duration {
Some(max_dur) => {
let deadline = now + chrono::Duration::from_std(max_dur).unwrap();
@ -101,7 +96,8 @@ impl ActiveSession {
/// Get time remaining using monotonic time. None means unlimited.
pub fn time_remaining(&self, now_mono: MonotonicInstant) -> Option<Duration> {
self.deadline_mono.map(|deadline| deadline.saturating_duration_until(now_mono))
self.deadline_mono
.map(|deadline| deadline.saturating_duration_until(now_mono))
}
/// Check if session is expired (never true for unlimited sessions)
@ -220,7 +216,10 @@ mod tests {
assert_eq!(session.state, SessionState::Launching);
assert!(session.warnings_issued.is_empty());
assert_eq!(session.time_remaining(now_mono), Some(Duration::from_secs(300)));
assert_eq!(
session.time_remaining(now_mono),
Some(Duration::from_secs(300))
);
}
#[test]

View file

@ -18,7 +18,10 @@ pub struct HostSessionHandle {
impl HostSessionHandle {
pub fn new(session_id: SessionId, payload: HostHandlePayload) -> Self {
Self { session_id, payload }
Self {
session_id,
payload,
}
}
pub fn payload(&self) -> &HostHandlePayload {
@ -31,27 +34,16 @@ impl HostSessionHandle {
#[serde(tag = "platform", rename_all = "snake_case")]
pub enum HostHandlePayload {
/// Linux: process group ID
Linux {
pid: u32,
pgid: u32,
},
Linux { pid: u32, pgid: u32 },
/// Windows: job object handle (serialized as name/id)
Windows {
job_name: String,
process_id: u32,
},
Windows { job_name: String, process_id: u32 },
/// macOS: bundle or process identifier
MacOs {
pid: u32,
bundle_id: Option<String>,
},
MacOs { pid: u32, bundle_id: Option<String> },
/// Mock for testing
Mock {
id: u64,
},
Mock { id: u64 },
}
impl HostHandlePayload {
@ -117,7 +109,10 @@ mod tests {
fn handle_serialization() {
let handle = HostSessionHandle::new(
SessionId::new(),
HostHandlePayload::Linux { pid: 1234, pgid: 1234 },
HostHandlePayload::Linux {
pid: 1234,
pgid: 1234,
},
);
let json = serde_json::to_string(&handle).unwrap();

View file

@ -10,8 +10,8 @@ use std::time::Duration;
use tokio::sync::mpsc;
use crate::{
ExitStatus, HostAdapter, HostCapabilities, HostError, HostEvent, HostHandlePayload,
HostResult, HostSessionHandle, SpawnOptions, StopMode,
ExitStatus, HostAdapter, HostCapabilities, HostError, HostEvent, HostHandlePayload, HostResult,
HostSessionHandle, SpawnOptions, StopMode,
};
/// Mock session state for testing
@ -79,7 +79,9 @@ impl MockHost {
if let Some(session) = sessions.values().find(|s| &s.session_id == session_id) {
let handle = HostSessionHandle::new(
session.session_id.clone(),
HostHandlePayload::Mock { id: session.mock_id },
HostHandlePayload::Mock {
id: session.mock_id,
},
);
let _ = self.event_tx.send(HostEvent::Exited { handle, status });
}
@ -122,12 +124,13 @@ impl HostAdapter for MockHost {
exit_delay: *self.auto_exit_delay.lock().unwrap(),
};
self.sessions.lock().unwrap().insert(mock_id, session.clone());
self.sessions
.lock()
.unwrap()
.insert(mock_id, session.clone());
let handle = HostSessionHandle::new(
session_id.clone(),
HostHandlePayload::Mock { id: mock_id },
);
let handle =
HostSessionHandle::new(session_id.clone(), HostHandlePayload::Mock { id: mock_id });
// If auto-exit is configured, spawn a task to send exit event
if let Some(delay) = session.exit_delay {

View file

@ -82,9 +82,7 @@ pub enum HostEvent {
},
/// Window is ready (for UI notification)
WindowReady {
handle: HostSessionHandle,
},
WindowReady { handle: HostSessionHandle },
/// Spawn failed after handle was created
SpawnFailed {
@ -141,6 +139,8 @@ mod tests {
#[test]
fn stop_mode_default() {
let mode = StopMode::default();
assert!(matches!(mode, StopMode::Graceful { timeout } if timeout == Duration::from_secs(5)));
assert!(
matches!(mode, StopMode::Graceful { timeout } if timeout == Duration::from_secs(5))
);
}
}

View file

@ -3,8 +3,8 @@
use async_trait::async_trait;
use shepherd_api::EntryKind;
use shepherd_host_api::{
ExitStatus, HostAdapter, HostCapabilities, HostError, HostEvent, HostHandlePayload,
HostResult, HostSessionHandle, SpawnOptions, StopMode,
ExitStatus, HostAdapter, HostCapabilities, HostError, HostEvent, HostHandlePayload, HostResult,
HostSessionHandle, SpawnOptions, StopMode,
};
use shepherd_util::SessionId;
use std::collections::{HashMap, HashSet};
@ -14,8 +14,8 @@ use tokio::sync::mpsc;
use tracing::{info, warn};
use crate::process::{
find_steam_game_pids, init, kill_by_command, kill_flatpak_cgroup, kill_snap_cgroup,
kill_steam_game_processes, ManagedProcess,
ManagedProcess, find_steam_game_pids, init, kill_by_command, kill_flatpak_cgroup,
kill_snap_cgroup, kill_steam_game_processes,
};
/// Expand `~` at the beginning of a path to the user's home directory
@ -68,7 +68,7 @@ pub struct LinuxHost {
impl LinuxHost {
pub fn new() -> Self {
let (tx, rx) = mpsc::unbounded_channel();
// Initialize process management
init();
@ -93,14 +93,8 @@ impl LinuxHost {
tokio::time::sleep(Duration::from_millis(100)).await;
let mut exited = Vec::new();
let steam_pids: HashSet<u32> = {
steam_sessions
.lock()
.unwrap()
.keys()
.cloned()
.collect()
};
let steam_pids: HashSet<u32> =
{ steam_sessions.lock().unwrap().keys().cloned().collect() };
{
let mut procs = processes.lock().unwrap();
@ -140,14 +134,8 @@ impl LinuxHost {
}
// Track Steam sessions by Steam App ID instead of process exit
let steam_snapshot: Vec<SteamSession> = {
steam_sessions
.lock()
.unwrap()
.values()
.cloned()
.collect()
};
let steam_snapshot: Vec<SteamSession> =
{ steam_sessions.lock().unwrap().values().cloned().collect() };
let mut ended = Vec::new();
@ -206,24 +194,35 @@ impl HostAdapter for LinuxHost {
) -> HostResult<HostSessionHandle> {
// Extract argv, env, cwd, snap_name, flatpak_app_id, and steam_app_id based on entry kind
let (argv, env, cwd, snap_name, flatpak_app_id, steam_app_id) = match entry_kind {
EntryKind::Process { command, args, env, cwd } => {
EntryKind::Process {
command,
args,
env,
cwd,
} => {
let mut argv = vec![expand_tilde(command)];
argv.extend(expand_args(args));
let expanded_cwd = cwd.as_ref().map(|c| {
std::path::PathBuf::from(expand_tilde(&c.to_string_lossy()))
});
let expanded_cwd = cwd
.as_ref()
.map(|c| std::path::PathBuf::from(expand_tilde(&c.to_string_lossy())));
(argv, env.clone(), expanded_cwd, None, None, None)
}
EntryKind::Snap { snap_name, command, args, env } => {
EntryKind::Snap {
snap_name,
command,
args,
env,
} => {
// For snap apps, we need to use 'snap run <snap_name>' to launch them.
// The command (if specified) is passed as an argument after the snap name,
// followed by any additional args.
let mut argv = vec!["snap".to_string(), "run".to_string(), snap_name.clone()];
// If a custom command is specified (different from snap_name), add it
if let Some(cmd) = command
&& cmd != snap_name {
argv.push(cmd.clone());
}
&& cmd != snap_name
{
argv.push(cmd.clone());
}
argv.extend(expand_args(args));
(argv, env.clone(), None, Some(snap_name.clone()), None, None)
}
@ -257,13 +256,19 @@ impl HostAdapter for LinuxHost {
}
(argv, HashMap::new(), None, None, None, None)
}
EntryKind::Media { library_id, args: _ } => {
EntryKind::Media {
library_id,
args: _,
} => {
// For media, we'd typically launch a media player
// This is a placeholder - real implementation would integrate with a player
let argv = vec!["xdg-open".to_string(), expand_tilde(library_id)];
(argv, HashMap::new(), None, None, None, None)
}
EntryKind::Custom { type_name: _, payload: _ } => {
EntryKind::Custom {
type_name: _,
payload: _,
} => {
return Err(HostError::UnsupportedKind);
}
};
@ -282,7 +287,7 @@ impl HostAdapter for LinuxHost {
// Determine if this is a sandboxed app (snap or flatpak)
let sandboxed_app_name = snap_name.clone().or_else(|| flatpak_app_id.clone());
let proc = ManagedProcess::spawn(
&argv,
&env,
@ -293,7 +298,7 @@ impl HostAdapter for LinuxHost {
let pid = proc.pid;
let pgid = proc.pgid;
// Store the session info so we can use it for killing even after process exits
let session_info_entry = SessionInfo {
command_name: command_name.clone(),
@ -301,13 +306,13 @@ impl HostAdapter for LinuxHost {
flatpak_app_id: flatpak_app_id.clone(),
steam_app_id,
};
self.session_info.lock().unwrap().insert(session_id.clone(), session_info_entry);
self.session_info
.lock()
.unwrap()
.insert(session_id.clone(), session_info_entry);
info!(session_id = %session_id, command = %command_name, snap = ?snap_name, flatpak = ?flatpak_app_id, "Tracking session info");
let handle = HostSessionHandle::new(
session_id,
HostHandlePayload::Linux { pid, pgid },
);
let handle = HostSessionHandle::new(session_id, HostHandlePayload::Linux { pid, pgid });
self.processes.lock().unwrap().insert(pid, proc);
@ -337,10 +342,10 @@ impl HostAdapter for LinuxHost {
// Get the session's info for killing
let session_info = self.session_info.lock().unwrap().get(&session_id).cloned();
// Check if we have session info OR a tracked process
let has_process = self.processes.lock().unwrap().contains_key(&pid);
if session_info.is_none() && !has_process {
warn!(session_id = %session_id, pid = pid, "No session info or tracked process found");
return Err(HostError::SessionNotFound);
@ -354,11 +359,15 @@ impl HostAdapter for LinuxHost {
kill_snap_cgroup(snap, nix::sys::signal::Signal::SIGTERM);
info!(snap = %snap, "Sent SIGTERM via snap cgroup");
} else if let Some(app_id) = info.steam_app_id {
let _ = kill_steam_game_processes(app_id, nix::sys::signal::Signal::SIGTERM);
let _ =
kill_steam_game_processes(app_id, nix::sys::signal::Signal::SIGTERM);
if let Ok(mut map) = self.steam_sessions.lock() {
map.entry(pid).and_modify(|entry| entry.seen_game = true);
}
info!(steam_app_id = app_id, "Sent SIGTERM to Steam game processes");
info!(
steam_app_id = app_id,
"Sent SIGTERM to Steam game processes"
);
} else if let Some(ref app_id) = info.flatpak_app_id {
kill_flatpak_cgroup(app_id, nix::sys::signal::Signal::SIGTERM);
info!(flatpak = %app_id, "Sent SIGTERM via flatpak cgroup");
@ -370,7 +379,10 @@ impl HostAdapter for LinuxHost {
}
// Also send SIGTERM via process handle (skip for Steam sessions)
let is_steam = session_info.as_ref().and_then(|info| info.steam_app_id).is_some();
let is_steam = session_info
.as_ref()
.and_then(|info| info.steam_app_id)
.is_some();
if !is_steam {
let procs = self.processes.lock().unwrap();
if let Some(p) = procs.get(&pid) {
@ -388,13 +400,22 @@ impl HostAdapter for LinuxHost {
kill_snap_cgroup(snap, nix::sys::signal::Signal::SIGKILL);
info!(snap = %snap, "Sent SIGKILL via snap cgroup (timeout)");
} else if let Some(app_id) = info.steam_app_id {
let _ = kill_steam_game_processes(app_id, nix::sys::signal::Signal::SIGKILL);
info!(steam_app_id = app_id, "Sent SIGKILL to Steam game processes (timeout)");
let _ = kill_steam_game_processes(
app_id,
nix::sys::signal::Signal::SIGKILL,
);
info!(
steam_app_id = app_id,
"Sent SIGKILL to Steam game processes (timeout)"
);
} else if let Some(ref app_id) = info.flatpak_app_id {
kill_flatpak_cgroup(app_id, nix::sys::signal::Signal::SIGKILL);
info!(flatpak = %app_id, "Sent SIGKILL via flatpak cgroup (timeout)");
} else {
kill_by_command(&info.command_name, nix::sys::signal::Signal::SIGKILL);
kill_by_command(
&info.command_name,
nix::sys::signal::Signal::SIGKILL,
);
info!(command = %info.command_name, "Sent SIGKILL via command name (timeout)");
}
}
@ -418,7 +439,7 @@ impl HostAdapter for LinuxHost {
} else {
self.processes.lock().unwrap().contains_key(&pid)
};
if !still_running {
break;
}
@ -433,11 +454,15 @@ impl HostAdapter for LinuxHost {
kill_snap_cgroup(snap, nix::sys::signal::Signal::SIGKILL);
info!(snap = %snap, "Sent SIGKILL via snap cgroup");
} else if let Some(app_id) = info.steam_app_id {
let _ = kill_steam_game_processes(app_id, nix::sys::signal::Signal::SIGKILL);
let _ =
kill_steam_game_processes(app_id, nix::sys::signal::Signal::SIGKILL);
if let Ok(mut map) = self.steam_sessions.lock() {
map.entry(pid).and_modify(|entry| entry.seen_game = true);
}
info!(steam_app_id = app_id, "Sent SIGKILL to Steam game processes");
info!(
steam_app_id = app_id,
"Sent SIGKILL to Steam game processes"
);
} else if let Some(ref app_id) = info.flatpak_app_id {
kill_flatpak_cgroup(app_id, nix::sys::signal::Signal::SIGKILL);
info!(flatpak = %app_id, "Sent SIGKILL via flatpak cgroup");
@ -448,7 +473,10 @@ impl HostAdapter for LinuxHost {
}
// Also force kill via process handle (skip for Steam sessions)
let is_steam = session_info.as_ref().and_then(|info| info.steam_app_id).is_some();
let is_steam = session_info
.as_ref()
.and_then(|info| info.steam_app_id)
.is_some();
if !is_steam {
let procs = self.processes.lock().unwrap();
if let Some(p) = procs.get(&pid) {
@ -457,7 +485,7 @@ impl HostAdapter for LinuxHost {
}
}
}
// Clean up the session info tracking
self.session_info.lock().unwrap().remove(&session_id);

View file

@ -38,32 +38,32 @@ pub fn kill_snap_cgroup(snap_name: &str, _signal: Signal) -> bool {
"/sys/fs/cgroup/user.slice/user-{}.slice/user@{}.service/app.slice",
uid, uid
);
// Find all scope directories matching this snap
let pattern = format!("snap.{}.{}-", snap_name, snap_name);
let base = std::path::Path::new(&base_path);
if !base.exists() {
debug!(path = %base_path, "Snap cgroup base path doesn't exist");
return false;
}
let mut stopped_any = false;
if let Ok(entries) = std::fs::read_dir(base) {
for entry in entries.flatten() {
let name = entry.file_name();
let name_str = name.to_string_lossy();
if name_str.starts_with(&pattern) && name_str.ends_with(".scope") {
let scope_name = name_str.to_string();
// Always use SIGKILL for snap apps to prevent self-restart behavior
// Using systemctl kill --signal=KILL sends SIGKILL to all processes in scope
let result = Command::new("systemctl")
.args(["--user", "kill", "--signal=KILL", &scope_name])
.output();
match result {
Ok(output) => {
if output.status.success() {
@ -81,13 +81,16 @@ pub fn kill_snap_cgroup(snap_name: &str, _signal: Signal) -> bool {
}
}
}
if stopped_any {
info!(snap = snap_name, "Killed snap scope(s) via systemctl SIGKILL");
info!(
snap = snap_name,
"Killed snap scope(s) via systemctl SIGKILL"
);
} else {
debug!(snap = snap_name, "No snap scope found to kill");
}
stopped_any
}
@ -101,33 +104,33 @@ pub fn kill_flatpak_cgroup(app_id: &str, _signal: Signal) -> bool {
"/sys/fs/cgroup/user.slice/user-{}.slice/user@{}.service/app.slice",
uid, uid
);
// Flatpak uses a different naming pattern than snap
// The app_id dots are preserved: app-flatpak-org.example.App-<number>.scope
let pattern = format!("app-flatpak-{}-", app_id);
let base = std::path::Path::new(&base_path);
if !base.exists() {
debug!(path = %base_path, "Flatpak cgroup base path doesn't exist");
return false;
}
let mut stopped_any = false;
if let Ok(entries) = std::fs::read_dir(base) {
for entry in entries.flatten() {
let name = entry.file_name();
let name_str = name.to_string_lossy();
if name_str.starts_with(&pattern) && name_str.ends_with(".scope") {
let scope_name = name_str.to_string();
// Always use SIGKILL for flatpak apps to prevent self-restart behavior
// Using systemctl kill --signal=KILL sends SIGKILL to all processes in scope
let result = Command::new("systemctl")
.args(["--user", "kill", "--signal=KILL", &scope_name])
.output();
match result {
Ok(output) => {
if output.status.success() {
@ -145,13 +148,16 @@ pub fn kill_flatpak_cgroup(app_id: &str, _signal: Signal) -> bool {
}
}
}
if stopped_any {
info!(app_id = app_id, "Killed flatpak scope(s) via systemctl SIGKILL");
info!(
app_id = app_id,
"Killed flatpak scope(s) via systemctl SIGKILL"
);
} else {
debug!(app_id = app_id, "No flatpak scope found to kill");
}
stopped_any
}
@ -216,21 +222,28 @@ pub fn kill_by_command(command_name: &str, signal: Signal) -> bool {
Signal::SIGKILL => "KILL",
_ => "TERM",
};
// Use pkill to find and kill processes by command name
let result = Command::new("pkill")
.args([&format!("-{}", signal_name), "-f", command_name])
.output();
match result {
Ok(output) => {
// pkill returns 0 if processes were found and signaled
if output.status.success() {
info!(command = command_name, signal = signal_name, "Killed processes by command name");
info!(
command = command_name,
signal = signal_name,
"Killed processes by command name"
);
true
} else {
// No processes found is not an error
debug!(command = command_name, "No processes found matching command name");
debug!(
command = command_name,
"No processes found matching command name"
);
false
}
}
@ -243,10 +256,10 @@ pub fn kill_by_command(command_name: &str, signal: Signal) -> bool {
impl ManagedProcess {
/// Spawn a new process in its own process group
///
///
/// If `snap_name` is provided, the process is treated as a snap app and will use
/// systemd scope-based killing instead of signal-based killing.
///
///
/// If `log_path` is provided, stdout and stderr will be redirected to that file.
/// For snap apps, we use `script` to capture output from all child processes
/// via a pseudo-terminal, since snap child processes don't inherit file descriptors.
@ -272,15 +285,16 @@ impl ManagedProcess {
{
warn!(path = %parent.display(), error = %e, "Failed to create log directory");
}
// Build command: script -q -c "original command" logfile
// -q: quiet mode (no start/done messages)
// -c: command to run
let original_cmd = argv.iter()
let original_cmd = argv
.iter()
.map(|arg| shell_escape::escape(std::borrow::Cow::Borrowed(arg)))
.collect::<Vec<_>>()
.join(" ");
let script_argv = vec![
"script".to_string(),
"-q".to_string(),
@ -288,7 +302,7 @@ impl ManagedProcess {
original_cmd,
log_file.to_string_lossy().to_string(),
];
info!(log_path = %log_file.display(), "Using script to capture snap output via pty");
(script_argv, None) // script handles the log file itself
}
@ -303,7 +317,7 @@ impl ManagedProcess {
// Set environment
cmd.env_clear();
// Inherit essential environment variables
// These are needed for most Linux applications to work correctly
let inherit_vars = [
@ -369,7 +383,7 @@ impl ManagedProcess {
"DESKTOP_SESSION",
"GNOME_DESKTOP_SESSION_ID",
];
for var in inherit_vars {
if let Ok(val) = std::env::var(var) {
cmd.env(var, val);
@ -415,7 +429,7 @@ impl ManagedProcess {
{
warn!(path = %parent.display(), error = %e, "Failed to create log directory");
}
// Open log file for appending (create if doesn't exist)
match File::create(path) {
Ok(file) => {
@ -465,37 +479,41 @@ impl ManagedProcess {
// SAFETY: This is safe in the pre-exec context
unsafe {
cmd.pre_exec(|| {
nix::unistd::setsid().map_err(|e| {
std::io::Error::other(e.to_string())
})?;
nix::unistd::setsid().map_err(|e| std::io::Error::other(e.to_string()))?;
Ok(())
});
}
let child = cmd.spawn().map_err(|e| {
HostError::SpawnFailed(format!("Failed to spawn {}: {}", program, e))
})?;
let child = cmd
.spawn()
.map_err(|e| HostError::SpawnFailed(format!("Failed to spawn {}: {}", program, e)))?;
let pid = child.id();
let pgid = pid; // After setsid, pid == pgid
info!(pid = pid, pgid = pgid, program = %program, snap = ?snap_name, "Process spawned");
Ok(Self { child, pid, pgid, command_name, snap_name })
Ok(Self {
child,
pid,
pgid,
command_name,
snap_name,
})
}
/// Get all descendant PIDs of this process using /proc
fn get_descendant_pids(&self) -> Vec<i32> {
let mut descendants = Vec::new();
let mut to_check = vec![self.pid as i32];
while let Some(parent_pid) = to_check.pop() {
// Read /proc to find children of this PID
if let Ok(entries) = std::fs::read_dir("/proc") {
for entry in entries.flatten() {
let name = entry.file_name();
let name_str = name.to_string_lossy();
// Skip non-numeric entries (not PIDs)
if let Ok(pid) = name_str.parse::<i32>() {
// Read the stat file to get parent PID
@ -508,17 +526,18 @@ impl ManagedProcess {
let fields: Vec<&str> = after_comm.split_whitespace().collect();
if fields.len() >= 2
&& let Ok(ppid) = fields[1].parse::<i32>()
&& ppid == parent_pid {
descendants.push(pid);
to_check.push(pid);
}
&& ppid == parent_pid
{
descendants.push(pid);
to_check.push(pid);
}
}
}
}
}
}
}
descendants
}
@ -529,7 +548,7 @@ impl ManagedProcess {
if self.snap_name.is_none() {
kill_by_command(&self.command_name, Signal::SIGTERM);
}
// Also try to kill the process group
let pgid = Pid::from_raw(-(self.pgid as i32)); // Negative for process group
@ -544,7 +563,7 @@ impl ManagedProcess {
debug!(pgid = self.pgid, error = %e, "Failed to send SIGTERM to process group");
}
}
// Also kill all descendants (they may have escaped the process group)
let descendants = self.get_descendant_pids();
for pid in &descendants {
@ -553,7 +572,7 @@ impl ManagedProcess {
if !descendants.is_empty() {
debug!(descendants = ?descendants, "Sent SIGTERM to descendant processes");
}
Ok(())
}
@ -564,7 +583,7 @@ impl ManagedProcess {
if self.snap_name.is_none() {
kill_by_command(&self.command_name, Signal::SIGKILL);
}
// Also try to kill the process group
let pgid = Pid::from_raw(-(self.pgid as i32));
@ -579,7 +598,7 @@ impl ManagedProcess {
debug!(pgid = self.pgid, error = %e, "Failed to send SIGKILL to process group");
}
}
// Also kill all descendants (they may have escaped the process group)
let descendants = self.get_descendant_pids();
for pid in &descendants {
@ -588,7 +607,7 @@ impl ManagedProcess {
if !descendants.is_empty() {
debug!(descendants = ?descendants, "Sent SIGKILL to descendant processes");
}
Ok(())
}
@ -647,7 +666,7 @@ impl ManagedProcess {
Err(e) => Err(HostError::Internal(format!("Wait failed: {}", e))),
}
}
/// Clean up resources associated with this process
pub fn cleanup(&self) {
// Nothing to clean up for systemd scopes - systemd handles it

View file

@ -148,9 +148,10 @@ impl LinuxVolumeController {
// Output: "Volume: front-left: 65536 / 100% / -0.00 dB, front-right: ..."
if let Some(percent_str) = stdout.split('/').nth(1)
&& let Ok(percent) = percent_str.trim().trim_end_matches('%').parse::<u8>() {
status.percent = percent;
}
&& let Ok(percent) = percent_str.trim().trim_end_matches('%').parse::<u8>()
{
status.percent = percent;
}
}
// Check mute status
@ -185,9 +186,10 @@ impl LinuxVolumeController {
// Extract percentage: [100%]
if let Some(start) = line.find('[')
&& let Some(end) = line[start..].find('%')
&& let Ok(percent) = line[start + 1..start + end].parse::<u8>() {
status.percent = percent;
}
&& let Ok(percent) = line[start + 1..start + end].parse::<u8>()
{
status.percent = percent;
}
// Check mute status: [on] or [off]
status.muted = line.contains("[off]");
break;
@ -210,7 +212,11 @@ impl LinuxVolumeController {
/// Set volume via PulseAudio
fn set_volume_pulseaudio(percent: u8) -> VolumeResult<()> {
Command::new("pactl")
.args(["set-sink-volume", "@DEFAULT_SINK@", &format!("{}%", percent)])
.args([
"set-sink-volume",
"@DEFAULT_SINK@",
&format!("{}%", percent),
])
.status()
.map_err(|e| VolumeError::Backend(e.to_string()))?;
Ok(())
@ -323,7 +329,10 @@ impl VolumeController for LinuxVolumeController {
async fn volume_up(&self, step: u8) -> VolumeResult<()> {
let current = self.get_status().await?;
let new_volume = current.percent.saturating_add(step).min(self.capabilities.max_volume);
let new_volume = current
.percent
.saturating_add(step)
.min(self.capabilities.max_volume);
self.set_volume(new_volume).await
}

View file

@ -240,17 +240,17 @@ fn build_hud_content(state: SharedState) -> gtk4::Box {
// Handle slider value changes with debouncing
// Create a channel for volume requests - the worker will debounce them
let (volume_tx, volume_rx) = mpsc::channel::<u8>();
// Spawn a dedicated volume worker thread that debounces requests
std::thread::spawn(move || {
const DEBOUNCE_MS: u64 = 50; // Wait 50ms for more changes before sending
loop {
// Wait for first volume request
let Ok(mut latest_percent) = volume_rx.recv() else {
break; // Channel closed
};
// Drain any pending requests, keeping only the latest value
// Use a short timeout to debounce rapid changes
loop {
@ -267,24 +267,24 @@ fn build_hud_content(state: SharedState) -> gtk4::Box {
}
}
}
// Send only the final value
if let Err(e) = crate::volume::set_volume(latest_percent) {
tracing::error!("Failed to set volume: {}", e);
}
}
});
let slider_changing = std::rc::Rc::new(std::cell::Cell::new(false));
let slider_changing_clone = slider_changing.clone();
volume_slider.connect_change_value(move |slider, _, value| {
slider_changing_clone.set(true);
let percent = value.clamp(0.0, 100.0) as u8;
// Send to debounce worker (non-blocking)
let _ = volume_tx.send(percent);
// Allow the slider to update immediately in UI
slider.set_value(value);
glib::Propagation::Stop
@ -414,11 +414,11 @@ fn build_hud_content(state: SharedState) -> gtk4::Box {
let remaining = time_remaining_at_warning.saturating_sub(elapsed);
time_display_clone.set_remaining(Some(remaining));
// Use configuration-defined message if present, otherwise show time-based message
let warning_text = message.clone().unwrap_or_else(|| {
format!("Only {} seconds remaining!", remaining)
});
let warning_text = message
.clone()
.unwrap_or_else(|| format!("Only {} seconds remaining!", remaining));
warning_label_clone.set_text(&warning_text);
// Apply severity-based CSS classes
warning_box_clone.remove_css_class("warning-info");
warning_box_clone.remove_css_class("warning-warn");
@ -434,7 +434,7 @@ fn build_hud_content(state: SharedState) -> gtk4::Box {
warning_box_clone.add_css_class("warning-critical");
}
}
warning_box_clone.set_visible(true);
}
SessionState::Ending { reason, .. } => {
@ -457,19 +457,19 @@ fn build_hud_content(state: SharedState) -> gtk4::Box {
if let Some(volume) = state.volume_info() {
volume_button_clone.set_icon_name(volume.icon_name());
volume_label_clone.set_text(&format!("{}%", volume.percent));
// Only update slider if user is not actively dragging it
if !slider_changing_for_update.get() {
volume_slider_clone.set_value(volume.percent as f64);
}
// Reset the changing flag after a short delay
slider_changing_for_update.set(false);
// Disable slider when muted or when restrictions don't allow changes
let slider_enabled = !volume.muted && volume.restrictions.allow_change;
volume_slider_clone.set_sensitive(slider_enabled);
volume_button_clone.set_sensitive(volume.restrictions.allow_mute);
// Update slider range based on restrictions
let min = volume.restrictions.min_volume.unwrap_or(0) as f64;
let max = volume.restrictions.max_volume.unwrap_or(100) as f64;

View file

@ -35,16 +35,18 @@ impl BatteryStatus {
// Check for battery
if name_str.starts_with("BAT")
&& let Some((percent, charging)) = read_battery_info(&path) {
status.percent = Some(percent);
status.charging = charging;
}
&& let Some((percent, charging)) = read_battery_info(&path)
{
status.percent = Some(percent);
status.charging = charging;
}
// Check for AC adapter
if (name_str.starts_with("AC") || name_str.contains("ADP"))
&& let Some(online) = read_ac_status(&path) {
status.ac_connected = online;
}
&& let Some(online) = read_ac_status(&path)
{
status.ac_connected = online;
}
}
}

View file

@ -43,8 +43,7 @@ fn main() -> Result<()> {
// Initialize logging
tracing_subscriber::fmt()
.with_env_filter(
EnvFilter::try_from_default_env()
.unwrap_or_else(|_| EnvFilter::new(&args.log_level)),
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(&args.log_level)),
)
.init();

View file

@ -218,17 +218,18 @@ impl SharedState {
entry_name,
..
} = state
&& sid == session_id {
*state = SessionState::Warning {
session_id: session_id.clone(),
entry_id: entry_id.clone(),
entry_name: entry_name.clone(),
warning_issued_at: std::time::Instant::now(),
time_remaining_at_warning: time_remaining.as_secs(),
message: message.clone(),
severity: *severity,
};
}
&& sid == session_id
{
*state = SessionState::Warning {
session_id: session_id.clone(),
entry_id: entry_id.clone(),
entry_name: entry_name.clone(),
warning_issued_at: std::time::Instant::now(),
time_remaining_at_warning: time_remaining.as_secs(),
message: message.clone(),
severity: *severity,
};
}
});
}

View file

@ -60,9 +60,7 @@ pub fn toggle_mute() -> anyhow::Result<()> {
shepherd_api::ResponseResult::Ok(ResponsePayload::VolumeDenied { reason }) => {
Err(anyhow::anyhow!("Volume denied: {}", reason))
}
shepherd_api::ResponseResult::Err(e) => {
Err(anyhow::anyhow!("Error: {}", e.message))
}
shepherd_api::ResponseResult::Err(e) => Err(anyhow::anyhow!("Error: {}", e.message)),
_ => Err(anyhow::anyhow!("Unexpected response")),
}
})
@ -83,9 +81,7 @@ pub fn set_volume(percent: u8) -> anyhow::Result<()> {
shepherd_api::ResponseResult::Ok(ResponsePayload::VolumeDenied { reason }) => {
Err(anyhow::anyhow!("Volume denied: {}", reason))
}
shepherd_api::ResponseResult::Err(e) => {
Err(anyhow::anyhow!("Error: {}", e.message))
}
shepherd_api::ResponseResult::Err(e) => Err(anyhow::anyhow!("Error: {}", e.message)),
_ => Err(anyhow::anyhow!("Unexpected response")),
}
})

View file

@ -8,7 +8,7 @@ use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
use tokio::net::{UnixListener, UnixStream};
use tokio::sync::{broadcast, mpsc, Mutex, RwLock};
use tokio::sync::{Mutex, RwLock, broadcast, mpsc};
use tracing::{debug, error, info, warn};
use crate::{IpcError, IpcResult};
@ -75,10 +75,9 @@ impl IpcServer {
let listener = UnixListener::bind(&self.socket_path)?;
// Set socket permissions (readable/writable by owner and group)
if let Err(err) = std::fs::set_permissions(
&self.socket_path,
std::fs::Permissions::from_mode(0o660),
) {
if let Err(err) =
std::fs::set_permissions(&self.socket_path, std::fs::Permissions::from_mode(0o660))
{
if err.kind() == std::io::ErrorKind::PermissionDenied {
warn!(
path = %self.socket_path.display(),
@ -190,7 +189,8 @@ impl IpcServer {
match serde_json::from_str::<Request>(line) {
Ok(request) => {
// Check for subscribe command
if matches!(request.command, shepherd_api::Command::SubscribeEvents) {
if matches!(request.command, shepherd_api::Command::SubscribeEvents)
{
let mut clients = clients.write().await;
if let Some(handle) = clients.get_mut(&client_id_clone) {
handle.subscribed = true;
@ -342,13 +342,14 @@ mod tests {
let mut server = IpcServer::new(&socket_path);
if let Err(err) = server.start().await {
if let IpcError::Io(ref io_err) = err
&& io_err.kind() == std::io::ErrorKind::PermissionDenied {
eprintln!(
"Skipping IPC server start test due to permission error: {}",
io_err
);
return;
}
&& io_err.kind() == std::io::ErrorKind::PermissionDenied
{
eprintln!(
"Skipping IPC server start test due to permission error: {}",
io_err
);
return;
}
panic!("IPC server start failed: {err}");
}

View file

@ -24,6 +24,7 @@ tracing-subscriber = { workspace = true }
anyhow = { workspace = true }
chrono = { workspace = true }
dirs = "5.0"
gilrs = "0.11"
[features]
default = []

View file

@ -4,9 +4,10 @@ use gtk4::glib;
use gtk4::prelude::*;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tokio::runtime::Runtime;
use tokio::sync::mpsc;
use tracing::{debug, error, info};
use tracing::{debug, error, info, warn};
use crate::client::{CommandClient, ServiceClient};
use crate::grid::LauncherGrid;
@ -41,6 +42,13 @@ window {
border-color: #4a90d9;
}
.launcher-tile:focus,
.launcher-tile:focus-visible {
background: #1f3460;
background-color: #1f3460;
border-color: #ffd166;
}
.launcher-tile:active {
background: #0f3460;
background-color: #0f3460;
@ -168,6 +176,14 @@ impl LauncherApp {
// Create command client for sending commands
let command_client = Arc::new(CommandClient::new(&socket_path));
Self::setup_keyboard_input(&window, &grid);
Self::setup_gamepad_input(
&window,
&grid,
command_client.clone(),
runtime.clone(),
state.clone(),
);
// Connect grid launch callback
let cmd_client = command_client.clone();
@ -296,7 +312,8 @@ impl LauncherApp {
let state_for_client = state.clone();
let socket_for_client = socket_path.clone();
std::thread::spawn(move || {
let rt = tokio::runtime::Runtime::new().expect("Failed to create tokio runtime for event loop");
let rt = tokio::runtime::Runtime::new()
.expect("Failed to create tokio runtime for event loop");
rt.block_on(async move {
let client = ServiceClient::new(socket_for_client, state_for_client, command_rx);
client.run().await;
@ -342,6 +359,7 @@ impl LauncherApp {
if let Some(grid) = grid {
grid.set_entries(entries);
grid.set_tiles_sensitive(true);
grid.grab_focus();
}
if let Some(ref win) = window {
win.set_visible(true);
@ -381,6 +399,199 @@ impl LauncherApp {
window.present();
}
/// Install a capture-phase keyboard controller on `window` that drives the
/// launcher grid: arrow keys / WASD move the selection, Enter / keypad
/// Enter / Space launch the selected tile. Unhandled keys propagate.
fn setup_keyboard_input(window: &gtk4::ApplicationWindow, grid: &LauncherGrid) {
    use gtk4::gdk::Key;

    let controller = gtk4::EventControllerKey::new();
    // Capture phase so navigation works no matter which child holds focus.
    controller.set_propagation_phase(gtk4::PropagationPhase::Capture);

    let weak_grid = grid.downgrade();
    controller.connect_key_pressed(move |_, key, _, _| {
        let grid = match weak_grid.upgrade() {
            Some(g) => g,
            // Grid is gone; let the event continue normally.
            None => return glib::Propagation::Proceed,
        };

        // Map the key to a grid action; anything unmapped is not consumed.
        match key {
            Key::Up | Key::w | Key::W => grid.move_selection(0, -1),
            Key::Down | Key::s | Key::S => grid.move_selection(0, 1),
            Key::Left | Key::a | Key::A => grid.move_selection(-1, 0),
            Key::Right | Key::d | Key::D => grid.move_selection(1, 0),
            Key::Return | Key::KP_Enter | Key::space => grid.launch_selected(),
            _ => return glib::Propagation::Proceed,
        }
        glib::Propagation::Stop
    });

    window.add_controller(controller);
}
/// Poll a gamepad (via gilrs) and translate its input into grid navigation,
/// launch, and stop-current actions.
///
/// If the gilrs backend cannot be initialized, a warning is logged and no
/// polling is installed (gamepad support is best-effort). Otherwise a 16 ms
/// (~60 Hz) GLib timeout on the GTK main thread drains pending gilrs events.
fn setup_gamepad_input(
_window: &gtk4::ApplicationWindow,
grid: &LauncherGrid,
command_client: Arc<CommandClient>,
runtime: Arc<Runtime>,
state: SharedState,
) {
// Bail out quietly if no gamepad backend is available.
let mut gilrs = match gilrs::Gilrs::new() {
Ok(gilrs) => gilrs,
Err(e) => {
warn!(error = %e, "Gamepad input unavailable");
return;
}
};
// Weak reference: the polling closure must not keep the grid alive.
let grid_weak = grid.downgrade();
let cmd_client = command_client.clone();
let rt = runtime.clone();
let state_clone = state.clone();
// Per-direction latch flags so held sticks move the selection once per
// deflection rather than on every AxisChanged event.
let mut axis_state = GamepadAxisState::default();
glib::timeout_add_local(Duration::from_millis(16), move || {
while let Some(event) = gilrs.next_event() {
// Stop polling for good once the grid widget has been dropped.
let Some(grid) = grid_weak.upgrade() else {
return glib::ControlFlow::Break;
};
match event.event {
gilrs::EventType::ButtonPressed(button, _) => match button {
gilrs::Button::DPadUp => grid.move_selection(0, -1),
gilrs::Button::DPadDown => grid.move_selection(0, 1),
gilrs::Button::DPadLeft => grid.move_selection(-1, 0),
gilrs::Button::DPadRight => grid.move_selection(1, 0),
// Primary face buttons or Start launch the selected tile.
gilrs::Button::South | gilrs::Button::East | gilrs::Button::Start => {
grid.launch_selected();
}
// The guide/"Home" button requests a stop of the current
// activity via shepherdd.
gilrs::Button::Mode => {
Self::request_stop_current(
cmd_client.clone(),
rt.clone(),
state_clone.clone(),
);
}
_ => {}
},
gilrs::EventType::AxisChanged(axis, value, _) => {
Self::handle_gamepad_axis(&grid, axis, value, &mut axis_state);
}
_ => {}
}
}
glib::ControlFlow::Continue
});
}
/// Ask shepherdd to stop the currently running activity.
///
/// Spawns an async `StopCurrent` command on `runtime`. A successful or
/// denied response is only logged; a transport failure additionally moves
/// the shared launcher state to `Error` so the UI can surface it.
fn request_stop_current(
    command_client: Arc<CommandClient>,
    runtime: Arc<Runtime>,
    state: SharedState,
) {
    runtime.spawn(async move {
        // Send the command first; only inspect the payload if it arrived.
        let response = match command_client.stop_current().await {
            Ok(response) => response,
            Err(e) => {
                error!(error = %e, "StopCurrent request failed");
                state.set(LauncherState::Error {
                    message: format!("Failed to stop current activity: {}", e),
                });
                return;
            }
        };
        match response.result {
            shepherd_api::ResponseResult::Ok(shepherd_api::ResponsePayload::Stopped) => {
                info!("StopCurrent acknowledged");
            }
            shepherd_api::ResponseResult::Err(err) => {
                debug!(error = %err.message, "StopCurrent request denied");
            }
            _ => {
                debug!("Unexpected StopCurrent response payload");
            }
        }
    });
}
fn handle_gamepad_axis(
grid: &LauncherGrid,
axis: gilrs::Axis,
value: f32,
axis_state: &mut GamepadAxisState,
) {
const THRESHOLD: f32 = 0.65;
match axis {
gilrs::Axis::LeftStickX | gilrs::Axis::DPadX => {
if value <= -THRESHOLD {
if !axis_state.left {
grid.move_selection(-1, 0);
}
axis_state.left = true;
axis_state.right = false;
} else if value >= THRESHOLD {
if !axis_state.right {
grid.move_selection(1, 0);
}
axis_state.right = true;
axis_state.left = false;
} else {
axis_state.left = false;
axis_state.right = false;
}
}
gilrs::Axis::LeftStickY => {
if value <= -THRESHOLD {
if !axis_state.down {
grid.move_selection(0, 1);
}
axis_state.down = true;
axis_state.up = false;
} else if value >= THRESHOLD {
if !axis_state.up {
grid.move_selection(0, -1);
}
axis_state.up = true;
axis_state.down = false;
} else {
axis_state.up = false;
axis_state.down = false;
}
}
gilrs::Axis::DPadY => {
if value <= -THRESHOLD {
if !axis_state.up {
grid.move_selection(0, -1);
}
axis_state.up = true;
axis_state.down = false;
} else if value >= THRESHOLD {
if !axis_state.down {
grid.move_selection(0, 1);
}
axis_state.down = true;
axis_state.up = false;
} else {
axis_state.up = false;
axis_state.down = false;
}
}
_ => {}
}
}
fn create_loading_view() -> gtk4::Box {
let container = gtk4::Box::new(gtk4::Orientation::Vertical, 16);
container.set_halign(gtk4::Align::Center);
@ -458,3 +669,11 @@ impl LauncherApp {
(container, retry_button)
}
}
/// Per-direction "latched" flags for edge-triggered gamepad axis input.
///
/// A flag is set while the corresponding axis direction is held past the
/// activation threshold, so the selection moves once per deflection rather
/// than on every axis-changed event. All flags start `false` via `Default`.
#[derive(Default)]
struct GamepadAxisState {
/// Latched while the horizontal axis is held past the threshold leftward.
left: bool,
/// Latched while the horizontal axis is held past the threshold rightward.
right: bool,
/// Latched while a vertical axis is held in the "up" direction.
up: bool,
/// Latched while a vertical axis is held in the "down" direction.
down: bool,
}

View file

@ -57,7 +57,7 @@ impl ServiceClient {
Err(e) => {
error!(error = %e, "Connection error");
self.state.set(LauncherState::Disconnected);
// Wait before reconnecting
sleep(Duration::from_secs(2)).await;
}
@ -69,7 +69,7 @@ impl ServiceClient {
self.state.set(LauncherState::Connecting);
info!(path = %self.socket_path.display(), "Connecting to shepherdd");
let mut client = IpcClient::connect(&self.socket_path)
.await
.context("Failed to connect to shepherdd")?;
@ -162,11 +162,17 @@ impl ServiceClient {
}
ResponsePayload::Entries(entries) => {
// Only update if we're in idle state
if matches!(self.state.get(), LauncherState::Idle { .. } | LauncherState::Connecting) {
if matches!(
self.state.get(),
LauncherState::Idle { .. } | LauncherState::Connecting
) {
self.state.set(LauncherState::Idle { entries });
}
}
ResponsePayload::LaunchApproved { session_id, deadline } => {
ResponsePayload::LaunchApproved {
session_id,
deadline,
} => {
let now = shepherd_util::now();
// For unlimited sessions (deadline=None), time_remaining is None
let time_remaining = deadline.and_then(|d| {
@ -195,9 +201,7 @@ impl ServiceClient {
Ok(())
}
ResponseResult::Err(e) => {
self.state.set(LauncherState::Error {
message: e.message,
});
self.state.set(LauncherState::Error { message: e.message });
Ok(())
}
}
@ -218,17 +222,23 @@ impl CommandClient {
pub async fn launch(&self, entry_id: &EntryId) -> Result<Response> {
let mut client = IpcClient::connect(&self.socket_path).await?;
client.send(Command::Launch {
entry_id: entry_id.clone(),
}).await.map_err(Into::into)
client
.send(Command::Launch {
entry_id: entry_id.clone(),
})
.await
.map_err(Into::into)
}
#[allow(dead_code)]
pub async fn stop_current(&self) -> Result<Response> {
let mut client = IpcClient::connect(&self.socket_path).await?;
client.send(Command::StopCurrent {
mode: shepherd_api::StopMode::Graceful,
}).await.map_err(Into::into)
client
.send(Command::StopCurrent {
mode: shepherd_api::StopMode::Graceful,
})
.await
.map_err(Into::into)
}
pub async fn get_state(&self) -> Result<Response> {
@ -239,7 +249,10 @@ impl CommandClient {
#[allow(dead_code)]
pub async fn list_entries(&self) -> Result<Response> {
let mut client = IpcClient::connect(&self.socket_path).await?;
client.send(Command::ListEntries { at_time: None }).await.map_err(Into::into)
client
.send(Command::ListEntries { at_time: None })
.await
.map_err(Into::into)
}
}

View file

@ -51,7 +51,8 @@ mod imp {
// Configure flow box
self.flow_box.set_homogeneous(true);
self.flow_box.set_selection_mode(gtk4::SelectionMode::None);
self.flow_box
.set_selection_mode(gtk4::SelectionMode::Single);
self.flow_box.set_max_children_per_line(6);
self.flow_box.set_min_children_per_line(2);
self.flow_box.set_row_spacing(24);
@ -60,6 +61,7 @@ mod imp {
self.flow_box.set_valign(gtk4::Align::Center);
self.flow_box.set_hexpand(true);
self.flow_box.set_vexpand(true);
self.flow_box.set_focusable(true);
self.flow_box.add_css_class("launcher-grid");
// Wrap in a scrolled window
@ -117,14 +119,17 @@ impl LauncherGrid {
let on_launch = imp.on_launch.clone();
tile.connect_clicked(move |tile| {
if let Some(entry_id) = tile.entry_id()
&& let Some(callback) = on_launch.borrow().as_ref() {
callback(entry_id);
}
&& let Some(callback) = on_launch.borrow().as_ref()
{
callback(entry_id);
}
});
imp.flow_box.insert(&tile, -1);
imp.tiles.borrow_mut().push(tile);
}
self.select_first();
}
/// Enable or disable all tiles
@ -133,6 +138,114 @@ impl LauncherGrid {
tile.set_sensitive(sensitive);
}
}
/// Select and focus the first tile in the grid; does nothing when empty.
pub fn select_first(&self) {
    let flow_box = &self.imp().flow_box;
    match flow_box.child_at_index(0) {
        Some(first) => {
            flow_box.select_child(&first);
            first.grab_focus();
        }
        None => {}
    }
}
/// Move the selection one step in the given direction.
///
/// `(dx, dy)` is exactly one of (-1,0), (1,0), (0,-1), (0,1) for
/// left/right/up/down; any other pair matches no candidates and the
/// selection stays put. The target is chosen by nearest allocation
/// coordinates: horizontal moves only consider tiles on the same row
/// (equal allocation y), vertical moves consider any tile strictly
/// above/below, preferring the closest row, then the closest column.
/// If nothing is selected yet, the first child is used as the origin.
pub fn move_selection(&self, dx: i32, dy: i32) {
let imp = self.imp();
if imp.tiles.borrow().is_empty() {
return;
}
// Origin of the move: the current selection, or the first tile.
let current_child = imp
.flow_box
.selected_children()
.first()
.cloned()
.or_else(|| imp.flow_box.child_at_index(0));
let Some(current_child) = current_child else {
return;
};
let current_alloc = current_child.allocation();
let current_x = current_alloc.x();
let current_y = current_alloc.y();
// Best candidate so far: (child, primary distance, secondary distance).
let mut best: Option<(gtk4::FlowBoxChild, i32, i32)> = None;
let tile_count = imp.tiles.borrow().len() as i32;
for idx in 0..tile_count {
let Some(candidate) = imp.flow_box.child_at_index(idx) else {
continue;
};
if candidate == current_child {
continue;
}
let alloc = candidate.allocation();
let x = alloc.x();
let y = alloc.y();
// Keep only tiles lying in the requested direction. Horizontal
// moves require the same row (exact allocation-y match).
let is_direction_match = match (dx, dy) {
(-1, 0) => y == current_y && x < current_x,
(1, 0) => y == current_y && x > current_x,
(0, -1) => y < current_y,
(0, 1) => y > current_y,
_ => false,
};
if !is_direction_match {
continue;
}
// Primary distance is along the axis of movement...
let primary_dist = match (dx, dy) {
(-1, 0) | (1, 0) => (x - current_x).abs(),
(0, -1) | (0, 1) => (y - current_y).abs(),
_ => i32::MAX,
};
// ...secondary is the perpendicular offset, used as a tie-breaker.
let secondary_dist = match (dx, dy) {
(-1, 0) | (1, 0) => (y - current_y).abs(),
(0, -1) | (0, 1) => (x - current_x).abs(),
_ => i32::MAX,
};
let replace = match best {
None => true,
Some((_, best_primary, best_secondary)) => {
primary_dist < best_primary
|| (primary_dist == best_primary && secondary_dist < best_secondary)
}
};
if replace {
best = Some((candidate, primary_dist, secondary_dist));
}
}
// Commit the winner, if any; otherwise the selection is unchanged
// (e.g. already at the edge of the grid).
if let Some((child, _, _)) = best {
imp.flow_box.select_child(&child);
child.grab_focus();
}
}
pub fn launch_selected(&self) {
let imp = self.imp();
let maybe_child = imp.flow_box.selected_children().first().cloned();
let Some(child) = maybe_child else {
return;
};
let index = child.index();
if index < 0 {
return;
}
let tile = imp.tiles.borrow().get(index as usize).cloned();
if let Some(tile) = tile {
if !tile.is_sensitive() {
return;
}
if let Some(entry_id) = tile.entry_id()
&& let Some(callback) = imp.on_launch.borrow().as_ref()
{
callback(entry_id);
}
}
}
}
impl Default for LauncherGrid {

View file

@ -9,8 +9,10 @@ mod grid;
mod state;
mod tile;
use crate::client::CommandClient;
use anyhow::Result;
use clap::Parser;
use shepherd_api::{ErrorCode, ResponsePayload, ResponseResult};
use shepherd_util::default_socket_path;
use std::path::PathBuf;
use tracing_subscriber::EnvFilter;
@ -27,6 +29,10 @@ struct Args {
/// Log level
#[arg(short, long, default_value = "info")]
log_level: String,
/// Send StopCurrent to shepherdd and exit (for compositor keybindings)
#[arg(long)]
stop_current: bool,
}
fn main() -> Result<()> {
@ -35,8 +41,7 @@ fn main() -> Result<()> {
// Initialize logging
tracing_subscriber::fmt()
.with_env_filter(
EnvFilter::try_from_default_env()
.unwrap_or_else(|_| EnvFilter::new(&args.log_level)),
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(&args.log_level)),
)
.init();
@ -45,6 +50,33 @@ fn main() -> Result<()> {
// Determine socket path with fallback to default
let socket_path = args.socket.unwrap_or_else(default_socket_path);
if args.stop_current {
let runtime = tokio::runtime::Runtime::new()?;
runtime.block_on(async move {
let client = CommandClient::new(&socket_path);
match client.stop_current().await {
Ok(response) => match response.result {
ResponseResult::Ok(ResponsePayload::Stopped) => {
tracing::info!("StopCurrent succeeded");
Ok(())
}
ResponseResult::Err(err) if err.code == ErrorCode::NoActiveSession => {
tracing::debug!("No active session to stop");
Ok(())
}
ResponseResult::Err(err) => {
anyhow::bail!("StopCurrent failed: {}", err.message)
}
ResponseResult::Ok(payload) => {
anyhow::bail!("Unexpected StopCurrent response: {:?}", payload)
}
},
Err(e) => anyhow::bail!("Failed to send StopCurrent: {}", e),
}
})?;
return Ok(());
}
// Run GTK application
let application = app::LauncherApp::new(socket_path);
let exit_code = application.run();

View file

@ -1,6 +1,6 @@
//! Launcher application state management
use shepherd_api::{ServiceStateSnapshot, EntryView, Event, EventPayload};
use shepherd_api::{EntryView, Event, EventPayload, ServiceStateSnapshot};
use shepherd_util::SessionId;
use std::time::Duration;
use tokio::sync::watch;
@ -18,7 +18,7 @@ pub enum LauncherState {
/// Launch requested, waiting for response
Launching {
#[allow(dead_code)]
entry_id: String
entry_id: String,
},
/// Session is running
SessionActive {
@ -62,7 +62,10 @@ impl SharedState {
tracing::info!(event = ?event.payload, "Received event from shepherdd");
match event.payload {
EventPayload::StateChanged(snapshot) => {
tracing::info!(has_session = snapshot.current_session.is_some(), "Applying state snapshot");
tracing::info!(
has_session = snapshot.current_session.is_some(),
"Applying state snapshot"
);
self.apply_snapshot(snapshot);
}
EventPayload::SessionStarted {
@ -87,7 +90,12 @@ impl SharedState {
time_remaining,
});
}
EventPayload::SessionEnded { session_id, entry_id, reason, .. } => {
EventPayload::SessionEnded {
session_id,
entry_id,
reason,
..
} => {
tracing::info!(session_id = %session_id, entry_id = %entry_id, reason = ?reason, "Session ended event - setting Connecting");
// Will be followed by StateChanged, but set to connecting
// to ensure grid reloads

View file

@ -89,7 +89,7 @@ impl LauncherTile {
// Set icon, first trying to load as an image file, then as an icon name
if let Some(ref icon_ref) = entry.icon_ref {
let mut loaded = false;
// First, try to load as an image file (JPG, PNG, etc.)
// Expand ~ to home directory if present
let expanded_path = if icon_ref.starts_with("~/") {
@ -101,14 +101,14 @@ impl LauncherTile {
} else {
icon_ref.clone()
};
let path = std::path::Path::new(&expanded_path);
if path.exists() && path.is_file() {
// Try to load as an image file
imp.icon.set_from_file(Some(path));
loaded = true;
}
// If not loaded as a file, try as an icon name from the theme
if !loaded {
let icon_theme = gtk4::IconTheme::for_display(&self.display());
@ -143,7 +143,11 @@ impl LauncherTile {
}
pub fn entry_id(&self) -> Option<shepherd_util::EntryId> {
self.imp().entry.borrow().as_ref().map(|e| e.entry_id.clone())
self.imp()
.entry
.borrow()
.as_ref()
.map(|e| e.entry_id.clone())
}
}

View file

@ -1,7 +1,7 @@
//! SQLite-based store implementation
use chrono::{DateTime, Local, NaiveDate};
use rusqlite::{params, Connection, OptionalExtension};
use rusqlite::{Connection, OptionalExtension, params};
use shepherd_util::EntryId;
use std::path::Path;
use std::sync::Mutex;
@ -98,9 +98,8 @@ impl Store for SqliteStore {
fn get_recent_audits(&self, limit: usize) -> StoreResult<Vec<AuditEvent>> {
let conn = self.conn.lock().unwrap();
let mut stmt = conn.prepare(
"SELECT id, timestamp, event_json FROM audit_log ORDER BY id DESC LIMIT ?",
)?;
let mut stmt = conn
.prepare("SELECT id, timestamp, event_json FROM audit_log ORDER BY id DESC LIMIT ?")?;
let rows = stmt.query_map([limit], |row| {
let id: i64 = row.get(0)?;
@ -181,11 +180,7 @@ impl Store for SqliteStore {
Ok(result)
}
fn set_cooldown_until(
&self,
entry_id: &EntryId,
until: DateTime<Local>,
) -> StoreResult<()> {
fn set_cooldown_until(&self, entry_id: &EntryId, until: DateTime<Local>) -> StoreResult<()> {
let conn = self.conn.lock().unwrap();
conn.execute(
@ -204,7 +199,10 @@ impl Store for SqliteStore {
fn clear_cooldown(&self, entry_id: &EntryId) -> StoreResult<()> {
let conn = self.conn.lock().unwrap();
conn.execute("DELETE FROM cooldowns WHERE entry_id = ?", [entry_id.as_str()])?;
conn.execute(
"DELETE FROM cooldowns WHERE entry_id = ?",
[entry_id.as_str()],
)?;
Ok(())
}
@ -212,9 +210,11 @@ impl Store for SqliteStore {
let conn = self.conn.lock().unwrap();
let json: Option<String> = conn
.query_row("SELECT snapshot_json FROM snapshot WHERE id = 1", [], |row| {
row.get(0)
})
.query_row(
"SELECT snapshot_json FROM snapshot WHERE id = 1",
[],
|row| row.get(0),
)
.optional()?;
match json {
@ -246,9 +246,7 @@ impl Store for SqliteStore {
fn is_healthy(&self) -> bool {
match self.conn.lock() {
Ok(conn) => {
conn.query_row("SELECT 1", [], |_| Ok(())).is_ok()
}
Ok(conn) => conn.query_row("SELECT 1", [], |_| Ok(())).is_ok(),
Err(_) => {
warn!("Store lock poisoned");
false

View file

@ -30,11 +30,7 @@ pub trait Store: Send + Sync {
fn get_cooldown_until(&self, entry_id: &EntryId) -> StoreResult<Option<DateTime<Local>>>;
/// Set cooldown expiry time for an entry
fn set_cooldown_until(
&self,
entry_id: &EntryId,
until: DateTime<Local>,
) -> StoreResult<()>;
fn set_cooldown_until(&self, entry_id: &EntryId, until: DateTime<Local>) -> StoreResult<()>;
/// Clear cooldown for an entry
fn clear_cooldown(&self, entry_id: &EntryId) -> StoreResult<()>;

View file

@ -40,7 +40,9 @@ pub fn default_socket_path() -> PathBuf {
pub fn socket_path_without_env() -> PathBuf {
// Try XDG_RUNTIME_DIR first (typically /run/user/<uid>)
if let Ok(runtime_dir) = std::env::var("XDG_RUNTIME_DIR") {
return PathBuf::from(runtime_dir).join(APP_DIR).join(SOCKET_FILENAME);
return PathBuf::from(runtime_dir)
.join(APP_DIR)
.join(SOCKET_FILENAME);
}
// Fallback to /tmp with username
@ -109,10 +111,13 @@ pub fn default_log_dir() -> PathBuf {
/// Get the parent directory of the socket (for creating it)
pub fn socket_dir() -> PathBuf {
let socket_path = socket_path_without_env();
socket_path.parent().map(|p| p.to_path_buf()).unwrap_or_else(|| {
// Should never happen with our paths, but just in case
PathBuf::from("/tmp").join(APP_DIR)
})
socket_path
.parent()
.map(|p| p.to_path_buf())
.unwrap_or_else(|| {
// Should never happen with our paths, but just in case
PathBuf::from("/tmp").join(APP_DIR)
})
}
/// Configuration subdirectory name (uses "shepherd" not "shepherdd")

View file

@ -42,10 +42,13 @@ impl RateLimiter {
pub fn check(&mut self, client_id: &ClientId) -> bool {
let now = Instant::now();
let bucket = self.clients.entry(client_id.clone()).or_insert(ClientBucket {
tokens: self.max_tokens,
last_refill: now,
});
let bucket = self
.clients
.entry(client_id.clone())
.or_insert(ClientBucket {
tokens: self.max_tokens,
last_refill: now,
});
// Refill tokens if interval has passed
let elapsed = now.duration_since(bucket.last_refill);
@ -72,9 +75,8 @@ impl RateLimiter {
/// Clean up stale client entries
pub fn cleanup(&mut self, stale_after: Duration) {
let now = Instant::now();
self.clients.retain(|_, bucket| {
now.duration_since(bucket.last_refill) < stale_after
});
self.clients
.retain(|_, bucket| now.duration_since(bucket.last_refill) < stale_after);
}
}

View file

@ -37,7 +37,9 @@ fn get_mock_time_offset() -> Option<chrono::Duration> {
{
if let Ok(mock_time_str) = std::env::var(MOCK_TIME_ENV_VAR) {
// Parse the mock time string
if let Ok(naive_dt) = NaiveDateTime::parse_from_str(&mock_time_str, "%Y-%m-%d %H:%M:%S") {
if let Ok(naive_dt) =
NaiveDateTime::parse_from_str(&mock_time_str, "%Y-%m-%d %H:%M:%S")
{
if let Some(mock_dt) = Local.from_local_datetime(&naive_dt).single() {
let real_now = chrono::Local::now();
let offset = mock_dt.signed_duration_since(real_now);
@ -83,7 +85,7 @@ pub fn is_mock_time_active() -> bool {
#[allow(clippy::disallowed_methods)] // This is the wrapper that provides mock time support
pub fn now() -> DateTime<Local> {
let real_now = chrono::Local::now();
if let Some(offset) = get_mock_time_offset() {
real_now + offset
} else {
@ -201,9 +203,8 @@ impl DaysOfWeek {
pub const SATURDAY: u8 = 1 << 5;
pub const SUNDAY: u8 = 1 << 6;
pub const WEEKDAYS: DaysOfWeek = DaysOfWeek(
Self::MONDAY | Self::TUESDAY | Self::WEDNESDAY | Self::THURSDAY | Self::FRIDAY,
);
pub const WEEKDAYS: DaysOfWeek =
DaysOfWeek(Self::MONDAY | Self::TUESDAY | Self::WEDNESDAY | Self::THURSDAY | Self::FRIDAY);
pub const WEEKENDS: DaysOfWeek = DaysOfWeek(Self::SATURDAY | Self::SUNDAY);
pub const ALL_DAYS: DaysOfWeek = DaysOfWeek(0x7F);
pub const NONE: DaysOfWeek = DaysOfWeek(0);
@ -446,14 +447,14 @@ mod tests {
fn test_parse_mock_time_invalid_formats() {
// Test that invalid formats are rejected
let invalid_formats = [
"2025-12-25", // Missing time
"14:30:00", // Missing date
"2025/12/25 14:30:00", // Wrong date separator
"2025-12-25T14:30:00", // ISO format (not supported)
"Dec 25, 2025 14:30", // Wrong format
"25-12-2025 14:30:00", // Wrong date order
"", // Empty string
"not a date", // Invalid string
"2025-12-25", // Missing time
"14:30:00", // Missing date
"2025/12/25 14:30:00", // Wrong date separator
"2025-12-25T14:30:00", // ISO format (not supported)
"Dec 25, 2025 14:30", // Wrong format
"25-12-2025 14:30:00", // Wrong date order
"", // Empty string
"not a date", // Invalid string
];
for format_str in &invalid_formats {
@ -474,12 +475,12 @@ mod tests {
let naive_dt = NaiveDateTime::parse_from_str(mock_time_str, "%Y-%m-%d %H:%M:%S").unwrap();
let mock_dt = Local.from_local_datetime(&naive_dt).single().unwrap();
let real_now = chrono::Local::now();
let offset = mock_dt.signed_duration_since(real_now);
// The offset should be applied correctly
let simulated_now = real_now + offset;
// The simulated time should be very close to the mock time
// (within a second, accounting for test execution time)
let diff = (simulated_now - mock_dt).num_seconds().abs();
@ -495,25 +496,25 @@ mod tests {
fn test_mock_time_advances_with_real_time() {
// Test that mock time advances at the same rate as real time
// This tests the concept, not the actual implementation (since OnceLock is static)
let mock_time_str = "2025-12-25 14:30:00";
let naive_dt = NaiveDateTime::parse_from_str(mock_time_str, "%Y-%m-%d %H:%M:%S").unwrap();
let mock_dt = Local.from_local_datetime(&naive_dt).single().unwrap();
let real_t1 = chrono::Local::now();
let offset = mock_dt.signed_duration_since(real_t1);
// Simulate time passing
std::thread::sleep(Duration::from_millis(100));
let real_t2 = chrono::Local::now();
let simulated_t1 = real_t1 + offset;
let simulated_t2 = real_t2 + offset;
// The simulated times should have advanced by the same amount as real times
let real_elapsed = real_t2.signed_duration_since(real_t1);
let simulated_elapsed = simulated_t2.signed_duration_since(simulated_t1);
assert_eq!(
real_elapsed.num_milliseconds(),
simulated_elapsed.num_milliseconds(),
@ -525,24 +526,33 @@ mod tests {
fn test_availability_with_specific_time() {
// Test that availability windows work correctly with a specific time
// This validates that the mock time would affect availability checks
let window = TimeWindow::new(
DaysOfWeek::ALL_DAYS,
WallClock::new(14, 0).unwrap(), // 2 PM
WallClock::new(18, 0).unwrap(), // 6 PM
WallClock::new(14, 0).unwrap(), // 2 PM
WallClock::new(18, 0).unwrap(), // 6 PM
);
// Time within window
let in_window = Local.with_ymd_and_hms(2025, 12, 25, 15, 0, 0).unwrap();
assert!(window.contains(&in_window), "15:00 should be within 14:00-18:00 window");
assert!(
window.contains(&in_window),
"15:00 should be within 14:00-18:00 window"
);
// Time before window
let before_window = Local.with_ymd_and_hms(2025, 12, 25, 10, 0, 0).unwrap();
assert!(!window.contains(&before_window), "10:00 should be before 14:00-18:00 window");
assert!(
!window.contains(&before_window),
"10:00 should be before 14:00-18:00 window"
);
// Time after window
let after_window = Local.with_ymd_and_hms(2025, 12, 25, 20, 0, 0).unwrap();
assert!(!window.contains(&after_window), "20:00 should be after 14:00-18:00 window");
assert!(
!window.contains(&after_window),
"20:00 should be after 14:00-18:00 window"
);
}
#[test]
@ -553,18 +563,27 @@ mod tests {
WallClock::new(14, 0).unwrap(),
WallClock::new(18, 0).unwrap(),
);
// Thursday at 3 PM - should be available (weekday, in time window)
let thursday = Local.with_ymd_and_hms(2025, 12, 25, 15, 0, 0).unwrap(); // Christmas 2025 is Thursday
assert!(window.contains(&thursday), "Thursday 15:00 should be in weekday afternoon window");
assert!(
window.contains(&thursday),
"Thursday 15:00 should be in weekday afternoon window"
);
// Saturday at 3 PM - should NOT be available (weekend)
let saturday = Local.with_ymd_and_hms(2025, 12, 27, 15, 0, 0).unwrap();
assert!(!window.contains(&saturday), "Saturday should not be in weekday window");
assert!(
!window.contains(&saturday),
"Saturday should not be in weekday window"
);
// Sunday at 3 PM - should NOT be available (weekend)
let sunday = Local.with_ymd_and_hms(2025, 12, 28, 15, 0, 0).unwrap();
assert!(!window.contains(&sunday), "Sunday should not be in weekday window");
assert!(
!window.contains(&sunday),
"Sunday should not be in weekday window"
);
}
}
@ -577,7 +596,7 @@ mod mock_time_integration_tests {
/// This test documents the expected behavior of the mock time feature.
/// Due to the static OnceLock, actual integration testing requires
/// running with the environment variable set externally.
///
///
/// To manually test:
/// ```bash
/// SHEPHERD_MOCK_TIME="2025-12-25 14:30:00" cargo test
@ -586,7 +605,7 @@ mod mock_time_integration_tests {
fn test_mock_time_documentation() {
// This test verifies the mock time constants and expected behavior
assert_eq!(MOCK_TIME_ENV_VAR, "SHEPHERD_MOCK_TIME");
// The expected format is documented
let expected_format = "%Y-%m-%d %H:%M:%S";
let example = "2025-12-25 14:30:00";
@ -608,10 +627,10 @@ mod mock_time_integration_tests {
let t1 = now();
std::thread::sleep(Duration::from_millis(50));
let t2 = now();
// t2 should be after t1
assert!(t2 > t1, "Time should advance forward");
// The difference should be approximately 50ms (with some tolerance)
let diff = t2.signed_duration_since(t1);
assert!(

View file

@ -26,9 +26,10 @@ impl InternetMonitor {
for entry in &policy.entries {
if entry.internet.required
&& let Some(check) = entry.internet.check.clone()
&& !targets.contains(&check) {
targets.push(check);
}
&& !targets.contains(&check)
{
targets.push(check);
}
}
if targets.is_empty() {

View file

@ -12,20 +12,20 @@
use anyhow::{Context, Result};
use clap::Parser;
use shepherd_api::{
Command, ErrorCode, ErrorInfo, Event, EventPayload, HealthStatus,
Response, ResponsePayload, SessionEndReason, StopMode, VolumeInfo, VolumeRestrictions,
Command, ErrorCode, ErrorInfo, Event, EventPayload, HealthStatus, Response, ResponsePayload,
SessionEndReason, StopMode, VolumeInfo, VolumeRestrictions,
};
use shepherd_config::{load_config, VolumePolicy};
use shepherd_config::{VolumePolicy, load_config};
use shepherd_core::{CoreEngine, CoreEvent, LaunchDecision, StopDecision};
use shepherd_host_api::{HostAdapter, HostEvent, StopMode as HostStopMode, VolumeController};
use shepherd_host_linux::{LinuxHost, LinuxVolumeController};
use shepherd_ipc::{IpcServer, ServerMessage};
use shepherd_store::{AuditEvent, AuditEventType, SqliteStore, Store};
use shepherd_util::{default_config_path, ClientId, MonotonicInstant, RateLimiter};
use shepherd_util::{ClientId, MonotonicInstant, RateLimiter, default_config_path};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tokio::signal::unix::{signal, SignalKind};
use tokio::signal::unix::{SignalKind, signal};
use tokio::sync::Mutex;
use tracing::{debug, error, info, warn};
use tracing_subscriber::EnvFilter;
@ -180,12 +180,11 @@ impl Service {
});
// Set up signal handlers
let mut sigterm = signal(SignalKind::terminate())
.context("Failed to create SIGTERM handler")?;
let mut sigint = signal(SignalKind::interrupt())
.context("Failed to create SIGINT handler")?;
let mut sighup = signal(SignalKind::hangup())
.context("Failed to create SIGHUP handler")?;
let mut sigterm =
signal(SignalKind::terminate()).context("Failed to create SIGTERM handler")?;
let mut sigint =
signal(SignalKind::interrupt()).context("Failed to create SIGINT handler")?;
let mut sighup = signal(SignalKind::hangup()).context("Failed to create SIGHUP handler")?;
// Main event loop
let tick_interval = Duration::from_millis(100);
@ -246,9 +245,16 @@ impl Service {
let engine = engine.lock().await;
if let Some(session) = engine.current_session() {
info!(session_id = %session.plan.session_id, "Stopping active session");
if let Some(handle) = &session.host_handle && let Err(e) = host.stop(handle, HostStopMode::Graceful {
timeout: Duration::from_secs(5),
}).await {
if let Some(handle) = &session.host_handle
&& let Err(e) = host
.stop(
handle,
HostStopMode::Graceful {
timeout: Duration::from_secs(5),
},
)
.await
{
warn!(error = %e, "Failed to stop session gracefully");
}
}
@ -301,9 +307,7 @@ impl Service {
// Get the host handle and stop it
let handle = {
let engine = engine.lock().await;
engine
.current_session()
.and_then(|s| s.host_handle.clone())
engine.current_session().and_then(|s| s.host_handle.clone())
};
if let Some(handle) = handle
@ -315,10 +319,10 @@ impl Service {
},
)
.await
{
warn!(error = %e, "Failed to stop session gracefully, forcing");
let _ = host.stop(&handle, HostStopMode::Force).await;
}
{
warn!(error = %e, "Failed to stop session gracefully, forcing");
let _ = host.stop(&handle, HostStopMode::Force).await;
}
ipc.broadcast_event(Event::new(EventPayload::SessionExpiring {
session_id: session_id.clone(),
@ -405,7 +409,10 @@ impl Service {
engine.notify_session_exited(status.code, now_mono, now)
};
info!(has_event = core_event.is_some(), "notify_session_exited result");
info!(
has_event = core_event.is_some(),
"notify_session_exited result"
);
if let Some(CoreEvent::SessionEnded {
session_id,
@ -413,29 +420,29 @@ impl Service {
reason,
duration,
}) = core_event
{
info!(
session_id = %session_id,
entry_id = %entry_id,
reason = ?reason,
duration_secs = duration.as_secs(),
"Broadcasting SessionEnded"
);
ipc.broadcast_event(Event::new(EventPayload::SessionEnded {
session_id,
entry_id,
reason,
duration,
}));
{
info!(
session_id = %session_id,
entry_id = %entry_id,
reason = ?reason,
duration_secs = duration.as_secs(),
"Broadcasting SessionEnded"
);
ipc.broadcast_event(Event::new(EventPayload::SessionEnded {
session_id,
entry_id,
reason,
duration,
}));
// Broadcast state change
let state = {
let engine = engine.lock().await;
engine.get_state()
};
info!("Broadcasting StateChanged");
ipc.broadcast_event(Event::new(EventPayload::StateChanged(state)));
}
// Broadcast state change
let state = {
let engine = engine.lock().await;
engine.get_state()
};
info!("Broadcasting StateChanged");
ipc.broadcast_event(Event::new(EventPayload::StateChanged(state)));
}
}
HostEvent::WindowReady { handle } => {
@ -472,9 +479,17 @@ impl Service {
}
}
let response =
Self::handle_command(engine, host, volume, ipc, store, &client_id, request.request_id, request.command)
.await;
let response = Self::handle_command(
engine,
host,
volume,
ipc,
store,
&client_id,
request.request_id,
request.command,
)
.await;
let _ = ipc.send_response(&client_id, response).await;
}
@ -487,23 +502,19 @@ impl Service {
"Client connected"
);
let _ = store.append_audit(AuditEvent::new(
AuditEventType::ClientConnected {
client_id: client_id.to_string(),
role: format!("{:?}", info.role),
uid: info.uid,
},
));
let _ = store.append_audit(AuditEvent::new(AuditEventType::ClientConnected {
client_id: client_id.to_string(),
role: format!("{:?}", info.role),
uid: info.uid,
}));
}
ServerMessage::ClientDisconnected { client_id } => {
debug!(client_id = %client_id, "Client disconnected");
let _ = store.append_audit(AuditEvent::new(
AuditEventType::ClientDisconnected {
client_id: client_id.to_string(),
},
));
let _ = store.append_audit(AuditEvent::new(AuditEventType::ClientDisconnected {
client_id: client_id.to_string(),
}));
// Clean up rate limiter
let mut limiter = rate_limiter.lock().await;
@ -547,10 +558,7 @@ impl Service {
let event = eng.start_session(plan.clone(), now, now_mono);
// Get the entry kind for spawning
let entry_kind = eng
.policy()
.get_entry(&entry_id)
.map(|e| e.kind.clone());
let entry_kind = eng.policy().get_entry(&entry_id).map(|e| e.kind.clone());
// Build spawn options with log path if capture_child_output is enabled
let spawn_options = if eng.policy().service.capture_child_output {
@ -577,11 +585,7 @@ impl Service {
if let Some(kind) = entry_kind {
match host
.spawn(
plan.session_id.clone(),
&kind,
spawn_options,
)
.spawn(plan.session_id.clone(), &kind, spawn_options)
.await
{
Ok(handle) => {
@ -597,12 +601,14 @@ impl Service {
deadline,
} = event
{
ipc.broadcast_event(Event::new(EventPayload::SessionStarted {
session_id: session_id.clone(),
entry_id,
label,
deadline,
}));
ipc.broadcast_event(Event::new(
EventPayload::SessionStarted {
session_id: session_id.clone(),
entry_id,
label,
deadline,
},
));
Response::success(
request_id,
@ -614,7 +620,10 @@ impl Service {
} else {
Response::error(
request_id,
ErrorInfo::new(ErrorCode::InternalError, "Unexpected event"),
ErrorInfo::new(
ErrorCode::InternalError,
"Unexpected event",
),
)
}
}
@ -627,18 +636,22 @@ impl Service {
reason,
duration,
}) = eng.notify_session_exited(Some(-1), now_mono, now)
{
ipc.broadcast_event(Event::new(EventPayload::SessionEnded {
{
ipc.broadcast_event(Event::new(
EventPayload::SessionEnded {
session_id,
entry_id,
reason,
duration,
}));
},
));
// Broadcast state change so clients return to idle
let state = eng.get_state();
ipc.broadcast_event(Event::new(EventPayload::StateChanged(state)));
}
// Broadcast state change so clients return to idle
let state = eng.get_state();
ipc.broadcast_event(Event::new(
EventPayload::StateChanged(state),
));
}
Response::error(
request_id,
@ -666,9 +679,7 @@ impl Service {
let mut eng = engine.lock().await;
// Get handle before stopping in engine
let handle = eng
.current_session()
.and_then(|s| s.host_handle.clone());
let handle = eng.current_session().and_then(|s| s.host_handle.clone());
let reason = match mode {
StopMode::Graceful => SessionEndReason::UserStop,
@ -719,12 +730,13 @@ impl Service {
Command::ReloadConfig => {
// Check permission
if let Some(info) = ipc.get_client_info(client_id).await
&& !info.role.can_reload_config() {
return Response::error(
request_id,
ErrorInfo::new(ErrorCode::PermissionDenied, "Admin role required"),
);
}
&& !info.role.can_reload_config()
{
return Response::error(
request_id,
ErrorInfo::new(ErrorCode::PermissionDenied, "Admin role required"),
);
}
// TODO: Reload from original config path
Response::error(
@ -733,14 +745,12 @@ impl Service {
)
}
Command::SubscribeEvents => {
Response::success(
request_id,
ResponsePayload::Subscribed {
client_id: client_id.clone(),
},
)
}
Command::SubscribeEvents => Response::success(
request_id,
ResponsePayload::Subscribed {
client_id: client_id.clone(),
},
),
Command::UnsubscribeEvents => {
Response::success(request_id, ResponsePayload::Unsubscribed)
@ -761,21 +771,28 @@ impl Service {
Command::ExtendCurrent { by } => {
// Check permission
if let Some(info) = ipc.get_client_info(client_id).await
&& !info.role.can_extend() {
return Response::error(
request_id,
ErrorInfo::new(ErrorCode::PermissionDenied, "Admin role required"),
);
}
&& !info.role.can_extend()
{
return Response::error(
request_id,
ErrorInfo::new(ErrorCode::PermissionDenied, "Admin role required"),
);
}
let mut eng = engine.lock().await;
match eng.extend_current(by, now_mono, now) {
Some(new_deadline) => {
Response::success(request_id, ResponsePayload::Extended { new_deadline: Some(new_deadline) })
}
Some(new_deadline) => Response::success(
request_id,
ResponsePayload::Extended {
new_deadline: Some(new_deadline),
},
),
None => Response::error(
request_id,
ErrorInfo::new(ErrorCode::NoActiveSession, "No active session or session is unlimited"),
ErrorInfo::new(
ErrorCode::NoActiveSession,
"No active session or session is unlimited",
),
),
}
}
@ -913,14 +930,15 @@ impl Service {
engine: &Arc<Mutex<CoreEngine>>,
) -> VolumeRestrictions {
let eng = engine.lock().await;
// Check if there's an active session with volume restrictions
if let Some(session) = eng.current_session()
&& let Some(entry) = eng.policy().get_entry(&session.plan.entry_id)
&& let Some(ref vol_policy) = entry.volume {
return Self::convert_volume_policy(vol_policy);
}
&& let Some(ref vol_policy) = entry.volume
{
return Self::convert_volume_policy(vol_policy);
}
// Fall back to global policy
Self::convert_volume_policy(&eng.policy().volume)
}
@ -940,18 +958,15 @@ async fn main() -> Result<()> {
let args = Args::parse();
// Initialize logging
let filter = EnvFilter::try_from_default_env()
.unwrap_or_else(|_| EnvFilter::new(&args.log_level));
let filter =
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(&args.log_level));
tracing_subscriber::fmt()
.with_env_filter(filter)
.with_target(true)
.init();
info!(
version = env!("CARGO_PKG_VERSION"),
"shepherdd starting"
);
info!(version = env!("CARGO_PKG_VERSION"), "shepherdd starting");
// Create and run the service
let service = Service::new(&args).await?;

View file

@ -15,44 +15,42 @@ use std::time::Duration;
fn make_test_policy() -> Policy {
Policy {
service: Default::default(),
entries: vec![
Entry {
id: EntryId::new("test-game"),
label: "Test Game".into(),
icon_ref: None,
kind: EntryKind::Process {
command: "sleep".into(),
args: vec!["999".into()],
env: HashMap::new(),
cwd: None,
},
availability: AvailabilityPolicy {
windows: vec![],
always: true,
},
limits: LimitsPolicy {
max_run: Some(Duration::from_secs(10)), // Short for testing
daily_quota: None,
cooldown: None,
},
warnings: vec![
WarningThreshold {
seconds_before: 5,
severity: WarningSeverity::Warn,
message_template: Some("5 seconds left".into()),
},
WarningThreshold {
seconds_before: 2,
severity: WarningSeverity::Critical,
message_template: Some("2 seconds left!".into()),
},
],
volume: None,
disabled: false,
disabled_reason: None,
internet: Default::default(),
entries: vec![Entry {
id: EntryId::new("test-game"),
label: "Test Game".into(),
icon_ref: None,
kind: EntryKind::Process {
command: "sleep".into(),
args: vec!["999".into()],
env: HashMap::new(),
cwd: None,
},
],
availability: AvailabilityPolicy {
windows: vec![],
always: true,
},
limits: LimitsPolicy {
max_run: Some(Duration::from_secs(10)), // Short for testing
daily_quota: None,
cooldown: None,
},
warnings: vec![
WarningThreshold {
seconds_before: 5,
severity: WarningSeverity::Warn,
message_template: Some("5 seconds left".into()),
},
WarningThreshold {
seconds_before: 2,
severity: WarningSeverity::Critical,
message_template: Some("2 seconds left!".into()),
},
],
volume: None,
disabled: false,
disabled_reason: None,
internet: Default::default(),
}],
default_warnings: vec![],
default_max_run: Some(Duration::from_secs(3600)),
volume: Default::default(),
@ -91,7 +89,9 @@ fn test_launch_approval() {
let entry_id = EntryId::new("test-game");
let decision = engine.request_launch(&entry_id, shepherd_util::now());
assert!(matches!(decision, LaunchDecision::Approved(plan) if plan.max_duration == Some(Duration::from_secs(10))));
assert!(
matches!(decision, LaunchDecision::Approved(plan) if plan.max_duration == Some(Duration::from_secs(10)))
);
}
#[test]
@ -150,14 +150,26 @@ fn test_warning_emission() {
let at_6s = now + chrono::Duration::seconds(6);
let events = engine.tick(at_6s_mono, at_6s);
assert_eq!(events.len(), 1);
assert!(matches!(&events[0], CoreEvent::Warning { threshold_seconds: 5, .. }));
assert!(matches!(
&events[0],
CoreEvent::Warning {
threshold_seconds: 5,
..
}
));
// At 9 seconds (1 second remaining), 2-second warning should fire
let at_9s_mono = now_mono + Duration::from_secs(9);
let at_9s = now + chrono::Duration::seconds(9);
let events = engine.tick(at_9s_mono, at_9s);
assert_eq!(events.len(), 1);
assert!(matches!(&events[0], CoreEvent::Warning { threshold_seconds: 2, .. }));
assert!(matches!(
&events[0],
CoreEvent::Warning {
threshold_seconds: 2,
..
}
));
// Warnings shouldn't repeat
let events = engine.tick(at_9s_mono, at_9s);
@ -188,7 +200,9 @@ fn test_session_expiry() {
let events = engine.tick(at_11s_mono, at_11s);
// Should have both remaining warnings + expiry
let has_expiry = events.iter().any(|e| matches!(e, CoreEvent::ExpireDue { .. }));
let has_expiry = events
.iter()
.any(|e| matches!(e, CoreEvent::ExpireDue { .. }));
assert!(has_expiry, "Expected ExpireDue event");
}
@ -291,9 +305,18 @@ fn test_config_parsing() {
let policy = parse_config(config).unwrap();
assert_eq!(policy.entries.len(), 1);
assert_eq!(policy.entries[0].id.as_str(), "scummvm");
assert_eq!(policy.entries[0].limits.max_run, Some(Duration::from_secs(3600)));
assert_eq!(policy.entries[0].limits.daily_quota, Some(Duration::from_secs(7200)));
assert_eq!(policy.entries[0].limits.cooldown, Some(Duration::from_secs(300)));
assert_eq!(
policy.entries[0].limits.max_run,
Some(Duration::from_secs(3600))
);
assert_eq!(
policy.entries[0].limits.daily_quota,
Some(Duration::from_secs(7200))
);
assert_eq!(
policy.entries[0].limits.cooldown,
Some(Duration::from_secs(300))
);
assert_eq!(policy.entries[0].warnings.len(), 1);
}
@ -316,7 +339,11 @@ fn test_session_extension() {
engine.start_session(plan, now, now_mono);
// Get original deadline (should be Some for this test)
let original_deadline = engine.current_session().unwrap().deadline.expect("Expected deadline");
let original_deadline = engine
.current_session()
.unwrap()
.deadline
.expect("Expected deadline");
// Extend by 5 minutes
let new_deadline = engine.extend_current(Duration::from_secs(300), now_mono, now);

View file

@ -0,0 +1,22 @@
# Controller And Keyboard Launching
Issue: <https://github.com/aarmea/shepherd-launcher/issues/20>
Prompt summary:
- Launching activities required pointer input.
- Requested non-pointer controls:
- Selection via arrow keys, WASD, D-pad, or analog stick
- Launch via Enter, Space, controller A/B/Start
- Exit via Alt+F4, Ctrl+W, controller home
- Goal was better accessibility and support for pointer-less handheld systems.
Implemented summary:
- Added keyboard navigation and activation support in launcher UI grid.
- Added explicit keyboard exit shortcuts at the window level.
- Added gamepad input handling via `gilrs` for D-pad, analog stick, A/B/Start launch, and home exit.
- Added focused tile styling so non-pointer selection is visible.
Key files:
- crates/shepherd-launcher-ui/src/app.rs
- crates/shepherd-launcher-ui/src/grid.rs
- crates/shepherd-launcher-ui/Cargo.toml

View file

@ -0,0 +1,15 @@
# Sway StopCurrent Keybinds
Prompt summary:
- Move keyboard exit handling out of launcher UI code and into `sway.conf`.
- Keep controller home behavior as-is.
- Ensure "exit" uses the API (`StopCurrent`) rather than closing windows directly.
Implemented summary:
- Added a `--stop-current` mode to `shepherd-launcher` that sends `StopCurrent` to shepherdd over IPC and exits.
- Added Sway keybindings for `Alt+F4`, `Ctrl+W`, and `Home` that execute `shepherd-launcher --stop-current`.
- Kept controller home behavior in launcher UI unchanged.
Key files:
- `sway.conf`
- `crates/shepherd-launcher-ui/src/main.rs`

View file

@ -17,6 +17,7 @@ libgdk-pixbuf-xlib-2.0-dev
# Wayland development libraries
libwayland-dev
libxkbcommon-dev
libudev-dev
# X11 (for XWayland support)
libx11-dev

View file

@ -13,3 +13,4 @@ xdg-desktop-portal-wlr
libgtk-4-1
libadwaita-1-0
libgtk4-layer-shell0
libudev1

View file

@ -113,7 +113,7 @@ install_desktop_entry() {
[Desktop Entry]
Name=Shepherd Kiosk
Comment=Shepherd game launcher kiosk mode
Exec=sway -c $SWAY_CONFIG_DIR/$SHEPHERD_SWAY_CONFIG
Exec=sway -c $SWAY_CONFIG_DIR/$SHEPHERD_SWAY_CONFIG --unsupported-gpu
Type=Application
DesktopNames=shepherd
EOF

View file

@ -100,7 +100,7 @@ sway_start_nested() {
trap sway_cleanup EXIT
# Start sway with wayland backend (nested in current session)
WLR_BACKENDS=wayland WLR_LIBINPUT_NO_DEVICES=1 sway -c "$sway_config" &
WLR_BACKENDS=wayland WLR_LIBINPUT_NO_DEVICES=1 sway -c "$sway_config" --unsupported-gpu &
SWAY_PID=$!
info "Sway started with PID $SWAY_PID"

View file

@ -11,6 +11,7 @@ exec_always dbus-update-activation-environment --systemd \
### Variables
set $launcher ./target/debug/shepherd-launcher
set $hud ./target/debug/shepherd-hud
set $stop_current $launcher --stop-current
### Output configuration
# Set up displays (adjust as needed for your hardware)
@ -129,8 +130,11 @@ focus_follows_mouse no
# Hide any title/tab text by using minimal font size
font pango:monospace 1
# Prevent window closing via keybindings (no Alt+F4)
# Windows can only be closed by the application itself
# Session stop keybindings via shepherdd API (does not close windows directly)
# Handled in sway so they work regardless of which client currently has focus.
bindsym --locked Alt+F4 exec $stop_current
bindsym --locked Ctrl+w exec $stop_current
bindsym --locked Home exec $stop_current
# Hide mouse cursor after inactivity
seat * hide_cursor 5000