Rust Testing Patterns Comprehensive Rust testing patterns for writing reliable, maintainable tests following TDD methodology. When to Use Writing new Rust functions, methods, or traits Adding test coverage to existing code Creating benchmarks for performance-critical code Implementing property-based tests for input validation Following TDD workflow in Rust projects How It Works Identify target code — Find the function, trait, or module to test Write a test — Use
`#[test]` in a `#[cfg(test)]`
module, rstest for parameterized tests, or proptest for property-based tests Mock dependencies — Use mockall to isolate the unit under test Run tests (RED) — Verify the test fails with the expected error Implement (GREEN) — Write minimal code to pass Refactor — Improve while keeping tests green Check coverage — Use cargo-llvm-cov, target 80%+ TDD Workflow for Rust The RED-GREEN-REFACTOR Cycle RED → Write a failing test first GREEN → Write minimal code to pass the test REFACTOR → Improve code while keeping tests green REPEAT → Continue with next requirement Step-by-Step TDD in Rust // RED: Write test first, use todo!() as placeholder pub fn add ( a : i32 , b : i32 ) -> i32 { todo! ( ) }
#[cfg(test)]
mod tests { use super :: * ;
#[test]
fn test_add ( ) { assert_eq! ( add ( 2 , 3 ) , 5 ) ; } } // cargo test → panics at 'not yet implemented' // GREEN: Replace todo!() with minimal implementation pub fn add ( a : i32 , b : i32 ) -> i32 { a + b } // cargo test → PASS, then REFACTOR while keeping tests green Unit Tests Module-Level Test Organization // src/user.rs pub struct User { pub name : String , pub email : String , } impl User { pub fn new ( name : impl Into < String
> , email : impl Into < String >
) -> Result < Self , String >
{ let email = email . into ( ) ; if ! email . contains ( '@' ) { return Err ( format! ( "invalid email: {email}" ) ) ; } Ok ( Self { name : name . into ( ) , email } ) } pub fn display_name ( & self ) -> & str { & self . name } }
#[cfg(test)]
mod tests { use super :: * ;
#[test]
fn creates_user_with_valid_email ( ) { let user = User :: new ( "Alice" , "alice@example.com" ) . unwrap ( ) ; assert_eq! ( user . display_name ( ) , "Alice" ) ; assert_eq! ( user . email , "alice@example.com" ) ; }
#[test]
fn rejects_invalid_email ( ) { let result = User :: new ( "Bob" , "not-an-email" ) ; assert! ( result . is_err ( ) ) ; assert! ( result . unwrap_err ( ) . contains ( "invalid email" ) ) ; } } Assertion Macros assert_eq! ( 2 + 2 , 4 ) ; // Equality assert_ne! ( 2 + 2 , 5 ) ; // Inequality assert! ( vec! [ 1 , 2 , 3 ] . contains ( & 2 ) ) ; // Boolean assert_eq! ( value , 42 , "expected 42 but got {value}" ) ; // Custom message assert! ( ( 0.1_f64 + 0.2 - 0.3 ) . abs ( ) < f64 :: EPSILON ) ; // Float comparison Error and Panic Testing Testing Result Returns
#[test]
fn parse_returns_error_for_invalid_input ( ) { let result = parse_config ( "}{invalid" ) ; assert! ( result . is_err ( ) ) ; // Assert specific error variant let err = result . unwrap_err ( ) ; assert! ( matches! ( err , ConfigError :: ParseError ( _ ) ) ) ; }
#[test]
fn parse_succeeds_for_valid_input ( ) -> Result < ( ) , Box < dyn std :: error :: Error > >
{ let config = parse_config ( r#"{"port": 8080}"# ) ? ; assert_eq! ( config . port , 8080 ) ; Ok ( ( ) ) // Test fails if any ? returns Err } Testing Panics
#[test]
#[should_panic]
fn panics_on_empty_input ( ) { process ( & [ ] ) ; }
#[test]
#[should_panic(expected = "index out of bounds")]
fn panics_with_specific_message ( ) { let v : Vec < i32 >
= vec! [ ] ; let _ = v [ 0 ] ; } Integration Tests File Structure my_crate/ ├── src/ │ └── lib.rs ├── tests/ # Integration tests │ ├── api_test.rs # Each file is a separate test binary │ ├── db_test.rs │ └── common/ # Shared test utilities │ └── mod.rs Writing Integration Tests // tests/api_test.rs use my_crate :: { App , Config } ;
#[test]
fn full_request_lifecycle ( ) { let config = Config :: test_default ( ) ; let app = App :: new ( config ) ; let response = app . handle_request ( "/health" ) ; assert_eq! ( response . status , 200 ) ; assert_eq! ( response . body , "OK" ) ; } Async Tests With Tokio
#[tokio::test]
async fn fetches_data_successfully ( ) { let client = TestClient :: new ( ) . await ; let result = client . get ( "/data" ) . await ; assert! ( result . is_ok ( ) ) ; assert_eq! ( result . unwrap ( ) . items . len ( ) , 3 ) ; }
#[tokio::test]
async fn handles_timeout ( ) { use std :: time :: Duration ; let result = tokio :: time :: timeout ( Duration :: from_millis ( 100 ) , slow_operation ( ) , ) . await ; assert! ( result . is_err ( ) , "should have timed out" ) ; } Test Organization Patterns Parameterized Tests with rstest use rstest :: { rstest , fixture } ;
#[rstest]
#[case("hello", 5)]
#[case("", 0)]
#[case("rust", 4)]
fn test_string_length ( #[case] input : & str , #[case] expected : usize ) { assert_eq! ( input . len ( ) , expected ) ; } // Fixtures
#[fixture]
fn test_db ( ) -> TestDb { TestDb :: new_in_memory ( ) }
#[rstest]
fn test_insert ( test_db : TestDb ) { test_db . insert ( "key" , "value" ) ; assert_eq! ( test_db . get ( "key" ) , Some ( "value" . into ( ) ) ) ; } Test Helpers
#[cfg(test)]
mod tests { use super :: * ; /// Creates a test user with sensible defaults. fn make_user ( name : & str ) -> User { User :: new ( name , & format! ( "{name}@test.com" ) ) . unwrap ( ) }
#[test]
fn user_display ( ) { let user = make_user ( "alice" ) ; assert_eq! ( user . display_name ( ) , "alice" ) ; } } Property-Based Testing with proptest Basic Property Tests use proptest :: prelude :: * ; proptest! {
#[test]
fn encode_decode_roundtrip ( input in ".*" ) { let encoded = encode ( & input ) ; let decoded = decode ( & encoded ) . unwrap ( ) ; assert_eq! ( input , decoded ) ; }
#[test]
fn sort_preserves_length ( mut vec in prop :: collection :: vec ( any :: < i32 >
( ) , 0 .. 100 ) ) { let original_len = vec . len ( ) ; vec . sort ( ) ; assert_eq! ( vec . len ( ) , original_len ) ; }
#[test]
fn sort_produces_ordered_output ( mut vec in prop :: collection :: vec ( any :: < i32 >
( ) , 0 .. 100 ) ) { vec . sort ( ) ; for window in vec . windows ( 2 ) { assert! ( window [ 0 ] <= window [ 1 ] ) ; } } } Custom Strategies use proptest :: prelude :: * ; fn valid_email ( ) -> impl Strategy < Value = String
> { ( "[a-z]{1,10}" , "[a-z]{1,5}" ) . prop_map ( | ( user , domain ) | format! ( "{user}@{domain}.com" ) ) } proptest! {
#[test]
fn accepts_valid_emails ( email in valid_email ( ) ) { assert! ( User :: new ( "Test" , & email ) . is_ok ( ) ) ; } } Mocking with mockall Trait-Based Mocking use mockall :: { automock , predicate :: eq } ;
#[automock]
trait UserRepository { fn find_by_id ( & self , id : u64 ) -> Option < User
> ; fn save ( & self , user : & User ) -> Result < ( ) , StorageError >
; }
#[test]
fn service_returns_user_when_found ( ) { let mut mock = MockUserRepository :: new ( ) ; mock . expect_find_by_id ( ) . with ( eq ( 42 ) ) . times ( 1 ) . returning ( | _ | Some ( User { id : 42 , name : "Alice" . into ( ) } ) ) ; let service = UserService :: new ( Box :: new ( mock ) ) ; let user = service . get_user ( 42 ) . unwrap ( ) ; assert_eq! ( user . name , "Alice" ) ; }
[test]
fn
service_returns_none_when_not_found
(
)
{
let
mut
mock
=
MockUserRepository
::
new
(
)
;
mock
.
expect_find_by_id
(
)
.
returning
(
|
_
|
None
)
;
let
service
=
UserService
::
new
(
Box
::
new
(
mock
)
)
;
assert!
(
service
.
get_user
(
99
)
.
is_none
(
)
)
;
}
Doc Tests
Executable Documentation
/// Adds two numbers together.
///
/// # Examples
///
/// ```
/// use my_crate::add;
///
/// assert_eq!(add(2, 3), 5);
/// assert_eq!(add(-1, 1), 0);
/// ```
pub fn add(a: i32, b: i32) -> i32 {
    a + b
}
/// Parses a config string.
///
/// # Errors
///
/// Returns `Err` if the input is not valid TOML.
///
/// ```no_run
/// use my_crate::parse_config;
///
/// let config = parse_config(r#"port = 8080"#).unwrap();
/// assert_eq!(config.port, 8080);
/// ```
///
/// ```no_run
/// use my_crate::parse_config;
///
/// assert!(parse_config("}{invalid").is_err());
/// ```
pub fn parse_config(input: &str) -> Result<Config, ParseError>
{ todo! ( ) } Benchmarking with Criterion
Cargo.toml
[ dev-dependencies ] criterion = { version = "0.5" , features = [ "html_reports" ] } [ [ bench ] ] name = "benchmark" harness = false // benches/benchmark.rs use criterion :: { black_box , criterion_group , criterion_main , Criterion } ; fn fibonacci ( n : u64 ) -> u64 { match n { 0 | 1 => n , _ => fibonacci ( n - 1 ) + fibonacci ( n - 2 ) , } } fn bench_fibonacci ( c : & mut Criterion ) { c . bench_function ( "fib 20" , | b | b . iter ( | | fibonacci ( black_box ( 20 ) ) ) ) ; } criterion_group! ( benches , bench_fibonacci ) ; criterion_main! ( benches ) ; Test Coverage Running Coverage
Install: cargo install cargo-llvm-cov (or use taiki-e/install-action in CI)
cargo llvm-cov
Summary
cargo llvm-cov --html
HTML report
cargo llvm-cov --lcov
lcov.info
LCOV format for CI
cargo llvm-cov --fail-under-lines 80
Fail if below threshold
Coverage Targets Code Type Target Critical business logic 100% Public API 90%+ General code 80%+ Generated / FFI bindings Exclude Testing Commands cargo test
Run all tests
cargo test -- --nocapture
Show println output
cargo test test_name
Run tests matching pattern
cargo test --lib
Unit tests only
cargo test --test api_test
Integration tests only
cargo test --doc
Doc tests only
cargo test --no-fail-fast
Don't stop on first failure
cargo test -- --ignored
Run ignored tests
Best Practices DO: Write tests FIRST (TDD) Use
#[cfg(test)]
modules for unit tests Test behavior, not implementation Use descriptive test names that explain the scenario Prefer assert_eq! over assert! for better error messages Use ? in tests that return Result for cleaner error output Keep tests independent — no shared mutable state DON'T: Use
#[should_panic]
when you can test Result::is_err() instead Mock everything — prefer integration tests when feasible Ignore flaky tests — fix or quarantine them Use sleep() in tests — use channels, barriers, or tokio::time::pause() Skip error path testing CI Integration
GitHub Actions
test:
  runs-on: ubuntu-latest
  steps:
    - uses: actions/checkout@v4
    - uses: dtolnay/rust-toolchain@stable
      with:
        components: clippy, rustfmt
    - name: Check formatting
      run: cargo fmt --check
    - name: Clippy
      run: cargo clippy -- -D warnings
    - name: Run tests
      run: cargo test
    - uses: taiki-e/install-action@cargo-llvm-cov
    - name: Coverage
      run: cargo llvm-cov --fail-under-lines 80
Remember
Tests are documentation. They show how your code is meant to be used. Write them clearly and keep them up to date.