Files
astral-ruff/crates/ty_test/src/lib.rs
T

838 lines
29 KiB
Rust

use crate::config::{Log, MarkdownTestConfig, SystemKind};
use crate::db::Db;
use anyhow::{anyhow, bail};
use camino::Utf8Path;
use colored::Colorize;
use mdtest::matcher::{self, Failure};
use mdtest::parser::{self, EmbeddedFileSourceMap};
use mdtest::{Failures, FileFailures, MDTEST_TEST_FILTER, MarkdownEdit, TestFile, output_format};
use ruff_db::Db as _;
use ruff_db::cancellation::CancellationTokenSource;
use ruff_db::diagnostic::DiagnosticId;
use ruff_db::files::{File, FileRootKind, system_path_to_file};
use ruff_db::panic::{PanicError, catch_unwind};
use ruff_db::system::{DbWithWritableSystem as _, SystemPath, SystemPathBuf};
use ruff_db::testing::{setup_logging, setup_logging_with_filter};
use ruff_diagnostics::Applicability;
use ruff_source_file::{LineIndex, OneIndexed};
use std::backtrace::BacktraceStatus;
use std::fmt::Write;
use ty_module_resolver::{
Module, SearchPath, SearchPathSettings, list_modules, resolve_module_confident,
};
use ty_python_core::platform::PythonPlatform;
use ty_python_core::program::{FallibleStrategy, Program, ProgramSettings};
use ty_python_semantic::pull_types::pull_types;
use ty_python_semantic::types::UNDEFINED_REVEAL;
use ty_python_semantic::{
PythonEnvironment, PythonVersionSource, PythonVersionWithSource, SysPrefixPathOrigin,
fix_all_diagnostics,
};
mod config;
mod db;
mod external_dependencies;
/// Environment variable gating tests with external dependencies.
///
/// Tests whose configuration specifies `dependencies` are skipped unless this
/// variable is set to exactly `"1"` (see the check in `run_test`).
const MDTEST_EXTERNAL: &str = "MDTEST_EXTERNAL";
/// Run `source` (the contents of the markdown fixture at
/// `absolute_fixture_path`) as a markdown test suite.
///
/// Panics on any test failure, printing the accumulated failure details.
/// `relative_fixture_path` is used for error reporting, `snapshot_path` for
/// insta diagnostic snapshots, and `short_title`/`test_name` for parsing and
/// rerun instructions respectively.
pub fn run(
    absolute_fixture_path: &Utf8Path,
    relative_fixture_path: &Utf8Path,
    source: &str,
    snapshot_path: &Utf8Path,
    short_title: &str,
    test_name: &str,
) -> anyhow::Result<()> {
    let output_format = output_format();
    let suite =
        parse(short_title, source).map_err(|err| anyhow!("Failed to parse fixture: {err}"))?;
    let mut db = Db::setup();
    // Inline-snapshot edits collected across all tests; applied to the
    // markdown file in one pass at the end.
    let mut markdown_edits = vec![];
    // Optional filter restricting which tests of the suite are run.
    let filter = std::env::var(MDTEST_TEST_FILTER).ok();
    let mut any_failures = false;
    // Accumulated human-readable report of all failures in the suite.
    let mut assertion = String::new();
    for test in suite.tests() {
        // The filter matches either a substring of the uncontracted name or
        // the exact full name.
        if filter
            .as_ref()
            .is_some_and(|f| !(test.uncontracted_name().contains(f) || test.name() == *f))
        {
            continue;
        }
        // Per-test tracing as requested by the test's `log` configuration;
        // the guard keeps it active for the duration of this iteration.
        let _tracing = test.configuration().log.as_ref().and_then(|log| match log {
            Log::Bool(enabled) => enabled.then(setup_logging),
            Log::Filter(filter) => setup_logging_with_filter(filter),
        });
        let result = run_test(
            &mut db,
            absolute_fixture_path,
            relative_fixture_path,
            snapshot_path,
            &test,
        );
        // Skipped tests don't get the list/resolve consistency check.
        let inconsistencies = if result
            .as_ref()
            .is_ok_and(|(outcome, _)| outcome.has_been_skipped())
        {
            Ok(())
        } else {
            run_module_resolution_consistency_test(&db)
        };
        let this_test_failed = result.is_err() || inconsistencies.is_err();
        any_failures = any_failures || this_test_failed;
        if this_test_failed && output_format.is_cli() {
            let _ = writeln!(assertion, "\n\n{}\n", test.name().bold().underline());
        }
        match result {
            Ok((_, edits)) => markdown_edits.extend(edits),
            Err(failures) => {
                // Map line numbers from the embedded files back to absolute
                // line numbers in the markdown source.
                let md_index = LineIndex::from_source_text(source);
                for test_failures in failures {
                    let source_map =
                        EmbeddedFileSourceMap::new(&md_index, test_failures.backtick_offsets);
                    for (relative_line_number, failures) in test_failures.by_line.iter() {
                        let file = relative_fixture_path.as_str();
                        let absolute_line_number =
                            match source_map.to_absolute_line_number(relative_line_number) {
                                Ok(line_number) => line_number,
                                // The failure points past the last statement:
                                // a trailing assertion comment.
                                Err(last_line_number) => {
                                    output_format.write_error(
                                        &mut assertion,
                                        file,
                                        last_line_number,
                                        &Failure::new(
                                            "Found a trailing assertion comment \
                                            (e.g., `# revealed:` or `# error:`) \
                                            not followed by any statement.",
                                        ),
                                    );
                                    continue;
                                }
                            };
                        for failure in failures {
                            output_format.write_error(
                                &mut assertion,
                                file,
                                absolute_line_number,
                                failure,
                            );
                        }
                    }
                }
            }
        }
        if let Err(inconsistencies) = inconsistencies {
            any_failures = true;
            for inconsistency in inconsistencies {
                output_format.write_inconsistency(
                    &mut assertion,
                    relative_fixture_path,
                    &inconsistency,
                );
            }
        }
        if this_test_failed && output_format.is_cli() {
            // Show how to rerun exactly this test via the filter variable.
            let escaped_test_name = test.name().replace('\'', "\\'");
            let _ = writeln!(
                assertion,
                "\nTo rerun this specific test, \
                set the environment variable: {MDTEST_TEST_FILTER}='{escaped_test_name}'",
            );
            let _ = writeln!(
                assertion,
                "{MDTEST_TEST_FILTER}='{escaped_test_name}' cargo test -p ty_python_semantic \
                --test mdtest -- {test_name}",
            );
            let _ = writeln!(assertion, "\n{}", "-".repeat(50));
        }
    }
    if !markdown_edits.is_empty() {
        mdtest::try_apply_markdown_edits(absolute_fixture_path, source, markdown_edits);
    }
    assert!(!any_failures, "{}", &assertion);
    Ok(())
}
/// The result of running a single markdown test to completion.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum TestOutcome {
    /// The test ran and produced no failures.
    Success,
    /// The test was not run (e.g. it requires external dependencies that
    /// were not enabled).
    Skipped,
}

impl TestOutcome {
    /// Returns `true` if the test was skipped rather than executed.
    const fn has_been_skipped(self) -> bool {
        match self {
            TestOutcome::Skipped => true,
            TestOutcome::Success => false,
        }
    }
}
/// Runs a single markdown `test` against a freshly-reset database state.
///
/// On success, returns the [`TestOutcome`] together with any inline-snapshot
/// edits that should be applied back to the markdown source. On failure,
/// returns the per-file [`Failures`].
fn run_test(
    db: &mut db::Db,
    absolute_fixture_path: &Utf8Path,
    relative_fixture_path: &Utf8Path,
    snapshot_path: &Utf8Path,
    test: &parser::MarkdownTest<'_, '_, MarkdownTestConfig>,
) -> Result<(TestOutcome, Vec<MarkdownEdit>), Failures> {
    // Initialize the system and remove all files and directories to reset the system to a clean state.
    match test.configuration().system.unwrap_or_default() {
        SystemKind::InMemory => {
            db.use_in_memory_system();
        }
        SystemKind::Os => {
            // Back the test with a real temporary directory on disk;
            // canonicalize and simplify the path before handing it to the db.
            let dir = tempfile::TempDir::new().expect("Creating a temporary directory to succeed");
            let root_path = dir
                .path()
                .canonicalize()
                .expect("Canonicalizing to succeed");
            let root_path = SystemPathBuf::from_path_buf(root_path)
                .expect("Temp directory to be a valid UTF8 path")
                .simplified()
                .to_path_buf();
            db.use_os_system_with_temp_dir(root_path, dir);
        }
    }
    let project_root = SystemPathBuf::from("/src");
    db.create_directory_all(&project_root)
        .expect("Creating the project root to succeed");
    db.files()
        .try_add_root(db, &project_root, FileRootKind::Project);
    let src_path = project_root.clone();
    let custom_typeshed_path = test.configuration().typeshed();
    let python_version = test.configuration().python_version().unwrap_or_default();
    // Setup virtual environment with dependencies if specified
    let venv_for_external_dependencies = SystemPathBuf::from("/.venv");
    if let Some(dependencies) = test.configuration().dependencies() {
        // Tests with external dependencies only run when MDTEST_EXTERNAL=1.
        if !std::env::var(MDTEST_EXTERNAL).is_ok_and(|v| v == "1") {
            return Ok((TestOutcome::Skipped, vec![]));
        }
        let python_platform = test.configuration().python_platform().expect(
            "Tests with external dependencies must specify `python-platform` in the configuration",
        );
        // The lockfile lives next to the fixture, with a `.lock` extension.
        let lockfile_path = absolute_fixture_path.with_extension("lock");
        external_dependencies::setup_venv(
            db,
            dependencies,
            python_version,
            &python_platform,
            &venv_for_external_dependencies,
            &lockfile_path,
        )
        .expect("Failed to setup in-memory virtual environment with dependencies");
    }
    let mut typeshed_files = vec![];
    let mut has_custom_versions_file = false;
    // Write every embedded code block to the file system and collect the
    // Python files under the project root for checking.
    let test_files: Vec<_> = test
        .files()
        .filter_map(|embedded| {
            if embedded.lang == "ignore" {
                return None;
            }
            assert!(
                matches!(
                    embedded.lang,
                    "py" | "pyi" | "python" | "text" | "cfg" | "pth"
                ),
                "Supported file types are: py (or python), pyi, text, cfg and ignore"
            );
            let mut full_path = embedded.full_path(&project_root);
            if let Some(relative_path_to_custom_typeshed) = custom_typeshed_path
                .and_then(|typeshed| full_path.strip_prefix(typeshed.join("stdlib")).ok())
            {
                if relative_path_to_custom_typeshed.as_str() == "VERSIONS" {
                    has_custom_versions_file = true;
                } else if relative_path_to_custom_typeshed
                    .extension()
                    .is_some_and(|ext| ext == "pyi")
                {
                    // Remember stub files so a VERSIONS file can be
                    // synthesized further below if the test didn't provide one.
                    typeshed_files.push(relative_path_to_custom_typeshed.to_path_buf());
                }
            } else if let Some(component_index) = full_path
                .components()
                .position(|c| c.as_str() == "<path-to-site-packages>")
            {
                // If the path contains `<path-to-site-packages>`, we need to replace it with the
                // actual site-packages directory based on the Python platform and version.
                let mut components = full_path.components();
                let mut new_path: SystemPathBuf =
                    components.by_ref().take(component_index).collect();
                if cfg!(target_os = "windows") {
                    new_path.extend(["Lib", "site-packages"]);
                } else {
                    new_path.push("lib");
                    new_path.push(format!("python{python_version}"));
                    new_path.push("site-packages");
                }
                new_path.extend(components.skip(1));
                full_path = new_path;
            }
            let temp_string;
            let to_write = if embedded.lang == "pth" && !embedded.code.starts_with('/') {
                // Make any relative .pths be relative to src_path
                temp_string = format!("{src_path}/{}", embedded.code);
                &*temp_string
            } else {
                &*embedded.code
            };
            db.write_file(&full_path, to_write).unwrap();
            if !(full_path.starts_with(&src_path)
                && matches!(embedded.lang, "py" | "python" | "pyi"))
            {
                // These files need to be written to the file system (above), but we don't run any checks on them.
                return None;
            }
            let file = system_path_to_file(db, full_path).unwrap();
            Some(TestFile {
                file,
                code_blocks: embedded.python_code_blocks.clone(),
            })
        })
        .collect();
    // Create a custom typeshed `VERSIONS` file if none was provided.
    if let Some(typeshed_path) = custom_typeshed_path {
        db.files()
            .try_add_root(db, typeshed_path, FileRootKind::LibrarySearchPath);
        if !has_custom_versions_file {
            let versions_file = typeshed_path.join("stdlib/VERSIONS");
            let contents = typeshed_files
                .iter()
                .fold(String::new(), |mut content, path| {
                    // This is intentionally kept simple:
                    // derive the module name from the stub's relative path.
                    let module_name = path
                        .as_str()
                        .trim_end_matches(".pyi")
                        .trim_end_matches("/__init__")
                        .replace('/', ".");
                    let _ = writeln!(content, "{module_name}: 3.8-");
                    content
                });
            db.write_file(&versions_file, contents).unwrap();
        }
    }
    let configuration = test.configuration();
    // Determine site-packages: from the external-dependency venv, from an
    // explicitly configured Python environment, or none at all.
    let site_packages_paths = if configuration.dependencies().is_some() {
        // If dependencies were specified, use the venv we just set up
        let environment = PythonEnvironment::new(
            &venv_for_external_dependencies,
            SysPrefixPathOrigin::PythonCliFlag,
            db.system(),
        )
        .expect("Python environment to point to a valid path");
        environment
            .site_packages_paths(db.system())
            .expect("Python environment to be valid")
            .into_vec()
    } else if let Some(python) = configuration.python() {
        let environment =
            PythonEnvironment::new(python, SysPrefixPathOrigin::PythonCliFlag, db.system())
                .expect("Python environment to point to a valid path");
        environment
            .site_packages_paths(db.system())
            .expect("Python environment to be valid")
            .into_vec()
    } else {
        vec![]
    };
    // Make any relative extra-paths be relative to src_path
    let extra_paths = configuration
        .extra_paths()
        .unwrap_or_default()
        .iter()
        .map(|path| {
            if path.is_absolute() {
                path.clone()
            } else {
                src_path.join(path)
            }
        })
        .collect();
    let settings = ProgramSettings {
        python_version: PythonVersionWithSource {
            version: python_version,
            source: PythonVersionSource::Cli,
        },
        python_platform: configuration
            .python_platform()
            .unwrap_or(PythonPlatform::Identifier("linux".to_string())),
        search_paths: SearchPathSettings {
            src_roots: vec![src_path],
            extra_paths,
            custom_typeshed: custom_typeshed_path.map(SystemPath::to_path_buf),
            site_packages_paths,
            real_stdlib_path: None,
        }
        .to_search_paths(db.system(), db.vendored(), &FallibleStrategy)
        .expect("Failed to resolve search path settings"),
    };
    Program::init_or_update(db, settings);
    db.update_analysis_options(configuration.analysis.as_ref());
    db.set_verbosity(test.configuration().verbose());
    let mut all_diagnostics = vec![];
    // Edits for updating changed inline snapshots.
    let mut markdown_edits = vec![];
    let mut any_pull_types_failures = false;
    let mut panic_info = None;
    // Check each file: match produced diagnostics against the test's inline
    // assertions, then run the "pull types" pass catching any panics.
    let mut failures: Failures = test_files
        .iter()
        .filter_map(|test_file| {
            let mdtest_result = attempt_test(db, ty_python_semantic::Db::check_file, test_file);
            let diagnostics = match mdtest_result {
                Ok(diagnostics) => diagnostics,
                Err(failures) => {
                    if test.should_expect_panic().is_ok() {
                        // The test declared an expected panic: record it for
                        // validation below instead of reporting a failure.
                        panic_info = Some(failures.info);
                        return None;
                    }
                    return Some(failures.into_file_failures(db, "run mdtest", None));
                }
            };
            let failure = match matcher::match_file(db, test_file.file, &diagnostics).and_then(
                |inline_diagnostics| {
                    mdtest::validate_inline_snapshot(
                        db,
                        "ty",
                        test_file,
                        &inline_diagnostics,
                        &mut markdown_edits,
                    )
                },
            ) {
                Ok(()) => None,
                Err(line_failures) => Some(FileFailures {
                    backtick_offsets: test_file.to_code_block_backtick_offsets(),
                    by_line: line_failures,
                }),
            };
            all_diagnostics.extend(diagnostics);
            let pull_types_result = attempt_test(db, pull_types, test_file);
            match pull_types_result {
                Ok(()) => {}
                Err(failures) => {
                    any_pull_types_failures = true;
                    if !test.should_skip_pulling_types() {
                        return Some(failures.into_file_failures(
                            db,
                            "\"pull types\"",
                            Some(
                                "Note: either fix the panic or add the `<!-- pull-types:skip -->` \
                                directive to this test",
                            ),
                        ));
                    }
                }
            }
            failure
        })
        .collect();
    // Validate expected-panic directives against what actually happened.
    match panic_info {
        Some(panic_info) => {
            let expected_message = test
                .should_expect_panic()
                .expect("panic_info is only set when `should_expect_panic` is `Ok`");
            let message = panic_info
                .payload
                .as_str()
                .unwrap_or("Box<dyn Any>")
                .to_string();
            if let Some(expected_message) = expected_message {
                assert!(
                    message.contains(expected_message),
                    "Test `{}` is expected to panic with `{expected_message}`, but panicked with `{message}` instead.",
                    test.name()
                );
            }
        }
        None => {
            if let Ok(message) = test.should_expect_panic() {
                if let Some(message) = message {
                    panic!(
                        "Test `{}` is expected to panic with `{message}`, but it didn't.",
                        test.name()
                    );
                }
                panic!("Test `{}` is expected to panic but it didn't.", test.name());
            }
        }
    }
    // A `pull-types:skip` directive that is no longer needed is itself a failure.
    if test.should_skip_pulling_types() && !any_pull_types_failures {
        let mut by_line = matcher::FailuresByLine::default();
        by_line.push(
            OneIndexed::from_zero_indexed(0),
            vec![Failure::new(
                "Remove the `<!-- pull-types:skip -->` directive from this test: pulling types \
                succeeded for all files in the test.",
            )],
        );
        let failure = FileFailures {
            backtick_offsets: test_files[0].to_code_block_backtick_offsets(),
            by_line,
        };
        failures.push(failure);
    }
    if test.should_snapshot_diagnostics() {
        assert!(
            !all_diagnostics.is_empty(),
            "Test `{}` requested snapshotting diagnostics but it didn't produce any.",
            test.name()
        );
        // Filter out `revealed-type` and `undefined-reveal` diagnostics from snapshots,
        // since they make snapshots very noisy!
        let snapshot = mdtest::create_diagnostic_snapshot(
            db,
            "ty",
            relative_fixture_path,
            test,
            all_diagnostics.iter().filter(|diagnostic| {
                diagnostic.id() != DiagnosticId::RevealedType
                    && !diagnostic.id().is_lint_named(&UNDEFINED_REVEAL.name())
            }),
        );
        let name = test.name().replace(' ', "_").replace(':', "__");
        insta::with_settings!(
            {
                snapshot_path => snapshot_path,
                input_file => name.clone(),
                filters => vec![(r"\\", "/")],
                prepend_module_to_snapshot => false,
            },
            { insta::assert_snapshot!(name, snapshot) }
        );
    }
    // Test to fix all fixable diagnostics and verify that they don't introduce any syntax errors.
    // But don't try to run fixes for tests that are expected to panic.
    if test.should_expect_panic().is_err() {
        let token_source = CancellationTokenSource::new();
        let result = fix_all_diagnostics(
            db,
            all_diagnostics,
            Applicability::Unsafe,
            &token_source.token(),
        )
        .expect("to succeed because fixing is never cancelled");
        tracing::debug!("Fixed {} diagnostics", result.count);
        // Only fatal internal errors produced by fixing are reported.
        let mut fatals = result.diagnostics;
        fatals.retain(|diagnostic| diagnostic.id() == DiagnosticId::InternalError);
        for diagnostic in fatals {
            let ty_file = diagnostic.expect_primary_span().expect_ty_file();
            // Attribute the failure to the file the diagnostic points at,
            // falling back to the first test file.
            let test_file = test_files
                .iter()
                .find(|test_file| test_file.file == ty_file)
                .unwrap_or(&test_files[0]);
            let mut by_line = matcher::FailuresByLine::default();
            by_line.push(
                OneIndexed::from_zero_indexed(0),
                vec![Failure::new(format_args!(
                    "Fixing the diagnostics caused a fatal error:\n{}",
                    mdtest::render_diagnostic(db, "ty", &diagnostic)
                ))],
            );
            let failure = FileFailures {
                backtick_offsets: test_file.to_code_block_backtick_offsets(),
                by_line,
            };
            failures.push(failure);
        }
    }
    if failures.is_empty() {
        Ok((TestOutcome::Success, markdown_edits))
    } else {
        Err(failures)
    }
}
/// Reports an inconsistency between "list modules" and "resolve module."
///
/// Values of this type are only constructed when `from_list` and
/// `from_resolve` are not equivalent.
struct ModuleInconsistency<'db> {
    /// The database, retained so the `Display` impl can render module details.
    db: &'db db::Db,
    /// The module returned from `list_modules`.
    from_list: Module<'db>,
    /// The module returned, if any, from `resolve_module_confident`.
    from_resolve: Option<Module<'db>>,
}
/// Tests that "list modules" is consistent with "resolve module."
///
/// This only checks that everything returned by `list_modules` is the
/// identical module we get back from `resolve_module_confident`. It does not
/// check that all possible outputs of module resolution are captured by
/// module listing.
fn run_module_resolution_consistency_test(db: &db::Db) -> Result<(), Vec<ModuleInconsistency<'_>>> {
    // TODO: For now `list_modules` does not partake in desperate module
    // resolution, so we only compare against confident module resolution.
    let inconsistencies: Vec<ModuleInconsistency<'_>> = list_modules(db)
        .into_iter()
        .filter_map(|from_list| match resolve_module_confident(db, from_list.name(db)) {
            // Listed but not resolvable at all: always an inconsistency.
            None => Some(ModuleInconsistency {
                db,
                from_list,
                from_resolve: None,
            }),
            // Resolvable, but to a different module than the one listed.
            Some(from_resolve) if from_list != from_resolve => Some(ModuleInconsistency {
                db,
                from_list,
                from_resolve: Some(from_resolve),
            }),
            // Identical: consistent, nothing to report.
            _ => None,
        })
        .collect();
    if inconsistencies.is_empty() {
        Ok(())
    } else {
        Err(inconsistencies)
    }
}
impl std::fmt::Display for ModuleInconsistency<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // Renders a module as `Module(name=…, file=…, kind=…, search_path=…,
        // known=…)`, substituting "N/A" for any missing component.
        fn fmt_module(
            db: &db::Db,
            f: &mut std::fmt::Formatter,
            module: &Module<'_>,
        ) -> std::fmt::Result {
            let name = module.name(db);
            let path = module
                .file(db)
                .map(|file| file.path(db).to_string())
                .unwrap_or_else(|| "N/A".to_string());
            let search_path = module
                .search_path(db)
                .map(SearchPath::to_string)
                .unwrap_or_else(|| "N/A".to_string());
            let known = module
                .known(db)
                .map(|known| known.to_string())
                .unwrap_or_else(|| "N/A".to_string());
            write!(
                f,
                "Module(\
                name={name}, \
                file={path}, \
                kind={kind:?}, \
                search_path={search_path}, \
                known={known}\
                )",
                kind = module.kind(db),
            )
        }
        // First the module found by listing, then what resolution returned.
        write!(f, "Found ")?;
        fmt_module(self.db, f, &self.from_list)?;
        match self.from_resolve {
            None => write!(
                f,
                " when listing modules, but `resolve_module` returned `None`",
            )?,
            Some(ref got) => {
                write!(f, " when listing modules, but `resolve_module` returned ")?;
                fmt_module(self.db, f, got)?;
            }
        }
        Ok(())
    }
}
/// Run a function over an embedded test file, catching any panics that occur
/// in the process.
///
/// If no panic occurs, the result of the function is returned as an `Ok()`
/// variant.
///
/// If a panic occurs, the captured panic together with the offending file is
/// returned as an `Err()` variant, to be formatted into a diagnostic message
/// by `ty_test`.
fn attempt_test<'db, 'a, T, F>(
    db: &'db Db,
    test_fn: F,
    test_file: &'a TestFile<'a>,
) -> Result<T, AttemptTestError<'a>>
where
    F: FnOnce(&'db dyn ty_python_semantic::Db, File) -> T + std::panic::UnwindSafe,
{
    match catch_unwind(|| test_fn(db, test_file.file)) {
        Ok(value) => Ok(value),
        Err(info) => Err(AttemptTestError { info, test_file }),
    }
}
/// A panic captured while running a test function over an embedded file.
struct AttemptTestError<'a> {
    /// The captured panic (payload, location, and backtraces).
    info: PanicError,
    /// The embedded test file that was being processed when the panic occurred.
    test_file: &'a TestFile<'a>,
}
impl AttemptTestError<'_> {
    /// Converts this captured panic into a [`FileFailures`] report attached
    /// to the first line of the originating test file.
    ///
    /// `action` describes what was being attempted (e.g. `"run mdtest"`) and
    /// is interpolated into the failure header; `clarification` is an optional
    /// extra line of guidance shown after the header.
    fn into_file_failures(
        self,
        db: &Db,
        action: &str,
        clarification: Option<&str>,
    ) -> FileFailures {
        let info = self.info;
        let mut by_line = matcher::FailuresByLine::default();
        let mut messages = vec![];
        // Header: where the panic occurred, if a location was captured.
        match info.location {
            Some(location) => messages.push(Failure::new(format_args!(
                "Attempting to {action} caused a panic at {location}"
            ))),
            None => messages.push(Failure::new(format_args!(
                "Attempting to {action} caused a panic at an unknown location",
            ))),
        }
        if let Some(clarification) = clarification {
            messages.push(Failure::new(clarification));
        }
        messages.push(Failure::new(""));
        match info.payload.as_str() {
            Some(message) => messages.push(Failure::new(message)),
            // Mimic the default panic hook's rendering of the panic payload if it's
            // not a string.
            None => messages.push(Failure::new("Box<dyn Any>")),
        }
        messages.push(Failure::new(""));
        if let Some(backtrace) = info.backtrace {
            match backtrace.status() {
                BacktraceStatus::Disabled => {
                    // Fixed: this hint was previously missing the word
                    // "display" ("… environment variable to a backtrace").
                    let msg = "run with `RUST_BACKTRACE=1` environment variable to \
                               display a backtrace";
                    messages.push(Failure::new(msg));
                }
                BacktraceStatus::Captured => {
                    messages.extend(backtrace.to_string().split('\n').map(Failure::new));
                }
                _ => {}
            }
        }
        // Include the salsa query backtrace, if one was captured; rendering
        // it requires the database to be attached.
        if let Some(backtrace) = info.salsa_backtrace {
            salsa::attach(db, || {
                messages.extend(format!("{backtrace:#}").split('\n').map(Failure::new));
            });
        }
        by_line.push(OneIndexed::from_zero_indexed(0), messages);
        FileFailures {
            backtick_offsets: self.test_file.to_code_block_backtick_offsets(),
            by_line,
        }
    }
}
/// Parses `source` into a markdown test suite, rejecting files that declare
/// `[project]` dependencies in more than one configuration section.
fn parse<'s>(
    short_title: &'s str,
    source: &'s str,
) -> anyhow::Result<parser::MarkdownTestSuite<'s, MarkdownTestConfig>> {
    // Tracks whether a configuration block with dependencies was already seen.
    let mut seen_dependencies = false;
    parser::parse::<MarkdownTestConfig>(short_title, source, |config| {
        if config.dependencies().is_none() {
            return Ok(());
        }
        if seen_dependencies {
            bail!(
                "Multiple sections with `[project]` dependencies in the same file are not allowed. \
                External dependencies must be specified in a single top-level configuration block."
            );
        }
        seen_dependencies = true;
        Ok(())
    })
}
#[cfg(test)]
mod tests {
    use ruff_python_trivia::textwrap::dedent;

    /// A markdown file may declare `[project]` dependencies at most once;
    /// `parse` must reject a fixture containing two such sections.
    #[test]
    fn multiple_sections_with_dependencies_not_allowed() {
        // Two sections, each with its own `[project]` dependencies block.
        let source = dedent(
            r#"
# First section
```toml
[project]
dependencies = ["pydantic==2.12.2"]
```
```py
x = 1
```
# Second section
```toml
[project]
dependencies = ["numpy==2.0.0"]
```
```py
y = 2
```
"#,
        );
        let err = super::parse("file.md", &source).expect_err("Should fail to parse");
        assert_eq!(
            err.to_string(),
            "Multiple sections with `[project]` dependencies in the same file are not allowed. \
            External dependencies must be specified in a single top-level configuration block."
        );
    }
}